blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9751c62ae2f0b7c0cb8d4e43a291b082ce27b46d
|
8306bfb1438a2516d3f9e7bea51f9d805793798d
|
/tests/testthat/test-all-na.R
|
f914eab53bfda5afec41ee96228dbbf3a1296ac4
|
[] |
no_license
|
cran/naniar
|
849ad432eea4e343ffc4302b3ae7612759f9a552
|
30710de1ca289d1dd994e203c650bebc62a61a0f
|
refs/heads/master
| 2023-02-21T19:46:58.125455
| 2023-02-02T08:50:02
| 2023-02-02T08:50:02
| 99,764,801
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 638
|
r
|
test-all-na.R
|
# Fixtures: fully-missing, fully-complete, and partially-missing vectors.
misses <- c(NA, NA, NA)
complete <- c(1, 2, 3)
mixture <- c(NA, 1, NA)
test_that("all_na returns TRUE when all NA",{
  expect_true(all_na(misses))
})
test_that("all_complete returns FALSE when all missing",{
  expect_false(all_complete(misses))
})
test_that("all_complete returns TRUE when all complete",{
  expect_true(all_complete(complete))
})
test_that("all_na returns FALSE when all complete",{
  expect_false(all_na(complete))
})
test_that("all_na returns FALSE when mixture of missings",{
  expect_false(all_na(mixture))
})
test_that("all_complete returns FALSE when mixture of missings",{
  # Bug fix: this test previously called all_na(mixture), duplicating the
  # preceding test and leaving all_complete() untested for mixed input.
  expect_false(all_complete(mixture))
})
|
cd4cdcb8311d48cf146a4147890f52397790c00b
|
488854749b8d6c1e5f1db64dd6c1656aedb6dcbd
|
/man/xmlDOMApply.Rd
|
0575931aa041b9af78bc6f11381b5b28bd8b0beb
|
[] |
no_license
|
cran/XML
|
cd6e3c4d0a0875804f040865b96a98aca4c73dbc
|
44649fca9d41fdea20fc2f573cb516f2b12c897e
|
refs/heads/master
| 2023-04-06T18:52:11.013175
| 2023-03-19T10:04:35
| 2023-03-19T10:04:35
| 17,722,082
| 4
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,245
|
rd
|
xmlDOMApply.Rd
|
\name{xmlDOMApply}
\alias{xmlDOMApply}
\title{Apply function to nodes in an XML tree/DOM.}
\description{
This recursively applies the specified function to each node in an
XML tree, creating a new tree,
parallel to the original input tree.
Each element in the new tree is the return
value obtained from invoking the specified function
on the corresponding element
of the original tree.
The order in which the function is recursively applied
is "bottom-up". In other words,
function is first applied to each of the children
nodes first and then to the parent node
containing the newly computed results for the children.
}
\usage{
xmlDOMApply(dom, func)
}
\arguments{
\item{dom}{a node in the XML tree or DOM on which to recursively
apply the given function.
This should not be the \code{XMLDocument}
itself returned from
\code{\link{xmlTreeParse}}
but an object of class \code{XMLNode}.
This is typically obtained by
calling \code{\link{xmlRoot}} on the
return value from \code{\link{xmlTreeParse}}.
}
\item{func}{
the function to be applied to each node in the XML tree.
This is passed the node object, and the return
value is inserted into the new tree that is to be returned
in the corresponding position as the node being processed.
If the return value is \code{NULL}, this node is dropped from the tree.}
}
\details{
This is a native (C code) implementation that
understands the structure of an XML DOM returned
from \code{\link{xmlTreeParse}} and iterates
over the nodes in that tree.
}
\value{
A tree that parallels the structure in the
\code{dom} object passed to it.
}
\author{Duncan Temple Lang}
\references{\url{https://www.w3.org/XML//}, \url{http://www.jclark.com/xml/},
\url{https://www.omegahat.net} }
\seealso{\link{xmlTreeParse}}
\examples{
dom <- xmlTreeParse(system.file("exampleData","mtcars.xml", package="XML"))
tagNames <- function() {
tags <- character(0)
add <- function(x) {
if(inherits(x, "XMLNode")) {
if(is.na(match(xmlName(x), tags)))
tags <<- c(tags, xmlName(x))
}
NULL
}
return(list(add=add, tagNames = function() {return(tags)}))
}
h <- tagNames()
xmlDOMApply(xmlRoot(dom), h$add)
h$tagNames()
}
\keyword{file}
|
66e38e3072e840bd67e8be4f7f89abf32a863f4c
|
9f9038d285ae8e3d3772e49a8b3115f06e0a4f89
|
/man/bdImport_text_to_hdf5.Rd
|
2f2b0b87a32a8888694d9a38bca6292450f1548e
|
[] |
no_license
|
isglobal-brge/BigDataStatMeth
|
e09bfcb2ca7e1abce253083177a30167e694a2af
|
27948557f53ec6fa26450272339c7788b6c6bc54
|
refs/heads/master
| 2023-06-24T23:56:06.548394
| 2022-10-09T20:03:25
| 2022-10-09T20:03:25
| 147,813,286
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,408
|
rd
|
bdImport_text_to_hdf5.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{bdImport_text_to_hdf5}
\alias{bdImport_text_to_hdf5}
\title{Converts text file to hdf5 data file}
\usage{
bdImport_text_to_hdf5(
filename,
outputfile,
outGroup,
outDataset,
sep = NULL,
header = FALSE,
rownames = FALSE,
overwrite = FALSE
)
}
\arguments{
\item{filename}{string file name with data to be imported}
\item{outputfile}{file name and path to store imported data}
\item{outGroup}{group name to store the dataset}
\item{outDataset}{dataset name to store the input file in hdf5}
\item{sep}{(optional), by default = "\\t". The field separator string. Values within each row of x are separated by this string.}
\item{header}{(optional) either a logical value indicating whether the column names of x are to be written along with x, or a character vector of column names to be written. See the section on ‘CSV files’ for the meaning of col.names = NA.}
\item{rownames}{(optional) either a logical value indicating whether the row names of x are to be written along with x, or a character vector of row names to be written.}
\item{overwrite}{(optional) either a logical value indicating whether the output file can be overwritten or not.}
}
\value{
none value returned, data are stored in a dataset inside an hdf5 data file.
}
\description{
Converts text file to hdf5 data file
}
|
4c390db4f8c42f52add21eb6db6d69c081bc1cec
|
e86eef8d2532368f0d22fb3c06719b4fbb9aa708
|
/curve-fitting2.R
|
c36b4c8caa0125655a47cf040ac026d613017b6e
|
[] |
no_license
|
khoadley/Fluorometer
|
61e8fbb4750c1190b07ac94274cab394463bca64
|
e1dc221c4f1454e8985f1277890acaee68400f1b
|
refs/heads/master
| 2021-01-21T21:09:59.886318
| 2017-09-28T16:18:18
| 2017-09-28T16:18:18
| 92,315,422
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,501
|
r
|
curve-fitting2.R
|
library(ggplot2)
library(car)
library(MVN)
library(MASS)
# library(doBy)  # fixed: was `//library(doBy)` -- `//` is not a comment
# marker in R, so the original line was a parse error that stopped the script.
rm(list=ls())
# Establish the x-axis: time steps 0..400 as a one-column matrix.
# Vectorized replacement for the original grow-by-append() loop, which
# copied the vector on every iteration (O(n^2)).
xvalue <- as.matrix(as.numeric(0:400))
# Index vector used to drop the 0.5-usec interval samples from each trace:
# 0, 2, 4, ..., 802 (402 values; the leading 0 is silently dropped when the
# vector is used as a subscript, as in reducer[xskip] below).
xskip <- as.matrix(seq(0, 802, by = 2))
# 1. Read In Excitation Profiles for all 8 LED channels
# Each channel occupies a fixed, hand-chosen row range of the CSV below;
# the ranges are specific to the aug15 acquisition file.
ex<-read.table("aug15-excit-profile.csv",skip = 1, header = FALSE, sep=",")
ex567 <-ex[4:14,]
ex597 <-ex[19:29,]
ex615 <-ex[34:44,]
ex625 <-ex[49:59,]
ex447 <-ex[64:74,]
ex475 <-ex[79:88,]
ex505 <-ex[93:103,]
ex530 <-ex[108:117,]
# Ordered by wavelength (447 -> 625 nm); this order defines the meaning of
# finalprofiles[[P]] below.
excitations <-list(ex447, ex475, ex505, ex530, ex567, ex597, ex615, ex625)
finalprofiles <-list()
for(P in 1:8)
{ file<-excitations[[P]]
num<-dim(file)[[1]]
averaged <- list()
for(N in 1:num)
{ #Sys.sleep(.1)
# Scan columns 40:80 for the first sample above the 0.2 trigger threshold,
# then keep every other sample (via xskip) from that trigger point onward,
# aligning all replicate traces on the trigger.
for (i in 40:80)
{ if (file[N,i]>0.2)
{ reducer<-as.matrix(file[N,i:1000])
y1 <- list()
y1[[N]]<-(reducer[xskip])
normalized<-data.matrix(do.call(rbind,y1))
averaged[[N]] <-c(normalized)
#plot(xvalue, normalized)
#Sys.sleep(.3)
break
}
}
}
# Average the aligned replicate rows to get one profile per LED channel.
harvest<-data.matrix(do.call(rbind,averaged))
res<-colMeans(harvest, na.rm=TRUE)
finalprofiles[[P]] <- c(res)
}
profile447<-finalprofiles[[1]]
profile475<-finalprofiles[[2]]
profile505<-finalprofiles[[3]]
profile530<-finalprofiles[[4]]
profile567<-finalprofiles[[5]]
profile597<-finalprofiles[[6]]
profile615<-finalprofiles[[7]]
profile625<-finalprofiles[[8]]
##check each profile (visual QC; reassign `profile` to inspect other channels)
profile<-profile447
resu<-data.frame(xvalue, profile)
pl<-ggplot(resu, aes(xvalue, profile))+geom_point()
pl+scale_y_continuous(limits = c(0, 6.5))
#2. Read in folders with individual file/samples and normalize to respective excitation profiles
filenames <- list.files("LvsH-light", pattern=".csv", full.names=TRUE)
Results <- list()  # per-file c(Fo, Fm, Fv/Fm, sigma)
SIG <- list()
Namer <- list()    # per-file source filename
seconds<-0
# Number of input files. length() replaces summary(filenames)[[1]], which
# returned the length as a *character* string (it only worked through
# implicit coercion in `1:Foo`). The dead `F <- Foo` assignment is removed:
# it masked the built-in constant F (FALSE) and was immediately overwritten
# by the for-loop index below.
Foo <- length(filenames)
#F <- 30
# Main loop: for each sample CSV, align traces on the fluorescence rise,
# normalize, fit a saturating-exponential induction curve with nls(), and
# collect Fo, Fm, Fv/Fm and sigma per file.
# NOTE(review): `file` and `norm` shadow base::file / base::norm here.
for(F in 1:Foo)
{
if(file.info(filenames[F])$size>0)
{ data<-read.table(filenames[F],skip = 1, header = FALSE, sep=",")
file<-data
num<-dim(file)[[1]]
averaged <- list()
# Identify the LED channel from the filename suffix (e.g. "...447nm.csv").
# regexpr() returns the match position, or -1 if no match.
royal447<-regexpr("447nm.csv", filenames[F], ignore.case=FALSE)
royal475<-regexpr("475nm.csv", filenames[F], ignore.case=FALSE)
royal505<-regexpr("505nm.csv", filenames[F], ignore.case=FALSE)
royal530<-regexpr("530nm.csv", filenames[F], ignore.case=FALSE)
royal567<-regexpr("567nm.csv", filenames[F], ignore.case=FALSE)
royal597<-regexpr("597nm.csv", filenames[F], ignore.case=FALSE)
royal615<-regexpr("615nm.csv", filenames[F], ignore.case=FALSE)
royal625<-regexpr("625nm.csv", filenames[F], ignore.case=FALSE)
# Per-channel settings: `seconds` = usable trace length, `excit` = LED
# excitation intensity used to scale sigma below.
# NOTE(review): `prof` is assigned the regexpr() *match object* (a scalar
# match position), so `normalized/prof` below divides every trace by a
# constant, NOT elementwise by the measured excitation profile
# (profile447, profile475, ...) computed in part 1. Given the section
# comment "normalize to respective excitation profiles", this looks like
# it was meant to be `prof <- profile447` etc. -- confirm with the author
# before changing, as it rescales all fitted values.
if (royal447[[1]]>0)
{ seconds <- 90
prof <-royal447
excit <-34172
}
if (royal475[[1]]>0)
{ seconds <- 90
prof <-royal475
excit <-38545
}
if (royal505[[1]]>0)
{ seconds <- 175
prof <-royal505
excit <-16209
}
if (royal530[[1]]>0)
{ seconds <- 175
prof <-royal530
excit <-18401
}
if (royal567[[1]]>0)
{ seconds <- 175
prof <-royal567
excit <-20352
}
if (royal597[[1]]>0)
{ seconds <- 175
prof <-royal597
excit <-24089
}
if (royal625[[1]]>0)
{ seconds <- 175
prof <-royal625
excit <-43662
}
if (royal615[[1]]>0)
{ seconds <- 175
prof <-royal615
excit <-35983
}
# Align each trace (rows 2..num) on the first sample above the 0.1
# threshold, thin with xskip, and normalize by `prof` (see NOTE above).
for(N in 2:num)
{ #Sys.sleep(.1)
for (i in 40:80)
{ if (file[N,i]>0.1)
{ reducer<-as.matrix(file[N,i:800])
y1 <- list()
y1[[N]]<-(reducer[xskip])
normalized<-data.matrix(do.call(rbind,y1))
averaged[[N]] <-c(normalized/prof)
#plot(x1, normalized)
break
}
}
}
norm<-data.matrix(do.call(rbind,averaged))
normal<-colMeans(norm, na.rm=TRUE)
# Trim the first two points and anything beyond the channel's usable window.
x1<-xvalue[3:seconds]
yvalue<-normal[3:seconds]
# -999 is the sentinel for "fit failed"; mode(v)=="numeric" detects it,
# since a successful nls() fit has mode "list".
v <- -999
try(v <- nls(yvalue ~ A + B*(1-exp(-(C*x1)/1)),
start = list(A = 2, B = 2, C = .04),
trace = TRUE,control = list(maxiter = 500), algorithm = "port"), silent = TRUE)
# Retry once with a smaller rate-constant start value if the first fit failed.
if(mode(v)=="numeric")
{ try(v <- nls(yvalue ~ A + B*(1-exp(-(C*x1)/1)),
start = list(A = 2, B = 2, C = .02),
trace = TRUE,control = list(maxiter = 500), algorithm = "port"), silent = TRUE)
}
if(mode(v)=="numeric")
{ # Both fits failed: record -999 sentinels for every derived quantity.
coef <- -999
Aest <- -999
Best <- -999
Cest <- -999
Aerr <- -999
Berr <- -999
Cerr <- -999
sigma_se <- -999
Fv_se <- -999
Fm_se <- -999
Fo_se <- -999
Fv_Fm_se <- -999
maxp <- -999
Fv <- -999
Fm <- -999
Fo <- -999
sigma <- -999
Fv_Fm <- -999
yest <- yvalue
plot(x1, yvalue)
lines(yest, col = "red")
}
else
{ # Fit succeeded: derive photophysiology parameters from A, B, C.
coef <- summary(v)$coefficients
Aest <- coef[1,1]
Best <- coef[2,1]
Cest <- coef[3,1]
Aerr <- coef[1,2]
Berr <- coef[2,2]
Cerr <- coef[3,2]
sigma_se <- Cerr
Fv_se <- Berr
Fm_se <- sqrt(Aerr^2+Berr^2)
Fo_se <- sqrt(Fm_se^2+Fv_se^2)
Fv_Fm_se <- (Best/(Aest+Best))*sqrt((Fv_se/Best)^2+(Fm_se/(Aest+Best))^2)
maxp <- max(coef[,4])
# Fo: fitted curve at t=1; Fm: fitted curve at the last time point.
Fo <- Aest + Best*(1-exp(-(Cest*1)/1))
Fm <- Aest + Best*(1-exp(-(Cest*x1[length(x1)])/1))
Fv <- Fm-Fo
# sigma scales the fitted rate constant by excitation intensity
# (excit is in counts; /1e6 converts units -- confirm intended units).
sigma <- Cest/(excit/1000000)
Fv_Fm <- Fv/Fm
yest <- Aest + Best*(1-exp(-(Cest*x1)/1))
#yest <- 1.6482 + 1.45*(1-exp(-(0.047*x1)/1))
plot(x1, yvalue)
grid(col = "grey")
lines(yest, col ="red")
#Sys.sleep(.2)
}
# conest <- 0.5
# fmest <-Fm
# foest <- Fo
# bigC <-list()
# for (i in 1:length(yvalue))
# {
# Cestimator <- (yvalue[i] - foest)/((1-conest)*(fmest-foest)+conest*(yvalue[i]-foest))
# bigC[[i]]<-c(Cestimator)
# }
# Cnvalue<-data.matrix(do.call(rbind,bigC))
# Fn <- nls(yvalue ~ foest + (B-foest)*(Cnvalue)*((1-P)/(1-Cnvalue*P)),
# start = list(B = .5, P = 0.05),
# trace = TRUE,control = list(maxiter = 500), algorithm = "port")
# Cn <- nls(Cnvalue ~ Cnvalue[x1-1] + Rsig*((1-Cnvalue[x1-1])/((1-Cnvalue[x1-1])*P)),
# start = list(Rsig = sigma, P = 0.05),
# trace = TRUE,control = list(maxiter = 500), algorithm = "port")
Results[[F]] <-c(Fo,Fm,Fv_Fm,sigma)
Namer[[F]] <-c(filenames[F])
}
else
{}
}
# Assemble the per-file results and write them to a plain-text report.
Finals<-data.matrix(do.call(rbind,Results))
Namecode<-data.matrix(do.call(rbind,Namer))
Finalresults = data.frame(Namecode, Finals)
Out<-capture.output(Finalresults)
Finalresults
write(Out, file="121116-lvsh-light", sep="\n", append=FALSE)
|
fef46940420e6d3328c6c60a971863f559235141
|
551653ce2ea82e0e74cbb8844c09066650a15dce
|
/src/pcxn_res_go03.R
|
0cedc4fcbe860f863f3424ca85708ab22bc8c962
|
[] |
no_license
|
yeredh/PCxN_GOBP
|
cb9eafbc23f76b5b3767556b184ed2b59c9285be
|
6ad58a328818801aaf8f479e1a212c3bb3e61ab9
|
refs/heads/master
| 2020-12-24T15:13:23.436178
| 2015-07-07T03:36:50
| 2015-07-07T03:36:50
| 37,874,049
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,505
|
r
|
pcxn_res_go03.R
|
rm(list=ls())
# ==== Experiment-level correlation estimates ====
# All inputs/outputs live under one results directory; build paths from it
# instead of repeating the full path ten times (the constructed strings are
# identical to the originals).
res_dir <- "/net/hsphfs1/srv/export/hsphfs1/share_root/hide_lab/PCxN/output/GO/res/"
# Column-bind the 10 per-chunk correlation matrices r_mat1.RDS .. r_mat10.RDS.
r_mat <- do.call(cbind, lapply(seq_len(10), function(i) {
  readRDS(paste0(res_dir, "r_mat", i, ".RDS"))
}))
# sample size per experiment
n_vec <- readRDS(paste0(res_dir, "n_vec.RDS"))
# weighted average for the correlation estimates (weights sum to 1)
n_mult <- n_vec/sum(n_vec)
rm(n_vec)
r_bar <- r_mat %*% n_mult
saveRDS(r_bar, paste0(res_dir, "r_bar.RDS"))
rm(list=ls())
|
e1ad079c922026db35358c6ef61f244cdaf80c29
|
3d36b989d2e0be9c3954f3ded454436be7074870
|
/resource_tracking/prep/archive/old_code/gtm_prep/prep_fpm_summary_budget.R
|
b35129604fb5194cdb434d7fdbae24e84fd46517
|
[] |
no_license
|
Guitlle/gf
|
32bd9e0a5e176c01240f612f305cff740965a7f9
|
c96403d7d53398cb8d8d55fa5ea2d794e0094080
|
refs/heads/develop
| 2022-04-03T03:04:07.075040
| 2019-02-04T18:52:33
| 2019-02-04T18:52:33
| 109,864,623
| 0
| 0
| null | 2017-11-07T16:53:05
| 2017-11-07T16:53:05
| null |
UTF-8
|
R
| false
| false
| 2,743
|
r
|
prep_fpm_summary_budget.R
|
# ----------------------------------------------
# Irena Chen
#
# 11/8/2017
# Template for prepping GF UGA budgets where the data is in "summary" form
# Inputs:
# inFile - name of the file to be prepped
# Outputs:
# budget_dataset - prepped data.table object
# ----------------------------------------------
##download necessary packages:
library(lubridate)
library(data.table)
library(readxl)
library(stats)
library(stringr)
library(rlang)
library(zoo)
# ----------------------------------------------
##function to clean the data:
# Clean one GF UGA "summary"-form budget workbook into a long data.table with
# columns: cost_category, start_date, budget, disease, period, grant_number,
# recipient. Quarterly dates are generated from start_date in 3-month steps.
prep_fpm_summary_budget = function(dir, inFile, sheet_name, start_date, qtr_num, grant, disease, period, recipient){
if(!is.na(sheet_name)){
gf_data <- data.table(read_excel(paste0(dir, inFile), sheet=as.character(sheet_name), col_names = FALSE))
} else {
gf_data <- data.table(read_excel(paste0(dir, inFile)))
}
##we don't need the first three columns
gf_data <- gf_data[, -c(1:3)]
colnames(gf_data)[1] <- "cost_category"
## this type of budget data should always have 13 cost categories;
## keep the rows between "service deliv..." and "implementing..." inclusive
gf_data <- gf_data[c((grep("service deliv",tolower(gf_data$cost_category))):(grep("implementing", tolower(gf_data$cost_category)))),]
## drop the first and last row of the data
gf_data <- head(gf_data,-1)
gf_data <- gf_data[-1,]
## drop columns whose content matches any of these summary labels
toMatch <- c("%", "Phase", "Total", "Year", "RCC")
# NOTE(review): grep() is applied to gf_data itself, which matches against
# the character-coerced *columns*; confirm this wasn't meant to be
# grep(..., names(gf_data)).
drop.cols <- grep(paste(toMatch, collapse="|"), ignore.case=TRUE, gf_data)
gf_data <- gf_data[, (drop.cols) := NULL]
##remove values from the "cost category" that we don't want:
gf_data <- gf_data[!grepl("Presupuesto", gf_data$cost_category),]
gf_data[[1]][1] <- "cost_category"
##only keep data that has a value in the "category" column
gf_data <- na.omit(gf_data, cols=1, invert=FALSE)
## drop another leading row
## (NOTE(review): original comment said "remove blank columns", but this
## line removes the first *row*)
gf_data <- gf_data[-1,]
## also drop columns containing only NA's
gf_data<- Filter(function(x) !all(is.na(x)), gf_data)
# Build one date per quarter: start_date, then +3 months per step.
dates <- rep(start_date, qtr_num) #
for (i in 1:length(dates)){
if (i==1){
dates[i] <- start_date
} else {
dates[i] <- dates[i-1]%m+% months(3)
}
}
# Keep cost_category plus one column per quarter, then label them by date.
col_num = qtr_num+1
gf_data <- gf_data[, 1:col_num]
setnames(gf_data, c("cost_category", as.character(dates)))
setDT(gf_data)
# Wide -> long: one row per (cost_category, quarter start date, budget).
budget_dataset<- melt(gf_data,id="cost_category", variable.name = "start_date", value.name="budget")
budget_dataset$start_date <- as.Date(budget_dataset$start_date, "%Y-%m-%d")
budget_dataset$budget <- as.numeric(budget_dataset$budget)
##add categories passed in by the caller
budget_dataset$disease <- disease
budget_dataset$period <- period
budget_dataset$grant_number <- grant
budget_dataset$recipient <- recipient
return(budget_dataset)
}
|
06161a67160945117c5806446b179a5d7b1d2826
|
0d1dbe187fcd1a0231cbed0b31f060770be47e9c
|
/man/get_artist_albums.Rd
|
4e91dd2a0160b10ec22e301ff4b174cded51e90e
|
[] |
no_license
|
AlicjaGrzadziel/spotifyr
|
359c035c119010eedf8128f69c69535bf0119259
|
b9192ebc0d932da7781cbc01203902d64dcbb112
|
refs/heads/master
| 2020-03-20T07:03:45.023042
| 2018-06-11T00:02:00
| 2018-06-11T00:02:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,266
|
rd
|
get_artist_albums.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_artist_albums.R
\name{get_artist_albums}
\alias{get_artist_albums}
\title{Get Artist Albums}
\usage{
get_artist_albums(artist_name = NULL, artist_uri = NULL,
use_artist_uri = FALSE, return_closest_artist = TRUE, message = FALSE,
studio_albums_only = TRUE, access_token = get_spotify_access_token())
}
\arguments{
\item{artist_name}{String of artist name}
\item{artist_uri}{String of Spotify artist URI. Will only be applied if \code{use_artist_uri} is set to \code{TRUE}. This is useful for pulling artists in bulk and allows for more accurate matching since Spotify URIs are unique.}
\item{use_artist_uri}{Boolean determining whether to search by Spotify URI instead of an artist name. If \code{TRUE}, you must also enter an \code{artist_uri}. Defaults to \code{FALSE}.}
\item{studio_albums_only}{Logical for whether to remove album types "single" and "compilation" and albums with multiple artists. Defaults to \code{TRUE}}
\item{access_token}{Spotify Web API token. Defaults to spotifyr::get_spotify_access_token()}
}
\description{
This function returns an artist's discography on Spotify
}
\examples{
\dontrun{
albums <- get_artist_albums('radiohead')
}
}
\keyword{albums}
|
4c8cd06d35bb3bf271f2f8d8a92a2b90a34949f3
|
259256f1befb13890c929f6727f7621d5587b394
|
/R/RegionalGoF.R
|
a412f8e8049ac038cb58340f79e18a160410c1dc
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
jlthomps/NWCCompare
|
739bab60679687d1ac21ac0c57315485e0c85496
|
6d1efeef3e207bd4b94353adf7763a8976951986
|
refs/heads/master
| 2021-01-18T05:21:12.731754
| 2014-03-27T17:06:19
| 2014-03-27T17:06:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,044
|
r
|
RegionalGoF.R
|
#' Function to calculate GoF statistics for given observed and modeled statistics
#'
#' This function accepts data frames of statistics for observed and modeled daily flow time-series
#' and returns a matrix of calculated GoF statistics
#'
#' @param GagedFlowStats data frame of flow stats for observed data
#' @param ModeledFlowStats data frame of flow stats for modeled data; must
#'   have the same dimensions as \code{GagedFlowStats}
#' @return Output matrix of calculated statistics: one column per input flow
#'   statistic and six rows (NSE, NSE of logs, RMSE, percent bias,
#'   Pearson correlation, Spearman correlation)
#' @export
RegionalGoF <- function(GagedFlowStats,ModeledFlowStats) {
  # Fail fast on mismatched inputs rather than silently recycling values.
  stopifnot(ncol(GagedFlowStats) == ncol(ModeledFlowStats),
            nrow(GagedFlowStats) == nrow(ModeledFlowStats))
  Output<-matrix(nrow=6,ncol=ncol(GagedFlowStats))
  # seq_len() instead of 1:ncol() so a zero-column input yields an empty
  # loop rather than iterating over c(1, 0).
  for (i in seq_len(ncol(GagedFlowStats))) {
    Output[1,i] <- nse(GagedFlowStats[,i],ModeledFlowStats[,i])
    Output[2,i] <- nselog(GagedFlowStats[,i],ModeledFlowStats[,i])
    Output[3,i] <- rmse(GagedFlowStats[,i],ModeledFlowStats[,i])
    # NOTE(review): pbias() receives modeled first, the opposite order from
    # the other metrics; preserved as-is -- confirm against pbias()'s
    # documented argument order.
    Output[4,i] <- pbias(ModeledFlowStats[,i],GagedFlowStats[,i])
    Output[5,i] <- cor(GagedFlowStats[,i],ModeledFlowStats[,i],method='pearson')
    Output[6,i] <- cor(GagedFlowStats[,i],ModeledFlowStats[,i],method='spearman')
  }
  return(Output)
}
|
dbdaa7a689dffdd6c69b89444b1242fde14a35bd
|
189ef03fd836f6ed9a2d443e79161e84da3fecbd
|
/02-Plot_habitat_effects.R
|
3e229aa9c0f02675969f7d6e483480610f36ebb1
|
[
"CC0-1.0"
] |
permissive
|
qureshlatif/CFLRP-analysis-scripts
|
af0bdcdcf9ae9ee32811c29429bec4fd5083a346
|
12627b08490e00c0030a8340d780a7d0b43277a5
|
refs/heads/master
| 2021-06-26T02:06:56.119795
| 2020-12-11T15:44:11
| 2020-12-11T15:44:11
| 186,690,852
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,683
|
r
|
02-Plot_habitat_effects.R
|
library(jagsUI)
library(stringr)
library(dplyr)
library(R.utils)
library(QSLpersonal)
library(ggplot2)
library(cowplot)
# NOTE(review): absolute setwd() only works on the author's machine;
# consider a project-relative path or here::here().
setwd("C:/Users/Quresh.Latif/files/projects/FS/CFLRP")
# Loads (at least) spp.list used below; mod is the fitted JAGS habitat model.
load("Data_compiled.RData")
mod <- loadObject("mod_habitat_d0yr_reduced")
# Species (4-letter codes) with treatment effects; flagged with "*" in plots.
spp_trt_effects <- c("WISA", "RECR", "WEWP", "CAFI", "CONI",
                     "AMRO", "CAJA", "PIGR", "EVGR", "CLNU",
                     "PYNU", "LISP", "BRCR", "DEJU", "WBNU",
                     "OSFL", "STJA", "BHCO", "COFL", "HAWO",
                     "MGWA", "RCKI", "YEWA", "VGSW", "YRWA",
                     "SOSP", "HETH", "VIWA")
# Tabulate parameter estimates: for each species, the posterior median and
# 95% credible interval (quantile type 8) of every monitored parameter.
pars <- c("psi", "bd.TWI", "bd.heatload", "bd.ForAR", "bd.PACC10_3km", "bd.PACC40_3km", "bd.mnPerArRatio_Opn3km",
          "theta", "bb.CanCov", "bb.CanHt", "bb.NumSnags", "bb.RCOV_PP", "bb.RCOV_DF", "bb.RCOV_AS", "bb.shvol",
          "bb.RSCV_Ladder", "bb.HerbGrassVol")
# Column names: for each parameter, "<par>", "<par>.lo", "<par>.hi".
cols <- (c("", ".lo", ".hi") %>%
  expand.grid(pars, stringsAsFactors = F) %>%
  select(Var2, Var1) %>%
  mutate(Var3 = str_c(Var2, Var1, sep = "")))$Var3
tbl_pars <- matrix(NA, nrow = length(spp.list), ncol = length(cols), dimnames = list(NULL, cols))
# Slope parameters are summarized directly from their MCMC samples.
for(par in pars[-which(pars %in% c("psi", "theta"))]) {
  parm <- mod$sims.list[[par]]
  tbl_pars[, par] <- apply(parm, 2, median)
  tbl_pars[, str_c(par, ".lo")] <- apply(parm, 2, function(x) quantile(x, prob = 0.025, type = 8))
  tbl_pars[, str_c(par, ".hi")] <- apply(parm, 2, function(x) quantile(x, prob = 0.975, type = 8))
}
rm(par)
# psi and theta are back-transformed from the intercepts d0/b0 (logit scale).
parm <- expit(mod$sims.list[["d0"]])
tbl_pars[, "psi"] <- apply(parm, 2, median)
tbl_pars[, "psi.lo"] <- apply(parm, 2, function(x) quantile(x, prob = 0.025, type = 8))
tbl_pars[, "psi.hi"] <- apply(parm, 2, function(x) quantile(x, prob = 0.975, type = 8))
parm <- expit(mod$sims.list[["b0"]])
tbl_pars[, "theta"] <- apply(parm, 2, median)
tbl_pars[, "theta.lo"] <- apply(parm, 2, function(x) quantile(x, prob = 0.025, type = 8))
tbl_pars[, "theta.hi"] <- apply(parm, 2, function(x) quantile(x, prob = 0.975, type = 8))
rm(parm)
tbl_pars_all <- tbl_pars
# Keep psi (cols 1:3) and the plotted predictors (cols 13:51); drops
# bd.TWI, bd.heatload, bd.ForAR. These indices depend on the order of
# `pars` above -- update both together.
tbl_pars <- tbl_pars[, c(1:3, 13:51)]
beta.cols <- dimnames(tbl_pars)[[2]][-which(dimnames(tbl_pars)[[2]] %in% c("psi", "psi.lo", "psi.hi",
                                                                           "theta", "theta.lo", "theta.hi"))]
# Species to plot: those with treatment effects plus any species whose 95%
# CRI for some slope excludes zero (lower bound > 0 or upper bound < 0).
ind.spp <- c(which(spp.list %in% spp_trt_effects),
             tbl_pars[, beta.cols[beta.cols %>% str_detect(".lo") %>% which]] %>%
               apply(1, function(x) any(x > 0)) %>%
               which,
             tbl_pars[, beta.cols[beta.cols %>% str_detect(".hi") %>% which]] %>%
               apply(1, function(x) any(x < 0)) %>%
               which) %>% unique %>% sort
spp.plt <- spp.list[ind.spp]
# `index` reverses row order so species plot top-to-bottom after coord_flip().
dat.plt <- tbl_pars %>% tbl_df() %>%
  mutate(Spp = spp.list) %>%
  filter(Spp %in% spp.plt) %>%
  mutate(index = row_number()) %>%
  mutate(index = (max(index) - index) + 1)
dat.plt$Spp[which(dat.plt$Spp %in% spp_trt_effects)] <-
  dat.plt$Spp[which(dat.plt$Spp %in% spp_trt_effects)] %>% str_c("*")
# For each plotted slope, classify CRI support: "pos" (excludes 0 from
# below), "neg" (excludes 0 from above), or "none"; used to color points.
cols <- pars[-c(1:4, 8)] %>% str_c(".supp")
dat.supp <- matrix("none", nrow = nrow(dat.plt), ncol = length(cols),
                   dimnames = list(NULL, cols))
for(i in 1:length(cols)) {
  col.chck <- str_sub(cols[i], 1, -6)
  chck <- dat.plt[, which(str_detect(names(dat.plt), col.chck))]
  dat.supp[which(chck[, 2] > 0), cols[i]] <- "pos"
  dat.supp[which(chck[, 3] < 0), cols[i]] <- "neg"
}
rm(col.chck, chck)
dat.plt <- dat.plt %>%
  bind_cols(dat.supp %>% as.data.frame)
rm(dat.supp)
theme_set(theme_cowplot())
## Grid level relationships ##
# Each panel: species on the (flipped) x-axis, estimate + 95% CRI on y;
# points colored by CRI support (neg = blue, none = gray, pos = orange).
# The PACCOpn/PAROpn panels deliberately reuse the PACC10_3km y-limits so
# all landscape panels share a common scale (see commented alternatives in
# the point-level plots below for the per-variable versions).
# NOTE(review): guides(color = F) uses F for FALSE; newer ggplot2 prefers
# guides(color = "none").
p.psi <- ggplot(dat = dat.plt, aes(x = index, y = psi)) +
  geom_errorbar(aes(ymin = psi.lo, ymax = psi.hi), size=1, width=0) +
  geom_point(size = 2.5) +
  coord_flip() +
  scale_x_continuous(breaks = 1:nrow(dat.plt), labels = dat.plt$Spp %>% rev, expand=c(0, 1)) +
  scale_y_continuous(lim = c(0, 1)) +
  ylab(expression(hat(psi)["mean"])) + xlab(NULL) +
  theme(axis.title.x=element_text(size=25))
p.PACCGap <- ggplot(dat = dat.plt, aes(x = index, y = bd.PACC10_3km, color = bd.PACC10_3km.supp)) +
  geom_errorbar(aes(ymin = bd.PACC10_3km.lo, ymax = bd.PACC10_3km.hi, color = bd.PACC10_3km.supp), size=1, width=0) +
  geom_point(size = 2.5) +
  geom_hline(yintercept = 0) +
  coord_flip() +
  scale_x_continuous(breaks = 1:nrow(dat.plt), labels = dat.plt$Spp %>% rev, expand=c(0, 1)) +
  scale_y_continuous(lim = c(min(dat.plt$bd.PACC10_3km.lo), max(dat.plt$bd.PACC10_3km.hi))) +
  scale_color_manual(values = c("#0072B2", "dark gray", "#D55E00")) +
  ylab(expression(hat(delta)["PACCGap"])) + xlab(NULL) +
  theme(axis.title.x=element_text(size=25)) +
  guides(color = F)
p.PACCOpn <- ggplot(dat = dat.plt, aes(x = index, y = bd.PACC40_3km, color = bd.PACC40_3km.supp)) +
  geom_errorbar(aes(ymin = bd.PACC40_3km.lo, ymax = bd.PACC40_3km.hi, color = bd.PACC40_3km.supp), size=1, width=0) +
  geom_point(size = 2.5) +
  geom_hline(yintercept = 0) +
  coord_flip() +
  scale_x_continuous(breaks = 1:nrow(dat.plt), labels = dat.plt$Spp %>% rev, expand=c(0, 1)) +
  scale_y_continuous(lim = c(min(dat.plt$bd.PACC10_3km.lo), max(dat.plt$bd.PACC10_3km.hi))) +
  scale_color_manual(values = c("#0072B2", "dark gray", "#D55E00")) +
  ylab(expression(hat(delta)["PACCOpn"])) + xlab(NULL) +
  theme(axis.title.x=element_text(size=25)) +
  guides(color = F)
p.PAROpn <- ggplot(dat = dat.plt, aes(x = index, y = bd.mnPerArRatio_Opn3km, color = bd.mnPerArRatio_Opn3km.supp)) +
  geom_errorbar(aes(ymin = bd.mnPerArRatio_Opn3km.lo, ymax = bd.mnPerArRatio_Opn3km.hi, color = bd.mnPerArRatio_Opn3km.supp), size=1, width=0) +
  geom_point(size = 2.5) +
  geom_hline(yintercept = 0) +
  coord_flip() +
  scale_x_continuous(breaks = 1:nrow(dat.plt), labels = dat.plt$Spp %>% rev, expand=c(0, 1)) +
  scale_y_continuous(lim = c(min(dat.plt$bd.PACC10_3km.lo), max(dat.plt$bd.PACC10_3km.hi))) +
  scale_color_manual(values = c("#0072B2", "dark gray", "#D55E00")) +
  ylab(expression(hat(delta)["PAROpn"])) + xlab(NULL) +
  theme(axis.title.x=element_text(size=25)) +
  guides(color = F)
# Assemble the four landscape panels side by side and save.
p <- ggdraw() +
  draw_plot(p.psi, x = 0.05, y = 0, width = 0.2375, height = 1) +
  draw_plot(p.PACCGap, x = 0.2875, y = 0, width = 0.2375, height = 1) +
  draw_plot(p.PACCOpn, x = 0.5250, y = 0, width = 0.2375, height = 1) +
  draw_plot(p.PAROpn, x = 0.7625, y = 0, width = 0.2375, height = 1) +
  draw_plot_label("Species", x = 0, y = 0.5, size = 40, angle = 90, hjust = 0)
save_plot("Plot_landscape_effects.tiff", p, ncol = 3, nrow = 4, dpi = 200)
## Point level relationships 1 ##
# Point-scale panels share the y-range [min(RCOV_DF.lo), max(HerbGrassVol.hi)]
# across all veg panels (per-variable limits kept commented out).
p.theta <- ggplot(dat = dat.plt, aes(x = index, y = theta)) +
  geom_errorbar(aes(ymin = theta.lo, ymax = theta.hi), size=1, width=0) +
  geom_point(size = 2.5) +
  coord_flip() +
  scale_x_continuous(breaks = 1:nrow(dat.plt), labels = dat.plt$Spp %>% rev, expand=c(0, 1)) +
  scale_y_continuous(lim = c(0, 1)) +
  ylab(expression(hat(theta)["mean"])) + xlab(NULL) +
  theme(axis.title.x=element_text(size=25))
p.CanCov <- ggplot(dat = dat.plt, aes(x = index, y = bb.CanCov, color = bb.CanCov.supp)) +
  geom_errorbar(aes(ymin = bb.CanCov.lo, ymax = bb.CanCov.hi, color = bb.CanCov.supp), size=1, width=0) +
  geom_point(size = 2.5) +
  geom_hline(yintercept = 0) +
  coord_flip() +
  scale_x_continuous(breaks = 1:nrow(dat.plt), labels = dat.plt$Spp %>% rev, expand=c(0, 1)) +
  scale_y_continuous(lim = c(min(dat.plt$bb.RCOV_DF.lo), max(dat.plt$bb.HerbGrassVol.hi))) +
  # scale_y_continuous(lim = c(min(dat.plt$bb.CanCov.lo), max(dat.plt$bb.CanCov.hi))) +
  scale_color_manual(values = c("#0072B2", "dark gray", "#D55E00")) +
  ylab(expression(hat(beta)["CanCov"])) + xlab(NULL) +
  theme(axis.title.x=element_text(size=25)) +
  guides(color = F)
p.CanHt <- ggplot(dat = dat.plt, aes(x = index, y = bb.CanHt, color = bb.CanHt.supp)) +
  geom_errorbar(aes(ymin = bb.CanHt.lo, ymax = bb.CanHt.hi, color = bb.CanHt.supp), size=1, width=0) +
  geom_point(size = 2.5) +
  geom_hline(yintercept = 0) +
  coord_flip() +
  scale_x_continuous(breaks = 1:nrow(dat.plt), labels = dat.plt$Spp %>% rev, expand=c(0, 1)) +
  scale_y_continuous(lim = c(min(dat.plt$bb.RCOV_DF.lo), max(dat.plt$bb.HerbGrassVol.hi))) +
  # scale_y_continuous(lim = c(min(dat.plt$bb.CanHt.lo), max(dat.plt$bb.CanHt.hi))) +
  scale_color_manual(values = c("#0072B2", "dark gray", "#D55E00")) +
  ylab(expression(hat(beta)["CanHt"])) + xlab(NULL) +
  theme(axis.title.x=element_text(size=25)) +
  guides(color = F)
p.NSnag <- ggplot(dat = dat.plt, aes(x = index, y = bb.NumSnags, color = bb.NumSnags.supp)) +
  geom_errorbar(aes(ymin = bb.NumSnags.lo, ymax = bb.NumSnags.hi, color = bb.NumSnags.supp), size=1, width=0) +
  geom_point(size = 2.5) +
  geom_hline(yintercept = 0) +
  coord_flip() +
  scale_x_continuous(breaks = 1:nrow(dat.plt), labels = dat.plt$Spp %>% rev, expand=c(0, 1)) +
  scale_y_continuous(lim = c(min(dat.plt$bb.RCOV_DF.lo), max(dat.plt$bb.HerbGrassVol.hi))) +
  # scale_y_continuous(lim = c(min(dat.plt$bb.NumSnags.lo), max(dat.plt$bb.NumSnags.hi))) +
  scale_color_manual(values = c("#0072B2", "dark gray", "#D55E00")) +
  ylab(expression(hat(beta)["NSnag"])) + xlab(NULL) +
  theme(axis.title.x=element_text(size=25)) +
  guides(color = F)
# Canopy-structure panels: assemble and save.
p <- ggdraw() +
  draw_plot(p.theta, x = 0.0500, y = 0, width = 0.2375, height = 1) +
  draw_plot(p.CanCov, x = 0.2875, y = 0, width = 0.2375, height = 1) +
  draw_plot(p.CanHt, x = 0.5250, y = 0, width = 0.2375, height = 1) +
  draw_plot(p.NSnag, x = 0.7625, y = 0, width = 0.2375, height = 1) +
  draw_plot_label("Species", x = 0, y = 0.5, size = 40, angle = 90, hjust = 0)
save_plot("Plot_veg_canStruct_effects.tiff", p, ncol = 3, nrow = 3, dpi = 200)
# Relative-cover and shrub panels (assembled/saved beyond this chunk).
p.PIPO <- ggplot(dat = dat.plt, aes(x = index, y = bb.RCOV_PP, color = bb.RCOV_PP.supp)) +
  geom_errorbar(aes(ymin = bb.RCOV_PP.lo, ymax = bb.RCOV_PP.hi, color = bb.RCOV_PP.supp), size=1, width=0) +
  geom_point(size = 2.5) +
  geom_hline(yintercept = 0) +
  coord_flip() +
  scale_x_continuous(breaks = 1:nrow(dat.plt), labels = dat.plt$Spp %>% rev, expand=c(0, 1)) +
  scale_y_continuous(lim = c(min(dat.plt$bb.RCOV_DF.lo), max(dat.plt$bb.HerbGrassVol.hi))) +
  # scale_y_continuous(lim = c(min(dat.plt$bb.RCOV_PP.lo), max(dat.plt$bb.RCOV_PP.hi))) +
  scale_color_manual(values = c("#0072B2", "dark gray", "#D55E00")) +
  ylab(expression(hat(beta)["PIPO"])) + xlab(NULL) +
  theme(axis.title.x=element_text(size=25)) +
  guides(color = F)
p.PSME <- ggplot(dat = dat.plt, aes(x = index, y = bb.RCOV_DF, color = bb.RCOV_DF.supp)) +
  geom_errorbar(aes(ymin = bb.RCOV_DF.lo, ymax = bb.RCOV_DF.hi, color = bb.RCOV_DF.supp), size=1, width=0) +
  geom_point(size = 2.5) +
  geom_hline(yintercept = 0) +
  coord_flip() +
  scale_x_continuous(breaks = 1:nrow(dat.plt), labels = dat.plt$Spp %>% rev, expand=c(0, 1)) +
  scale_y_continuous(lim = c(min(dat.plt$bb.RCOV_DF.lo), max(dat.plt$bb.HerbGrassVol.hi))) +
  # scale_y_continuous(lim = c(min(dat.plt$bb.RCOV_DF.lo), max(dat.plt$bb.RCOV_DF.hi))) +
  scale_color_manual(values = c("#0072B2", "dark gray", "#D55E00")) +
  ylab(expression(hat(beta)["PSME"])) + xlab(NULL) +
  theme(axis.title.x=element_text(size=25)) +
  guides(color = F)
p.POTR5 <- ggplot(dat = dat.plt, aes(x = index, y = bb.RCOV_AS, color = bb.RCOV_AS.supp)) +
  geom_errorbar(aes(ymin = bb.RCOV_AS.lo, ymax = bb.RCOV_AS.hi, color = bb.RCOV_AS.supp), size=1, width=0) +
  geom_point(size = 2.5) +
  geom_hline(yintercept = 0) +
  coord_flip() +
  scale_x_continuous(breaks = 1:nrow(dat.plt), labels = dat.plt$Spp %>% rev, expand=c(0, 1)) +
  scale_y_continuous(lim = c(min(dat.plt$bb.RCOV_DF.lo), max(dat.plt$bb.HerbGrassVol.hi))) +
  # scale_y_continuous(lim = c(min(dat.plt$bb.RCOV_AS.lo), max(dat.plt$bb.RCOV_AS.hi))) +
  scale_color_manual(values = c("#0072B2", "dark gray", "#D55E00")) +
  ylab(expression(hat(beta)["POTR5"])) + xlab(NULL) +
  theme(axis.title.x=element_text(size=25)) +
  guides(color = F)
p.ShrbVol <- ggplot(dat = dat.plt, aes(x = index, y = bb.shvol, color = bb.shvol.supp)) +
  geom_errorbar(aes(ymin = bb.shvol.lo, ymax = bb.shvol.hi, color = bb.shvol.supp), size=1, width=0) +
  geom_point(size = 2.5) +
  geom_hline(yintercept = 0) +
  coord_flip() +
  scale_x_continuous(breaks = 1:nrow(dat.plt), labels = dat.plt$Spp %>% rev, expand=c(0, 1)) +
  scale_y_continuous(lim = c(min(dat.plt$bb.RCOV_DF.lo), max(dat.plt$bb.HerbGrassVol.hi))) +
  # scale_y_continuous(lim = c(min(dat.plt$bb.shvol.lo), max(dat.plt$bb.shvol.hi))) +
  scale_color_manual(values = c("#0072B2", "dark gray", "#D55E00")) +
  ylab(expression(hat(beta)["ShrbVol"])) + xlab(NULL) +
  theme(axis.title.x=element_text(size=25)) +
  guides(color = F)
p.LadFuel <- ggplot(dat = dat.plt, aes(x = index, y = bb.RSCV_Ladder, color = bb.RSCV_Ladder.supp)) +
geom_errorbar(aes(ymin = bb.RSCV_Ladder.lo, ymax = bb.RSCV_Ladder.hi, color = bb.RSCV_Ladder.supp), size=1, width=0) +
geom_point(size = 2.5) +
geom_hline(yintercept = 0) +
coord_flip() +
scale_x_continuous(breaks = 1:nrow(dat.plt), labels = dat.plt$Spp %>% rev, expand=c(0, 1)) +
scale_y_continuous(lim = c(min(dat.plt$bb.RCOV_DF.lo), max(dat.plt$bb.HerbGrassVol.hi))) +
# scale_y_continuous(lim = c(min(dat.plt$bb.RSCV_Ladder.lo), max(dat.plt$bb.RSCV_Ladder.hi))) +
scale_color_manual(values = c("#0072B2", "dark gray", "#D55E00")) +
ylab(expression(hat(beta)["LadFuel"])) + xlab(NULL) +
theme(axis.title.x=element_text(size=25)) +
guides(color = F)
p.Herb <- ggplot(dat = dat.plt, aes(x = index, y = bb.HerbGrassVol, color = bb.HerbGrassVol.supp)) +
geom_errorbar(aes(ymin = bb.HerbGrassVol.lo, ymax = bb.HerbGrassVol.hi, color = bb.HerbGrassVol.supp), size=1, width=0) +
geom_point(size = 2.5) +
geom_hline(yintercept = 0) +
coord_flip() +
scale_x_continuous(breaks = 1:nrow(dat.plt), labels = dat.plt$Spp %>% rev, expand=c(0, 1)) +
scale_y_continuous(lim = c(min(dat.plt$bb.RCOV_DF.lo), max(dat.plt$bb.HerbGrassVol.hi))) +
# scale_y_continuous(lim = c(min(dat.plt$bb.HerbGrassVol.lo), max(dat.plt$bb.HerbGrassVol.hi))) +
scale_color_manual(values = c("#0072B2", "dark gray", "#D55E00")) +
ylab(expression(hat(beta)["Herb"])) + xlab(NULL) +
theme(axis.title.x=element_text(size=25)) +
guides(color = F)
# Row of the six canopy-composition/understory panels (width 0.1583333 each,
# stepping from the 0.05 label margin), with a shared rotated "Species" label.
p <- ggdraw() +
  draw_plot(p.PIPO, x = 0.05, y = 0, width = 0.1583333, height = 1) +
  draw_plot(p.PSME, x = 0.2083333, y = 0, width = 0.1583333, height = 1) +
  draw_plot(p.POTR5, x = 0.3666667, y = 0, width = 0.1583333, height = 1) +
  draw_plot(p.ShrbVol, x = 0.5250000, y = 0, width = 0.1583333, height = 1) +
  draw_plot(p.LadFuel, x = 0.6833333, y = 0, width = 0.1583333, height = 1) +
  draw_plot(p.Herb, x = 0.8416667, y = 0, width = 0.1583333, height = 1) +
  draw_plot_label("Species", x = 0, y = 0.5, size = 40, angle = 90, hjust = 0)
save_plot("Plot_veg_CanComp&Understory_effects.tiff", p, ncol = 3, nrow = 4, dpi = 200)
# All ten effect panels (canopy structure + composition/understory) in one
# row; narrower panels (0.097) stepping from the 0.03 label margin.
p <- ggdraw() +
  draw_plot(p.theta, x = 0.03, y = 0, width = 0.097, height = 1) +
  draw_plot(p.CanCov, x = 0.127, y = 0, width = 0.097, height = 1) +
  draw_plot(p.CanHt, x = 0.224, y = 0, width = 0.097, height = 1) +
  draw_plot(p.NSnag, x = 0.321, y = 0, width = 0.097, height = 1) +
  draw_plot(p.PIPO, x = 0.418, y = 0, width = 0.097, height = 1) +
  draw_plot(p.PSME, x = 0.515, y = 0, width = 0.097, height = 1) +
  draw_plot(p.POTR5, x = 0.612, y = 0, width = 0.097, height = 1) +
  draw_plot(p.ShrbVol, x = 0.709, y = 0, width = 0.097, height = 1) +
  draw_plot(p.LadFuel, x = 0.806, y = 0, width = 0.097, height = 1) +
  draw_plot(p.Herb, x = 0.903, y = 0, width = 0.097, height = 1) +
  draw_plot_label("Species", x = 0, y = 0.5, size = 40, angle = 90, hjust = 0)
save_plot("Plot_veg_all_effects.tiff", p, ncol = 5, nrow = 4, dpi = 200)
|
f1d31a50f3013a663c0fc2c6dfd5484d91b8a51e
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/mgcv/examples/ocat.Rd.R
|
d47e394d8ba81e5f4a023c0fae0e3d1200270713
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 869
|
r
|
ocat.Rd.R
|
library(mgcv)
### Name: ocat
### Title: GAM ordered categorical family
### Aliases: ocat ordered.categorical
### Keywords: models regression

### ** Examples

library(mgcv)

## Simulate ordered categorical data: smooth additive signal from gamSim,
## centred so the cut points are interpretable on the latent scale.
set.seed(3)
n <- 400
dat <- gamSim(1, n = n)
dat$f <- dat$f - mean(dat$f)

## Cut points defining R ordered categories of the latent variable.
alpha <- c(-Inf, -1, 0, 5, Inf)
R <- length(alpha) - 1

## Latent variable = signal + logistic noise; category k is assigned where
## alpha[k] < u <= alpha[k+1].
y <- dat$f
u <- runif(n)
u <- dat$f + log(u / (1 - u))
for (k in 1:R) {
  y[u > alpha[k] & u <= alpha[k + 1]] <- k
}
dat$y <- y

## Plot the simulated responses against each covariate...
par(mfrow = c(2, 2))
with(dat, plot(x0, y))
with(dat, plot(x1, y))
with(dat, plot(x2, y))
with(dat, plot(x3, y))

## Fit the ordered-categorical GAM and inspect it...
b <- gam(y ~ s(x0) + s(x1) + s(x2) + s(x3), family = ocat(R = R), data = dat)
b
plot(b, pages = 1)
gam.check(b)
summary(b)
b$family$getTheta(TRUE) ## the estimated cut points

## Predicted probability of each category for the first two rows.
predict(b, dat[1:2, ], type = "response", se = TRUE)
|
d47a15bdccdaac905a345faa649920e3bbde8930
|
322737d934a4697320224ab97bdccddf936a7729
|
/ Kaggle Happiness Predictor/showofhands/data_transformation.R
|
81dcb7d73aa2d902ba103512c17e61f2ae9a37fc
|
[] |
no_license
|
eleven-yi/Analytics-Edge
|
651461b48b374f19391ed56b071989cc0f5582c3
|
8997093e44bd72b5902900038a1faa727eb844d1
|
refs/heads/master
| 2020-12-11T07:34:22.358890
| 2014-06-05T01:03:34
| 2014-06-05T01:05:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,862
|
r
|
data_transformation.R
|
# Encode a survey answer vector as numeric: NA -> 0, values in `val1`
# (default "Yes") -> -1, everything else -> 1.
numerify <- function(vec, val1 = "Yes") {
  out <- rep(1, length(vec))
  out[vec %in% val1] <- -1   # %in% never yields NA, so NAs stay untouched here
  out[is.na(vec)] <- 0       # missing answers are coded as 0 last
  out
}
# Build a fully numeric model matrix from the raw Show-of-Hands survey frame.
# Every question column is recoded via numerify() (NA -> 0, the "positive"
# answer -> -1, other answer -> 1); demographic fields get bespoke encodings.
# Returns a data.frame `dataset2` with the same rows as `dataset`. The
# `Happy` column is carried through only if present (train vs. test data).
completedataset <- function(dataset) {
  # YOB arrives as a factor/character; coerce to numeric, silencing the
  # NA-coercion warnings for malformed entries.
  dataset$YOB = suppressWarnings(as.numeric(as.character(dataset$YOB)))
  if ("Happy" %in% colnames(dataset)) {
    dataset2 = data.frame(UserID=dataset$UserID,
                          YOB=as.integer(dataset$YOB),
                          Happy=dataset$Happy)
  } else {
    dataset2 = data.frame(UserID=dataset$UserID,
                          YOB=as.integer(dataset$YOB))
  }
  # Impute missing birth years with 1979 (presumably near the median —
  # TODO confirm against the training data).
  dataset2$YOB = ifelse(is.na(dataset2$YOB), 1979, dataset2$YOB)
  # Gender: Male = 1, Female = -1, missing = 0.
  dataset2$Gender=ifelse(is.na(dataset$Gender), 0,
                         ifelse(dataset$Gender == "Male", 1, -1))
  # Income bracket -> bracket midpoint (open-ended top bracket set to 200k,
  # missing set to 0).
  dataset2$AvgIncome[ is.na(dataset$Income) ] = 0
  dataset2$AvgIncome[ dataset$Income == "$100,001 - $150,000"] = 125000
  dataset2$AvgIncome[ dataset$Income == "$25,001 - $50,000"] = 37500
  dataset2$AvgIncome[ dataset$Income == "$50,000 - $74,999"] = 62500
  dataset2$AvgIncome[ dataset$Income == "$75,000 - $100,000"] = 87500
  dataset2$AvgIncome[ dataset$Income == "over $150,000"] = 200000
  dataset2$AvgIncome[ dataset$Income == "under $25,000"] = 12500
  # dataset2$HouseholdStatus = as.integer(ifelse(is.na(dataset$HouseholdStatus),
  #                                              ifelse(ifelse(is.na(dataset$YOB), 1979, dataset$YOB)<1980, "4",
  #                                                     "5"),
  #                                              dataset$HouseholdStatus))
  # HouseholdStatus (a 6-level factor) -> two indicators via lookup tables
  # indexed by the factor's integer codes; NA status maps to 0.
  HasKids = c(-1, 1, -1, 1, -1, 1)
  dataset2$HasKids = HasKids[dataset$HouseholdStatus]
  dataset2$HasKids[is.na(dataset2$HasKids)] = 0
  juntado = c(1, 1, 1, 1,-1,-1)
  dataset2$LivesTogether = juntado[dataset$HouseholdStatus]
  dataset2$LivesTogether[is.na(dataset2$LivesTogether)] = 0
  # Education level -> one indicator per attainment tier.
  dataset2$HighSchool = numerify(dataset$EducationLevel, "High School Diploma")
  dataset2$Bachelor = numerify(dataset$EducationLevel, "Bachelor's Degree")
  dataset2$Undergraduate = numerify(dataset$EducationLevel,
                                    c("Associate's Degree", "Current Undergraduate"))
  dataset2$K12 = numerify(dataset$EducationLevel, "Current K-12")
  dataset2$PostGraduate= numerify(dataset$EducationLevel,
                                  c("Master's Degree", "Doctoral Degree"))
  # Party affiliation indicators.
  dataset2$Democrat = numerify(dataset$Party, "Democrat")
  dataset2$Republican = numerify(dataset$Party, "Republican")
  # One readable name per poll question (Q-columns). Unless a second
  # argument is given, the "Yes" answer codes to -1 and "No" to 1.
  dataset2$InteractDislikes = numerify(dataset$Q124742)
  dataset2$ParentsFight = numerify(dataset$Q124122)
  dataset2$MinimumWage = numerify(dataset$Q123464)
  dataset2$FullTime = numerify(dataset$Q123621)
  dataset2$Collects = numerify(dataset$Q122769)
  dataset2$WalletHas20 = numerify(dataset$Q122770)
  dataset2$PublicSchool = numerify(dataset$Q122771, "Public")
  dataset2$Jealous = numerify(dataset$Q122120)
  dataset2$Relationship = numerify(dataset$Q121700)
  dataset2$Alcohol = numerify(dataset$Q121699)
  dataset2$SesameStreet = numerify(dataset$Q120978)
  dataset2$StressfulEvts = numerify(dataset$Q121011)
  dataset2$Masters = numerify(dataset$Q120379)
  dataset2$ParentsMarried = numerify(dataset$Q120650)
  dataset2$Science = numerify(dataset$Q120472, "Art")
  dataset2$TryFirst = numerify(dataset$Q120194, "Try first")
  dataset2$WeatherMatters = numerify(dataset$Q120012)
  dataset2$Successful = numerify(dataset$Q120014)
  dataset2$Exciting = numerify(dataset$Q119334)
  dataset2$GoodBook = numerify(dataset$Q119851)
  dataset2$Giver = numerify(dataset$Q119650,"Giving")
  dataset2$Glasses = numerify(dataset$Q118892)
  dataset2$SameState = numerify(dataset$Q118117)
  dataset2$Idealist = numerify(dataset$Q118232, "Idealist")
  dataset2$RiskofLife = numerify(dataset$Q118233)
  dataset2$OverYourHead = numerify(dataset$Q118237)
  dataset2$HotHead = numerify(dataset$Q117186, "Hot headed")
  dataset2$OddHours = numerify(dataset$Q117193, "Odd hours")
  dataset2$Vitamins = numerify(dataset$Q116797)
  dataset2$HappyorRight = numerify(dataset$Q116881, "Happy")
  dataset2$Rules = numerify(dataset$Q116953)
  dataset2$TravelAbroad = numerify(dataset$Q116601)
  dataset2$CarPymt = numerify(dataset$Q116441)
  dataset2$NoLies = numerify(dataset$Q116448)
  dataset2$Morning = numerify(dataset$Q116197, "A.M.")
  dataset2$Obedient = numerify(dataset$Q115602)
  dataset2$StartHabit = numerify(dataset$Q115777, "Start")
  dataset2$PositiveThinking = numerify(dataset$Q115610)
  dataset2$OwnGun = numerify(dataset$Q115611)
  dataset2$TakesRespons = numerify(dataset$Q115899, "Me")
  dataset2$Personality = numerify(dataset$Q115390)
  dataset2$MoneyBuys = numerify(dataset$Q114961)
  dataset2$TapWater = numerify(dataset$Q114748)
  dataset2$CityLimits = numerify(dataset$Q115195)
  dataset2$MorningNews = numerify(dataset$Q114517)
  dataset2$Misterious = numerify(dataset$Q114386, "Mysterious")
  dataset2$Gambles = numerify(dataset$Q113992)
  dataset2$Charity = numerify(dataset$Q114152)
  dataset2$TalkRadio = numerify(dataset$Q113583, "Talk")
  dataset2$PeopleorTech = numerify(dataset$Q113584, "People")
  # Two meditation-related questions combined into one sign indicator.
  dataset2$Meditates = sign(numerify(dataset$Q113181) +
                              numerify(dataset$Q98197))
  dataset2$Phobic = numerify(dataset$Q112478)
  dataset2$Skeptical = numerify(dataset$Q112512)
  dataset2$LooksGood = numerify(dataset$Q112270)
  dataset2$StraightA = numerify(dataset$Q111848)
  dataset2$SuppParents = numerify(dataset$Q111580, "Supportive")
  dataset2$AlarmAhead = numerify(dataset$Q111220)
  dataset2$Mac = numerify(dataset$Q110740, "Mac")
  dataset2$Poor = numerify(dataset$Q109367)
  dataset2$Cautious = numerify(dataset$Q108950, "Cautious")
  dataset2$Feminist = numerify(dataset$Q109244)
  dataset2$LikesFamily = numerify(dataset$Q108855, "Yes!")
  dataset2$SingleParent = numerify(dataset$Q108617)
  dataset2$Sociable = numerify(dataset$Q108856, "Socialize")
  dataset2$ParentsCollege = numerify(dataset$Q108754)
  dataset2$Online = numerify(dataset$Q108342, "Online")
  dataset2$HasDebt = numerify(dataset$Q108343)
  dataset2$FeelsNormal = numerify(dataset$Q107869)
  dataset2$Punctuates = numerify(dataset$Q107491)
  dataset2$LikesName = numerify(dataset$Q106993)
  dataset2$LikesPeople = numerify(dataset$Q106997, "Yay people!")
  dataset2$PowerTools = numerify(dataset$Q106272)
  dataset2$Overworks = numerify(dataset$Q106388)
  dataset2$GoodLiar = numerify(dataset$Q106389)
  dataset2$Medications = numerify(dataset$Q106042)
  dataset2$RetailTherapy = numerify(dataset$Q105840)
  dataset2$Alarm = numerify(dataset$Q105655)
  dataset2$BrushesTeeth = numerify(dataset$Q104996)
  dataset2$ManyPets = numerify(dataset$Q103293)
  dataset2$Grudge = numerify(dataset$Q102906)
  dataset2$CreditDebt = numerify(dataset$Q102674)
  dataset2$EatsBreakfast = numerify(dataset$Q102687)
  dataset2$HomeOwner = numerify(dataset$Q102089, "Own")
  dataset2$Optimist = numerify(dataset$Q101162, "Optimist")
  dataset2$MomRules = numerify(dataset$Q101163, "Mom")
  dataset2$TreeHouse = numerify(dataset$Q101596)
  dataset2$Overweight = numerify(dataset$Q100689)
  dataset2$CryBaby = numerify(dataset$Q100680)
  dataset2$LifeImproves = numerify(dataset$Q100562)
  dataset2$CheckLists = numerify(dataset$Q99982, "Check!")
  dataset2$WatchTV = numerify(dataset$Q100010)
  dataset2$HomeAlone = numerify(dataset$Q99716)
  dataset2$LeftHanded = numerify(dataset$Q99581)
  dataset2$Spanked = numerify(dataset$Q99480)
  dataset2$MeaningofLife = numerify(dataset$Q98869)
  dataset2$Exercises = numerify(dataset$Q98578)
  dataset2$Siblings = numerify(dataset$Q98059)
  dataset2$Outlet = numerify(dataset$Q98078)
  dataset2$GoodatMath = numerify(dataset$Q96024)
  # Standardize the two continuous columns (z-scores) so they are on the
  # same scale as the +-1 indicators.
  dataset2$YOB = (dataset2$YOB-mean(dataset2$YOB))/sd(dataset2$YOB)
  dataset2$AvgIncome = (dataset2$AvgIncome-mean(dataset2$AvgIncome))/
    sd(dataset2$AvgIncome)
  return(dataset2)
}
|
d7280275ba9637b1a791d1b4ffc7c4bcb77fb596
|
dd8c7e34e56f4439c9823676356c87f23ae81a3e
|
/databaseConverter.R
|
51869cfc2b9fdee8dfab17ab036979a3d43fe212
|
[] |
no_license
|
Sumidu/reclab_api
|
031b3029fdd66471e98154134dd43603487f306b
|
d1ab1af90f4773dcc5077f9bce0ba680c6cdbd16
|
refs/heads/master
| 2020-06-17T14:54:10.733182
| 2019-10-01T14:20:43
| 2019-10-01T14:20:43
| 195,956,035
| 0
| 0
| null | 2019-09-25T09:35:31
| 2019-07-09T07:29:14
|
R
|
UTF-8
|
R
| false
| false
| 1,442
|
r
|
databaseConverter.R
|
##
# Convert the article corpus stored in an SQLite database into a cleaned
# tibble (HTML stripped, subheading and teaser paragraphs extracted) and
# serialize it to articles.rds. Expects "corpus.sqlite3" with an "Articles"
# table in the working directory.
library(tidyverse)
library(RSQLite)
library(rvest)
library(lubridate)
con <- dbConnect(RSQLite::SQLite(), "corpus.sqlite3")
dbListTables(con)
articles <- dbReadTable(con, "Articles")
# Flatten an HTML fragment to plain text, collapsing newlines/tabs.
# NOTE(review): the final str_replace_all replaces '"' with '"' — a no-op;
# possibly an escaping mistake in the original. Left unchanged here.
strip_html <- function(x) {
  html_text(read_html(x)) %>%
    str_replace_all("\\n", " ") %>%
    str_replace_all("\\t", "") %>%
    str_replace_all("\\\"", "\"")
}
# Concatenate the text of all <h2> elements in an HTML fragment.
get_h2_header <- function(x){
  x %>% read_html() %>% html_nodes("h2") %>% html_text() %>% paste(collapse = " ")
}
# Concatenate the text of the first `count` <p> elements in an HTML fragment.
get_p_elems <- function(x, count = 5){
  x %>% read_html() %>% html_nodes("p") %>% html_text() %>% head(count) %>% paste(collapse = " ")
}
# Clean the corpus: drop articles whose body embeds JavaScript, drop the
# Community/Diverses sections, reformat the publication date, and extract
# a subheading plus a two-paragraph teaser per article (rowwise: the
# extractor helpers are not vectorized).
cleanarticles <- articles %>%
  mutate(has_javascript = str_match(Body, "text/javascript") ) %>%
  filter(is.na(has_javascript)) %>% as_tibble() %>%
  filter(!str_detect(Path, "Community")) %>%
  filter(!str_detect(Path, "Diverses")) %>%
  mutate(publishingDate = ymd_hms(publishingDate) %>%
           lubridate::as_date() %>%
           format("%d. %b %Y")) %>%
  rowwise() %>%
  mutate(subheading = get_h2_header(Body)) %>%
  mutate(cleanbody = get_p_elems(Body,2))
# Debug get all Types
cleanarticles %>% mutate(path2 = factor(Path)) %>% count(path2) %>% arrange(-n) %>% View()
write_rds(cleanarticles, "articles.rds")
# Spot-check a random article.
# NOTE(review): runif() yields a fractional index that is truncated when
# used for row selection; sample() would be the conventional choice.
x <- runif(1,1,1000)
example <- cleanarticles[x,]
example$Title
example$Body %>% get_h2_header()
example$Body %>% get_p_elems(2)
example$Path
example$publishingDate
example$ID_Article
|
2dc54149a5be3809b83037015a0edbbed59d5588
|
cd7e5ebd49ad2bbfa73fbc684d3966bbe0213e42
|
/R/run_landcover_preprocessing.R
|
fb09ddaafa008065aa7c78c459512ff69b976e45
|
[
"Apache-2.0"
] |
permissive
|
SebaDro/rainfall-runoff-preprocessing
|
0e9250167f1e57b6d97e5a6a750f6f25a328c3c1
|
16fc6ba2c9a4a537de6a27771b4c85bdbe95c2d5
|
refs/heads/main
| 2023-04-12T03:35:47.253767
| 2021-05-17T14:30:46
| 2021-05-17T14:30:46
| 323,310,778
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,170
|
r
|
run_landcover_preprocessing.R
|
# Compute per-subbasin CORINE land-cover class frequencies for each annual
# raster and write one CSV per raster. Relies on helpers from ./R/setup.R
# (load_catchments_as_sf, calculate_land_cover_frequency).
source("./R/setup.R")

##### Path parameters #####
output_path <- "./output/" # output path to save results
subbasin_file <- "./data/wv_subbasins.geojson"
class_config_file <- "./config/clc-codelist.csv"
# TODO Download CORINE Land Cover from https://land.copernicus.eu/pan-european/corine-land-cover
# and copy the annual GeoTIFF files to the './data' folder
raster_paths <- c("./data/clc1990.tif",
                  "./data/clc2000.tif",
                  "./data/clc2006.tif",
                  "./data/clc2012.tif",
                  "./data/clc2018.tif")

##### Execution parameters #####
# Suffix for resulting files
res_file_suff <- "wv"
# Name of the ID column
id_col <- "id"
# Should land cover classes that are not present be kept? If so, missing classes
# will be filled with zero percentage
fill_missing_classes <- FALSE

# Load codelist for CORINE Land Cover classes (codes read as character so
# leading zeros survive)
codelist <-
  read_csv(class_config_file, col_types = cols(code = col_character()))
codes <- codelist %>% pull(code)

subbasins <- load_catchments_as_sf(subbasin_file)

for (path in raster_paths) {
  cat(sprintf("\nStart processing raster file %s \n", path))
  land_cover <- read_stars(path)
  # Check if both, the raster data and the features/polygons have the same CRS.
  # If not, transform features/polygons to raster CRS
  if(st_crs(subbasins) != st_crs(land_cover)) {
    subbasins <- st_transform(subbasins, st_crs(land_cover))
  }
  res_tmp <-
    calculate_land_cover_frequency(subbasins, land_cover, id_col, FALSE)
  # One row per catchment, one column per land-cover class
  res <- res_tmp %>% pivot_wider(
    id_cols = catchment_id,
    names_from = class,
    values_from = freq,
    values_fill = 0
  )
  if (fill_missing_classes) {
    # Fill up entries with missing classes and set frequency of zero
    res[setdiff(codes, names(res))] <- 0
  }
  # Order columns ascending and round class frequencies
  # NOTE(review): across(1:ncol(res)) also covers catchment_id; harmless if
  # the id is numeric/integer but would fail for character ids — verify.
  res <- res %>%
    relocate(catchment_id, intersect(codes, names(res))) %>%
    mutate(across(1:ncol(res), round, 4))
  out_file <- paste0(output_path, file_path_sans_ext(basename(path)), "_", res_file_suff, ".csv")
  write_csv(res, out_file)
  # Fix: message previously used "/n" instead of "\n" and misspelled
  # "Successfully".
  cat(sprintf("\nSuccessfully wrote result to %s\n", out_file))
}
|
35e55b1f495a27aa2cb3de82950e23a9c9a80f05
|
1d7e8eaef903f93803ad720c4914bff2e03e5fcc
|
/TTR_Interpolation/0_Utility.R
|
214700c2899252dfdd68c6a117d51f327cb812cf
|
[] |
no_license
|
onthejeep/TTR_Interpolation
|
48950518db2d8da19361637b420358c0dc158b7c
|
b6ff24de57194a0bf9f7f7734c7f6630d7c1fe29
|
refs/heads/master
| 2021-05-07T00:52:35.656806
| 2017-12-14T22:04:19
| 2017-12-14T22:04:19
| 110,288,687
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,041
|
r
|
0_Utility.R
|
# Parse a comma-separated layer-size spec (e.g. '32,32,16,8') into a
# numeric vector.
Parse.Structure = function(structure = '32,32,16,8')
{
  pieces = unlist(strsplit(structure, split = ',', fixed = TRUE));
  return(as.numeric(pieces));
}
# Show the hot-destination table (unloading cell counts, most frequent
# first) from an open DBI/SQLite connection. Side effect: opens the result
# in a View() window; no useful return value.
Find.HotDestination = function(sqlite)
{
  SelectCommand = sprintf("
select [UnLoading.ColIndex], [UnLoading.RowIndex], Number
from [hot_destination]
order by Number desc");
  Result = dbGetQuery(sqlite, SelectCommand);
  View(Result);
  # Fix: dbGetQuery() returns a plain data.frame and clears its own result
  # set; the previous dbClearResult(Result) call errored on every run.
}
# The conversion follows the standard of the built-in function dataprt(WEEKDAY, [timestamp]) in SQL Server
# Convert a weekday name to its number, following SQL Server's
# datepart(WEEKDAY, ...) convention (Sunday = 1 ... Saturday = 7).
# Unrecognized names return NA.
Find.DOW = function(dowName = 'Monday')
{
  DowNumber = NA;
  # Fix: removed the trailing comma after the 'Saturday' alternative, which
  # created an empty argument in the switch() call and raised an
  # "argument is empty" error on every invocation.
  switch(dowName,
         'Sunday' = { DowNumber = 1 },
         'Monday' = { DowNumber = 2 },
         'Tuesday' = { DowNumber = 3 },
         'Wednesday' = { DowNumber = 4 },
         'Thursday' = { DowNumber = 5 },
         'Friday' = { DowNumber = 6 },
         'Saturday' = { DowNumber = 7 }
  );
  return(DowNumber);
}
# Map a 'hour:half' label (e.g. '3:secondhalf') to a half-hour slot index:
# hour*2 + 1 for 'firsthalf' (or anything else), hour*2 + 2 for 'secondhalf'.
Find.TimeIndex = function(time)
{
  parts = strsplit(time, ':')[[1]];
  slot_hour = as.numeric(parts[1]);
  # Anything other than 'secondhalf' falls back to the first half-hour,
  # mirroring the original switch() default.
  half_offset = if (identical(parts[2], 'secondhalf')) 2 else 1;
  return(slot_hour * 2 + half_offset);
}
# colIndex and rowIndex ranges from [1, 100]
# colIndex and rowIndex ranges from [1, 100]
# Zero-based cell id on a 100x100 grid, column-major: 100 cells per column.
Find.FID = function(colIndex, rowIndex)
{
  return((colIndex - 1) * 100 + (rowIndex - 1));
}
# Inverse of Find.FID: recover c(Col, Row), both 1-based, from a 0-based
# cell id on the 100x100 column-major grid.
# Fix: the previous ceiling/modulo arithmetic was off by one at column
# boundaries (e.g. fid = 0 gave Col = 0, fid = 100 gave Col = 1 instead
# of 2); integer division recovers the exact inverse.
Find.Row.Col = function(fid)
{
  Col = fid %/% 100 + 1;
  Row = fid %% 100 + 1;
  return(c(Col, Row));
}
# ---- Unit Test ----
# Manual smoke test: open the hot-OD SQLite file, list its tables, and run
# Find.HotDestination against it. Side effects only (prints, opens a View);
# relies on Database.Open.Sqlite defined elsewhere in the project.
UnitTest.Find.HotDestination = function()
{
  Connection = Database.Open.Sqlite('Config/hot_od.sqlite');
  Tables = dbListTables(Connection);
  print(Tables);
  Find.HotDestination(Connection);
  # Always release the connection when done.
  dbDisconnect(Connection);
}
# Manual smoke test for the hot-origin query against SQL Server.
# NOTE(review): Find.HotOrigin is not defined in this file — presumably it
# lives in another project source; confirm before relying on this test.
# Argument meaning (inferred from the helpers above, verify): destination
# col 45 / row 47, time slot '3:secondhalf', Tuesday, plus a limit/count.
UnitTest.Find.HotOrigin = function()
{
  Connection = Database.Open.SqlServer();
  Result = Find.HotOrigin(Connection, 45, 47, '3:secondhalf', 'Tuesday', 5);
  close(Connection);
  return(Result);
}
|
a8d816d5234ecf9448ec89c32e168ec699e75460
|
ed2aa34036f33d1af64648b5cbafddaa77552ec8
|
/notebooks/data_trimming_util.R
|
e54d0e17b85bf43fcaa180c628c1b72df8e39777
|
[] |
no_license
|
shihuang047/SoH_manuscript
|
a86cf8fb20bc2c68b884b74ad2fb01a40e6093c4
|
3d338bad3f7dcadf26eae206e472963b9a2ae424
|
refs/heads/master
| 2022-12-11T17:06:07.312339
| 2020-09-09T20:07:08
| 2020-09-09T20:07:08
| 276,764,667
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,036
|
r
|
data_trimming_util.R
|
#--------------------------------------------------
# Packages required by the trimming utilities below.
p <- c("biomformat","reshape2","randomForest", "optparse", "ade4", "doMC",
       "ggplot2", "RColorBrewer", "vegan", "xgboost", "caret")
# Install `p` from CRAN if absent, then attach it quietly.
# Fix: spelled out `dependencies = TRUE` — `dep =` relied on partial
# argument matching of install.packages().
usePackage <- function(p) {
  if (!is.element(p, installed.packages()[,1]))
    install.packages(p, dependencies = TRUE, repos = "http://cran.us.r-project.org")
  suppressWarnings(suppressMessages(invisible(require(p, character.only = TRUE))))
}
invisible(lapply(p, usePackage))
# Replace placeholder "missing" tokens (not provided / not applicable /
# NA / none, in their observed capitalizations) with real NA in every
# column of a metadata data.frame, preserving the data.frame structure.
# Fix: the original row-wise apply() discarded its result (and would have
# coerced the frame to a matrix), so `md` was returned unchanged; the
# per-column replacement below actually applies the normalization.
normalize_NA_in_metadata<-function(md){
  na_tokens <- c("not provided", "Not provided", "Not Provided",
                 "not applicable", "Not applicable",
                 "NA", "na", "Na",
                 "none", "None", "NONE")
  md[] <- lapply(md, function(col) {
    col[col %in% na_tokens] <- NA
    col
  })
  md
}
# Drop metadata columns that carry no information, i.e. columns with a
# single unique value (including all-NA columns).
# Improvement: iterate over columns with vapply() instead of apply(),
# which coerced the data.frame to a character matrix first.
discard_uninfo_columns_in_metadata<-function(md){
  noninfo_idx <- vapply(md, function(col) length(unique(col)) == 1, logical(1))
  md<-md[!noninfo_idx]
  md
}
# Clean a metadata frame: sort rows by sample id, normalize missing-value
# placeholders to NA, drop constant columns, and optionally keep only
# numeric or only factor columns. A single-column frame is only row-sorted.
# `filter_cols_by_class`: NULL (keep all), "numeric", or "factor".
trim_metadata<-function(md, filter_cols_by_class=NULL){
  if(length(md)==1){
    md<-data.frame(md[order(rownames(md)),])
    # NOTE(review): this assigns colnames(md) to itself and stores the same
    # value in test_all_group (unused afterwards) — looks like leftover
    # debugging; kept as-is.
    test_all_group<-colnames(md)<-colnames(md)
  }else{
    md<-md[order(rownames(md)), ]
    md<-normalize_NA_in_metadata(md)
    md<-discard_uninfo_columns_in_metadata(md)
    if(!is.null(filter_cols_by_class)){
      # Column-class masks; character columns match neither and are dropped
      # whichever filter is chosen.
      idx_numeric<-sapply(md,class)=="numeric"
      idx_factor<-sapply(md,class)=="factor"
      if(filter_cols_by_class=="numeric"){
        md<-md[, idx_numeric]
      }else if(filter_cols_by_class=="factor"){
        md<-md[, idx_factor]
      }else{
        stop("The filter_cols_by_class should be one of numeric and factor.")
      }
    }
  }
  md
}
# Remove all-zero rows (samples) and/or all-zero columns (features) from a
# numeric feature table. With both flags TRUE, rows are filtered first and
# columns are judged on the row-filtered table.
# Fix: the combined branch previously subset `data` (not the row-filtered
# `result`) when dropping columns, silently discarding the row filter.
filter_features_allzero<-function(data, samples=TRUE, features=TRUE){
  if(samples && features){
    result<-data[which(apply(data, 1, sum)!=0), ]
    result<-result[, which(apply(result, 2, sum)!=0)]
  }else if(samples && !features){
    result<-data[which(apply(data, 1, sum)!=0), ]
  }else if(!samples && features){
    result<-data[, which(apply(data, 2, sum)!=0)]
  }else{
    stop("Nothing has been done!")
  }
  result
}
# Keep only features (columns) whose prevalence — the number of non-zero
# samples — exceeds `prev` times the sample count.
filter_features_by_prev <- function(data, prev=0.001){
  nonzero_counts <- colSums(data != 0)
  data[, which(nonzero_counts > prev * nrow(data))]
}
# sink('./tests/test_data.txt')
# cat('\t')
# write.table(test_x, quote=FALSE, sep='\t', row.names = T)
# sink()
# test_metadata0<-test_metadata
# test_metadata<-test_metadata0[, 1:5]
# sink('./tests/test_metadata0.txt')
# cat('SampleID\t')
# write.table(test_metadata0, quote=FALSE, sep='\t', row.names = T)
# sink()
#
# Drop the samples whose response `y` is NA, keeping feature rows and the
# response in sync. Returns list(data_k = kept rows, y_k = kept responses).
filter_samples_by_NA_in_y <- function(data, y){
  keep <- !is.na(y)
  list(data_k = data[keep, ], y_k = y[keep])
}
# Drop samples whose metadata value in `target_field` is missing —
# either a real NA or one of the literal placeholders 'not applicable' /
# "not provided". Requires data and metadata rows to be aligned by sample
# id. Returns list(data = ..., metadata = ...) with the kept rows, and
# reports the kept count via cat().
filter_samples_by_NA_in_target_field_of_metadata <- function(data, metadata, target_field){
  if(!identical(rownames(data), rownames(metadata)))stop("The sample IDs should be idenical in feature table and metadata!")
  idx<-which(metadata[, target_field]!='not applicable' &
               metadata[, target_field]!="not provided" &
               !is.na(metadata[, target_field]) )
  metadata_k<-metadata[idx, ]
  data_k<-data[idx, ]
  cat("The number of samples kept (after filtering out samples with NA values in ",target_field,"): ", nrow(metadata_k) ,"\n")
  result<-list()
  result$data<-data_k
  result$metadata<-metadata_k
  result
}
# Restrict the feature table and the metadata to their shared sample ids
# (matched by rownames, in feature-table order), reporting the resulting
# sizes. Returns list(data = ..., metadata = ...).
filter_samples_by_sample_ids_in_metadata <- function(data, metadata){
  shared_ids <- intersect(rownames(data), rownames(metadata))
  kept_data <- data[shared_ids, ]
  cat("The number of samples in feature table (after filtering out samples with no metadata): ", nrow(kept_data) ,"\n")
  kept_metadata <- metadata[shared_ids, ]
  cat("The number of samples metadata (after filtering out samples with no metadata): ", nrow(kept_metadata) ,"\n")
  cat("The sample IDs are idenical in feature table and metadata: ", identical(rownames(kept_data), rownames(kept_metadata)), "\n")
  list(data = kept_data, metadata = kept_metadata)
}
# Keep only samples whose total read count (row sum) exceeds `cutoff`,
# subsetting data and metadata in lockstep; stops when nothing survives.
# Returns list(data = ..., metadata = ...).
filter_samples_by_seq_depth<-function(data, metadata, cutoff=1000){
  deep_enough <- which(rowSums(data) > cutoff)
  cat("The number of kept samples with more than ",cutoff," reads: ", length(deep_enough), "\n")
  if (length(deep_enough) == 0) {
    stop("The read count of all samples is less than sequencing depth threshold!")
  }
  #metadata_k$Seq_depth<-rowSums(df_k)
  #p<-ggplot(metadata_k, aes(Seq_depth)) + geom_histogram() + xlim(1000, 70000) + theme_bw()
  #p
  list(data = data[deep_enough, ], metadata = metadata[deep_enough, ])
}
# Coerce a response vector for modeling: regression (reg = TRUE) wants a
# numeric vector (factors/characters go through as.character first so
# factor levels are not used as codes); classification (reg = FALSE)
# wants a factor.
convert_y_to_numeric<-function(y, reg=TRUE){
  if (!reg) {
    factor(y)
  } else if (is.numeric(y)) {
    y
  } else {
    as.numeric(as.character(y))
  }
}
# Restrict train and test feature tables to their common columns (matched
# by colnames), reporting how many features survive and what fraction of
# each table that represents. Returns list(train_x_shared, test_x_shared).
keep_shared_features<-function(train_x, test_x){
  shared <- intersect(colnames(train_x), colnames(test_x))
  cat("Number of features kept:", length(shared), "\n")
  cat("The proportion of commonly shared features in train and test dataset respectively: \n")
  cat("Train data: ", length(shared)/ncol(train_x), "\n")
  cat("Test data: ", length(shared)/ncol(test_x), "\n")
  list(train_x_shared = train_x[, shared], test_x_shared = test_x[, shared])
}
#' feature metadata
#' feature metadata
# Annotate `tab` with feature metadata: keep only the fmetadata rows whose
# id (column `fmetadata_id_col`) occurs in `tab`'s id column, then merge
# the two frames on those id columns (inner join; unmatched tab rows drop).
add_ann<-function(tab, fmetadata, tab_id_col=1, fmetadata_id_col=1){
  relevant <- fmetadata[fmetadata[, fmetadata_id_col] %in% tab[, tab_id_col], ]
  merge(tab, relevant, by.x = tab_id_col, by.y = fmetadata_id_col)
}
# Row-bind a list of vectors of unequal length, right-padding the shorter
# ones with NA so every row has the length of the longest vector.
rbind.na<-function(l){
  width <- max(lengths(l))
  padded <- lapply(l, function(v) c(v, rep(NA, width - length(v))))
  do.call(rbind, padded)
}
# Split a semicolon-delimited taxonomy string column (`Taxon`) into seven
# rank columns (kingdom..species, NA-padded via rbind.na) and append them
# to the input data.frame.
expand_Taxon<-function(df, Taxon){
  rank_pieces <- strsplit(as.character(df[, Taxon]), '; ')
  taxa_df <- rbind.na(rank_pieces)
  colnames(taxa_df) <- c("kingdom","phylum","class","order","family","genus","species") #"kingdom",
  data.frame(df, taxa_df)
}
|
c317bb0b36efdd0c97a627b1da9904e32bb0defa
|
0d92a20f2f35dcfcd572c52f5e3b4184279bfa04
|
/esm_emics.R
|
3084879c8c018aa38548ddf8333583358c22702e
|
[] |
no_license
|
richardcode/cmip5_anal
|
a578158209a67e2cae796a1d12d71d5afc8f1661
|
1d5588f177ac4d1eeb92d2d958a86b819724e1b0
|
refs/heads/master
| 2020-06-16T14:31:59.559643
| 2017-04-12T14:31:03
| 2017-04-12T14:31:03
| 75,091,699
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,319
|
r
|
esm_emics.R
|
source("tcre_var_functs.R")

#Load in the data from the CMIP5 ensembles
cmip5_data <- read.csv('./Data/ESM_cmip5_tempems.csv')

# Models treated as EMICs; every other model in the table is kept as an ESM.
esms <- c('Bern3D','DCESS','GENIE','IGSM','UVi')
# Fix: the subsets below test membership against `emics`, which was never
# defined (the vector above was bound to `esms`), so the script failed with
# "object 'emics' not found". Alias it here; `esms` is kept unchanged in
# case later code refers to it.
emics <- esms

emics_data <- cmip5_data[cmip5_data$Model %in% emics,]
esms_data <- cmip5_data[!(cmip5_data$Model %in% emics),]

# Per-scenario subsets; the dropped columns (6 : 2004-1850+6) remove the
# historical 1850-2004 annual columns after 5 leading metadata columns.
emics_rcp26_data <- emics_data[(emics_data$Scenario=='RCP26'),-c(6:(2004-1850+6))]
emics_rcp45_data <- emics_data[(emics_data$Scenario=='RCP45'),-c(6:(2004-1850+6))]
emics_rcp6_data <- emics_data[(emics_data$Scenario=='RCP6'),-c(6:(2004-1850+6))]
emics_rcp85_data <- emics_data[(emics_data$Scenario=='RCP85'),-c(6:(2004-1850+6))]
esms_rcp26_data <- esms_data[(esms_data$Scenario=='RCP26'),-c(6:(2004-1850+6))]
esms_rcp45_data <- esms_data[(esms_data$Scenario=='RCP45'),-c(6:(2004-1850+6))]
esms_rcp6_data <- esms_data[(esms_data$Scenario=='RCP6'),-c(6:(2004-1850+6))]
esms_rcp85_data <- esms_data[(esms_data$Scenario=='RCP85'),-c(6:(2004-1850+6))]

library(ggplot2)
# Colorblind-safe palette for the four RCP scenarios.
cbPalette <- c("#101FDD", "#0DCAEA", "#DDB510", "#EA0D0D")
# Warming thresholds (deg C) for which carbon budgets are computed below.
warm_threshs <- c(0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0)
for (warm_thresh in warm_threshs) {
out_rcp26_emics <-calc_budget_dist_allmods(warm_thresh,emics_rcp26_data,'Temperature|rel to 1861-80','Total anthropogenic carbon flux',c(2005:2159))
out_rcp45_emics <-calc_budget_dist_allmods(warm_thresh,emics_rcp45_data,'Temperature|rel to 1861-80','Total anthropogenic carbon flux',c(2005:2159))
out_rcp6_emics <-calc_budget_dist_allmods(warm_thresh,emics_rcp6_data,'Temperature|rel to 1861-80','Total anthropogenic carbon flux',c(2005:2159))
out_rcp85_emics <-calc_budget_dist_allmods(warm_thresh,emics_rcp85_data,'Temperature|rel to 1861-80','Total anthropogenic carbon flux',c(2005:2159))
out_rcp26_esms <-calc_budget_dist_allmods(warm_thresh,esms_rcp26_data,'Temperature|rel to 1861-80','Total anthropogenic carbon flux',c(2005:2159))
out_rcp45_esms <-calc_budget_dist_allmods(warm_thresh,esms_rcp45_data,'Temperature|rel to 1861-80','Total anthropogenic carbon flux',c(2005:2159))
out_rcp6_esms <-calc_budget_dist_allmods(warm_thresh,esms_rcp6_data,'Temperature|rel to 1861-80','Total anthropogenic carbon flux',c(2005:2159))
out_rcp85_esms <-calc_budget_dist_allmods(warm_thresh,esms_rcp85_data,'Temperature|rel to 1861-80','Total anthropogenic carbon flux',c(2005:2159))
df_all <- data.frame(matrix(nrow=0,ncol=4))
df_all <- rbind(df_all,matrix(c(rep('EMICs',length(out_rcp26_emics[[2]])),rep('RCP2.6',length(out_rcp26_emics[[2]])),out_rcp26_emics[[1]],out_rcp26_emics[[2]]/warm_thresh),ncol=4))
df_all <- rbind(df_all,matrix(c(rep('EMICs',length(out_rcp45_emics[[2]])),rep('RCP4.5',length(out_rcp45_emics[[2]])),out_rcp45_emics[[1]],out_rcp45_emics[[2]]/warm_thresh),ncol=4))
df_all <- rbind(df_all,matrix(c(rep('EMICs',length(out_rcp6_emics[[2]])),rep('RCP6.0',length(out_rcp6_emics[[2]])),out_rcp6_emics[[1]],out_rcp6_emics[[2]]/warm_thresh),ncol=4))
df_all <- rbind(df_all,matrix(c(rep('EMICs',length(out_rcp85_emics[[2]])),rep('RCP8.5',length(out_rcp85_emics[[2]])),out_rcp85_emics[[1]],out_rcp85_emics[[2]]/warm_thresh),ncol=4))
df_all <- rbind(df_all,matrix(c(rep('ESMs',length(out_rcp26_esms[[2]])),rep('RCP2.6',length(out_rcp26_esms[[2]])),out_rcp26_esms[[1]],out_rcp26_esms[[2]]/warm_thresh),ncol=4))
df_all <- rbind(df_all,matrix(c(rep('ESMs',length(out_rcp45_esms[[2]])),rep('RCP4.5',length(out_rcp45_esms[[2]])),out_rcp45_esms[[1]],out_rcp45_esms[[2]]/warm_thresh),ncol=4))
df_all <- rbind(df_all,matrix(c(rep('ESMs',length(out_rcp6_esms[[2]])),rep('RCP6.0',length(out_rcp6_esms[[2]])),out_rcp6_esms[[1]],out_rcp6_esms[[2]]/warm_thresh),ncol=4))
df_all <- rbind(df_all,matrix(c(rep('ESMs',length(out_rcp85_esms[[2]])),rep('RCP8.5',length(out_rcp85_esms[[2]])),out_rcp85_esms[[1]],out_rcp85_esms[[2]]/warm_thresh),ncol=4))
df_all[,3] <- as.numeric(as.vector(df_all[,3]))
df_all[,4] <- as.numeric(as.vector(df_all[,4]))
colnames(df_all) <- c('Ensemble','Scenario','Duration','Budget')
p <- ggplot(df_all, aes(Budget, colour = Scenario,linetype=Ensemble)) + geom_density(alpha=0.1) + labs(x = "Budget (GtC/°C)") + scale_colour_manual(values=cbPalette) + ggtitle(paste('Warming: ',as.character(warm_thresh),'°C',sep=''))
ggsave(paste('Figures/esmsemics_rcps_',as.character(warm_thresh),'.png',sep=''),plot=p,dpi=300,width=5,height=5)
}
|
f91da436bd0140535b7dc5d37dcb717cb79fb279
|
22f54f44eea58554d5599b0a2a99e53ca660ec54
|
/src/analysis_k562ctcf/get_data.R
|
ab0b1f4798fbbce5ed60ad22ea872d4acaffc04a
|
[] |
no_license
|
zrxing/sequence_clustering
|
294fe4bdef4bed29fa96e7b0fcdb922feb852b41
|
1858157b7724eeb7d35a89ba9adb45476cd30700
|
refs/heads/master
| 2020-04-06T07:05:21.015737
| 2016-07-21T19:06:05
| 2016-07-21T19:06:05
| 34,276,466
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 785
|
r
|
get_data.R
|
dir.name <- "~/projects/sequence_clustering"
library(multiseq)

# Width of each extracted region in bp.
lreg <- 2^10

# CTCF peak calls (BED) and the multiseq samplesheet describing the tracks.
peaks <- read.table(file.path(dir.name, "data", "k562ctcf", "wgEncodeBroadHistoneK562CtcfStdAlnRep0.bed"))
samplesheet <- file.path(dir.name, "src/analysis_k562ctcf", "samplesheet_k562ctcf")

# Centre a fixed-width window of length `lreg` on each peak.
peak.chr <- peaks[, 1]
peak.center <- ceiling((peaks[, 2] + peaks[, 3])/2)
peak.start <- peak.center - lreg/2
peak.end <- peak.start + lreg - 1
nreg <- length(peak.chr)

# Per-region signal: sum counts over samples at every base of the window.
data.sig <- matrix(0, nreg, lreg)
for (i in seq_len(nreg)) {
  region <- paste0(peak.chr[i], ":", peak.start[i], "-", peak.end[i])
  temp.data <- get.counts(samplesheet, region)
  data.sig[i, ] <- colSums(temp.data)
  print(i)
}

# BUG FIX: `peak.start` was listed twice in save() and `peak.end` was never
# saved; downstream code loading this .Robj would find peak.end missing.
save(data.sig, peak.chr, peak.center, peak.start, peak.end, file = file.path(dir.name, "results", "analysis_k562ctcf", "data.sig.Robj"))
|
e15bee23b5cdbf9ebfd81347145733acaf2689c6
|
79ec1f24c76048d81027c5d5069a0772d4d44597
|
/utilis.R
|
e4e487880788e8dfce3f528bc208e37034f40d10
|
[] |
no_license
|
ashakru/lymphDDX3X
|
19be776f60b70cf7c4c72ecd741b83ca5c7a93ad
|
983e16043d528ac655d29506653007a4e764742e
|
refs/heads/main
| 2023-06-14T12:21:39.440518
| 2021-07-08T14:53:38
| 2021-07-08T14:53:38
| 351,418,436
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,036
|
r
|
utilis.R
|
# NOTE(review): require() is used for a hard dependency; library() would
# fail loudly instead of merely warning if RColorBrewer is missing.
require(RColorBrewer)
# # Colour palettes
# Base diverging palette: 11-step Spectral plus grey for "other"/NA groups.
divergingPal <- c(brewer.pal(11, "Spectral"), "grey")
# Extended palette assembled from selected steps of several Brewer ramps.
divergingPal_long <- c("grey",
                    brewer.pal(9, "YlOrRd")[c(3,5,7,9)],
                    brewer.pal(9, "YlGnBu")[c(2,3,4,5,6,7,8,9)],
                    brewer.pal(9, "YlGn")[c(8,7,5)],
                    brewer.pal(9, "PuRd")[c(3,5,6,7)],
                    brewer.pal(9, "BuPu")[c(9,8,7,6,5,4,3,2)])
# Reorderings of the long palette used by the plotting code; index vectors
# fix a specific colour-to-group mapping (TODO: document which groups).
divergingPalFull <- divergingPal_long[c(5,4,3,2,6,7,16:14,13:8,17:30)]
divergingPalMed <- divergingPal_long[c(5,4,3,2,13:8,7,16:14,17:28)]
# # Vectorized strsplit
splitvec <- function(vector, split, select, merge = "_"){
  # For each string in `vector`: split on `split`, take the pieces at the
  # `select` positions, and if more than one piece was selected glue them
  # back together with `merge`. Always returns one string per input.
  # FIX: sapply() is type-unstable (its return type depends on the input);
  # vapply() with character(1) guarantees a character vector.
  processed <- vapply(vector, function(x){
    separated <- unlist(strsplit(x, split = split))[select]
    if (length(separated) > 1){
      paste(separated, collapse = merge)
    } else {
      separated
    }
  }, character(1))
  # Drop the names vapply copies over from the input vector.
  unname(processed)
}
# # Style for barplots
# ggplot2 theme for publication-style bar plots: classic base theme with
# enlarged black text, no y ticks, and right-hand legend.
# NOTE(review): `base_rect_size` is accepted but never forwarded to
# theme_classic() below -- confirm whether that is intentional.
nature_barplot <- function(base_size = 11,
                        base_family = "",
                        base_line_size = base_size / 170,
                        base_rect_size = base_size / 170){
  theme_classic(base_size = base_size,
                base_family = base_family,
                base_line_size = base_line_size) %+replace%
    theme(panel.border= element_blank(),
          axis.title.x = element_text(color="black", size=15, margin = margin(t=0, r=5, b=0, l=0)),
          axis.title.y = element_text(color="black", size=15, angle = 90, margin = margin(t=0, r=5, b=0, l=0)),
          axis.text.y = element_text(color="black", size=15, hjust=0.95,vjust=0.2),
          axis.text.x = element_text(color="black", size=15),
          #axis.line.x = element_line(color="black", size = 0.5),
          #axis.line.y = element_line(color="black", size = 0.5, hjust = 1),
          axis.ticks.y = element_blank(),
          #axis.ticks.x = element_blank(),
          legend.title = element_text(color="black", size=15),
          legend.text = element_text(color="black", size=15),
          legend.position = "right",
          strip.text = element_text(color="black", size=15, margin = margin(2,0,2,0, "mm")))
}
# ggplot2 theme for scatter plots: classic base theme with faint grey
# major/minor grid lines restored.
# NOTE(review): `base_rect_size` is accepted but never forwarded to
# theme_classic() -- confirm whether that is intentional.
nature_point <- function(base_size = 11,
                        base_family = "",
                        base_line_size = base_size / 22,
                        base_rect_size = base_size / 22){
  theme_classic(base_size = base_size,
                base_family = base_family,
                base_line_size = base_line_size) %+replace%
    theme(panel.grid.major = element_line(colour = "grey",size=0.2),
          panel.grid.minor = element_line(colour = "grey",size=0.1),
          strip.text = element_text(color="black", size=11, margin = margin(2,0,2,0, "mm")))
}
# # TMM normalization
tmm <- function(counts_matrix){
  # Normalise a raw count matrix with edgeR's trimmed-mean-of-M-values
  # (TMM) scaling, returning counts-per-million on the normalised library
  # sizes.
  require(edgeR)
  dge <- edgeR::DGEList(counts_matrix)
  normed <- edgeR::calcNormFactors(dge, method = "TMM")
  edgeR::cpm(normed)
}
# # Function to compute TPM
tpm <- function(counts, lengths) {
  # Transcripts Per Million: divide each feature's counts by its length,
  # then scale every sample (column) so its length-normalised counts sum
  # to one million. `lengths` is recycled down the rows of `counts`.
  # FIX: spell out TRUE instead of the reassignable alias `T`.
  density <- counts/lengths
  t(t(density) * 10^6 / colSums(density, na.rm = TRUE))
}
# # Low counts filtering
remove_low <- function(counts, min_counts, min_samples, annotcolumns=c()){
  # Drop features (rows) that do not exceed `min_counts` in more than
  # ceiling(min_samples * n_data_columns) samples. `annotcolumns` lists
  # column indices to exclude from the count check (they are still
  # returned in the result).
  data_cols <- setdiff(seq_len(ncol(counts)), annotcolumns)
  n_samples <- length(data_cols)
  keep <- rowSums(counts[, data_cols] > min_counts) > ceiling(min_samples * n_samples)
  as.data.frame(counts[keep, ])
}
# # Wrapper for Gene Ontology Analysis (simple case scenario)
# Runs clusterProfiler::enrichGO for the BP, CC and MF ontologies on a
# significant gene list vs a universe, computes an enrichment score
# (GeneRatio / BgRatio), and stacks the three result tables.
# NOTE(review): relies on clusterProfiler, org.Hs.eg.db, dplyr and tidyr
# being attached by the caller; the sample label "U2932" is hard-coded --
# TODO confirm whether it should be a parameter.
doGO <- function(signifList, universeList, name){
  toSave <- c()
  ontologies <- c("BP","CC", "MF")
  ontologyTab <- tibble()
  for (o in 1:3){
    # GO enrichment for one ontology; readable = T maps IDs to symbols.
    goTab<- enrichGO(gene = as.character(signifList),
                     universe = as.character(universeList),
                     OrgDb = org.Hs.eg.db,
                     ont = ontologies[o],
                     pAdjustMethod = "BH",
                     pvalueCutoff = 0.1,
                     qvalueCutoff = 0.2,
                     readable = T, minGSSize = 5)
    # Split "n/N" and "b/B" ratio strings into numeric parts and compute
    # the fold-enrichment score (n/N)/(b/B).
    goTab <- as.data.frame(goTab) %>%
      separate(GeneRatio, c("n", "N"), "/", remove = F) %>%
      separate(BgRatio, c("b", "B"), "/", remove = F) %>%
      mutate(n = as.numeric(n),
             N = as.numeric(N),
             b = as.numeric(b),
             B = as.numeric(B),
             EnrichmentScore = (n/N)/(b/B)) %>%
      # filter(n > 10,
      #        b > 20) %>%
      arrange(desc(EnrichmentScore)) %>%
      mutate(group = paste("GO", ontologies[o]),
             status = name,
             sample = "U2932")
    toSave <- goTab
    # Accumulate the per-ontology tables into one data frame.
    if(nrow(ontologyTab) == 0){
      ontologyTab <- toSave
    } else {
      ontologyTab <- ontologyTab %>%
        full_join(toSave)
    }
  }
  return(ontologyTab)
}
tableOrder <- function(vector, decreasing = FALSE) {
  # Return the distinct values of `vector` ordered by how often each one
  # occurs (ascending by default, descending when `decreasing = TRUE`).
  # Useful for setting factor levels by frequency.
  # FIX: default was the reassignable alias `F`; spelled out as FALSE.
  counts <- table(vector)
  names(counts)[order(as.numeric(counts), decreasing = decreasing)]
}
|
275dea06955ce8924096440de8d3669cc7bebeb7
|
bfc80df9cdabc1987f1b752a2c56c82929f5b7c8
|
/lab1_3.R
|
ef6517aa623b87b311d48c2e31f76779cd4e9310
|
[] |
no_license
|
leetimofey/RUDNprogs
|
672571fafea4b6825d98cc3b569807a64d6e4563
|
3836298ce1847a01a049550f62c76236288bb6a4
|
refs/heads/main
| 2023-02-17T17:32:52.722267
| 2021-01-12T15:15:52
| 2021-01-12T15:15:52
| 313,323,779
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 137
|
r
|
lab1_3.R
|
# Exercises with seq() and rep().
n=-5
seq(n-1)            # seq(-6): sequence from 1 down to -6
seq(9)              # 1 2 3 ... 9
rep(c("m", "w"), 5) # the pair ("m","w") repeated 5 times
rep(c(1:4), 3)      # the block 1 2 3 4 repeated 3 times
rep(c(4:1), c(3, 3, 3, 3))  # each of 4,3,2,1 repeated 3 times
rep(c(1:5), c(1:5))         # each value i repeated i times
rep(seq(1,11,by=2),c(2,2,2,2,2,2))  # each odd number 1..11 repeated twice
|
670ba7ee30d3e4519501cd02359a0cdf7ea30320
|
2e731f06724220b65c2357d6ce825cf8648fdd30
|
/UniIsoRegression/inst/testfiles/reg_2d/libFuzzer_reg_2d/reg_2d_valgrind_files/1612737184-test.R
|
4fb6453bd434fccf7fcb9c56ec5c770738f66d79
|
[] |
no_license
|
akhikolla/updatedatatype-list1
|
6bdca217d940327d3ad42144b964d0aa7b7f5d25
|
3c69a987b90f1adb52899c37b23e43ae82f9856a
|
refs/heads/master
| 2023-03-19T11:41:13.361220
| 2021-03-20T15:40:18
| 2021-03-20T15:40:18
| 349,763,120
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 352
|
r
|
1612737184-test.R
|
# Auto-generated fuzzer reproduction: replays a captured (malformed)
# argument list against the internal reg_2d entry point and prints the
# result structure. The extreme/NaN doubles and mismatched dims are
# intentional -- this input previously triggered a valgrind finding.
testlist <- list(metric = 0L, vec = NULL, vec = NULL, w_vec = structure(c(2.74343508233833e-260, 8.00276746564406e-255, 8.29550922761073e-316, 7.2911220195564e-304, 0), .Dim = c(5L, 1L)), y_vec = structure(c(4.94065645841247e-324, NaN, NaN, 3.78576699573368e-270), .Dim = c(2L, 2L)))
result <- do.call(UniIsoRegression:::reg_2d,testlist)
str(result)
|
cf73ec5e2d4d487477a0ab78748c043426239a9c
|
48d96957e322918327beabe20f2391628c2318ae
|
/R/Best_Models.R
|
6aee072338e3db2833285b27f87e1b00c9868436
|
[] |
no_license
|
adw96/CatchAll
|
9be39ed4e693a2dae54d96cf46fb6e835ab594f6
|
45d3d86509e0920ab92ccb997c00524a1624d62e
|
refs/heads/master
| 2020-06-27T13:10:32.707202
| 2018-09-07T16:57:21
| 2018-09-07T16:57:21
| 97,057,555
| 3
| 1
| null | 2018-02-27T20:30:40
| 2017-07-12T22:36:42
|
R
|
UTF-8
|
R
| false
| false
| 9,190
|
r
|
Best_Models.R
|
Best_Models <- function(bestCount, maximumObservation, frequencyTau10, bestGOF0,
                        bestAICc, GOFTest, cvrare) {
  ## Select "best" species-richness models and cutoffs (taus) from
  ## goodness-of-fit (bestGOF0) and AICc (bestAICc) arrays, filling a
  ## 10 x 2 table bestModelTau of (model id, tau) pairs.
  ##
  ## NOTE(review): this function appears to be a line-by-line port from a
  ## 0-indexed language (Java/C#) and is NOT runnable as written:
  ##  - R is 1-indexed: `bestCount[0]`, `bestModelTau[bm, 0]` and
  ##    `bestGOF0[0, 6, r]` select zero elements or error outright.
  ##  - `GOF5Test` is referenced below but the parameter is `GOFTest`.
  ##  - `freqMax`, `frequencyMaximum`, `maximumFrequency`, `WLRMSwitch`
  ##    and `freqTau10` are never defined here -- TODO confirm where they
  ##    are supposed to come from.
  ##  - The `Arrays.sort(testValue, order)` key/value sort was not
  ##    translated: the two sorted expressions below are discarded.
  ## how to get best count...
  bestModel <- rep(NA, maximumObservation + 1)
  bestModelTau <- matrix(NA, nrow=10, ncol=2)
  obsMax <- maximumObservation
  # flag encodes which filter tier produced candidate models; -1 = none.
  flag <- -1
  if (bestCount[1] > 0) {
    flag <- 1
  } else if (bestCount[2] > 0) {
    flag <- 2
  } else if (bestCount[0] > 0) {    # NOTE(review): [0] is invalid in R
    flag <- 0
  }
  ##Apply entirely different Criteria if all filters fail
  if (flag == -1 & bestCount[3] > 0) {
    flag <- 0
    testValue <- rep(NA, obsMax + 1)
    order <- rep(NA, obsMax + 1)    # NOTE(review): shadows base::order below
    for(r in 1:obsMax) { #index?
      # Pick, per tau r, the model (1..6) with the smallest positive AICc.
      minAICc <- 10000000
      for(m in 1:6) {
        if (bestAICc[flag, m, r] > 0.0 &
            bestAICc[flag, m, r] < minAICc) {
          bestModel[r] <- m
          minAICc <- bestAICc[flag, m, r]
        }
      }
      # NOTE(review): GOF5Test is undefined; the parameter is GOFTest.
      testValue[r] <- GOF5Test[bestModel[r], r]
      order[r] <- r
    }
    #sort the arrays based on K-V pairings
    testValueOrder <- order(testValue)
    # NOTE(review): these two sorted results are discarded (no assignment).
    testValue[testValueOrder]
    order[testValueOrder]
    ##Arrays.sort(testValue, order)
    bm <- 0
    b <- 0
    # Take up to 4 taus with positive test values as candidate models.
    while (bm < 4 & b <= maximumObservation) {
      if(testValue[b] > 0) {        # NOTE(review): b starts at 0 -- invalid index
        bestModelTau[bm, 0] <- bestModel[order[b]]
        bestModelTau[bm, 1] <- order[b]
        bm <- bm + 1
      }
      b <- b + 1
    }
    ##If there are candidate models proceed to find best ones
  } else if (flag >= 0) {
    # Per tau, pick the model with the smallest positive AICc.
    for(r in 1:maximumObservation) {
      minAICc <- 10000000
      for(m in 1:6) {
        if (bestAICc[flag, m, r] > 0.0 &
            bestAICc[flag, m, r] < minAICc) {
          bestModel[r] <- m
          minAICc <- bestAICc[flag, m, r]
        }
      }
    }
    maxGOF0 <- bestGOF0[flag, bestModel[frequencyTau10], frequencyTau10]
    for(r in 1:maximumObservation) {
      if (bestModel[r] > 0) {
        ##Find Best Model--largest tau with GOF0 >= 0.01
        if (bestGOF0[flag, bestModel[r], r] >= 0.01) {
          bestModelTau[0, 0] <- bestModel[r]   # NOTE(review): 0-index
          bestModelTau[0, 1] <- r
        }
        ##Find Good Model 1--tau with largest GOF0
        if (bestGOF0[flag, bestModel[r], r] >= maxGOF0) {
          bestModelTau[1, 0] <- bestModel[r]
          bestModelTau[1, 1] <- r
          maxGOF0 <- bestGOF0[flag, bestModel[r], r]
        }
        ##Find Good Model 2--largest tau with data
        if (bestGOF0[flag, bestModel[r], r] > 0) {
          bestModelTau[2, 0] <- bestModel[r]
          bestModelTau[2, 1] <- r
        }
      }
    }
    ##Find Good Model 3--tau closest to 10 with data
    if (bestGOF0[flag, bestModel[frequencyTau10], frequencyTau10] > 0) {
      bestModelTau[3, 0] <- bestModel[frequencyTau10]
      bestModelTau[3, 1] <- frequencyTau10
    } else {
      ##look > tau10
      t <- frequencyTau10 + 1 ##how to get frequencyTau10
      while (bestModelTau[3, 1] == 0 & t <= obsMax)
      {
        if (bestModel[t] > 0)
        {
          if (bestGOF0[flag, bestModel[t], t] > 0)
          {
            bestModelTau[3, 0] <- bestModel[t]
            bestModelTau[3, 1] <- t
          }
        }
        t <- t + 1
      }
      ##look < tau10
      t = frequencyTau10 - 1
      while (bestModelTau[3, 1] == 0 & t > 0)
      {
        if (bestModel[t] > 0)
        {
          if (bestGOF0[flag, bestModel[t], t] > 0)
          {
            bestModelTau[3, 0] <- bestModel[t]
            bestModelTau[3, 1] <- t
          }
        }
        t <- t - 1
      }
    }
  }
  ##Find Good Model 4--best WLRM
  ##if there are taus with G0FO >= 0.01, find the max tau
  # NOTE(review): freqMax, frequencyMaximum, WLRMSwitch are undefined here.
  if (freqMax > 0) {
    bestModelTau[4, 0] <- 6
    maxGOF0WLRM <- 0.01
    bestModelTau[4, 1] <- 0
    for (r in 1:frequencyMaximum) {
      ##determine whether to use logged (6) or unlogged(7) version
      if (WLRMSwitch[r] == 0) {
        if (bestGOF0[0, 6, r] >= maxGOF0WLRM) {
          bestModelTau[4, 0] <- 6
          bestModelTau[4, 1] <- r
        }
      }
      else if (WLRMSwitch[r] == 1) {
        if (bestGOF0[0, 7, r] >= maxGOF0WLRM) {
          bestModelTau[4, 0] <- 7
          bestModelTau[4, 1] <- r
        }
      }
    }
    ##if there are no taus with GOF0 >= 0.01
    ##use tau with highest GOF0
    if (bestModelTau[4, 1] == 0) {
      if (WLRMSwitch[1] == 0) {
        maxGOF0WLRM <- bestGOF0[0, 6, 1]
      } else if (WLRMSwitch[1] == 1)
        maxGOF0WLRM <- bestGOF0[0, 7, 1]
      # NOTE(review): maximumFrequency vs frequencyMaximum -- two spellings
      # of (presumably) the same quantity; verify.
      for (r in 2:maximumFrequency) {
        if (WLRMSwitch[r] == 0)
        {
          if (bestGOF0[0, 6, r] >= maxGOF0WLRM)
          {
            bestModelTau[4, 0] <- 6
            bestModelTau[4, 1] <- r
            maxGOF0WLRM <- bestGOF0[0, 6, r]
          }
        }
        else if (WLRMSwitch[r] == 1) {
          if (bestGOF0[0, 7, r] >= maxGOF0WLRM) {
            bestModelTau[4, 0] <- 7
            bestModelTau[4, 1] <- r
            maxGOF0WLRM <- bestGOF0[0, 7, r]
          }
        }
      }
    }
  }
  ##if there are none, leave blank
  if (bestModelTau[4, 1] == 0) {
    bestModelTau[4, 0] <- 0
  }
  ##Find Chao1 Model--all taus give the same answer
  bestModelTau[5, 0] <- 8
  bestModelTau[5, 1] <- freqTau10
  ##Find ACE/ACE1 Model closest to tau = 10
  # cvrare <= 0.8 selects ACE (model 9), otherwise ACE1 (model 10).
  if (cvrare <= 0.8) {
    bestModelTau[6, 0] <- 9
  } else {
    bestModelTau[6, 0] <- 10
  }
  bestModelTau[6, 1] = freqTau10
  ##Max Tau for Best Model
  if (flag >= 0) {
    bestModelTau[7, 0] <- bestModelTau[0, 0]
    bestModelTau[7, 1] <- maximumObservation
  }
  ##Max Tau for WLRM Model
  if (WLRMSwitch[freqMax] == 0) {
    bestModelTau[8, 0] <- 6
  } else if (WLRMSwitch[freqMax] == 1) {
    bestModelTau[8, 0] <- 7
  }
  ##if there are none
  if (bestModelTau[4, 0] == 0) {
    bestModelTau[8, 0] <- 0
  }
  bestModelTau[8, 1] <- freqMax
  ##ACE Model at tau = 10 or < 10 if necessary
  bestModelTau[9, 0] <- 9
  bestModelTau[9, 1] <- frequencyTau10
  ## some output thing
  ## OutputBestModelsAnalysis , need to pass in all the results?
}
# pass in what's already been outputted
# Collect the rows of the previously-written model output table that match
# the best (model, tau) pairs chosen by Best_Models, print them with a
# human-readable label, and return the collected rows.
# NOTE(review): like Best_Models, this looks like an unfinished port from a
# 0-indexed language and is NOT runnable as written:
#  - `nrows()` is not an R function (nrow()); `is.isNull()` does not exist;
#  - `aConditionHere` is a placeholder; `frequency`, `bestModelTau`,
#    `obsMax`, `se`, `lcb`, `ucb`, `r`, `s`, `t` are undefined globals;
#  - loops run `for(bm in 0:10)` and index `output[currRow,]` from 0;
#  - `T <- rep(NA, 7)` shadows TRUE for the rest of the function body;
#  - the final "Discounted Model" block sits outside the loop but tests
#    `bm == 9`, relying on the loop variable leaking out.
OutputBestModelsAnalysis <- function(output) {
  foundAnalysis <- rep(NA, 10)
  bestAnalysis <- rep(NA, 10) #string, not sure how used
  #output
  bestModels <- list()
  #reads in data from all of the outputs of the models output
  numRows <- nrows(output)
  currRow <- 0
  ## read the output
  for(bm in 0:10) {
    ## look at the entire row
    data <- output[currRow,]
    # Scan forward until the row matching this best model's tau is found.
    while(foundAnalysis[bm] == 0 && currRow < numRows) {
      ##strcmp? is it a string
      if(output[currRow,0] == aConditionHere && output[currRow, 1] == frequency[bestModelTau[bm,1]]) {
        foundAnalysis[bm] <- 1
        bestAnalysis[bm] <- data
      }
      currRow <- currRow + 1
    }
  }
  #remember index starts at 1
  # Human-readable labels, one per slot of bestModelTau.
  bestDescription <- list("Best Parm Model", "Parm Model 2a  ", "Parm Model 2b  ", "Parm Model 2c  ",
                          "WLRM           ", "Non-P 1        ",
                          "Non-P 2        ", "Parm Max Tau   ",
                          "WLRM Max Tau   ", "Non-P 3        ",
                          "Best Discounted")
  ## for non-parametric?
  T <- rep(NA, 7)
  sHatTotal <- 0
  SE <- 0
  ##Write out best analysis data for parametric models
  for(bm in 0:10) {
    if (bm < 5 || bm == 7 || bm == 8) {
      #for each column in the row
      analysis <- output[bm, ]
      print(bestDescription[bm + 1])
      if(foundAnalysis[bm] == 1) {
        if(bm == 5) {
          # do something
          #analysis[1] = "2";
        }
        #wrong but print to output
        print(analysis[0:2])
        if(!is.isNull(analysis[3:6])){
          bestModels <- c(bestModels, analysis[3:6])
        }
        ##certain stats not available for non parametric models
        if(!is.isNull(analysis[8:9])){
          bestModels <- c(bestModels, analysis[8:9])
        }
        ## save shatsubset and t's for best model
        if(bm == 0) {
          sHatTotal <- analysis[3]
          SE <- analysis[4]
        }
      }
    }
  }
  ##Discounted Model for Best Model
  if(foundAnalysis[0] == 1 && bm == 9) {
    ##Step down from Four Mixed to Three Mixed
    if (bestModelTau[0, 0] == 5) {
      #how to get these
      cStar <- DiscountedTFourToThreeExponentialModel(r, s, t, obsMax, sHatTotal, se, lcb, ucb)$cStar
    }
    ##Step down from Three Mixed to Two Mixed
    if (bestModelTau[0, 0] == 4) {
      cStar <- DiscountedThreeToTwoExponentialModel(r, s, t, obsMax, sHatTotal, se, lcb, ucb)$cStar
    }
  }
  bestModels
  # output <- data.frame("Total Number of Observed Species" = "dummy",
  #                      "Model" = "dummy",
  #                      "Tau" = "dummy",
  #                      "Observed Sp" = "dummy",
  #                      "Estimated Total Sp"= "dummy",
  #                      "SE" = "dummy",
  #                      "Lower CB" = "dummy",
  #                      "Upper CB" = "dummy",
  #                      "GOF0" = "dummy",
  #                      "GOF5" = "dummy")
}
|
ae0b1e6b6749024e4e15d3b578165e40f51c66b6
|
a667617b4a9ba5149714e6285c00f741170655ed
|
/R/diff_plot.R
|
446ae52646e4f66ec3c473a15101f282e99f98a4
|
[] |
no_license
|
lix2k3/Rcpm
|
0f387939fe56d3de218e62e21e6a2b1b40166ff5
|
b09849e5487b5a2a1e02eb5920dde96cde2b0a2f
|
refs/heads/master
| 2022-12-02T13:41:06.931097
| 2020-08-20T12:20:05
| 2020-08-20T12:20:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,594
|
r
|
diff_plot.R
|
#' @title Create difference plots
#'
#' @description Create difference plots to show up- or down-regulation of certain lipids.
#'
#' @param data data frame containing all information. See details for more information on the structure.
#' @param x what to plot on the x-axis, often the variable e.g. lipid
#' @param y what to plot on the y-axis, the difference
#' @param fill_by column with factor for filling the bar plot
#' @param facet_x facet in x direction with column
#' @param facet_y facet in y direction with column
#'
#' @return A ggplot2 plot is returned.
#'
#' @details \code{data} should be a data frame which contains at least all the parameters as columns.
#' No calculations are done within the function.
#'
#' @export
#' @importFrom dplyr %>%
#' @importFrom rlang enquo !!
#' @importFrom ggplot2 ggplot aes geom_bar facet_grid coord_flip guides xlab ylab vars
#' @importFrom methods is
#'
#' @author Rico Derks
#' @examples
#' set.seed(123)
#'
#' my_data <- data.frame(lipidname = c(paste("PA ", seq(30, 40, 2), ":0", sep = ""),
#' paste("PA ", seq(30, 40, 2), ":1", sep = ""),
#' paste("PA ", seq(30, 40, 2), ":2", sep = ""),
#' paste("PC ", seq(30, 40, 2), ":0", sep = ""),
#' paste("PC ", seq(30, 40, 2), ":1", sep = ""),
#' paste("PC ", seq(30, 40, 2), ":2", sep = ""),
#' paste("TG ", seq(50, 60, 1), ":0", sep = ""),
#' paste("TG ", seq(50, 60, 1), ":1", sep = ""),
#' paste("TG ", seq(50, 60, 1), ":2", sep = ""),
#' paste("TG ", seq(50, 60, 1), ":3", sep = ""),
#' paste("TG ", seq(50, 60, 1), ":4", sep = "")),
#' lipidclass = c(rep("PA", 18),
#' rep("PC", 18),
#' rep("TG", 55)),
#' difference = rnorm(n = 91,
#' mean = 0,
#' sd = 3e4),
#' versus = factor(x = "AvsB"))
#'
#' my_data$diff_grp <- as.factor(ifelse(my_data$difference > 0, "high", "low"))
#'
#' diff_plot(data = my_data,
#' x = lipidname,
#' y = difference,
#' fill_by = diff_grp,
#' facet_x = versus,
#' facet_y = lipidclass)
#'
diff_plot <- function(data, x, y, fill_by, facet_y, facet_x) {
  ## some error checking
  ## All six arguments are required: match.call() has length 7 (function
  ## name + 6 arguments) only when every argument is supplied.
  if (length(match.call()) <= 6) {
    stop("Not enough arguments passed... ")
  }
  ## check if data is a data frame
  if (!is(data, "data.frame")) {
    stop("'data' does not appear to be a data frame!")
  }
  ## is x a column in the dataframe
  if (!deparse(substitute(x)) %in% names(data)) {
    stop(paste0("'", deparse(substitute(x)), "' is not the name of a variable in '",deparse(substitute(data)),"'"))
  }
  ## is y a column in the dataframe
  if (!deparse(substitute(y)) %in% names(data)) {
    stop(paste0("'", deparse(substitute(y)), "' is not the name of a variable in '", deparse(substitute(data)), "'"))
  }
  ## is fill_by a column in the dataframe
  if (!deparse(substitute(fill_by)) %in% names(data)) {
    stop(paste0("'", deparse(substitute(fill_by)), "' is not the name of a variable in '", deparse(substitute(data)), "'"))
  }
  ## is facet_y a column in the dataframe
  if (!deparse(substitute(facet_y)) %in% names(data)) {
    stop(paste0("'", deparse(substitute(facet_y)), "' is not the name of a variable in '", deparse(substitute(data)), "'"))
  }
  ## is facet_x a column in the dataframe
  if (!deparse(substitute(facet_x)) %in% names(data)) {
    stop(paste0("'", deparse(substitute(facet_x)), "' is not the name of a variable in '", deparse(substitute(data)), "'"))
  }
  ## Capture the column expressions for tidy evaluation below.
  ## FIX: removed unused local `my_df <- enquo(data)`.
  x <- enquo(x)
  y <- enquo(y)
  fill_by <- enquo(fill_by)
  facet_y <- enquo(facet_y)
  facet_x <- enquo(facet_x)
  ## create the plot: horizontal bars per variable, facetted by the two
  ## facet columns, coloured by `fill_by`, legend suppressed.
  p <- data %>%
    ggplot(aes(x = !!x,
               y = !!y)) +
    geom_bar(aes(fill = !!fill_by),
             stat = "identity") +
    coord_flip() +
    facet_grid(rows = vars(!!facet_y),
               cols = vars(!!facet_x),
               scales = "free_y",
               space = "free_y") +
    ylab("Difference") +
    xlab("Lipid name") +
    # NOTE(review): logical FALSE in guides() is deprecated since
    # ggplot2 3.3.4; prefer guides(fill = "none") once the minimum
    # supported ggplot2 version allows it.
    guides(fill = FALSE)
  ## return the ggplot2 object
  return(p)
}
|
d2a52cc27e4b48b04865fe806c6fa0c60e457022
|
cc7139789c2e524d61e11c92a033666246d2da32
|
/4_MultivariateAnalysis/notUsed/FA.R
|
19c7f314d9b432162b4c3e30b1a64c216c4f4bd4
|
[] |
no_license
|
dmpe/bachelor
|
e99dc0de17d8cf54455dfec08c15e32f1916f74c
|
7611bb2556cc728e64d7be50a47c32a055b6ce9d
|
refs/heads/master
| 2021-01-10T13:00:51.218870
| 2015-10-02T15:52:25
| 2015-10-02T15:52:25
| 36,564,092
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,314
|
r
|
FA.R
|
# Some small analysis using Factor analysis
library(psych)
library(GPArotation)
# NOTE(review): sourcing an external script from a remote URL at run time
# is fragile and a supply-chain risk; consider vendoring the file.
source("http://www.tcnj.edu/~ruscio/EFA%20Comparison%20Data.R")
# http://rtutorialseries.blogspot.de/2011/10/r-tutorial-series-exploratory-factor.html
# NOTE(review): `corelationMat`, `joinedDB.8` and `joinedDB.6` are not
# defined in this script -- presumably created by the sourced script or an
# earlier session; verify before running.
solution <- fa(r = corelationMat, nfactors = 6, rotate = "oblimin", fm = "minres", SMC = FALSE)
solution
# http://personality-project.org/r/vss.html my.vss <- VSS(na.omit(joinedDB.8[2:7]),n=6, rotate='none',diagonal=FALSE)
# VSS.plot(my.vss)
VSS.scree(corelationMat, main = "scree plot")
# https://stats.stackexchange.com/questions/32669/vss-criterion-for-the-number-of-factors-in-rs-psych-package
# https://stats.stackexchange.com/questions/31948/looking-for-a-step-through-an-example-of-a-factor-analysis-on-dichotomous-data/32136#32136
faPCdirect <- fa.poly(corelationMat, nfactors = 2, rotate = "varimax")  # polychoric FA
factor.plot(faPCdirect$fa, cut = 0.5)
fa.diagram(faPCdirect)
# Compare against comparison-data criterion and parallel analysis.
EFA.Comp.Data(Data = na.omit(joinedDB.8[2:7]), F.Max = 6, Graph = T)
vss(na.omit(joinedDB.8[2:7]), n = 2)
fa.parallel(na.omit(joinedDB.8[2:7]))
# Scree/parallel analysis on the correlation matrix of joinedDB.6.
# NOTE(review): parallel(), nScree() and plotnScree() presumably come from
# the nFactors package, which is never loaded here -- confirm.
ev <- eigen(cor(joinedDB.6))  # get eigenvalues
ap <- parallel(subject = nrow(joinedDB.6), var = ncol(joinedDB.6), rep = 100, cent = 0.05)
nS <- nScree(x = ev$values, aparallel = ap$eigen$qevpea)
nS$Components
nS$Analysis
plotnScree(nS)
|
13d00be98aef62dc0a3234322678b41f2e964a10
|
80751ed622a43695c64ac86122f28c07cf5facfb
|
/R/plotGeneScatterPatientData.R
|
b4a6e2d9177c22bb1fd20de83ef6f4e3de108f5e
|
[] |
no_license
|
komalsrathi/marislab-webportal
|
4667c3a3d9fd5b22f75174bec47c2e9029b68b57
|
7c295a6f438a7c4ecd775c9fae0aa9266147e799
|
refs/heads/master
| 2020-09-11T06:34:29.479666
| 2020-02-25T19:43:21
| 2020-02-25T19:43:21
| 221,972,910
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,463
|
r
|
plotGeneScatterPatientData.R
|
####################################
# plot scatter plot of 2 genes
# Authors: Pichai Raman, Komal Rathi
# Organization: DBHi, CHOP
####################################
# plot scatter plot of 2 genes##################
plotGeneScatterPatientData <- function(datatype, gene1, gene2, myDataExp, myDataAnn, log, colorby, correlation, customtheme)
{
  # Scatter plot of gene1 vs gene2 expression across samples, with a
  # correlation estimate and p-value in the plot title, optionally
  # coloured by an annotation column. Returns list(plotly_plot, cor_table).
  # NOTE(review): relies on reshape2 (melt/dcast), plyr, ggplot2 and
  # plotly being attached by the caller, and on a helper getCorr()
  # defined elsewhere in the package -- confirm it is in scope.
  # get expression and annotation of the selected dataset
  # Reshape expression so each row is a sample with one column per gene.
  myDataExp <- myDataExp[rownames(myDataExp) %in% c(gene1, gene2),]
  myDataExp$gene <- rownames(myDataExp)
  myDataExp.m <- melt(data = myDataExp, id.vars = 'gene')
  myDataExp.c <- dcast(data = myDataExp.m, formula = variable~gene, value.var = 'value')
  colnames(myDataExp.c)[1] = "Sample"
  #For title correlation and p-value
  cor <- cor.test(myDataExp.c[,gene1], myDataExp.c[,gene2], method = correlation)
  # p-values of exactly 0 are reported as "< 2.2e-16"; others are
  # formatted in scientific notation.
  if(is.na(cor$p.value)){
    cor.pval <- NA
  } else if(cor$p.value==0){
    cor.pval <- '< 2.2e-16'
  } else if(cor$p.value>0){
    cor.pval <- format(cor$p.value, scientific = T, digits = 3)
  }
  if(is.na(cor$estimate)){
    cor.est <- NA
  } else if(cor$estimate==1){
    cor.est <- 1
  } else if(cor$estimate!=1){
    cor.est <- format(cor$estimate, scientific = T, digits = 3)
  }
  cor.title <- paste("Cor = ", cor.est, " | P-Val = ", cor.pval, sep="")
  # modify gene name, dashes present
  # Backtick-quote gene names so aes_string() tolerates dashes.
  gene1.mut <- paste('`',gene1,'`',sep = '')
  gene2.mut <- paste('`',gene2,'`',sep = '')
  # plot log values?
  # Datasets without "FPKM" in their name are RMA (log2) arrays: unlogging
  # means exponentiating. FPKM datasets are on the raw scale: logging
  # means log2(x + 1).
  if(length(grep('FPKM',datatype))==0) {
    if(log==FALSE) {
      y.axis <- "RMA"
      if(gene1 == gene2){
        myDataExp.c[,c(gene1)] <- 2^(myDataExp.c[,c(gene1)])
      } else {
        myDataExp.c[,c(gene1,gene2)] <- 2^(myDataExp.c[,c(gene1,gene2)])
      }
    } else {
      y.axis <- "log2(RMA)"
    }
  } else {
    if(log==FALSE)
    {
      y.axis <- "FPKM"
    } else {
      y.axis <- "log2(FPKM)"
      if(gene1 == gene2){
        myDataExp.c[,c(gene1)] <- log2(myDataExp.c[,c(gene1)]+1)
      } else {
        myDataExp.c[,c(gene1,gene2)] <- log2(myDataExp.c[,c(gene1,gene2)]+1)
      }
    }
  }
  # add annotation data to expression set
  myDataExp.c <- merge(myDataExp.c, myDataAnn, by.x = "Sample", by.y = 'row.names')
  # eliminate confusion between MYCN gene and status
  # After the merge, a MYCN expression column and a MYCN status annotation
  # column collide as MYCN.x/MYCN.y; rename and redirect colorby to the
  # status column.
  if(length(grep('MYCN',colnames(myDataExp.c)))>1) {
    coln <- grep("MYCN.x", colnames(myDataExp.c))
    colnames(myDataExp.c)[coln] <- 'MYCN'
    coln <- grep("MYCN.y", colnames(myDataExp.c))
    colnames(myDataExp.c)[coln] <- 'MYCNS'
    if(colorby=="MYCN")
    {
      colorby = "MYCNS"
    }
  }
  # With a grouping column, compute per-group correlations via getCorr();
  # otherwise a single overall correlation table.
  if(colorby != "None"){
    correlations <- plyr::ddply(.data = myDataExp.c, .variables = colorby, .fun = function(x) getCorr(dat = x, gene1 = gene1, gene2 = gene2, correlation = correlation))
    p <- ggplot(data = myDataExp.c, aes_string(x = gene1.mut, y = gene2.mut, fill = colorby, label = 'Sample')) +
      geom_point(size = 3, shape = 21, colour = 'black', stroke = 0.2) + customtheme + ggtitle(cor.title)
  } else {
    correlations <- data.frame(Cor = cor.est, Pval = cor.pval)
    p <- ggplot(data = myDataExp.c, aes_string(x = gene1.mut, y = gene2.mut, label = 'Sample')) +
      geom_point(size = 3, shape = 21, colour = 'black', stroke = 0.2, fill = "gray") + customtheme + ggtitle(cor.title)
  }
  # Convert to plotly and set axis titles with the unit chosen above.
  p <- plotly_build(p)
  p$x$layout$yaxis$title <- paste0(gene2,' (', y.axis,')')
  p$x$layout$xaxis$title <- paste0(gene1,' (', y.axis,')')
  newList <- list(p, correlations)
  return(newList)
}
|
a041f494ba33ed92397fd3ba3e0e0100fc6ed25e
|
af4a387877fe45bee8cc8523f20d9c839e1c70d1
|
/man/vt.uniqtl.pop.C.Rd
|
79bb8895e23e0365a6999783743a19e3424f60cb
|
[] |
no_license
|
cran/STARSEQ
|
120868533ec0218bb1190d358e1ab248981a3abc
|
60670fb5c24b6cb676e5a80d89ff1a337b973ff4
|
refs/heads/master
| 2021-01-23T11:09:07.420572
| 2012-05-15T00:00:00
| 2012-05-15T00:00:00
| 17,693,619
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,035
|
rd
|
vt.uniqtl.pop.C.Rd
|
\name{vt.uniqtl.pop.C}
\alias{vt.uniqtl.pop.C}
\title{VT Test for Population-based Studies of Quantitative Trait
}
\description{
This function implements the variable threshold test
}
\usage{
vt.uniqtl.pop.C(dat.ped, par.dat, maf.vec, maf.cutoff, no.perm = 1000, alternative = c("two.sided","greater","less"))
}
\arguments{
\item{dat.ped}{
A list of ped files.
}
\item{par.dat}{
A list of parameters for ascertainment. The default in an empty list.
}
\item{maf.vec}{
User specified minor allele frequency vector
}
\item{maf.cutoff}{
Upper minor allele frequency cutoff for rare variant analysis
}
\item{no.perm}{
The number of permutations. The default is 1000 for the VT
test. Adaptive permutation is implemented
}
\item{alternative}{
Alternative hypothesis, default choice is two.sided. Other options
include greater or less.
}
}
\value{
\item{p.value}{P-value as determined by the alternative hypothesis tested}
\item{statistic}{Statistic value for the VT test}
}
\author{
Dajiang Liu
}
|
2e368aa32e9ed4deeb0607585d4ed73783a4f8ec
|
84eb5b028b41b0e224df9a9b050a707bd32b353b
|
/man/add_variables.Rd
|
79867e1abfdda34386cac5b1c10f5df6dd0cd31d
|
[
"MIT"
] |
permissive
|
minghao2016/workflows
|
af6fbf5e4a1c1d7e5b0e3038c52a63e43b192f75
|
25a422857f0940e90bbefdf8990464198f376cd6
|
refs/heads/master
| 2022-12-21T22:25:09.128867
| 2020-09-14T12:38:10
| 2020-09-14T12:38:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,768
|
rd
|
add_variables.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pre-action-variables.R
\name{add_variables}
\alias{add_variables}
\alias{remove_variables}
\alias{update_variables}
\title{Add variables to a workflow}
\usage{
add_variables(x, outcomes, predictors, ..., blueprint = NULL)
remove_variables(x)
update_variables(x, outcomes, predictors, ..., blueprint = NULL)
}
\arguments{
\item{x}{A workflow}
\item{outcomes, predictors}{Tidyselect expressions specifying the terms
of the model. \code{outcomes} is evaluated first, and then all outcome columns
are removed from the data before \code{predictors} is evaluated.
See \link[tidyselect:language]{tidyselect::select_helpers} for the full range of possible ways to
specify terms.}
\item{...}{Not used.}
\item{blueprint}{A hardhat blueprint used for fine tuning the preprocessing.
If \code{NULL}, \code{\link[hardhat:default_xy_blueprint]{hardhat::default_xy_blueprint()}} is used.
Note that preprocessing done here is separate from preprocessing that
might be done by the underlying model.}
}
\value{
\code{x}, updated with either a new or removed variables preprocessor.
}
\description{
\itemize{
\item \code{add_variables()} specifies the terms of the model through the usage of
\link[tidyselect:language]{tidyselect::select_helpers} for the \code{outcomes} and \code{predictors}.
\item \code{remove_variables()} removes the variables. Additionally, if the model
has already been fit, then the fit is removed.
\item \code{update_variables()} first removes the variables, then replaces the
previous variables with the new ones. Any model that has already been
fit based on the original variables will need to be refit.
}
}
\details{
To fit a workflow, exactly one of \code{\link[=add_formula]{add_formula()}}, \code{\link[=add_recipe]{add_recipe()}}, or
\code{\link[=add_variables]{add_variables()}} \emph{must} be specified.
}
\examples{
library(parsnip)
spec_lm <- linear_reg()
spec_lm <- set_engine(spec_lm, "lm")
workflow <- workflow()
workflow <- add_model(workflow, spec_lm)
# Add terms with tidyselect expressions.
# Outcomes are specified before predictors.
workflow1 <- add_variables(
workflow,
outcomes = mpg,
predictors = c(cyl, disp)
)
workflow1 <- fit(workflow1, mtcars)
workflow1
# Removing the variables of a fit workflow will also remove the model
remove_variables(workflow1)
# Variables can also be updated
update_variables(workflow1, mpg, starts_with("d"))
# The `outcomes` are removed before the `predictors` expression
# is evaluated. This allows you to easily specify the predictors
# as "everything except the outcomes".
workflow2 <- add_variables(workflow, mpg, everything())
workflow2 <- fit(workflow2, mtcars)
pull_workflow_mold(workflow2)$predictors
}
|
204d21dd1fc11e808e977b759a14d578dd51020a
|
051880099402393c9249d41526a5ac162f822f8d
|
/man/tg.removeTier.Rd
|
a841c6d2f9b1ed34bd23cca6b18686d3f3e9081f
|
[
"MIT"
] |
permissive
|
bbTomas/rPraat
|
cd2b309e39e0ee784be4d83a980da60946f4c822
|
4c516e1309377e370c7d05245f6a396b6d4d4b03
|
refs/heads/master
| 2021-12-13T19:32:38.439214
| 2021-12-09T18:42:48
| 2021-12-09T18:42:48
| 54,803,225
| 21
| 7
| null | null | null | null |
UTF-8
|
R
| false
| true
| 560
|
rd
|
tg.removeTier.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tg.R
\name{tg.removeTier}
\alias{tg.removeTier}
\title{tg.removeTier}
\usage{
tg.removeTier(tg, tierInd)
}
\arguments{
\item{tg}{TextGrid object}
\item{tierInd}{tier index or "name"}
}
\value{
TextGrid object
}
\description{
Removes tier of the given index.
}
\examples{
\dontrun{
tg <- tg.sample()
tg.plot(tg)
tg2 <- tg.removeTier(tg, "word")
tg.plot(tg2)
}
}
\seealso{
\code{\link{tg.insertNewIntervalTier}}, \code{\link{tg.insertNewPointTier}}, \code{\link{tg.duplicateTier}}
}
|
5aa46760dbe71c94856c18869b5a94ab41b51ab2
|
0826f40877bfa10084e1a565a3056001d5b3bafa
|
/man/Vdgraph.Rd
|
fa6a43169fbabbd062c8d476c16c2bc810bfdd7f
|
[] |
no_license
|
cran/Vdgraph
|
9eec47b95cf64f45b0c02ac17f993f7cee6323fc
|
a83a4a363f7f0d753635f15f9f29503f0e950cd4
|
refs/heads/master
| 2023-06-12T08:42:07.090755
| 2023-06-02T11:10:08
| 2023-06-02T11:10:08
| 17,694,064
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,196
|
rd
|
Vdgraph.Rd
|
\name{Vdgraph}
\alias{Vdgraph}
\title{This function makes a Variance Dispersion Graph of a response surface design
}
\description{
This function calls the function Vardsgr which uses Vining's (1993) fortran
code to get the coordinates of a variance dispersion graph, and then makes
the plot.
}
\usage{
Vdgraph(des)
}
\arguments{
\item{des}{ des is a matrix or a data frame containing a response surface design in coded or uncoded units.
There should be one column for each factor in the design, and one row for each run in the design.
The maximum number of rows allowed is 99, and the maximum number of columns is 7.}
}
\value{ vdgpl
\item{vdgpl}{This is a graph containing the Variance Dispersion Graph}
}
\references{
1. Vining, G. "A Computer Program for Generating Variance Dispersion Graphs"
Journal of Quality Technology, Vol 25, No. 1, pp. 45-58, 1993.
2. Vining, G. "Corrigenda" Journal of Quality Technology, Vol 25, No. 4,
pp 333-335. 1993.
}
\note{ This function calls the function Vardsgr to get the coordinates for the plot.}
\author{ John S. Lawson \email{lawson@byu.edu}}
\examples{
data(D310)
Vdgraph(D310)
}
\keyword{hplot }
|
65bcb3feb282e02f3a0b2830c454eacd6915e7d3
|
09eb39939021ebedb5af9b7539982ba49a3626d0
|
/Plot1.R
|
75d1bddc49cc138cebebe19255c06f268a600d71
|
[] |
no_license
|
laurentBesnainou/Exploratory-Data-Analysis-Project2
|
bc1810e389680c0a46bc27de01f927df21c1aef1
|
995674ceef0863beb4687bfdfe16289759fb10d5
|
refs/heads/master
| 2021-04-29T22:37:49.527677
| 2018-02-15T14:54:57
| 2018-02-15T14:54:57
| 121,641,410
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 733
|
r
|
Plot1.R
|
# Plot 1: total US PM2.5 emissions per measured year, saved to plot1.png.
library(dplyr)
## This first line will likely take a few seconds. Be patient!
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# NOTE(review): SCC and dplyr are loaded but not used in this plot --
# presumably shared boilerplate with the other plot scripts; confirm.
##### Have total emissions from PM2.5 decreased in the United States from 1999 to 2008?
png(file = "plot1.png",width = 800, height = 600)
##### base plotting system, make a plot showing the total PM2.5 emission from all
##### sources for each of the years 1999, 2002, 2005, and 2008
# Sum Emissions within each year (NA-safe); names(yearNEI) are the years.
yearNEI <- with(NEI,tapply(Emissions, year, sum, na.rm=TRUE))
plot(names(yearNEI),yearNEI/1000000,pch=20,xlab = "Year", ylim=c(3,8),
ylab="Tons of PM2.5 emission (in millions)", main="PM2.5 emitted in the United States from 1999 to 2008")
dev.off() ## Close the file device
|
d12c67ae993709985b442bcfb7ec680808af1332
|
7991d8802959a649ab92cb8b5caf3efe1ef61c26
|
/circos_draft_genome_nocol.R
|
9ce73a00d2a642d1164041bb43b28e988545ccdb
|
[] |
no_license
|
euba/genome_example
|
c832a4916f980c09d27f3019cfbe86f7dbca84e3
|
697dfba2316eb280cbc19b8525ed3ac1bf6a5c4a
|
refs/heads/master
| 2020-08-07T09:44:16.661620
| 2020-05-19T18:32:18
| 2020-05-19T18:32:18
| 213,396,701
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,277
|
r
|
circos_draft_genome_nocol.R
|
# Package installation: install only what is missing, so re-sourcing the
# script does not reinstall (and possibly upgrade) every package on each
# run, as the previous unconditional install.packages() calls did.
cran_pkgs <- c("RJSONIO", "seqinr", "RColorBrewer")
for (pkg in cran_pkgs) {
  if (!requireNamespace(pkg, quietly = TRUE)) install.packages(pkg)
}
if (!requireNamespace("BiocManager", quietly = TRUE))
  install.packages("BiocManager")
bioc_pkgs <- c("OmicCircos", "KEGGREST")
for (pkg in bioc_pkgs) {
  if (!requireNamespace(pkg, quietly = TRUE)) BiocManager::install(pkg)
}
# Load the packages used below.
library(RJSONIO)
library(seqinr)
library(RColorBrewer)
library(OmicCircos)
library(KEGGREST)
# NOTE(review): hard-coded, user-specific working directory; adjust (or
# switch to project-relative paths) before running on another machine.
setwd("/Users/euba/Downloads/Dvul_symbiont_annotate.JSON/") #set your working directory
#######################################################################################
########################### assemble all genome info - use this part if you have the annotation as json from kbase
#######################################################################################
# Categories used for coloring genes; anything else falls back to "Other".
lvl1cat = c("Genetic Information Processing","Metabolism",
"Environmental Information Processing") #select 3 categories from level 1 of the annotation
lvl2cat = c("Metabolism of cofactors and vitamins","Amino acid metabolism",
"Carbohydrate metabolism") #select 3 categories from level 2 of the annotation
catcol = c(brewer.pal(7,"Accent"),"grey90") #define the colors to be used for the genes
names(catcol) = c(lvl1cat,"RNA",lvl2cat,"Other") #define which colors correspond to which category
json_file <- fromJSON("26.json") #import gene annotation
annot <- data.frame(id=NA,chr=NA,annot=NA,start=NA,end=NA,length=NA,mid=NA,
ko=NA,lvl1=NA,lvl2=NA,lvl3=NA) #create a table with all necessary gene information
# Coding features plus any non-coding feature whose type mentions "RNA".
allfeat = c(json_file$features,json_file$non_coding_features[grep("RNA",unlist(lapply(json_file$non_coding_features,function(x){x$type})),ignore.case=T)])
for(i in 1:length(allfeat)){ #iterate through all features and fill the table with information
annot[i,"gsize"] = json_file$dna_size
annot[i,"id"] = allfeat[[i]]$id
if(length(allfeat[[i]]$functions)!=0){
annot[i,"annot"] = allfeat[[i]]$functions[1]
}
annot[i,"chr"] = allfeat[[i]]$location[[1]][[1]]
# Location is (contig, position, strand, length); on "+" the position is
# the feature start, on "-" it is the feature end.
if(allfeat[[i]]$location[[1]][[3]]=="+"){
annot[i,"start"] = allfeat[[i]]$location[[1]][[2]]
annot[i,"end"] = allfeat[[i]]$location[[1]][[2]]+allfeat[[i]]$location[[1]][[4]]
}else{
annot[i,"start"] = allfeat[[i]]$location[[1]][[2]]-allfeat[[i]]$location[[1]][[4]]
annot[i,"end"] = allfeat[[i]]$location[[1]][[2]]
}
annot[i,"orientation"] = allfeat[[i]]$location[[1]][[3]]
annot[i,"length"] = allfeat[[i]]$location[[1]][[4]]
annot[i,"mid"] = annot[i,"start"]+(annot[i,"length"]/2)
if(is.null(allfeat[[i]]$type)){allfeat[[i]]$type="other"}
if(allfeat[[i]]$type %in% c("tRNA","RNA","rRNA","rna","rrna","trna")){
annot[i,"lvl1"] = "RNA"
annot[i,"lvl2"] = "RNA"
annot[i,"lvl3"] = "RNA"
}
}
#Assign color of right category for each gene
# NOTE(review): in the visible code lvl1/lvl2 are only ever set for RNA
# features, so non-RNA genes will all be classed "Other" unless the KO
# category columns are filled elsewhere -- confirm against the full file.
annot$class = NA
for(i in lvl2cat){annot$class[which(annot$lvl2==i)]=i}
for(i in lvl1cat){annot$class[intersect(which(annot$lvl1==i),which(is.na(annot$class)))]=i}
annot$class[intersect(which(annot$lvl1=="RNA"),which(is.na(annot$class)))] = "RNA"
annot$class[which(is.na(annot$class))] = "Other"
annot$color = catcol[as.character(annot$class)]
#######################################################################################
########################### get GC skew information
#######################################################################################
gseq = read.fasta("Dvul_gc_selected_contigs.fa") #import the genome fasta file
slide = 1000 #define the sliding window for GC calculation
# Seed row of NAs is a placeholder for rbind-ing; it is dropped at the end.
gcdat = data.frame(chr=NA,seg.po=NA,name1=NA)
for(i in 1:length(gseq)){
numelement <- ceiling(length(gseq[[i]])/slide)
pos1 = 0
pos2 = 0
start = 0
# Build window boundaries; the final window is clipped to the contig end.
for(j in 1:numelement){
pos1[j] = start
pos2[j] = pos1[j] + slide
start = start + slide
}
pos2[length(pos2)] = length(gseq[[i]])
gcs = vector()
# NOTE(review): the first window starts at index 0 (which R silently
# drops, so it covers bases 1..slide) and each later window shares one
# base with its predecessor (pos1:pos2 is inclusive on both ends).
# Presumably harmless for a smoothed GC track -- confirm if exact
# non-overlapping windows are required.
for(j in 1:length(pos1)){
gcs[j] = GC(gseq[[i]][pos1[j]:pos2[j]])
}
gccont = data.frame(chr=names(gseq)[i],seg.po=1:length(gcs),name1=gcs)
gcdat = rbind(gcdat,gccont)
}
gcdat = gcdat[-1,]
#######################################################################################
########################### make circos plot
#######################################################################################
par(mar=c(2, 2, 2, 2));# create a canvas
plot(c(1,800), c(1,800), type="n", axes=FALSE, xlab="", ylab="", main="");
#add the different circles of the genome plot
clengths = unlist(lapply(gseq,length))
#set a list of very short contigs you want to remove --> use at your own risk!
tabu <- names(which(clengths<1000))
if(length(tabu) != 0){
clengths = clengths[-which(names(clengths) %in% tabu)]
annotsel = annot[-which(annot$chr %in% tabu),]
}else{annotsel = annot}
# Angular extent of each contig, proportional to its length, starting at
# 270 degrees (top of the circle in OmicCircos coordinates).
degree2 = 270+cumsum(360*(clengths/sum(clengths)))
degree1 = c(270,degree2)
degree1 = degree1[-length(degree1)]
#set a short separator degree between contigs --> use at your own risk!
degree2 = degree2 - 0.5
# Manual "chr" segment table for circos(); positions are in units of the
# GC sliding window (slide bases per unit).
chrdat = cbind(names(clengths),
as.character(degree1),
as.character(degree2),
"0",clengths/slide,
"0",clengths/slide)
colnames(chrdat) = c("seg.name","angle.start","angle.end","seg.sum.start","seg.sum.end","seg.start","seg.end")
genomedat <- data.frame("chr"=annotsel$chr,"start"=annotsel$start/1000,
"end"=annotsel$end/1000,"value"=factor(annotsel$class),
"orientation"=annotsel$orientation,"color"=annotsel$color)
# Rings, outside-in: contig backbone, + strand genes, - strand genes,
# and the GC line track.
circos(R=355,W=20,cir=chrdat,type="chr",col="black",scale=F,print.chr.lab=F,cex=4,lwd=10)
circos(R=320,cir=chrdat,mapping=genomedat[which(genomedat$orientation=="+"),],W=0,type="arc2",scale=T,print.chr.lab=F,cex=4,
col=as.character(genomedat[which(genomedat$orientation=="+"),"color"]),lwd=50,cutoff=0)#8x8
circos(R=270,cir=chrdat,mapping=genomedat[which(genomedat$orientation=="-"),],W=0,type="arc2",scale=T,print.chr.lab=F,cex=4,
col=as.character(genomedat[which(genomedat$orientation=="-"),"color"]),lwd=50,cutoff=0)#8x8
circos(R=175,cir=chrdat,W=65,mapping=gcdat,col.v=3,type="ls",B=F,col="grey",lwd=1.5,scale=F);
#save this plot as 8x8 PDF file
#plot the legend
par(mar=c(0,0,0,0))
plot(c(1,800), c(1,800), type="n", axes=FALSE, xlab="", ylab="", main="");
legend("top",legend=names(catcol),col=catcol,lty=1,cex=1,box.lwd=0,lwd=14)
|
00fb96e7b57e2ceff0df041670961599cbd6b287
|
b2d32cb57604a26e31f0c4947ee866f59a7aa8ba
|
/man/modNamePaste.Rd
|
8c4ad7d56251e145eac5c45ea9ee46c197870bcf
|
[
"LicenseRef-scancode-warranty-disclaimer",
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
atredennick/GenEst
|
fe1f95ca844b2bf9cf0f3358e810439b0a964410
|
38b73a290c074872c0651926bd6de283072aa8e6
|
refs/heads/master
| 2020-06-10T20:24:52.460911
| 2019-06-25T16:52:44
| 2019-06-25T16:52:44
| 193,736,013
| 0
| 0
| null | 2019-06-25T15:35:03
| 2019-06-25T15:35:03
| null |
UTF-8
|
R
| false
| true
| 540
|
rd
|
modNamePaste.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/app_utilities.R
\name{modNamePaste}
\alias{modNamePaste}
\title{Paste the parts of a model's name back together}
\usage{
modNamePaste(parts, type = "SE", tab = FALSE)
}
\arguments{
\item{parts}{the component parts of the model's name}
\item{type}{"SE" or "CP"}
\item{tab}{logical indicating whether this is the table output for CP}
}
\value{
the pasted name
}
\description{
Paste the component parts of a model's name back together
for presentation
}
|
440d5adc7fbd5a05b37bedc9c0ff0efae0d9475a
|
26242ef504b67621386f0e6a9d5988e5671cf956
|
/multiway_partitions.R
|
5e4448bc2e5f349fa70658cac409f670785e49cb
|
[
"MIT"
] |
permissive
|
mbbruch/Multiway_Partitioning
|
f96d3cf313343080e827f4dada28b499e1a7e6bd
|
74447b556867f9c3292ad33536629f0802bf9b92
|
refs/heads/main
| 2023-08-10T19:11:55.770190
| 2021-09-30T03:07:24
| 2021-09-30T03:07:24
| 411,904,770
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,025
|
r
|
multiway_partitions.R
|
library(gurobi)
library(data.table)
library(dplyr)
## Partition length(I) items of sizes I into K groups via a Gurobi MIP.
## Decision variables: one binary per (item i, partition k), laid out at
## column (i-1)*K + k, followed by K continuous partition-size variables.
## Constraints: each item in exactly one partition; each size variable
## equals the total size assigned to its partition; size variables are
## ordered ascending (symmetry breaking).  Objective (modelsense 'min',
## obj = -1 on the smallest size and +1 on the largest): minimize the
## spread between the largest and smallest partition.
## Returns one integer partition label per item, or NULL when Gurobi did
## not return a full-length solution vector (e.g. time limit with no
## incumbent).
## NOTE(review): crossing() is from tidyr, which is not attached by the
## library() calls visible above -- confirm it is loaded elsewhere.
multiway_partitions <- function(I, K) {
sizes <- data.table(i=1:length(I),size=I)
# All (item, partition) pairs; row order makes column (i-1)*K + k.
combos <- data.table(crossing(sizes,k=1:K))
model = list()
A <- NULL; b <- NULL;
sense <- NULL;
current.row=1;
#Each item can only be in one partition
rows <- combos[,i]
columns <- combos[,(i-1)*max(k)+k]
entries <- rep(1,times=combos[,.N])
new.for.b <- rep(1,times=length(I))
new.for.sense <- rep("=",times=length(I))
b <- c(b, c(new.for.b));
sense <- c(sense,new.for.sense)
new.for.A = data.frame(cbind(rows, columns, entries));
A <- rbindlist(list(A,new.for.A), use.names=FALSE, fill=FALSE, idcol=NULL)
new.for.A <- c(); new.for.b <- c(); new.for.sense <- c();
rows <- c(); columns <- c(); entries <- c();
current.row <- max(A$rows)+1
#Define size of each partition
# sum_i size_i * x_{ik} - s_k = 0 for every partition k.
rows <- c(current.row-1 + combos[,k],
current.row-1 + 1:K)
columns <- c(combos[,(i-1)*max(k)+k],
combos[,.N] + 1:K)
entries <- c(combos[,size],
rep(-1,times=K))
new.for.b <- rep(0,times=K)
new.for.sense <- rep("=",times=K)
b <- c(b, c(new.for.b));
sense <- c(sense,new.for.sense)
new.for.A = data.frame(cbind(rows, columns, entries));
A <- rbindlist(list(A,new.for.A), use.names=FALSE, fill=FALSE, idcol=NULL)
new.for.A <- c(); new.for.b <- c(); new.for.sense <- c();
rows <- c(); columns <- c(); entries <- c();
current.row <- max(A$rows)+1
#Break symmetry and define min/max sized partitions
# s_{k+1} - s_k >= 0, so the size variables are sorted ascending.
rows <- c(current.row-1 + 1:(K-1),
current.row-1 + 1:(K-1))
columns <- c(combos[,.N] + 1:(K-1),
combos[,.N] + 2:K)
entries <- c(rep(-1,times=K-1),
rep(1,times=K-1))
new.for.b <- rep(0,times=K-1)
new.for.sense <- rep(">=",times=K-1)
b <- c(b, c(new.for.b));
sense <- c(sense,new.for.sense)
new.for.A = data.frame(cbind(rows, columns, entries));
A <- rbindlist(list(A,new.for.A), use.names=FALSE, fill=FALSE, idcol=NULL)
new.for.A <- c(); new.for.b <- c(); new.for.sense <- c();
rows <- c(); columns <- c(); entries <- c();
current.row <- max(A$rows)+1
# Variable types/bounds: binaries first, then the K size variables,
# bounded between 0 and 5x the average partition size.
model$vtype <- c(
rep('B',times=combos[,.N]),
rep('C',times=K))
model$lb <- c(
rep(0,times=combos[,.N]),
rep((sizes[,sum(size)]/K)*0,times=K))
model$ub <- c(
rep(1,times=combos[,.N]),
rep((sizes[,sum(size)]/K)*5,times=K))
model$obj <- c(
rep(0,times=combos[,.N]),
-1,
rep(0,times=K-2),
1
)
model$A <- sparseMatrix(i=A$rows,j=A$columns,x=A$entries)
model$modelsense <- 'min'
model$rhs <- b
model$sense <- sense
rm(A); rm(b); rm(sense);
gc();
solution <- gurobi(model,params=list(
TimeLimit=2700,
Heuristics=0.5,
Threads=6,
PreDepRow=1,
Presolve=2,
PreDual=2,
PrePasses=20,
PreSparsify=1,
Cuts=2,
CutPasses=500,
CutAggPasses=1
))
# NOTE(review): (p %% K) + 1 maps internal partition index k to label
# k+1 (and k = K to label 1), i.e. labels are a cyclic shift of k.  The
# grouping is still consistent; confirm if labels must match the
# size-ordered partitions.
if(length(solution$x) == length(model$obj)){
return(which(solution$x[1:combos[,.N]] ==1)%%K + 1)
}
}
|
82ab2c9f14a4570efecf8dd0688f8787eeff4d49
|
0cc22eef828da4740c8309bfb9231c92e27cb110
|
/LCMM1.R
|
b2fd145ae2749b08583f7955b5a6e46f027cd15b
|
[] |
no_license
|
gl2458/Practicum
|
6893370c9050fa36b2c8de6f85dd5949757180b7
|
886fa13c873188e8173f4b2e4d536671867347a5
|
refs/heads/master
| 2021-05-21T09:39:16.549688
| 2020-08-14T18:35:09
| 2020-08-14T18:35:09
| 252,641,054
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,874
|
r
|
LCMM1.R
|
# Setup: install missing dependencies once, then load them. ----
# (Previously install.packages() ran unconditionally on every source(),
# and "xlsx" was installed although the script actually uses "writexl".)
for (p in c("lcmm", "plyr", "tidyverse", "writexl", "janitor")) {
  if (!requireNamespace(p, quietly = TRUE)) install.packages(p)
}
library(lcmm)
library(plyr)
library(tidyverse)
library(writexl)
# Data import: keep only the five scheduled visits. ----
cmpst_all2 <- read_csv(file = "/Users/rachellee/Google Drive/Practicum/Data/cmpst_all2.csv") %>%
janitor::clean_names() %>%
filter(visit %in% c("Baseline", "Week3", "Week6", "Week9", "Week14"))
# Recode visit labels to numeric week offsets (Baseline = 0).  The
# original piped the recoded vector into as.numeric() together with a
# stray second argument, which as.numeric() silently ignored; this is
# the intended computation, written directly.
cmpst_all2$visit <- as.numeric(revalue(cmpst_all2$visit,
c("Baseline" = 0, "Week3" = 3, "Week6" = 6, "Week9" = 9, "Week14" = 14)))
# lcmm() expects a plain data.frame, not a tibble.
cmpst_all2 <- data.frame(cmpst_all2)
# Quick interactive sanity checks of the imported data.
head(cmpst_all2)
head(cmpst_all2$patient_id)
class(cmpst_all2$patient_id)
summary(cmpst_all2)
#####################################################
# ng = 2
#####################################################
# Fit latent-class mixed models for the HAM-D total score over visits
# with ng = 2 classes under four link functions, inspect each fit,
# attach posterior class memberships (pprob) to the data, and plot
# individual trajectories colored by assigned class.
#linear link function
hamd_linear_2 <- lcmm(ham24tot ~ visit, random = ~visit, subject = 'patient_id', mixture = ~visit, ng = 2, data = cmpst_all2, link = "linear", idiag = FALSE, na.action = 1)
summary(hamd_linear_2)
plot(hamd_linear_2, which="linkfunction", bty="l")
plot(hamd_linear_2, which="postprob", bty="l")
summary(hamd_linear_2$pprob)
names(hamd_linear_2)
count(unique(hamd_linear_2$pprob))
#beta link function
hamd_beta_2 <- lcmm(ham24tot ~ visit, random = ~visit, subject = 'patient_id', mixture = ~visit, ng = 2, data = cmpst_all2, link = "beta", idiag = FALSE, na.action = 1)
summary(hamd_beta_2)
plot(hamd_beta_2, which="linkfunction",bty="l")
plot(hamd_beta_2, which="postprob", bty="l")
count(unique(hamd_beta_2$pprob))
class_hamd_beta_2 <- left_join(cmpst_all2, data.frame(hamd_beta_2$pprob), by = "patient_id", copy = FALSE)
# I-splines with 3 equidistant nodes
hamd_spline3_2 <- lcmm(ham24tot ~ visit, random = ~visit, subject = 'patient_id', mixture = ~visit, ng = 2, data = cmpst_all2, link="3-equi-splines", idiag = FALSE, na.action = 1)
summary(hamd_spline3_2)
plot(hamd_spline3_2, which = "linkfunction", bty = "l")
plot(hamd_spline3_2, which="postprob", bty="l")
# I-splines with 5 nodes at quantiles
hamd_spline5_2 <- lcmm(ham24tot ~ visit, random = ~visit, subject = 'patient_id', mixture = ~visit, ng = 2, data = cmpst_all2, link="5-quant-splines", idiag = FALSE, na.action = 1)
summary(hamd_spline5_2)
plot(hamd_spline5_2, which = "linkfunction", bty = "l")
plot(hamd_spline5_2, which="postprob", bty="l")
#class extraction
class_hamd_linear_2 <- left_join(cmpst_all2, data.frame(hamd_linear_2$pprob), by = "patient_id", copy = FALSE)
class_hamd_linear_2$class <- as.factor(class_hamd_linear_2$class)
class_hamd_beta_2 <- left_join(cmpst_all2, data.frame(hamd_beta_2$pprob), by = "patient_id", copy = FALSE)
class_hamd_beta_2$class <- as.factor(class_hamd_beta_2$class)
class_hamd_spline3_2 <- left_join(cmpst_all2, data.frame(hamd_spline3_2$pprob), by = "patient_id", copy = FALSE)
class_hamd_spline3_2$class <- as.factor(class_hamd_spline3_2$class)
class_hamd_spline5_2 <- left_join(cmpst_all2, data.frame(hamd_spline5_2$pprob), by = "patient_id", copy = FALSE)
class_hamd_spline5_2$class <- as.factor(class_hamd_spline5_2$class)
# NOTE(review): only the linear-link classes are exported here; the
# other three link functions are not written out -- confirm intentional.
write_xlsx(class_hamd_linear_2,"/Users/rachellee/Google Drive/Practicum/LCMM/class extraction/class_hamd_linear_2.xlsx")
#plot
ggplot(class_hamd_linear_2, aes(visit, ham24tot, group = patient_id, color = class)) + geom_point(alpha = 0.3) + geom_line(alpha = 0.3) +
ggtitle("linear, ng=2") +
theme(plot.title = element_text(hjust = 0.5))
ggplot(class_hamd_beta_2, aes(visit, ham24tot, group = patient_id, color = class)) + geom_point(alpha = 0.3) + geom_line(alpha = 0.3) +
ggtitle("beta, ng=2") +
theme(plot.title = element_text(hjust = 0.5))
ggplot(class_hamd_spline3_2, aes(visit, ham24tot, group = patient_id, color = class)) + geom_point(alpha = 0.3) + geom_line(alpha = 0.3) +
ggtitle("spline3, ng=2") +
theme(plot.title = element_text(hjust = 0.5))
ggplot(class_hamd_spline5_2, aes(visit, ham24tot, group = patient_id, color = class)) + geom_point(alpha = 0.3) + geom_line(alpha = 0.3) +
ggtitle("spline5, ng=2") +
theme(plot.title = element_text(hjust = 0.5))
#####################################################
# ng = 3
#####################################################
# Same four-link model comparison as the ng = 2 section, now with three
# latent classes.
#linear link function
hamd_linear_3 <- lcmm(ham24tot ~ visit, random = ~visit, subject = 'patient_id', mixture = ~visit, ng = 3, data = cmpst_all2, link = "linear", idiag = FALSE, na.action = 1)
summary(hamd_linear_3)
plot(hamd_linear_3, which="linkfunction",bty="l")
plot(hamd_linear_3, which="postprob",bty="l")
#beta link function
hamd_beta_3 <- lcmm(ham24tot ~ visit, random = ~visit, subject = 'patient_id', mixture = ~visit, ng = 3, data = cmpst_all2, link = "beta", idiag = FALSE, na.action = 1)
summary(hamd_beta_3)
plot(hamd_beta_3, which="linkfunction",bty="l")
plot(hamd_beta_3, which="postprob",bty="l")
# I-splines with 3 equidistant nodes
hamd_spline3_3 <- lcmm(ham24tot ~ visit, random = ~visit, subject = 'patient_id', mixture = ~visit, ng = 3, data = cmpst_all2, link="3-equi-splines", idiag = FALSE, na.action = 1)
summary(hamd_spline3_3)
plot(hamd_spline3_3, which = "linkfunction", bty = "l")
plot(hamd_spline3_3, which="postprob",bty="l")
# I-splines with 5 nodes at quantiles
hamd_spline5_3 <- lcmm(ham24tot ~ visit, random = ~visit, subject = 'patient_id', mixture = ~visit, ng = 3, data = cmpst_all2, link="5-quant-splines", idiag = FALSE, na.action = 1)
summary(hamd_spline5_3)
plot(hamd_spline5_3, which = "linkfunction", bty = "l")
plot(hamd_spline5_3, which="postprob",bty="l")
#class extraction
class_hamd_linear_3 <- left_join(cmpst_all2, data.frame(hamd_linear_3$pprob), by = "patient_id", copy = FALSE)
class_hamd_linear_3$class <- as.factor(class_hamd_linear_3$class)
class_hamd_beta_3 <- left_join(cmpst_all2, data.frame(hamd_beta_3$pprob), by = "patient_id", copy = FALSE)
class_hamd_beta_3$class <- as.factor(class_hamd_beta_3$class)
class_hamd_spline3_3 <- left_join(cmpst_all2, data.frame(hamd_spline3_3$pprob), by = "patient_id", copy = FALSE)
class_hamd_spline3_3$class <- as.factor(class_hamd_spline3_3$class)
class_hamd_spline5_3 <- left_join(cmpst_all2, data.frame(hamd_spline5_3$pprob), by = "patient_id", copy = FALSE)
class_hamd_spline5_3$class <- as.factor(class_hamd_spline5_3$class)
#plot
ggplot(class_hamd_linear_3, aes(visit, ham24tot, group = patient_id, color = class)) + geom_point(alpha = 0.3) + geom_line(alpha = 0.3) +
ggtitle("linear, ng=3") +
theme(plot.title = element_text(hjust = 0.5))
ggplot(class_hamd_beta_3, aes(visit, ham24tot, group = patient_id, color = class)) + geom_point(alpha = 0.3) + geom_line(alpha = 0.3) +
ggtitle("beta, ng=3") +
theme(plot.title = element_text(hjust = 0.5))
ggplot(class_hamd_spline3_3, aes(visit, ham24tot, group = patient_id, color = class)) + geom_point(alpha = 0.3) + geom_line(alpha = 0.3) +
ggtitle("spline3, ng=3") +
theme(plot.title = element_text(hjust = 0.5))
ggplot(class_hamd_spline5_3, aes(visit, ham24tot, group = patient_id, color = class)) + geom_point(alpha = 0.3) + geom_line(alpha = 0.3) +
ggtitle("spline5, ng=3") +
theme(plot.title = element_text(hjust = 0.5))
#####################################################
# ng = 4
#####################################################
# Same four-link model comparison as above, with four latent classes.
#linear link function
hamd_linear_4 <- lcmm(ham24tot ~ visit, random = ~visit, subject = 'patient_id', mixture = ~visit, ng = 4, data = cmpst_all2, link = "linear", idiag = FALSE, na.action = 1)
summary(hamd_linear_4)
plot(hamd_linear_4, which="linkfunction",bty="l")
plot(hamd_linear_4, which="postprob",bty="l")
#beta link function
hamd_beta_4 <- lcmm(ham24tot ~ visit, random = ~visit, subject = 'patient_id', mixture = ~visit, ng = 4, data = cmpst_all2, link = "beta", idiag = FALSE, na.action = 1)
summary(hamd_beta_4)
plot(hamd_beta_4, which="linkfunction",bty="l")
plot(hamd_beta_4, which = "postprob", bty = "l")
# I-splines with 3 equidistant nodes
hamd_spline3_4 <- lcmm(ham24tot ~ visit, random = ~visit, subject = 'patient_id', mixture = ~visit, ng = 4, data = cmpst_all2, link="3-equi-splines", idiag = FALSE, na.action = 1)
summary(hamd_spline3_4)
plot(hamd_spline3_4, which = "linkfunction", bty = "l")
plot(hamd_spline3_4, which = "postprob", bty = "l")
# I-splines with 5 nodes at quantiles
hamd_spline5_4 <- lcmm(ham24tot ~ visit, random = ~visit, subject = 'patient_id', mixture = ~visit, ng = 4, data = cmpst_all2, link="5-quant-splines", idiag = FALSE, na.action = 1)
summary(hamd_spline5_4)
plot(hamd_spline5_4, which = "linkfunction", bty = "l")
plot(hamd_spline5_4, which = "postprob", bty = "l")
#class extraction
class_hamd_linear_4 <- left_join(cmpst_all2, data.frame(hamd_linear_4$pprob), by = "patient_id", copy = FALSE)
class_hamd_linear_4$class <- as.factor(class_hamd_linear_4$class)
class_hamd_beta_4 <- left_join(cmpst_all2, data.frame(hamd_beta_4$pprob), by = "patient_id", copy = FALSE)
class_hamd_beta_4$class <- as.factor(class_hamd_beta_4$class)
class_hamd_spline3_4 <- left_join(cmpst_all2, data.frame(hamd_spline3_4$pprob), by = "patient_id", copy = FALSE)
class_hamd_spline3_4$class <- as.factor(class_hamd_spline3_4$class)
class_hamd_spline5_4 <- left_join(cmpst_all2, data.frame(hamd_spline5_4$pprob), by = "patient_id", copy = FALSE)
class_hamd_spline5_4$class <- as.factor(class_hamd_spline5_4$class)
#plot
ggplot(class_hamd_linear_4, aes(visit, ham24tot, group = patient_id, color = class)) + geom_point(alpha = 0.3) + geom_line(alpha = 0.3) +
ggtitle("linear, ng=4") +
theme(plot.title = element_text(hjust = 0.5))
ggplot(class_hamd_beta_4, aes(visit, ham24tot, group = patient_id, color = class)) + geom_point(alpha = 0.3) + geom_line(alpha = 0.3) +
ggtitle("beta, ng=4") +
theme(plot.title = element_text(hjust = 0.5))
ggplot(class_hamd_spline3_4, aes(visit, ham24tot, group = patient_id, color = class)) + geom_point(alpha = 0.3) + geom_line(alpha = 0.3) +
ggtitle("spline3, ng=4") +
theme(plot.title = element_text(hjust = 0.5))
ggplot(class_hamd_spline5_4, aes(visit, ham24tot, group = patient_id, color = class)) + geom_point(alpha = 0.3) + geom_line(alpha = 0.3) +
ggtitle("spline5, ng=4") +
theme(plot.title = element_text(hjust = 0.5))
#summary table
# Collect fit statistics for all 12 fitted models (beta / linear /
# spline3 / spline5 for each of ng = 2, 3, 4), in the order listed below.
sum_table <- data.frame(
summarytable(
hamd_beta_2,
hamd_linear_2,
hamd_spline3_2,
hamd_spline5_2,
hamd_beta_3,
hamd_linear_3,
hamd_spline3_3,
hamd_spline5_3,
hamd_beta_4,
hamd_linear_4,
hamd_spline3_4,
hamd_spline5_4,
which = c("G", "loglik", "npm", "conv", "AIC", "BIC", "%class")))
# Link-function label for each row, matching the summarytable() call
# order above.  Fixed: the first label was the object name
# "hamd_beta_2" instead of "beta", and the third was "spline" instead
# of "spline3".
sum_table$link <- rep(c("beta", "linear", "spline3", "spline5"), times = 3)
names(sum_table)
# Move the link column to the front for readability.
sum_table <- sum_table[ , c(11, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10)]
sum_table
write_xlsx(sum_table,"/Users/rachellee/Google Drive/Practicum/LCMM/class extraction/class_sum_table.xlsx")
|
0975d0da6061d3a433623cb5248b0764dab34d32
|
6390c203df735c874044a8ffa0f3692bf6010a6a
|
/demo/demoTPP05.R
|
3518e2b907984b71042abcfa382e06e7d738fb65
|
[
"MIT"
] |
permissive
|
felixlindemann/HNUORTools
|
c8c61ec550e2c6673c8d3e158bd7bc21208b26ab
|
0cb22cc0da14550b2fb48c996e75dfdad6138904
|
refs/heads/master
| 2020-05-15T18:37:48.423808
| 2018-02-04T11:04:52
| 2018-02-04T11:04:52
| 16,206,897
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,044
|
r
|
demoTPP05.R
|
# Demo: transportation-problem instance built from the package's
# GeoSituation / Warehouse / Customer S4 classes, then solved with the
# matrix-minimum method (TPP.MMM).
geo<-new("GeoSituation")
# Four warehouses; total supply = 350 + 450 + 300 + 120 = 1220.
geo<-add(geo,new("Warehouse", id="L1", x=25, y=70, supply = 350 ))
geo<-add(geo,new("Warehouse", id="L2", x=150, y=115, supply = 450 ))
geo<-add(geo,new("Warehouse", id="L3", x=80, y=140, supply = 300 ))
geo<-add(geo,new("Warehouse", id="L4", x=160, y=10, supply = 120 ))
# Nine customers; total demand = 1220, i.e. supply and demand balance.
geo<-add(geo,new("Customer", id="K1", x=15, y=130, demand = 150 ))
geo<-add(geo,new("Customer", id="K2", x=60, y=80, demand = 300 ))
geo<-add(geo,new("Customer", id="K3", x=175, y=140, demand = 180 ))
geo<-add(geo,new("Customer", id="K4", x=50, y=100, demand = 120 ))
geo<-add(geo,new("Customer", id="K5", x=30, y=40, demand = 100 ))
geo<-add(geo,new("Customer", id="K6", x=140, y=80, demand = 40 ))
geo<-add(geo,new("Customer", id="K7", x=100, y=15, demand = 80 ))
geo<-add(geo,new("Customer", id="K8", x=155, y=55, demand = 120 ))
geo<-add(geo,new("Customer", id="K9", x=125, y=145, demand = 130 ))
# will produce an error. No Basis Solution.
# NOTE(review): supply and demand are balanced here (1220 each), so the
# error mentioned above presumably originates inside TPP.MMM itself --
# confirm against the solver's implementation.
geo<- TPP.MMM(geo)
|
6fadec7d8d0dd55afb620452c8f3f8e2f72f77d6
|
439933a3fb21a29240ab4b04aebaced0569248be
|
/_R code for processing raw data/Make CWT recover summary plots.R
|
4bbc13d95919a6481f128815fe664d821e0dafb5
|
[] |
no_license
|
nwfsc-cb/spring-chinook-distribution
|
e47b5e39f5ce2ab8f20413085bc13249ef3bec37
|
5bff26b6fe5102a16a9c3f2c13d659b7e831e03e
|
refs/heads/master
| 2023-08-08T03:35:36.302066
| 2023-08-01T16:35:04
| 2023-08-01T16:35:04
| 128,123,447
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,258
|
r
|
Make CWT recover summary plots.R
|
# Summary plots of CWT (coded-wire tag) recoveries per release group.
# NOTE(review): C, C_ocean, and REL are created earlier in this script
# (not visible here); C/C_ocean appear to be 4-D recovery arrays with
# release groups on dim 1, and REL the release-group table -- confirm.
# Collapse recoveries to one total per release group (dims 1 and 4 kept,
# then summed across the kept columns).
A <- data.frame(apply(C,c(1,4),sum))
A <- A %>% mutate(Tot = rowSums(.))
A$ID <- REL$ID
A$ID_numb <- REL$ID_numb
# Join totals onto the release table; flag groups with zero recoveries.
REL.mod <- REL %>% left_join(.,A) %>%
mutate(z.ind = ifelse(Tot ==0,"zero","pos"))
REL.sum <- REL.mod %>% group_by(ocean.region) %>%
reframe(N.tot= length(z.ind),N.zero = length(z.ind[z.ind=="zero"]))
REL.sum <- full_join(REL.sum,REL.mod)
BREAKS <- c(100,1000,10000,100000,1000000)
ggplot(REL.mod) +
geom_histogram(aes(N.released),bins=100) +
scale_x_continuous(trans="log",breaks=BREAKS)
BREAKS <- c(10,100,1000,10000,100000,1000000)
# Recoveries vs. release size, per ocean region (log-log).
ggplot(REL.mod) +
geom_point(aes(y=Tot,x=N.released,color=release_year)) +
scale_x_continuous(trans="log",breaks= BREAKS) +
scale_y_continuous(trans="log",breaks= BREAKS) +
# scale_color_continuous()
facet_wrap(~ocean.region) +
ggtitle("All ocean recoveries")
#############
# Same summaries restricted to the filtered ocean-recovery array.
B <- data.frame(apply(C_ocean,c(1,4),sum))
B <-B %>% mutate(Tot = rowSums(.))
B$ID <- REL$ID
B$ID_numb <- REL$ID_numb
REL.mod.ocean <- REL %>% left_join(.,B) %>%
mutate(z.ind = ifelse(Tot ==0,"zero","pos"))
REL.sum.ocean <- REL.mod.ocean %>% group_by(ocean.region) %>%
summarise(N.tot= length(z.ind),N.zero = length(z.ind[z.ind=="zero"]))
REL.sum.ocean <- full_join(REL.sum.ocean,REL.mod.ocean)
BREAKS <- c(100,1000,10000,100000,1000000)
ggplot(REL.mod.ocean) +
geom_histogram(aes(N.released),bins=100) +
scale_x_continuous(trans="log",breaks= BREAKS,labels = BREAKS) +
geom_vline(xintercept=5000)
BREAKS <- round(c(10,100,1000,10000,100000,1000000),0)
ggplot(REL.mod.ocean) +
geom_point(aes(y=Tot,x=N.released,color=release_year)) +
scale_x_continuous(trans="log",breaks= BREAKS,labels = BREAKS) +
scale_y_continuous(trans="log",breaks= BREAKS,labels = BREAKS) +
# scale_color_continuous()
facet_wrap(~ocean.region) +
ggtitle("Select ocean recoveries (no nets, no terminal)")
A <- A[order(A$Tot,decreasing = F),]
# WHAT IF I CUT OUT THE RELEASES < 10K
# Count small (< 10k released) vs large release groups per ocean region.
REL.small <- REL.mod %>% filter(N.released < 10000)
REL.lg <- REL.mod %>% filter(N.released >= 10000)
DD <- REL.small %>% group_by(ocean.region) %>% summarise(N.sm = length(ocean.region))
EE <- REL.lg %>% group_by(ocean.region) %>% summarise(N.lg = length(ocean.region))
full_join(DD,EE) %>% as.data.frame()
|
b38d44f5c0d6029c8c7bae77fa0526a445c4f480
|
beeee47f9dbaa4a86bce21cb971b6eef6af805cd
|
/R/utilities.R
|
e84ee05c0484aaf3964c1cd37a64cd78d0413aae
|
[] |
no_license
|
assaron/GSEABase
|
1787667ce3e405bff56af3facae0ec8ba8afd409
|
aa8d43079c58cd627961764d3b755aae4b9f9061
|
refs/heads/master
| 2021-01-23T20:32:27.138842
| 2016-07-07T19:48:30
| 2016-07-07T19:48:30
| 62,832,828
| 0
| 0
| null | 2016-07-07T19:25:05
| 2016-07-07T19:25:05
| null |
UTF-8
|
R
| false
| false
| 1,083
|
r
|
utilities.R
|
## Best-effort unique identifier: host name, process id, wall-clock
## time, and a random integer, joined by ":".  Placeholder until
## something more appropriate is decided.
.uniqueIdentifier <- function() {
    parts <- list(Sys.info()[["nodename"]], Sys.getpid(), Sys.time(),
                  sample(.Machine$integer.max, 1))
    do.call(paste, c(parts, sep = ":"))
}
## Order-preserving union of two vectors: everything in `x`, followed by
## the elements of `y` not already contained in `x` (names survive).
.unique <- function(x, y) {
    extra <- y[!(y %in% x)]
    c(x, extra)
}
## Wrap two operand strings in parentheses joined by an infix operator,
## e.g. .glue("a", "b", "&") -> "(a&b)". Vectorized over x, y and op.
## paste0() replaces the original paste(..., sep="") -- identical output.
.glue <- function(x, y, op)
    paste0("(", x, op, y, ")")
## Quietly test whether a package can be attached: returns TRUE/FALSE,
## with startup warnings suppressed (require() itself is already quiet).
.requireQ <- function(pkg) {
    loaded <- suppressWarnings(require(pkg, quietly=TRUE, character.only=TRUE))
    loaded
}
.nameAll <- function(x) {
    ## Ensure every element of a character vector is named: unnamed
    ## elements receive themselves as their name; existing names win.
    ## Errors on non-character input.
    if (!is.character(x))
        stop("argument 'x' must be a character vector")
    nms <- names(x)
    if (is.null(nms) || length(nms) == 0L) {
        ## no names at all: each element names itself
        names(x) <- x
    } else {
        ## fill only the blank slots, keep the rest
        blank <- nchar(nms) == 0
        nms[blank] <- x[blank]
        names(x) <- nms
    }
    x
}
## Raise an error whose message is sprintf-formatted from `...` and whose
## reported call is the *caller's* call (via sys.call(sys.parent(1))),
## so error messages point at the user-facing function, not this helper.
.stopf <- function(...)
stop(simpleError(sprintf(...),
call=match.call(call=sys.call(sys.parent(1)))))
## Warning counterpart of .stopf: sprintf-formatted message, with the
## caller's call attached so the warning is attributed to the caller.
.warningf <- function(...)
warning(simpleWarning(sprintf(...),
call=match.call(call=sys.call(sys.parent(1)))))
|
1b5591d2612b9d7b1fbd59d46658991b35e7ecd6
|
125e942d1af8e002fe71051423d41490901b034e
|
/PracticalML/3_2_Forecasting.R
|
a158760af1610a23954b14a3bcb4cc0350db0eab
|
[] |
no_license
|
whitish/opendatakyiv
|
ac30089d12aac1fde444f42b42b35897680f17b6
|
aed90acbf7cb446778b4dbbd753f0c780fa3ba5e
|
refs/heads/master
| 2021-01-10T02:15:26.518229
| 2016-03-15T11:23:27
| 2016-03-15T11:23:27
| 52,305,768
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,141
|
r
|
3_2_Forecasting.R
|
# Time-series forecasting walkthrough: pulls CHK stock prices from Yahoo,
# computes daily returns, decomposes and smooths the series, then fits
# tree models (rpart / caret) to predict next-day return direction.
#
# Fix: the original used bare `$$$$...$$$$` lines as section banners,
# which are R *parse errors* -- the script could not be sourced. They are
# converted to `# ---- ... ----` comments below; all code is unchanged.
library(quantmod)
library(PerformanceAnalytics)
library(FinTS)
library(rpart)
library(rpart.plot)
library(rattle)
library(TTR)
library(forecast)
# ---- Importing time series from finance.yahoo ----
getSymbols(Symbols = "CHK", src = "yahoo",from = "2010-03-03", to = "2016-03-03")
CHK<-data.frame(CHK)
View(CHK)
str(CHK)
# ---- Creating the matrix ----
CHK$Date<-rownames(CHK)
colnames(CHK)<-c("Open","High","Low","Close","Volume","Adjusted","Date")
CHK$Date<-as.Date(CHK$Date)
str(CHK)
# Daily simple return from adjusted close; first value is NA (no prior day)
CHK$Return<-c(NA,diff(CHK$Adjusted)/CHK$Adjusted[-1])
View(CHK)
# ---- Working with time series ----
data_1<-xts(CHK$Return, order.by = CHK$Date) # xts is used to create an xts object from raw data inputs.
str(data_1)
data<-ts(CHK$Return[-1], frequency = 10) # frequency - number of observations per unit per time
plot(data)
plot(decompose(data),xlab="Time")
# ---- Analyzing time series ----
charts.PerformanceSummary(data_1)
table.AnnualizedReturns(data_1)
summary(data)
tstrain<-window(data,start = 1, end=20)
tstrain
tstest<-window(data, start=20, end=50)
####################Simple moving average###############
plot.ts(tstrain)
lines(ma(tstrain, order=11), col="red")
#####################Exponential smoothing#################################
library(forecast)
etstrain<-ses(tstrain,alpha = 0.6, initial="simple",h=5)
plot(etstrain)
etsfor<-forecast(etstrain)
accuracy(etsfor,tstest)
# ---- Choosing the strategy ----
CHK$ReturnSign<-ifelse(CHK$Return<0,"Down","Up")
View(CHK)
# ---- Creating a column to forecast tomorrow's return ----
CHK$Forecast<-c(CHK$ReturnSign[-1],NA)
# ---- Choosing dependent variables (day and sign) ----
CHK$Day<-factor(weekdays(CHK$Date))
CHK$Day
CHK$DaySign<-factor(ifelse(CHK$Close-CHK$Open>0,1,-1))
CHK$DaySign
View(CHK)
# Drop first row (NA return) and last row (NA forecast)
CHK<-CHK[-c(1,nrow(CHK)),]
str(CHK)
# ---- Creating data sets ----
train<-CHK[1:750, ]
test<-CHK[-(1:750),]
# ---- Decision tree ----
tree<-rpart(data=train, Forecast ~ ReturnSign+Day+DaySign, method = "class")
tree
fancyRpartPlot(tree, sub = "Simple tree")
Prediction<-predict(tree, test, type="class")
Prediction
# ---- Caret package ----
library(caret)
# NOTE: `train` is both the data frame above and caret::train; R resolves
# the call position to the function, so this works, but it is fragile.
modFit<-train(Forecast~., method="rpart", data=train)
print(modFit$finalModel)
plot(modFit$finalModel, uniform =TRUE, main="Classification Tree")
text(modFit$finalModel, use.n=TRUE, all=TRUE)
# ---- k-means clustering demo on iris ----
library(ggplot2)
data(iris)
names(iris)
table(iris$Species)
plot(iris$Petal.Width, iris$Sepal.Width, pch=19, col=as.numeric(iris$Species))
inTrain<-createDataPartition(y=iris$Species, p=.7, list=F)
training<-iris[inTrain,]
testing<-iris[-inTrain,]
kc <- kmeans(subset(training, select=-c(Species)), centers=3)
kc
training$clusters<-as.factor(kc$cluster)
qplot(Petal.Width, Petal.Length, colour=clusters, data=training)
table(kc$cluster, training$Species)
|
3509fd62618f1594b9b9c78e5cfffc77a0e37840
|
432d68b44e60d0fa1c23efb71ffa69933aa89f8a
|
/R/Chapter 2/exercise-8.R
|
c80f9de2f6fe7c3829577d26bdecdf58c2508b5b
|
[] |
no_license
|
ShilpaGopal/ISLR-exercise
|
5a5951118a33ef7dd8a9b3a6f7493bfd7b89b5ff
|
9705905d36bd74086b133e4a5f937e7a4ab18e5e
|
refs/heads/master
| 2020-12-04T11:12:01.024951
| 2020-03-21T10:23:12
| 2020-03-21T10:23:12
| 231,741,200
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 508
|
r
|
exercise-8.R
|
# ISLR Chapter 2, Exercise 8: exploratory analysis of the College data set.
college = read.csv("./data/College.csv")
fix(college)  # interactive spreadsheet editor; requires an interactive/GUI session
# Promote the first column (college names) to row names, then drop it
rownames(college)=college[,1]
fix(college)
college=college[,-1]
fix(college)
summary(college)
pairs(college[,1:10])  # scatterplot matrix of the first ten numeric variables
college[1,]
attach(college)  # NOTE(review): attach() is discouraged -- columns now mask globals
plot(Outstate,Private)  # NOTE(review): likely intended plot(Private, Outstate) to get side-by-side boxplots of tuition by Private -- confirm
# Flag "elite" schools: more than 50% of students from the top 10% of HS class
Elite=rep("No",nrow(college))
Elite[Top10perc>50]="Yes"
Elite=as.factor(Elite)
college=data.frame(college,Elite)
summary(college)
boxplot(Outstate,Elite)  # NOTE(review): probably meant boxplot(Outstate ~ Elite); as written this draws two unrelated boxes
par(mfrow=c(2,2))
# School with the highest share of top-10% students
college[which.max(Top10perc),]
# School with the lowest acceptance rate
acceptance_rate <- college$Accept/college$Apps
college[which.min(acceptance_rate),]
|
0c238c9379ae7f5422ae2b54ac72ad0641243089
|
5217d14779a01179bfd440b689a4aea067d9e043
|
/MachineIntelligence/e2/multlines_sample.R
|
d0853b86262a8cf0958309dc374b6ea11cb2a7fc
|
[
"MIT"
] |
permissive
|
CFWLoader/supreme-bassoon
|
f0a960a29cf052b76d5b898b4b4151776efc7536
|
f20e45118a141084a0fb0d640e937e0e739cc3f6
|
refs/heads/master
| 2020-03-07T05:49:04.731876
| 2019-04-10T03:43:48
| 2019-04-10T03:43:48
| 127,306,468
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 422
|
r
|
multlines_sample.R
|
library(ggplot2)
# Plot three curves (linear, quadratic, scaled-linear) over x in [-3, 3]
# and save the figure to sample1.png.
slope = 1
x = seq(-3, 3, 0.1)
# print(x)
y = slope * x
y2 = x**2 + -3
y3 = 0.3 * x
# print(y)
# Fix: include y3 in the data frame. The original omitted it, so
# geom_line(aes(y = y3)) silently fell back to the global environment --
# same picture, but fragile if the script is refactored.
data = data.frame(x = x, y = y, y2 = y2, y3 = y3)
# print(head(data))
p = ggplot(data, aes(x = x)) + geom_line(aes(y = y), color = "red") + geom_line(aes(y = y2), color = "blue") + geom_line(aes(y = y3), color = "green")
ggsave("./sample1.png")
# for(angle in seq(0, 180, 10))
# {
#     print(tan(angle))
# }
|
261ab937b26f7676d1f3d2680c76d11b888c16a7
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/GDELTtools/examples/GetAllOfGDELT.Rd.R
|
a34dc481d69d79f65322a01a5057a36502a17419
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 214
|
r
|
GetAllOfGDELT.Rd.R
|
# Extracted runnable example from the GetAllOfGDELT help page (GDELTtools
# package); the actual download is wrapped in "Not run" because it fetches
# the entire GDELT archive.
library(GDELTtools)
### Name: GetAllOfGDELT
### Title: Download all the GDELT files to a local folder
### Aliases: GetAllOfGDELT
### ** Examples
## Not run:
##D GetAllOfGDELT("~/gdeltdata")
## End(Not run)
|
7f7ee15ab6d2ed80f5230a7a609f30b84752988b
|
1c2f5a50ab2b63a17adaac9af1dfaabaaf5d3091
|
/test_run.R
|
c6207ded9ad3b93d4f4881bd34820003495259ee
|
[] |
no_license
|
pra1981/PeakSegFPOP
|
196ce4f055a950cb51110faca9e0cc4ace96d77d
|
06d6e429520383ab42a7ced79b778ed5496b7df5
|
refs/heads/master
| 2020-04-25T14:08:59.951686
| 2018-02-07T20:48:23
| 2018-02-07T20:48:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 132
|
r
|
test_run.R
|
# Test harness entry point: runs the testthat suite named by the
# TEST_SUITE environment variable, defaulting to "test_cases.R".
source("test_functions.R")
# getenv.or() is presumably defined in test_functions.R (env var with
# fallback) -- TODO confirm
file.name <- getenv.or("TEST_SUITE", "test_cases.R")
test_file(file.name, reporter=c("summary", "fail"))
|
120e442e9cf79480dbc74ab1df5a091bff9e9304
|
c92e19489e4d8c40fe44ebb70ddf94b2c47fd56e
|
/man/print.cma.Rd
|
ee824ab69b658023e27bb433dc9db34040e59b76
|
[] |
no_license
|
rexmacey/AA2
|
0b1ee392c14690e36c38f50d3b5f1b968c6344bd
|
b9751f3ff71089a56edbecd7d6d24d02476d8dce
|
refs/heads/master
| 2021-01-04T03:46:58.743292
| 2020-03-02T18:53:45
| 2020-03-02T18:53:45
| 240,349,624
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 254
|
rd
|
print.cma.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cma.r
\name{print.cma}
\alias{print.cma}
\title{Print cma object}
\usage{
\method{print}{cma}(cma, ...)
}
\arguments{
\item{cma}{cma object}
}
\description{
Print cma object
}
|
aff20841a8e7518b8627edaa521c5187a809e096
|
7a95abd73d1ab9826e7f2bd7762f31c98bd0274f
|
/meteor/inst/testfiles/ET0_ThornthwaiteWilmott/libFuzzer_ET0_ThornthwaiteWilmott/ET0_ThornthwaiteWilmott_valgrind_files/1612736270-test.R
|
70216ba7f6f51ca5303062cf490ab034b3068b72
|
[] |
no_license
|
akhikolla/updatedatatype-list3
|
536d4e126d14ffb84bb655b8551ed5bc9b16d2c5
|
d1505cabc5bea8badb599bf1ed44efad5306636c
|
refs/heads/master
| 2023-03-25T09:44:15.112369
| 2021-03-20T15:57:10
| 2021-03-20T15:57:10
| 349,770,001
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,961
|
r
|
1612736270-test.R
|
testlist <- list(doy = 1.39065002449955e-309, latitude = c(7.2893454370416e-304, -8.08436732081049e-174, 3.30199188178139e-312, -2.36757568622891e-150, 5.43230890149031e-312, 0, -1.09007158655572e-175, 9.82871840871573e-306, 6.83631741178536e-304, 5.77591857965479e-275, 4.90971575050201e-315, -5.48612925997371e+303, -3.98030836541812e+49, 2.25252608503604e-23, 5.54016244867193e-302, 2.77448001762433e+180, -1.91426406366611e-130, 2.37814268724097e-24, 7.03104152329175e-24, 1.23548758799897e-258, -5.55309858871845e+303, -2.67250222695627e+154, 2.52467545024877e-321, -1.46694670543806e+266, -1.02357065452893e+270, 2.68373944684598e+199, -1.60275442450381e-180, 5.17262327081614e+160, -5.59813476042989e+303, NaN, 4.87604047608838e-153, 3.6459502693419e-63, -8.89435855579363e+298, -5.59813476042989e+303, 3.23785921002061e-319, 0, 0, 0, -5.48612406879369e+303, 7.41841675062135e-68, 1.48539705362628e-313, 4.78064451174013e-310, 2.77448001762435e+180, 2.77447960798337e+180, 2.77448001762435e+180, 8.1760557075343e-308, -5.48612406879369e+303, 2.77448001762442e+180, NaN, -4.93751166574633e+304, 2.77448002205057e+180, 6.34899725744383e-66, NaN, 1.76865539779448e-257, NaN, NaN, NaN, 7.2911220195564e-304, 2.37779874001328e-24, -5.9790152992453e+303, 1.65133240632716e-317, 0, 0, 0, 0, 0, NaN, 2.77448002229206e+180, 2.77448001762435e+180, -9.7746030286984e-150, -5.69871146017105e+306, 2.77448001762435e+180, 7.29112203029253e-304, -5.70159252079474e+303, 7.29111856797089e-304, 3.24208232615797e+178, 2.77448001762435e+180, 2.92406770015855e+179, 5.50814852228496e-310, -4.25657045508627e+149, 1.88499733249789e-24, 2.92435798496996e+179, 2.77478592360003e+180, -4.69526163440602e+305, 7.20656921293291e-304, 1.51479537907587e-307, -2.3676339983841e-150, 0, NaN, NaN, 3.06694703668884e-315, -5.48612925997371e+303, 1.09854950996419e+248 ), temp = NA_real_)
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result)
|
4acd112b185919b961cb70f9783ebee7edc98533
|
44cf65e7ab4c487535d8ba91086b66b0b9523af6
|
/data/Newspapers/1999.10.27.editorial.55258.0172.r
|
c78f83bd38bbd10bf08a0247256976ae513cad93
|
[] |
no_license
|
narcis96/decrypting-alpha
|
f14a746ca47088ec3182d610bfb68d0d4d3b504e
|
5c665107017922d0f74106c13d097bfca0516e66
|
refs/heads/master
| 2021-08-22T07:27:31.764027
| 2017-11-29T12:00:20
| 2017-11-29T12:00:20
| 111,142,761
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,528
|
r
|
1999.10.27.editorial.55258.0172.r
|
se intimpla des ca oameni despre care ne pregatim sa scriem un articol sa sune pentru a face demersuri de pace .
ei sint obisnuiti cu ideea ca totul se poate aranja .
cei mai coltosi ameninta in speranta ca vom da inapoi .
altii fac promisiuni .
cei mai abili trec totul intr - un limbaj de miere .
ai zice ca sint curati ca lacrima si numai ziarul se impotriveste ca sufletul lor sa ajunga in Rai .
domnul Mihail Ciupercescu ( pe care il cunosc doar reporterii " Evenimentului zilei " ) mi - a trimis o scrisoare de un umor nebun , dar si extrem de pretios .
domnia sa , surprins de redactorii nostri in timp ce incerca sa vinda niste frigidere donate din Italia pentru romanii saraci , imi scrie despre caritate si despre clementa , citindu - l pe Karl Jaspers .
caritatea fiind , in acceptiunea filosofului german , " cea care deschide spatiul unei dreptati nejuridice , contrapuse acelor reglementari umane , care , in aplicarea lor , prezinta neajunsuri si / sau nedreptati " .
am tot citit fraza trimisa de domnul Ciupercescu si data ca fiind definitia lui Jaspers asupra caritatii .
si jur ca , in afara de a ma scarpina dupa ureche , n - am inteles care poate fi legatura intre frigiderele italienesti , " afacerea " lui , saracii romani si " spatiul unei dreptati nejuridice , contrapuse acelor reglementari umane , care , in aplicarea lor , prezinta neajunsuri sau nedreptati " .
la ce era necesara toata aceasta aiureala de text pentru a justifica o poveste dubioasa ?
un functionar public de la Primaria Sectorului 5 Bucuresti a fost depistat de doi ziaristi incercind sa vinda 150 de frigidere , venite din Italia ca donatie pentru saraci .
un om serios , in fata unei asemenea situatii penibile , isi da demisia si pleaca invirtindu - se .
oricit ai rasuci opera unui mare filosof , tot nu poti gasi in ea o justificare pentru o gainarie .
si daca totusi izbutesti , atunci ea nu depaseste conditia unei proptele pentru o constiinta strimba .
tot domnul Mihail Ciupercescu zice ca " rolul Dumneavoastra ( n . n . - al ziaristilor ) se margineste doar la a indica o stare sau o actiune dubioasa , iar restul este problema justitiei sau a organelor idrept " .
zisa Domniei sale este extrem de corecta .
asta si facem .
scriem azi ca un functionar public s - a pretat la o indeletnicire dubioasa .
restul e treaba justitiei .
in plus , noi ne mai intrebam daca domnul Ciupercescu , in aceeasi postura , nu s - a dedat si la alte fapte veroase ?
si ca niste " animale " ( expresia ii apartine lui Ion Iliescu ) , mergem mai departe ! Nu ne limitam doar la a semnala .
zicem ca este o mare nerusinare ca , intr - o tara cu atitia saraci , un functionar public , in loc sa se bucure ca poate face un bine , sa vrea sa scoata bani si dintr - o pomana !
dar pentru ca sintem ai dracului si nu putem tacea , mergem si mai departe si zicem ca domnul Ciupercescu nu e unicat .
ca multi dintre functionarii publici se poarta ca niste lipitori .
ca pentru orice prestatie care intra in obligatia lor incearca sa obtina si un profit pentru ei insisi .
ba , cu incapatinare , mai facem un pas . Zicem ca aceasta atitudine s - a incetatenit de multi ani si asteptam ca CDR , o data venita la putere , sa izbuteasca o schimbare .
sa arate oamenilor ca functionarii importanti numiti de coalitie nu mai sint ca flaminzii guvernarilor anterioare . Dar constatam ca nu s - a schimbat decit culoarea roiului de muste infometate .
de fapt , zece ani de democratie n - au adus prea multe noutati in mintea unora .
afacere si economie de piata au inca sensuri grotesti .
sa vinzi ce nu e al tau , sa iei banii de la cineva si , daca se poate , sa nu - i dai nimic in schimb . Sa pirlesti pe oricine , indiferent daca e legal sau nu .
un fel de codru in care cine poate oase roade , cine nu , isi scutura buzunarele .
o disperare la limita salbaticiei si dincolo de orice perimetru al normelor morale ii mina orbeste in lupta pentru bani pe politicieni si functionari , incit nu stii unde duc toate .
cu siguranta nu la ordine , nu la lege si nu la normalitate .
sintem " doctori " in orice daca domnul Ciupercescu a revolutionat si caritatea .
in Dictionarul enciclopedic definitia este clara : " Atitudine miloasa , plina de generozitate fata de cineva " .
dat naibii trebuie sa mai fii ca , pentru a justifica o gainarie cu frigidere , sa schimbi si dictionarele , folosindu - te de un filosof german si zicind ca , de fapt , caritatea este " cea care deschide spatiul unei dreptati nejuridice " .
|
c16ef59189f65246bf80d8a42da405c501f70e8b
|
3c3e7e8e37a9806d8495a4a3d85488609a296870
|
/data.r
|
6cc760fb1b9376c634e7c388f5a2dc35a651a8fe
|
[] |
no_license
|
MikeGongolidis/CSMush
|
f2f507760eaf3b2fe44263a09bcba205f9666e0c
|
d8b321db63ccc168960b25d30045dc0f9e84b107
|
refs/heads/master
| 2020-04-10T17:52:26.016169
| 2018-12-10T21:44:55
| 2018-12-10T21:44:55
| 161,187,009
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,141
|
r
|
data.r
|
# Mushroom edibility classification: fits GBM, rpart and random-forest
# models via caret on mushrooms_v2.csv, then draws exploratory plots.
library(ggplot2) # Data visualization
library(readr) # CSV file I/O, e.g. the read_csv function
library(caret)
library(randomForest)
library(caTools) #<- For stratified split
library(rpart.plot)
library(gridExtra)
train01<-read.csv("mushrooms_v2.csv")
mushrooms <- train01
mushrooms$class <- NULL
names <- colnames(mushrooms)  # NOTE: shadows base::names within this script
# Number of factor levels per column of the raw data
z<-cbind.data.frame(Var=names(train01), Total_Class=sapply(train01,function(x){as.numeric(length(levels(x)))}))
print(z)
set.seed(12)
# Stratified 70/30 split on the class label (shadows base::sample)
sample = sample.split(train01$class, SplitRatio = .7)
x_train = subset(train01, sample == TRUE)
x_test = subset(train01, sample == FALSE)
#__________________________
y_train<-x_train$class
y_test <- x_test$class
x_train$class<-NULL
x_test$class<-NULL
# Create a stratified sample for repeated cv
cv.10.folds<-createMultiFolds(y_train,k=10,times=2)
# create a control object for repeated cv in caret
ctrl.1<-trainControl(method="repeatedcv",number=10,repeats=2,index=cv.10.folds)
#GBM
gbm<-train(x=x_train,y=y_train,method="gbm", trControl=ctrl.1,tuneLength=3,verbose=FALSE)
plot(gbm,main="Gradient Boosting Model ")
y_predicted<-predict(gbm,x_test)
df1<-data.frame(Orig=y_test,Pred=y_predicted)
confusionMatrix(table(df1$Orig,df1$Pred))
#RPART
rpart <-train(x=x_train,y=y_train,method="rpart",trControl=ctrl.1,tuneLength=5)
plot(varImp(rpart),main="RPart - Variable Importance Plot")
plot(rpart,main="RPart")
rpart.plot(rpart$finalModel)
y_predicted<-predict(rpart,x_test)
df1<-data.frame(Orig=y_test,Pred=y_predicted)
confusionMatrix(table(df1$Orig,df1$Pred))
#RANDOM FOREST
rf<-train(x=x_train,y=y_train,method="rf",trControl=ctrl.1,tuneLength=3)
summary(rf) #acc =0.8907171
plot(varImp(rf),main="Random Forest - Variable Importance Plot")
y_predicted<-predict(rf,x_test)
df1<-data.frame(Orig=y_test,Pred=y_predicted)
confusionMatrix(table(df1$Orig,df1$Pred))
# TEST HERE OUR WORK
test_variable <- mushrooms[1322,]
# Fix: the original called predict(mod3, ...) but `mod3` is never defined
# in this script (runtime error). Use the random-forest model fitted above.
predict(rf,test_variable)
#y = data.frame("a","b","f","f","f","f","a","b","f","f","f","f","a","b","f","f","f","f","a","b","f","f")
#names(y)<- names
#predict(rf.1.cv,y)
###############################
# PLOTS #
###############################
m1 <- ggplot(aes(x = cap.shape), data = mushrooms) +
geom_histogram(stat = "count") +
facet_wrap(~class) +
xlab("Cap Shape")
m2 <- ggplot(aes(x = cap.surface), data = mushrooms) +
geom_histogram(stat = "count") +
facet_wrap(~class) +
xlab("Cap Surface")
m3 <- ggplot(aes(x = cap.color), data = mushrooms) +
geom_histogram(stat = "count") +
facet_wrap(~class) +
xlab("Cap Color")
m4 <- ggplot(aes(x = bruises), data = mushrooms) +
geom_histogram(stat = "count") +
facet_wrap(~class) +
xlab("Bruises")
m5 <- ggplot(aes(x = odor), data = mushrooms) +
geom_histogram(stat = "count") +
facet_wrap(~class) +
xlab("Odor")
grid.arrange(m1, m2, m3, ncol = 2)
grid.arrange(m4,m5,ncol=2)
p = ggplot(mushrooms,aes(x=odor,
y=spore.print.color,
color=class))
p + geom_jitter(alpha=0.3) +
scale_color_manual(breaks = c('Edible','Poisonous'),
values=c('darkgreen','red'))
|
cdd95dd72f26656f7791c98bf2354915a46dd7f6
|
59aa1bb6b73f544701bd462af230d08e1ffa6d1a
|
/smpredict/R/standardise.R
|
aefeba8805ecfe2d414e048ecd5a36b01fe7717a
|
[] |
no_license
|
rnaimehaom/smpredict
|
8cdbaad221b7ba41be38319fe4ea6da7dc2c529c
|
c498938bcf6dd43e6fd23eef959b0adf8d696c37
|
refs/heads/master
| 2023-03-16T03:43:06.804746
| 2015-04-24T21:26:10
| 2015-04-24T21:26:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,441
|
r
|
standardise.R
|
StandardiseMolecules <- function(structures.file, standardised.file, is.training = FALSE, name.file = "", limit = -1) {
  # Standardise molecular structures via the compiled routine
  # R_standardiseMolecules, reading SDF or SMILES input and writing the
  # standardised molecules to an SDF file.
  #
  # structures.file:   input path (.sdf or .smi)
  # standardised.file: output path; expected to end in .sdf
  # is.training:       passed to the C routine as 0/1
  # name.file:         optional molecule-name file ("" = none)
  # limit:             max molecules to process (-1 = no limit)
  #
  # Console output of the C routine is diverted to standardisation.log.
  file.ext <- function(path) {
    parts <- strsplit(path, "\\.")[[1]]
    tolower(parts[length(parts)])
  }
  # The original print()ed these problems and carried on, which made the
  # later .C call fail obscurely; fail fast / warn instead.
  if (!file.exists(structures.file))
    stop("File does not exist: ", structures.file)
  if (file.ext(standardised.file) != "sdf")
    warning("standardised.file should have SDF extension", call. = FALSE)
  in.type <- file.ext(structures.file)
  if (!(in.type %in% c("sdf", "smi")))
    stop("Unrecognised file type: ", in.type)
  if (in.type == "sdf") {
    print("Standardising Structures: Reading SDF (R)")
  } else {
    print("Standardising Structures: Reading SMILES (R)")
  }
  sink(file="standardisation.log", append=FALSE, split=FALSE)
  # Guarantee the sink is restored even if the C call errors; the
  # original left the diversion open in that case.
  on.exit(sink(), add = TRUE)
  .C("R_standardiseMolecules", structures.file, standardised.file,
     as.integer(in.type == "sdf"), as.integer(is.training),
     name.file, as.integer(limit))
  invisible(NULL)
}
StandardiseMoleculesNoProtonation <- function(structures.file, standardised.file, is.training = FALSE, name.file = "", limit = -1) {
  # Standardise molecules without protonation adjustment.
  #
  # NOTE(review): the original body was byte-identical to
  # StandardiseMolecules -- no "no protonation" flag is passed to the C
  # routine, so the two functions currently behave the same. Presumably a
  # distinguishing argument is missing; confirm against the C interface.
  #
  # Arguments mirror StandardiseMolecules; see that function for details.
  file.ext <- function(path) {
    parts <- strsplit(path, "\\.")[[1]]
    tolower(parts[length(parts)])
  }
  # Fail fast instead of print-and-continue (the original carried on and
  # let the .C call fail obscurely).
  if (!file.exists(structures.file))
    stop("File does not exist: ", structures.file)
  if (file.ext(standardised.file) != "sdf")
    warning("standardised.file should have SDF extension", call. = FALSE)
  in.type <- file.ext(structures.file)
  if (!(in.type %in% c("sdf", "smi")))
    stop("Unrecognised file type: ", in.type)
  if (in.type == "sdf") {
    print("Standardising Structures: Reading SDF (R)")
  } else {
    print("Standardising Structures: Reading SMILES (R)")
  }
  sink(file="standardisation.log", append=FALSE, split=FALSE)
  # Close the diversion even if the C call errors (original leaked it).
  on.exit(sink(), add = TRUE)
  .C("R_standardiseMolecules", structures.file, standardised.file,
     as.integer(in.type == "sdf"), as.integer(is.training),
     name.file, as.integer(limit))
  invisible(NULL)
}
|
7ca16d5bc4137513563af6030d84fed6d34af5a0
|
06eff7cef9e88eaad3d9f128efd509d67c7cef87
|
/man/download_demo.Rd
|
40e40f8ea17f92a9fcd436d509be09d7d8d97c83
|
[
"MIT"
] |
permissive
|
anastasiyaprymolenna/AlpsNMR
|
384ef42297232710175b0d25382b4247697a04e5
|
c870f9aa07b4a47646f742a13152ca747608f01d
|
refs/heads/master
| 2022-12-03T10:48:41.554384
| 2020-08-26T14:43:34
| 2020-08-26T14:43:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 537
|
rd
|
download_demo.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/download_MTBLS242_demo.R
\name{download_demo}
\alias{download_demo}
\title{Download the MTBLS242 dataset}
\usage{
download_demo(to = ".")
}
\arguments{
\item{to}{A directory}
}
\value{
A folder with demo samples
}
\description{
The function downloads the NMR dataset from Gralka et al., 2015. DOI: 10.3945/ajcn.115.110536.
}
\examples{
\dontrun{ #it takes too much time
download_demo(to = ".")
}
}
\references{
\url{https://doi.org/10.3945/ajcn.115.110536}
}
|
94bacb6b796f567a6e7c33138aad0f1eb555dad5
|
ae7f33e2c00186f3bb551e2c27618a727a9df00b
|
/src/plot2.R
|
e2c5c48090d1bc2a0b8e7b19c3d4a18b9bbf2166
|
[] |
no_license
|
jsko0112/ExData_Plotting1
|
4c07dcb1a56e3d3868333c4e6328e340b059b3be
|
a1fe3173ca55c013da86d05d0788b14b2330472f
|
refs/heads/master
| 2020-11-30T12:31:52.155405
| 2014-06-08T04:44:21
| 2014-06-08T04:44:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 466
|
r
|
plot2.R
|
# 1. Get the data from URL
# 2. Make tidy data with subset
# 3. Cache the tidy data
source("./getting and cleaning.R")
#Plot Name : make the full plot name by common function(getPlotName())
plot.name <- "plot2"
#readData
data <- readData()
#save the plot as PNG
png(getPlotName(), width=480, height=480)
message("Making plot2!")
# Fix: the original plotted ds$Time / ds$Global_active_power, but `ds` is
# never defined in this script -- the object read above is `data`.
# (Assumes the sourced file does not define a global `ds`; confirm.)
plot(x=data$Time,
     y=data$Global_active_power,
     ylab="Global Active Power (kilowatts)",
     xlab="",
     type="l")
dev.off()
|
1324f275f98d87612ff46185eb082b6952194ceb
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/PerformanceAnalytics/examples/SmoothingIndex.Rd.R
|
f794653e4c94473dadfbc309c549650c7f4550f0
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 286
|
r
|
SmoothingIndex.Rd.R
|
# Extracted runnable example from the SmoothingIndex help page
# (PerformanceAnalytics package) on the bundled managers/edhec data sets.
library(PerformanceAnalytics)
### Name: SmoothingIndex
### Title: calculate Normalized Getmansky Smoothing Index
### Aliases: SmoothingIndex
### ** Examples
data(managers)
data(edhec)
SmoothingIndex(managers[,1,drop=FALSE])
SmoothingIndex(managers[,1:8])
SmoothingIndex(edhec)
|
37bee5fbb78aace85cd752f139d274aad2632709
|
54a74eed54ab34a9ea60287c7e8e233696d42c0e
|
/R/data_highschool.R
|
ffde27fe2a7caa24b35cc5dd637480761eb06446
|
[
"BSD-3-Clause"
] |
permissive
|
schochastics/ggraph
|
9cf86d7a040e3f4d1325a40670feef366ed74144
|
85b6df1a06df70746096e26244e9b28931d670f4
|
refs/heads/master
| 2020-07-01T18:58:36.741201
| 2019-08-15T12:39:19
| 2019-08-15T12:39:19
| 201,263,971
| 1
| 0
|
NOASSERTION
| 2019-08-14T17:33:03
| 2019-08-08T13:25:55
|
R
|
UTF-8
|
R
| false
| false
| 727
|
r
|
data_highschool.R
|
#' Friendship among high school boys
#'
#' This dataset shows the friendship among high school boys as assessed by the
#' question: "What fellows here in school do you go around with most often?".
#' The question was posed twice, with one year in between (1957 and 1958) and
#' shows the evolution in friendship between the two timepoints.
#'
#' @format
#' The graph is stored as an unnamed edgelist with a year attribute.
#' \describe{
#' \item{from}{The boy answering the question}
#' \item{to}{The boy being the answer to the question}
#' \item{year}{The year the friendship was reported}
#' }
#'
#' @source
#' Coleman, J. S. *Introduction to Mathematical Sociology*. New York: Free
#' Press, pp.450-451.
#'
'highschool'
|
f3c8f2cb063998766b69c584a7bae0c19b430ec8
|
d5f0e278606a16785a66ab2db91220cfdfd19d62
|
/R-Mini-Project Script.r
|
58b4cffa837732bb68ae87c5aa9d053f936c89a0
|
[] |
no_license
|
carolm5/MINI-PROJECT
|
7c896198c6facdfa1aa830c4c49d3c5e9b75aa12
|
c55f39a16d203ebc7067d3b031eefb548859f4c5
|
refs/heads/main
| 2023-08-15T06:17:24.494298
| 2021-09-17T12:45:27
| 2021-09-17T12:45:27
| 406,406,961
| 0
| 0
| null | null | null | null |
WINDOWS-1252
|
R
| false
| false
| 7,451
|
r
|
R-Mini-Project Script.r
|
## MSB7102 Mini-project, Semester I, 2021
**1.Import the data described above into R, provide descriptive summaries of the subject data (using appropriate graphics and statistical summary measures) given in the diabimmune_16s_t1d_metadata.csv file. In addition, use appropriate test(s) to check for association/independency between disease status and other variables (delivery mode, gender and age).**
```{r}
data <- read.csv("taxa_table_.csv");
#View(data)
```
```{r}
summary(data)
```
```{r}
head(data)
```
```{r}
dim(data)
```
```{r}
class(data)
```
**Find the number of cases and controls, males and females and the delivery routes.**
```{r}
table(data$Case_Control)
table(data$Gender)
table(data$Delivery_Route)
```
**Plot bargraphs showing the different associations.**
**Load the ggplot2 package.**
```{r}
library(ggplot2)
```
```{r}
qplot(data$Case_Control, fill = data$Gender) + geom_bar() + labs(title = "A bargraph showing the Cases and Controls against Gender", x = "Case_Control", y= "Frequency", fill = "Gender")
```
```{r}
# Fix: the legend title was misspelled "Delivey Route" in the original.
qplot(data$Case_Control, fill = data$Delivery_Route) + geom_bar()+ labs(title = "A bargraph showing the Cases and Controls against Delivery Route", x = "Case_Control", y= "Frequency", fill = "Delivery Route")
```
```{r}
qplot(data$Age_at_Collection) + geom_bar() + labs(title = "A bargraph showing the Age of participants", x = "Age (Days)", y= "Frequency")
```
**Check for association/independency between disease status and age, gender and delivery mode.**
**Generate a contingency table and perform a Chi-square statistics.**
```{r}
tbAge <- table(data$Case_Control,data$Age_at_Collection)
chisq.test(tbAge)
```
**From the statistical analysis, the P value is 0.6115, which is greater than 0.05; therefore we fail to reject the null hypothesis, indicating no evidence of an association between the disease state and the ages of the participants at collection.**
```{r}
tbGender <- table(data$Case_Control, data$Gender)
chisq.test(tbGender)
```
**From the statistical analysis, the P value is 0.5796, which is greater than 0.05; therefore we fail to reject the null hypothesis, indicating no evidence of an association between the disease state and gender.**
```{r}
tbDelivery_Route <- table(data$Case_Control, data$Delivery_Route)
chisq.test(tbDelivery_Route)
```
**From the statistical analysis, the P value is 3.949e-09, which is less than 0.05; therefore we reject the null hypothesis, indicating a statistically significant association between the disease state and the delivery route.**
**2. Using phyloseq, create a phyloseq object. This will comprise the OTU abundance, taxonomy (provided in the .txt file) and sample data (provided in the .csv file).**
**Import the OTU table.**
```{r}
otuTable <- read.table("otu_table") #Importing the OTU table
head(otuTable, n=1)
```
**Import the taxonomy table.**
```{r}
taxaTable <- read.table("taxa_table") #Importing the Taxa table
head(taxaTable, n=1)
```
**Check the dimension and class of each table.**
```{r}
dim(otuTable)
```
```{r}
dim(taxaTable)
```
```{r}
class(otuTable)
```
```{r}
class(taxaTable)
```
**Convert the tables from dataframe to matrix. This is because, phyloseq works better with a matrix.**
```{r}
#Converting the Taxa and OTU Table into a Matrix
mtaxaTable <- as.matrix(taxaTable)
motuTable <- as.matrix(otuTable)
```
**Check the class of the matrices created.**
```{r}
class(motuTable)
```
```{r}
class(mtaxaTable)
```
**Data Cleansing.**
This is done to have consistent data across all the matrices. It involves making sure that the OTU/taxa row names match. Currently they don't as taxa have a trailing ";"
#rownames(mtaxaTable)[rownames(mtaxaTable) == "4333897;"] = "4333897"
```{r}
head(mtaxaTable, n=1)
Rnames <- rownames(mtaxaTable) #Extract rownames from the matrix
NRnames <- gsub(x = Rnames, pattern = ";", replacement = "") #Remove the ; from the extracted rownames
#NRnames
rownames(mtaxaTable) <- NRnames #Set the new rownames
head(mtaxaTable, n=1) #Check to confirm that changes have been made
```
**Load the phyloseq package.**
```{r}
library(phyloseq)
```
**Create a phyloseq object**
```{r}
#Tell phyloseq to load them into a phyloseq object
OTU = otu_table(motuTable, taxa_are_rows = TRUE)
TAX = tax_table(mtaxaTable)
#OTU
#TAX
#Generating the phyloseq object and view it
physeq = phyloseq(OTU, TAX)
physeq
#Plotting the phyloseq
plot_bar(physeq, fill = "Family.")
```
**3. Generate Alpha diversity plots and ordination plots. Examine any observed patterns by delivery mode, gender and disease status.**
**Generate a default plot from the plot_richness function.**
```{r}
plot_richness(physeq) #Default plot produced by the plot_richness function
```
**Merge the sample data into the phyloseq object to observe patterns across variables.**
```{r}
library(tibble)
s_data <- column_to_rownames(data, var="ï..Sample_ID")
s_data <- sample_data(s_data)
#s_data
mergedPhyseq <- merge_phyloseq(physeq, s_data)
mergedPhyseq
```
**Alpha diversity comparison between the Delivery route in cases and controls.**
```{r}
plot_richness(mergedPhyseq, x ="Case_Control", color="Delivery_Route")
```
**Alpha diversity comparison between the Gender in Cases and Controls.**
```{r}
plot_richness(mergedPhyseq, x ="Case_Control", color="Gender")
```
**Alpha diversity comparison between the Age in Cases and Controls.**
```{r}
plot_richness(mergedPhyseq, x ="Case_Control", color="Age_at_Collection")
```
**4. Perform differential abundance using DEseq2.**
**Load the DESeq2 package.**
```{r}
library("DESeq2")
```
**Convert the phyloseq object to deseq.**
```{r}
cacos = phyloseq_to_deseq2(mergedPhyseq, ~ Case_Control)
```
**Calculate geometric mean to estimate size factor.**
```{r}
gm_mean = function(x, na.rm=TRUE){
exp(sum(log(x[x > 0]), na.rm=na.rm) / length(x))
}
geoMeans = apply(counts(cacos), 1, gm_mean)
cacos = estimateSizeFactors(cacos, geoMeans = geoMeans)
cacos = DESeq(cacos, fitType="local")
```
**Perform the test using the DESeq function.**
```{r}
cacos = DESeq(cacos, test="Wald", fitType="parametric")
```
**Investigate the test result table.**
```{r}
res = results(cacos, cooksCutoff = FALSE)
alpha = 0.01
sigtab = res[which(res$padj < alpha), ]
sigtab = cbind(as(sigtab, "data.frame"), as(tax_table(mergedPhyseq)[rownames(sigtab), ], "matrix"))
head(sigtab)
```
```{r}
dim(sigtab)
```
**Visualize the result.**
```{r}
library("ggplot2")
theme_set(theme_bw())
scale_fill_discrete <- function(palname = "Set1", ...) {
scale_fill_brewer(palette = palname, ...)
}
# Phylum order
x = tapply(sigtab$log2FoldChange, sigtab$Phylum, function(x) max(x))
x = sort(x, TRUE)
sigtab$Phylum = factor(as.character(sigtab$Phylum), levels=names(x))
# Genus order
x = tapply(sigtab$log2FoldChange, sigtab$Genus, function(x) max(x))
x = sort(x, TRUE)
sigtab$Genus = factor(as.character(sigtab$Genus), levels=names(x))
ggplot(sigtab, aes(x=Genus, y=log2FoldChange, color=Phylum)) + geom_point(size=6) +
theme(axis.text.x = element_text(angle = -90, hjust = 0, vjust=0.5))
```
|
3855d9246f5aaaae49948a2f6ba2d9c9e7997163
|
3376043d518eda22caabf0df3db554a9cc43eedd
|
/ggtext.R
|
7f6efe1e62a690665db08b6dcaee1f0039d977c5
|
[] |
no_license
|
MaxCodeXTC/youtube-r-snippets
|
23641522e3cbb2a15fe27386e1df03b296753a5d
|
2a367fa4bd6323ac2e8d8fb3f0084164ba659247
|
refs/heads/master
| 2022-11-09T19:43:20.757946
| 2020-06-23T20:54:26
| 2020-06-23T20:54:26
| 275,516,907
| 1
| 0
| null | 2020-06-28T05:52:16
| 2020-06-28T05:52:16
| null |
UTF-8
|
R
| false
| false
| 2,107
|
r
|
ggtext.R
|
library(ggtext)
library(ggplot2)
ggplot(iris) +
geom_point(aes(x = Sepal.Length,
y = Petal.Length)) +
facet_wrap(~ Species) +
theme(
strip.text = element_textbox(
size = 12,
color = "white", fill = "red", box.color = "black",
halign = 0.5, linetype = 1, r = unit(5, "pt"), width = unit(1, "npc"),
padding = margin(2, 0, 1, 0), margin = margin(3, 3, 3, 3)
)
)
library(ggplot2)
library(ggtext)
ggplot(iris) +
geom_point(aes(x = Sepal.Length,
y = Petal.Length)) +
labs(title = "**iris analysis** <br/>
<i style='color:red; padding-left:-4px'>super</i>") +
theme(
plot.title = element_textbox_simple(
fill = 'yellow',
padding = margin(5.5, 5.5, 5.5, 5.5) )
)
ggplot(mtcars, aes(disp, mpg)) +
geom_point() +
labs(
title = "<b>Fuel economy vs. engine displacement</b><br>
<span style = 'font-size:10pt'>Lorem ipsum *dolor sit amet,*
consectetur adipiscing elit, **sed do eiusmod tempor incididunt** ut
labore et dolore magna aliqua. <span style = 'color:red;'>Ut enim
ad minim veniam,</span> quis nostrud exercitation ullamco laboris nisi
ut aliquip ex ea commodo consequat.</span>",
x = "displacement (in<sup>3</sup>)",
y = "Miles per gallon (mpg)<br><span style = 'font-size:8pt'>A measure of
the car's fuel efficiency.</span>"
) +
theme(
plot.title.position = "plot",
plot.title = element_textbox_simple(
size = 13,
lineheight = 1,
padding = margin(5.5, 5.5, 5.5, 5.5),
margin = margin(0, 0, 5.5, 0),
fill = "cornsilk"
),
axis.title.x = element_textbox_simple(
width = NULL,
padding = margin(4, 4, 4, 4),
margin = margin(4, 0, 0, 0),
linetype = 1,
r = grid::unit(8, "pt"),
fill = "azure1"
),
axis.title.y = element_textbox_simple(
hjust = 0,
orientation = "left-rotated",
minwidth = unit(1, "in"),
maxwidth = unit(2, "in"),
padding = margin(4, 4, 2, 4),
margin = margin(0, 0, 2, 0),
fill = "lightsteelblue1"
)
)
|
3e3eef5f44d1f5e247b753e7934107c7be431060
|
cc3f9eeaec6c6b1b47525f9317c71bb5dc4e7ed3
|
/analysis/Fig1F S3C_TCGA_SignaturesBySubtype.R
|
9827426dbb5ec9fed612a747f74a0f2020d9bf16
|
[] |
no_license
|
HongyuanWu/LUAD.subtypes
|
9061d3b35cf6dd19b7f135afd48ed818983a12c5
|
fd82d06eb19c4d527c47eb233204200ebfff9330
|
refs/heads/master
| 2023-01-02T04:30:17.322022
| 2020-10-18T20:32:26
| 2020-10-18T20:32:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,288
|
r
|
Fig1F S3C_TCGA_SignaturesBySubtype.R
|
study <- "TCGA"
data1 <- cbind(read.table("../data/TCGA_KRASmut_subtypes.tsv", sep="\t", header=TRUE, check.names=FALSE), KRAS="MUT")
data2 <- cbind(read.table("../data/TCGA_KRASwt_subtypes.tsv", sep="\t", header=TRUE, check.names=FALSE), KRAS="WT")
data1 <- data1[,-which(colnames(data1) %in% "NMF")]
data <- rbind(data1, data2)
data <- data[!data$Subtype %in% "unclassified", ]
data$Subtype <- factor(data$Subtype, levels=c("MUC", "PRO", "MES"))
nbTumors <- nrow(data)
colors <- rep("cyan3", nbTumors)
colors[data$Subtype %in% "PRO"] <- "mediumpurple2"
colors[data$Subtype %in% "MES"] <- "green3"
pchs <- rep(19, nbTumors) # circle
pchs[data$KRAS %in% "WT"] <- 17 # triangle
pdf("../figures/Fig1_TCGA_STK11signatureBySubtype.pdf", width=5.5, height=6.5)
par(mar=c(4.5, 4.5, 3, 1))
boxplot(data$STK11_deficiency_signature ~ data$Subtype, outline=FALSE,
ylab="STK11 signature", xlab="Subtype", main=paste0(study," (n=",nbTumors,")"),
cex.lab=1.8, cex.axis=1.5, cex.main=1.8, ylim=range(data$STK11_deficiency_signature))
points(jitter(as.numeric(data$Subtype)), data$STK11_deficiency_signature,
col=colors, pch=pchs, cex=1.5)
pval <- signif(kruskal.test(data$STK11_deficiency_signature ~ data$Subtype)$p.value, 2)
text(x=3, y=max(data$STK11_deficiency_signature), labels=paste0("p=",pval), cex=1.5)
dev.off()
t.test(data$STK11_deficiency_signature[data$Subtype %in% "PRO"],
data$STK11_deficiency_signature[data$Subtype %in% c("MUC","MES")],
alternative = "greater")$p.value
t.test(data$STK11_deficiency_signature[data$Subtype %in% "PRO" & data$KRAS %in% "MUT"],
data$STK11_deficiency_signature[data$Subtype %in% c("MUC","MES") & data$KRAS %in% "MUT"],
alternative = "greater")$p.value
t.test(data$STK11_deficiency_signature[data$Subtype %in% "PRO" & data$KRAS %in% "WT"],
data$STK11_deficiency_signature[data$Subtype %in% c("MUC","MES") & data$KRAS %in% "WT"],
alternative = "greater")$p.value
pdf("../figures/FigS3_TCGA_MucinoussignatureBySubtype.pdf", width=5.5, height=6.5)
par(mar=c(4.5, 4.5, 3, 1))
boxplot(data$Mucinous_differentiation_signature ~ data$Subtype, outline=FALSE,
ylab="Mucinous differentiation signature", xlab="Subtype", main=paste0(study," (n=",nbTumors,")"),
cex.lab=1.8, cex.axis=1.5, cex.main=1.8, ylim=range(data$Mucinous_differentiation_signature))
points(jitter(as.numeric(data$Subtype)), data$Mucinous_differentiation_signature,
col=colors, pch=pchs, cex=1.5)
pval <- signif(kruskal.test(data$Mucinous_differentiation_signature ~ data$Subtype)$p.value, 2)
text(x=3, y=max(data$Mucinous_differentiation_signature), labels=paste0("p=",pval), cex=1.5)
dev.off()
t.test(data$Mucinous_differentiation_signature[data$Subtype %in% "MUC"],
data$Mucinous_differentiation_signature[data$Subtype %in% c("PRO","MES")],
alternative = "greater")$p.value
t.test(data$Mucinous_differentiation_signature[data$Subtype %in% "MUC" & data$KRAS %in% "MUT"],
data$Mucinous_differentiation_signature[data$Subtype %in% c("PRO","MES") & data$KRAS %in% "MUT"],
alternative = "greater")$p.value
t.test(data$Mucinous_differentiation_signature[data$Subtype %in% "MUC" & data$KRAS %in% "WT"],
data$Mucinous_differentiation_signature[data$Subtype %in% c("PRO","MES") & data$KRAS %in% "WT"],
alternative = "greater")$p.value
|
0e48d72aa2c906a705b4dabdb1ac776a1e79ff89
|
873394d6cd11b544d602a7038c6e4890073727c7
|
/Lab_4.R
|
3f762548abf6c412d0e6274089086be7a4b898d4
|
[] |
no_license
|
michaelchoie/Introduction-to-Statistical-Learning
|
bb44c1c48155034da0ed99af776bd0f7cb17a2c6
|
fb52ee1124bfe7c0267727790a9aec4cbe26c9a8
|
refs/heads/master
| 2021-08-19T23:43:46.172749
| 2017-11-27T17:55:50
| 2017-11-27T17:55:50
| 106,619,417
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,141
|
r
|
Lab_4.R
|
#####################################################
# LAB 4.6.1: Stock Market Data
#####################################################
# Load library & data
library(ISLR)
Smarket <- data.frame(Smarket)
# View structure
str(Smarket)
summary(Smarket)
# Create correlation matrix (appears to be little collinearity)
# Meaning, no relation between previous and current day's returns
cor(Smarket[, -9])
# Plot data of biggest correlation (volume and time)
par(mfrow = c(1,2))
plot(Smarket$Volume)
plot(Smarket$Year, Smarket$Volume)
#####################################################
# LAB 4.6.2: Logistic Regression
#####################################################
# Run logistic regression to predict direction
glm.fit <- glm(formula = Direction ~ . - Year - Today, data = Smarket, family = binomial)
summary(glm.fit)
coef(glm.fit)
summary(glm.fit)$coef
# Get p-values
summary(glm.fit)$coef[, 4]
# type="response" option tells R to output probabilities of the form P(Y = 1|X)
# as opposed to other information such as the logit.
# If no data set is supplied to the predict() function
# then probabilities are computed for the training data that was used to fit the logistic regression
# values corresponds with prob of market going up, as seen with contrasts()
glm.probs <- predict(glm.fit, type = "response")
glm.probs[1:10]
contrasts(Smarket$Direction)
# In order to make a prediction as to whether the market will go up or down on a particular day
# we must convert these predicted probabilities into class labels, Up or Down.
glm.pred <- rep("Down", 1250)
glm.pred[glm.probs > 0.5] <- "Up"
# table() can be used to produce a confusion matrix
# in order to determine how many observations were correctly or incorrectly classified.
# Accuracy rate is 52.16%
table(glm.pred, Smarket$Direction)
mean(glm.pred == Smarket$Direction)
Smarket.2005 <- Smarket[!Smarket$Year < 2005, ]
dim(Smarket.2005)
Direction.2005 <- Smarket$Direction[!Smarket$Year < 2005]
# Fit logistic regression on the subset of data
train <- (Smarket$Year < 2005)
glm.fit <- glm(formula = Direction ~ . - Year - Today, data = Smarket, family = "binomial",
subset = train)
glm.probs <- predict(glm.fit, Smarket.2005, type = "response")
glm.pred <- rep("Down", 252)
glm.pred[glm.probs > .5] <- "Up"
table(glm.pred, Smarket.2005$Direction)
mean(glm.pred == Smarket.2005$Direction)
glm.fit <- glm(formula = Direction ~ Lag1 + Lag2, data = Smarket, family = "binomial", subset = train)
glm.probs <- predict(glm.fit, Smarket.2005, type = "response")
glm.pred <- rep("Down", 252)
glm.pred[glm.probs > .5] <- "Up"
table(glm.pred, Smarket.2005$Direction)
mean(glm.pred == Smarket.2005$Direction)
predict(glm.fit, newdata = data.frame(Lag1 = c(1.2, 1.5), Lag2 = c(1.1, -0.8)), type = "response")
#####################################################
# LAB 4.6.3: Linear Discriminant Analysis
#####################################################
library(MASS)
lda.fit <- lda(formula = Direction ~ Lag1 + Lag2, data = Smarket, subset = train)
lda.fit
plot(lda.fit)
# If −0.642 × Lag1 − 0.514 × Lag2 is large, then the LDA classifier will predict a market increase,
# and if it is small, then the LDA classifier will predict a market decline.
lda.pred <- predict(lda.fit, Smarket.2005)
names(lda.pred)
lda.class <- lda.pred$class
table(lda.class, Direction.2005)
mean(lda.class == Direction.2005)
sum(lda.pred$posterior[, 1] >= 0.5)
sum(lda.pred$posterior[, 1] < 0.5)
lda.pred$posterior[1:20, 1]
lda.class[1:20]
sum(lda.pred$posterior[, 1] > 0.9)
#####################################################
# LAB 4.6.4: Quadratic Discriminant Analysis
#####################################################
qda.fit <- qda(formula = Direction ~ Lag1 + Lag2, data = Smarket, subset = train)
qda.fit
qda.class <- predict(qda.fit, Smarket.2005)$class
table(qda.class, Smarket.2005$Direction)
mean(qda.class == Smarket.2005$Direction)
#####################################################
# LAB 4.6.5: k Nearest Neighbors
#####################################################
# Rather than a two-step approach in which we first fit the model
# and then we use the model to make predictions,
# knn() forms predictions using a single command
# cbind binds together columns into single matrix
attach(Smarket)
library(class)
train.X <- cbind(Lag1, Lag2)[train, ]
test.X <- cbind(Lag1, Lag2 )[!train, ]
train.Direction <- Direction[train]
# A seed must be set in order to ensure reproducibility of results
set.seed(1)
knn.pred <- knn(train.X, test.X, train.Direction, k=1)
table(knn.pred, Smarket.2005$Direction)
mean(knn.pred == Smarket.2005$Direction)
# Try with 3 neighbors instead
knn.pred <- knn(train.X, test.X, train.Direction, k=3)
table(knn.pred, Smarket.2005$Direction)
mean(knn.pred == Smarket.2005$Direction)
#####################################################
# LAB 4.6.6: Caravan Insurance Data
#####################################################
# Need to standardize data so that distane metrics work well
# Means mean = 0 and standard deviation = 1
# scale() does exactly that
attach(Caravan)
summary(Purchase)
standardized.X <- scale(Caravan[, -86]) # exclude factor variable
var(Caravan[, 1])
var(Caravan[, 2])
var(standardized.X[, 1])
var(standardized.X[, 2])
# Evaluate performance on test data
test <- 1:1000
train.X <- standardized.X[-test, ]
test.X <- standardized.X[test, ]
train.Y <- Purchase[-test]
test.Y <- Purchase[test]
set.seed(1)
knn.pred <- knn(train.X, test.X, train.Y, k=1)
mean(knn.pred != test.Y)
mean(test.Y != "No")
table(knn.pred, test.Y)
mean(knn.pred == test.Y)
knn.pred <- knn(train.X, test.X, train.Y, k=3)
table(knn.pred, test.Y)
mean(knn.pred == test.Y)
knn.pred <- knn(train.X, test.X, train.Y, k=5)
table(knn.pred, test.Y)
mean(knn.pred == test.Y)
glm.fit <- glm(formula = Purchase ~ ., data = Caravan, family = binomial, subset = -test)
glm.probs <- predict(glm.fit, Caravan[test, ], type="response")
glm.pred <- rep("No", 1000)
glm.pred[glm.probs >.5] <- "Yes"
table(glm.pred, test.Y)
glm.pred <- rep("No",1000)
glm.pred[glm.probs > 0.25] <- " Yes"
table(glm.pred, test.Y)
|
d28fb9e943082dcd0e082eb871005b14c0f0cc43
|
b0f77cca265f871fa01914deb0e7c6c8582ed6c3
|
/R_Scripts/eda__df_cut.r
|
5a8901956ff8305b4fcd2b28db17b7d60bef83e6
|
[
"MIT"
] |
permissive
|
joshuakevinjones/Code_Files
|
2e94b8d0a79b73591e98828e89b5d80b8ed824d4
|
eefd7337ae10c743c80d79aaeacf4d5d54229b56
|
refs/heads/master
| 2021-01-22T13:26:37.565713
| 2020-06-11T18:02:44
| 2020-06-11T18:02:44
| 100,659,992
| 0
| 0
| null | 2017-08-25T21:37:34
| 2017-08-18T01:27:31
| null |
UTF-8
|
R
| false
| false
| 635
|
r
|
eda__df_cut.r
|
# df cuts
# create some simulated data
ID <- 1:10
Age <- c(26, 65, 15, 7, 88, 43, 28, 66 ,45, 12)
Sex <- c(1, 0, 1, 1, 0 ,1, 1, 1, 0, 1)
Weight <- c(132, 122, 184, 145, 118, NA, 128, 154, 166, 164)
Height <- c(60, 63, 57, 59, 64, NA, 67, 65, NA, 60)
Married <- c(0, 0, 0, 0, 0, 0, 1, 1, 0, 1)
# create a dataframe of the simulated data
mydata <- data.frame(ID, Age, Sex, Weight, Height, Married)
# cut up Age and label each category
mydata$Agecat1<-cut(mydata$Age, c(0,5,10,15,20,25,30,40,50,60,70,80), labels=c(1:11))
mydata$Agecat2<-cut(mydata$Age, breaks=10, labels=c(1:10))
# it is a factor
class(mydata$Agecat1)
[1] "factor"
|
4a68497bc1cfa61f6b8a95d55799c06d7204eaec
|
6cd87c1f8d0fb438d85a4bb0e25736ac2aaefd42
|
/February_strange_attractors/hopalong.R
|
ba095268f37de1cafbe90b7f8ba6f2acfd82ee7b
|
[] |
no_license
|
2008haas/aRt
|
c47ad1392f4393aaa86c21f9a65ddcae4ecf3e60
|
9585e845539acfc36792c084caed178ab4b0dd98
|
refs/heads/master
| 2022-12-13T13:40:54.677181
| 2020-09-19T02:36:45
| 2020-09-19T02:36:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,333
|
r
|
hopalong.R
|
library(ggplot2)
library(dplyr)
library(purrr)
#cream #FAF4E7
#charcoal #1E1E1E
opt = theme(legend.position = "none",
panel.background = element_rect(fill="white"),
axis.ticks = element_blank(),
panel.grid = element_blank(),
axis.title = element_blank(),
axis.text = element_blank())
###########1
createTrajectory <- function(n, x0, y0, a, b, c) {
x <- vector(mode = "numeric", length = n)
y <- vector(mode = "numeric", length = n)
x[1] <- x0
y[1] <- y0
for(i in 2:n) {
x[i] <- y[i-1]-1-sqrt(abs(b*x[i-1]-c))*sign(x[i-1]-1)
y[i] <- a-x[i-1]-1
}
data.frame(x = x, y = y)
}
a=2
b=1
c=8
v=3
df=createTrajectory(3000000, 0, 0, a, b, c)
ggplot(df, aes(x, y)) + geom_point(color="#1E1E1E", shape=46, alpha=.05) + opt
ggsave("hopalong_1.png", device = "png")
###########2
createTrajectory <- function(n, x0, y0, t0, a, b, c, v) {
x <- vector(mode = "numeric", length = n)
y <- vector(mode = "numeric", length = n)
t <- vector(mode = "numeric", length = n)
x[1] <- x0
y[1] <- y0
t[1] <- t0
for(i in 2:n) {
x[i] <- y[i-1]-1-sqrt(abs(b*x[i-1]-c))*sign(x[i-1]-1)+t[i-1]
y[i] <- a-x[i-1]-1-t[i-1]
t[i] <- t[i-1] + v
}
data.frame(x = x, y = y)
}
a=9
b=6
c=8
v=0.1
df=createTrajectory(1000000, 0, 0, 0, a, b, c, v)
ggplot(df, aes(x, y)) + geom_point(color="#1E1E1E", shape=46, alpha=.05) + opt
ggsave("hopalong_2.png", device = "png")
###########3
createTrajectory <- function(n, x0, y0, t0, a, b, c, v) {
x <- vector(mode = "numeric", length = n)
y <- vector(mode = "numeric", length = n)
t <- vector(mode = "numeric", length = n)
x[1] <- x0
y[1] <- y0
t[1] <- t0
for(i in 2:n) {
x[i] <- y[i-1]-1-sqrt(abs(b*x[i-1]-c))*sign(x[i-1]-1)+t[i-1]
y[i] <- a-x[i-1]-1-t[i-1]
t[i] <- t[i-1] + v
}
data.frame(x = x, y = y)
}
a=5.5
b=1.2
c=8
v=6
df=createTrajectory(1000000, 0, 0, 0, a, b, c, v)
ggplot(df, aes(x, y)) + geom_point(color="#1E1E1E", shape=46, alpha=.05) + opt
ggsave("hopalong_3.png", device = "png")
###########4
createTrajectory <- function(n, x0, y0, t0, a, b, c, v) {
x <- vector(mode = "numeric", length = n)
y <- vector(mode = "numeric", length = n)
t <- vector(mode = "numeric", length = n)
x[1] <- x0
y[1] <- y0
t[1] <- t0
for(i in 2:n) {
x[i] <- y[i-1]-1-sqrt(abs(b*x[i-1]-c))*sign(x[i-1]-1)+a*t[i-1]
y[i] <- a-x[i-1]-1-a*t[i-1]
t[i] <- t[i-1] + v
}
data.frame(x = x, y = y)
}
a=10
b=1
c=1
v=0.1
df=createTrajectory(1000000, 0, 0, 0, a, b, c, v)
ggplot(df, aes(x, y)) + geom_point(color="#1E1E1E", shape=46, alpha=.05) + opt
ggsave("hopalong_4.png", device = "png")
###########6
createTrajectory <- function(n, x0, y0, t0, a, b, c, v) {
x <- vector(mode = "numeric", length = n)
y <- vector(mode = "numeric", length = n)
t <- vector(mode = "numeric", length = n)
x[1] <- x0
y[1] <- y0
t[1] <- t0
for(i in 2:n) {
x[i] <- y[i-1]-1-sqrt(abs(b*x[i-1]-c))*sign(x[i-1]-1)+a*cos(b*t[i-1])
y[i] <- a-sin(x[i-1])-1-a*cos(b*t[i-1])
t[i] <- t[i-1] + v
}
data.frame(x = x, y = y)
}
a=5.4
b=5
c=3
v=1
df=createTrajectory(1000000, 0, 0, 0, a, b, c, v)
ggplot(df, aes(x, y)) + geom_point(color="#1E1E1E", shape=46, alpha=.05) + opt
ggsave("hopalong_6.png", device = "png")
|
0d2edb0917aa9e45fe6a2f314b1aa042883feeb4
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/R.oo/examples/print.Exception.Rd.R
|
e9effbe5be7949d0c0c80c01e61424263073509a
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 274
|
r
|
print.Exception.Rd.R
|
library(R.oo)
### Name: print.Exception
### Title: Prints the Exception
### Aliases: print.Exception Exception.print print,Exception-method
### Keywords: programming methods error internal methods
### ** Examples
## Not run: For a complete example see help(Exception).
|
4cbc4147c579ed2390852c6f0a69372db9232afd
|
d08fd1c0a9969e8fa2847585adc23501da901c4f
|
/Pre-Processing/PreProcess.R
|
a29d1f22d4dc095374c4fe06dbda27e9eeb64c6f
|
[] |
no_license
|
ArunkumarRamanan/bosch-production-line-performance
|
124ebae622370041978fed5eff27378a19f4230f
|
d4ba078bc6449d40d1d7fb4e7129850f276f81a0
|
refs/heads/master
| 2020-05-09T23:01:27.686275
| 2016-11-21T16:52:52
| 2016-11-21T16:52:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,918
|
r
|
PreProcess.R
|
#Bosch Production Line Performance
#Preprocess Data
#Many columns are duplicated. Use digest to detect and filter duplicate columns
#Reduce RAM requirement by reading columns in batches
#Authors: Tyrone Cragg and Liam Culligan
#Date: October 2016
#Load required packages
library(data.table)
library(digest)
#Function for fread
read = function(file, colClasses, select) {
fread(file,
colClasses = colClasses,
na.strings = "",
showProgress = F,
select = select)
}
#CATEGORICAL VARIABLES
#Setup read parameters and lists to store outputs
n_batch = 5
col_idx = c(1:2141)
col_bat = cut(col_idx, n_batch, labels = c(1:n_batch))
all_features = vector("list", n_batch)
all_digests = vector("list", n_batch)
#Loop through each batch
#Only the feature names and digests of each feature are stored.
for(i in seq_along(all_features)) {
print(i)
dt = read(file = "train_categorical.csv", colClasses = "character", select = col_idx[col_bat == i])
all_features[[i]] = names(dt)
all_digests[[i]] = lapply(dt, digest)
rm(dt)
gc()
}
#Check summary of feature names and digests
#Appears to be over 1,900 duplicates, including empty columns
feature_summary = data.table(feature = unlist(all_features),
digest = unlist(all_digests))
#For the second duplicated value onwards, sets duplicated to TRUE
feature_summary$duplicate = duplicated(feature_summary$digest)
cat("There are an estimated", sum(feature_summary$duplicate), "duplicated columns")
names_to_keep = feature_summary[duplicate == F, feature]
#Read in data without duplictaed columns
train_categorical = fread("train_categorical.csv", select = names_to_keep)
test_categorical = fread("test_categorical.csv", select = names_to_keep)
#NUMERIC VARIABLES
#Setup read parameters and lists to store outputs
n_batch = 4
col_idx = c(1:970)
col_bat = cut(col_idx, n_batch, labels = c(1:n_batch))
all_features = vector("list", n_batch)
all_digests = vector("list", n_batch)
#Loop through each batch
#Only the feature names and digests of each feature are stored.
for(i in seq_along(all_features)) {
print(i)
dt = read(file = "train_numeric.csv", colClasses = "numeric", select = col_idx[col_bat == i])
all_features[[i]] = names(dt)
all_digests[[i]] = lapply(dt, digest)
rm(dt)
gc()
}
#Check summary of feature names and digests
#Appears to be over 1900 duplicates, including empty columns
feature_summary = data.table(feature = unlist(all_features),
digest = unlist(all_digests))
#For the second duplicated value onwards, sets duplicated to TRUE
feature_summary$duplicate = duplicated(feature_summary$digest)
cat("There are an estimated", sum(feature_summary$duplicate), "duplicated columns")
#Keep all columns that are not duplicates
names_to_keep = feature_summary[duplicate == F, feature]
#Read in data without duplictaed columns -- still need to read in by batch to read RAM requirement
n_batch = 4
col_idx = c(1:length(names_to_keep))
col_bat = cut(col_idx, n_batch, labels = c(1:n_batch))
#Loop through each batch
#Only the feature names and digests of each feature are stored.
for(i in 1:n_batch) {
print(i)
dt = read(file = "train_numeric.csv", colClasses = "numeric", select = names_to_keep[col_bat == i])
if (i == 1) {
train_numeric = dt
} else {
train_numeric = cbind(train_numeric, dt)
}
rm(dt)
gc()
}
for(i in 1:n_batch) {
print(i)
dt = read(file = "test_numeric.csv", colClasses = "numeric", select = names_to_keep[col_bat == i])
if (i == 1) {
test_numeric = dt
} else {
test_numeric = cbind(test_numeric, dt)
}
rm(dt)
gc()
}
#DATE VARIABLES
#Setup read parameters and lists to store outputs
n_batch = 6
col_idx = c(1:1157)
col_bat = cut(col_idx, n_batch, labels = c(1:n_batch))
all_features = vector("list", n_batch)
all_digests = vector("list", n_batch)
#Loop through each batch
#Only the feature names and digests of each feature are stored.
for(i in seq_along(all_features)) {
print(i)
dt = read(file = "train_date.csv", colClasses = "numeric", select = col_idx[col_bat == i])
all_features[[i]] = names(dt)
all_digests[[i]] = lapply(dt, digest)
rm(dt)
gc()
}
#Check summary of feature names and digests
#Appears to be over 1,900 duplicates, including empty columns
feature_summary = data.table(feature = unlist(all_features),
digest = unlist(all_digests))
#For the second duplicated value onwards, sets duplicated to TRUE
feature_summary$duplicate = duplicated(feature_summary$digest)
cat("There are an estimated", sum(feature_summary$duplicate), "duplicated columns")
#Keep all columns that are not duplicates
names_to_keep = feature_summary[duplicate == F, feature]
#Read in data without duplictaed columns -- still need to read in by batch to read RAM requirement
n_batch = 6
col_idx = c(1:length(names_to_keep))
col_bat = cut(col_idx, n_batch, labels = c(1:n_batch))
#Loop through each batch
#Only the feature names and digests of each feature are stored.
for(i in 1:n_batch) {
print(i)
dt = read(file = "train_date.csv", colClasses = "numeric", select = names_to_keep[col_bat == i])
if (i == 1) {
train_date = dt
} else {
train_date = cbind(train_date, dt)
}
rm(dt)
gc()
}
for(i in 1:n_batch) {
print(i)
dt = read(file = "test_date.csv", colClasses = "numeric", select = names_to_keep[col_bat == i])
if (i == 1) {
test_date = dt
} else {
test_date = cbind(test_date, dt)
}
rm(dt)
gc()
}
#Save datasets as RData file
save(train_categorical, file = "PreProcTrainCategorical.rda", compress = T)
save(test_categorical, file = "PreProcTestCategorical.rda", compress = T)
save(train_numeric, file = "PreProcTrainNumeric.rda", compress = T)
save(test_numeric, file = "PreProcTestNumeric.rda", compress = T)
save(train_date, file = "PreProcTrainDate.rda", compress = T)
save(test_date, file = "PreProcTestDate.rda", compress = T)
#Randomly sample 100000 ovservations from training data
#This expedites the model training and validation process. Subsequently full training set can be used for training and validation.
set.seed(44)
samples = sample(nrow(train_numeric), 100000) #Random sample of 100000 row indeces
train_numeric_sample = train_numeric[samples] #Select these rows in the i argument of data.table
train_categorical_sample = train_categorical[samples]
train_date_sample = train_date[samples]
#Save sampled datasets as RData file
save(train_numeric_sample, train_categorical_sample, train_date_sample, file = "PreProcTrainSample.rda", compress = T)
|
7a8368c427ec990a12e0a4298d899dea23f8c370
|
9f6a670f53570efe5774dcd02a2f3b4aec303ffb
|
/server.R
|
4526970c8c8ef937bb7087a81eb642e2368bc451
|
[] |
no_license
|
ravikeron/Capstone_Project
|
d75af85626cd917159ac77cea77757fd0ab75d03
|
c04756357dbd093928e3dc03d3fa59918e29fed0
|
refs/heads/master
| 2021-01-17T18:03:32.547793
| 2016-07-21T17:40:05
| 2016-07-21T17:40:05
| 63,888,380
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,382
|
r
|
server.R
|
#
# This is the server logic of a Shiny web application for predicting the next word in a sentence
#
library(tm)
library(stringr)
library(shiny)
# load the 2-gram, 3-gram and 4-gram models created separately from the given input files
# These are built into term document matrix after taking a sample and cleaning
# These would be used to predict the next word
#load("E:/DataScience/Capstone/Project/freq2.RData");
#load("E:/DataScience/Capstone/Project/freq3.RData");
#load("E:/DataScience/Capstone/Project/freq4.RData");
load("freq2.RData");
load("freq3.RData");
load("freq4.RData");
Inputcleaning <- function(instr)
{
# First remove the non-alphabatical characters
instr <- iconv(instr, "latin1", "ASCII", sub=" ");
instr <- gsub("[^[:alpha:][:space:][:punct:]]", "", instr);
# Then convert to a Corpus
instrCorpus <- VCorpus(VectorSource(instr))
# Apply cleaning steps
instrCorpus <- tm_map(instrCorpus, content_transformer(removePunctuation))
instrCorpus <- tm_map(instrCorpus, content_transformer(removeNumbers))
instrCorpus <- tm_map(instrCorpus, content_transformer(stripWhitespace))
instrCorpus <- tm_map(instrCorpus, content_transformer(tolower))
instr <- as.character(instrCorpus[[1]])
instr <- gsub("(^[[:space:]]+|[[:space:]]+$)", "", instr)
# Return the cleaned sentence
if (nchar(instr) > 0) {
return(instr);
} else {
return("");
}
}
Predictnext <- function(instr)
{
#assign("mesg", "in Predictnext", envir = .GlobalEnv)
# Clean the input string and extract the words removing other characters
instr <- Inputcleaning(instr);
# Split the input string and extract the length
instr <- unlist(strsplit(instr, split=" "));
instrlen <- length(instr);
nxttermfound <- FALSE;
Predictnext <- as.character(NULL);
# Check the four gram
if (instrlen >= 3 & !nxttermfound)
{
# capture the 3 words from the end
instr1 <- paste(instr[(instrlen-2):instrlen], collapse=" ");
searchStr <- paste("^",instr1, sep = "");
freq4temp <- freq4[grep (searchStr, freq4$terms), ];
msg41 <- "before 4 matching check"
# check if any matching record returned
if ( length(freq4temp[, 1]) >= 1 )
{
msg1 <- "in record found"
Predictnext <- as.character(freq4temp[1,1]);
nxttermfound <- TRUE;
freq4temp <- NULL;
}
}
# Check the three gram using the three gram data frame
if (instrlen >= 2 & !nxttermfound)
{
# identify the 2 words from the end
instr1 <- paste(instr[(instrlen-1):instrlen], collapse=" ");
searchStr <- paste("^",instr1, sep = "");
freq3temp <- freq3[grep (searchStr, freq3$terms), ];
msg31 <- "before 3 matching check"
# check if any matching record returned
if ( length(freq3temp[, 1]) >= 1 )
{
Predictnext <- freq3temp[1,1];
nxttermfound <- TRUE;
freq3temp <- NULL;
}
}
# Check the two gram using the two gram data frame
if (instrlen >= 1 & !nxttermfound)
{
# get the last word
instr1 <- instr[instrlen];
searchStr <- paste("^",instr1, sep = "");
freq2temp <- freq2[grep (searchStr, freq2$terms), ];
msg21 <- "before 2 matching check"
# check if any matching record returned
if ( length(freq2temp[, 1]) >= 1 )
{
Predictnext <- freq2temp[1,1];
nxttermfound <- TRUE;
freq2temp <- NULL;
}
}
# If no next term found in Four, Three and Two Grams return No phrase matches
if (!nxttermfound & instrlen > 0)
{
Predictnext <- "No phrase matches"
}
nextterm <- word(Predictnext, -1);
if (instrlen > 0){
#dftemp1 <- data.frame(nextterm, mesg);
dftemp1 <- data.frame(nextterm);
return(dftemp1);
} else {
nextterm <- "";
#mesg <-"";
#dftemp1 <- data.frame(nextterm, mesg);
dftemp1 <- data.frame(nextterm);
return(dftemp1);
}
}
shinyServer(function(input, output) {
output$wordprediction <- renderPrint({
str1 <- Inputcleaning(input$inputId)
str2 <- Predictnext(str1)
input$action;
msg <<- as.character(str2[1,2]);
cat("", as.character(str2[1,1]))
#cat("\n\t");
#cat("\n\t");
#cat("Note: ", as.character(str2[1,2]));
})
#output$text1 <- renderText({paste("The input sentence is : ", input$inputId)});
output$text1 <- renderText({input$action;})
})
|
f9336e9a6db7688c655b1a71587ef805e177e0bf
|
102d4103ca1e3a2ab268cd9cd37d6b439c4fc3cc
|
/data and code for paper/code_for_prediction.R
|
15fbd5f2199c7c1337097a4375f03677cc113cc2
|
[
"CC0-1.0"
] |
permissive
|
Vicky-Zh/Tracking_and_forecasting_milepost_moments_of_COVID-19
|
8f4b81d78d56fc064e1c8b10cf10e5afc1f74f02
|
4bf61343ea892bfaeb84ef034e99c948afff4e08
|
refs/heads/master
| 2021-05-21T09:53:34.384692
| 2020-05-05T03:47:42
| 2020-05-05T03:47:42
| 252,644,815
| 1
| 0
|
CC0-1.0
| 2020-05-05T03:47:43
| 2020-04-03T05:56:49
|
R
|
UTF-8
|
R
| false
| false
| 11,155
|
r
|
code_for_prediction.R
|
#=======================================
# Prediction of turning points
# Author: Yanwen Zhang
# Date: Apr 2, 2020
# Description: In the paper <Tracking_and_forecasting_milepost_moments_of_the_epidemic_in_the_early_outbreak__framework_and_applications_to_the_COVID_19>, we proposed a method to predict "turning points", whose main idea is using the change velocity of infection rate (InfectionRateVelocity) and the change velocity of completion rate(RemovedRateVelocity) to forcast newly diagnoses cases and number of cases treated in the hospital in the future. Here, we proposed one of the algorithms to calculate the change rate and then make the prediction, which is the method we used in our paper mentioned above. At last, we offer a simple example to implement this method.
#=======================================
# Derive daily epidemic indicators from raw case counts.
#
# Args:
#   wd: data frame whose columns are, in order:
#       1) date, 2) cumulative confirmed cases up to day t,
#       3) daily confirmed cases, 4) daily recovered, 5) daily deaths.
# Returns: a data frame (one row per day, first day dropped) with columns
#   date, cumconfirmed, confirmed, recovered, deaths, inhospitals,
#   infectionrate (daily confirmed / previous day's in-hospital count) and
#   removedrate ((daily recovered + deaths) / previous day's in-hospital count).
Iconicfun <- function(wd) {
  n_days   <- dim(wd)[1]
  day      <- as.Date(wd[, 1])
  cum_conf <- wd[, 2]  # cumulative confirmed cases
  new_conf <- wd[, 3]
  new_rec  <- wd[, 4]
  new_dead <- wd[, 5]
  # Cases still in hospital: cumulative confirmed minus cumulative removals.
  removed_daily <- new_rec + new_dead
  in_hosp       <- cum_conf - cumsum(removed_daily)
  # Rates at day t are taken relative to the previous day's in-hospital count,
  # hence the first observation is dropped from every returned column.
  prev_in_hosp <- in_hosp[-n_days]
  data.frame(
    date          = day[-1],
    cumconfirmed  = cum_conf[-1],
    confirmed     = new_conf[-1],
    recovered     = new_rec[-1],
    deaths        = new_dead[-1],
    inhospitals   = in_hosp[-1],
    infectionrate = new_conf[-1] / prev_in_hosp,
    removedrate   = removed_daily[-1] / prev_in_hosp
  )
}
CalculateVelocity<-function (date, confirmed, inhospitals, infectionrate, removedrate, M, T){
#=====================================
# Compute the geometric per-day change velocity of the infection rate and of
# the removed (completion) rate over a trailing window of length M ending at
# day T.  The window is shrunk (and, failing that, T is shifted back) until
# the rate series is monotone in the required direction over the window.
#
# Args:
#   date: the exact day in the format "%y-%m-%d" as a character
#   confirmed: the daily confirmed cases at the given date
#   inhospitals: the number of infectious cases in hospital
#   infectionrate: the daily infection rate
#   removedrate: the daily removed rate
#   M: the length of the trailing time window (days).
#   T: the chosen beginning time,
#      which must be in the format "%y-%m-%d" as a character.
#
# Returns:
#   A list containing InfectionRateVelocity, RemovedRateVelocity,
#   plus the final T.removed and M.removed actually used to calculate
#   RemovedRateVelocity, which may be needed in later calculations.
#=====================================
f=data.frame(date=date,
confirmed=confirmed,
inhospitals=inhospitals,
infectionrate=infectionrate,
removedrate=removedrate)
f$date=as.Date(f$date)
# Locate the row whose date equals T.
# NOTE(review): which(f == T) compares EVERY cell of the data frame against
# the character T; this yields the row index only because the date column is
# the first column and no other cell matches -- fragile, confirm upstream.
T.infection=which(f==T)
T.removed=which(f==T)
M.infection=M
M.removed=M
# Snapshot of the indicators at day T (kept for reference; not used below).
infectionrate.0=f$infectionrate[T.infection]
removedrate.0=f$removedrate[T.infection]
confirmed.0=f$confirmed[T.infection]
inhospitals.0=f$inhospitals[T.infection]
# to calculate InfectionRateVelocity
# Correction process for "m" and "t" in special situations: shrink the window
# while the infection rate has not decreased across it (or its start is 0);
# when the window cannot shrink further, step T back and retry.  In most
# cases this loop is skipped entirely.
while (f$infectionrate[T.infection-M.infection+1]<=f$infectionrate[T.infection] | f$infectionrate[T.infection-M.infection+1]==0){
M.infection=M.infection-1
if(M.infection>1) next
else{
T.infection=T.infection-1
M.infection=M
if(T.infection>=0) next
else {
stop("The infection rate K heaven't decrease yet.")
}
}
}
# Geometric mean daily ratio of the infection rate across the window.
InfectionRateVelocity=(f$infectionrate[T.infection]/f$infectionrate[T.infection-M.infection+1])^(1/(M.infection-1))
# to calculate RemovedRateVelocity
# Same correction process as above, but requiring the removed rate to have
# INCREASED across the window.
while (f$removedrate[T.removed-M.removed+1]>=f$removedrate[T.removed] | f$removedrate[T.removed-M.removed+1]==0){
M.removed=M.removed-1
if(M.removed>1) next
else{
T.removed=T.removed-1
M.removed=M
if(T.removed>=0) next
else {
stop("The completion rate heaven't increase yet.")
}
}
}
# Geometric mean daily ratio of the removed rate across the window.
RemovedRateVelocity=(f$removedrate[T.removed]/f$removedrate[T.removed-M.removed+1])^(1/(M.removed-1))
velocity=list("InfectionRateVelocity"=InfectionRateVelocity,"RemovedRateVelocity"=RemovedRateVelocity,"T.removed"=T.removed,"M.removed"=M.removed,"T"=T)
return(velocity)
}
Prediction<-function(date, confirmed, inhospitals, infectionrate, removedrate, InfectionRateVelocity, RemovedRateVelocity,T){
#=====================================
# Predict future infectionrate, removedrate, daily confirmed (E_t) and
# inhospitals by geometrically extrapolating both rates from day T, and
# derive the milepost moments T.2, Z.1 and Z.2 along the way.
#
# Args:
#   date: the exact day in the format "%y-%m-%d" as a character.
#   confirmed: the daily confirmed cases at the given date.
#   inhospitals: the number of infectious cases in hospital.
#   infectionrate: the daily infection rate.
#   removedrate: the daily removed rate.
#   InfectionRateVelocity: the per-day change velocity of the infection rate.
#   RemovedRateVelocity: the per-day change velocity of the removed rate.
#   T: the chosen beginning time,
#      which must be in the format "%y-%m-%d" as a character.
#
# Returns:
#   A data frame containing the predicted removedrate and inhospitals paths
#   (one row per predicted day), plus T.2 (peak of in-hospital cases),
#   Z.1 (first day with <1 new confirmed case) and Z.2 (first day with
#   <1 in-hospital case), repeated on every row.
#=====================================
f=data.frame(date=date,
confirmed=confirmed,
inhospitals=inhospitals,
infectionrate=infectionrate,
removedrate=removedrate)
f$date=as.Date(f$date)
# NOTE(review): which(f == T) relies on the date column being the only match;
# see the same pattern in CalculateVelocity().
T.infection=which(f==T)
# Starting values at day T for the extrapolation.
infectionrate.0=f$infectionrate[T.infection]
removedrate.0=f$removedrate[T.infection]
confirmed.0=f$confirmed[T.infection]
inhospitals.0=f$inhospitals[T.infection]
infectionrate.pre=c(infectionrate.0)
removedrate.pre=c(removedrate.0)
confirmed.pre=c(confirmed.0)
inhospitals.pre=c(inhospitals.0)
t=1
# Roll the model forward one day at a time until the predicted number of new
# confirmed cases drops below 1: that day is the first zero point Z.1.
while (confirmed.pre[t]>1){
t=t+1
infectionrate=infectionrate.pre[t-1]*InfectionRateVelocity
removedrate=removedrate.pre[t-1]*RemovedRateVelocity
# Net daily growth factor of the in-hospital count.
R_t=1+infectionrate-removedrate
inhospitals=inhospitals.pre[t-1]*R_t
# E_t: expected new confirmed cases for the day.
E_t=inhospitals.pre[t-1]*infectionrate
infectionrate.pre=c(infectionrate.pre,infectionrate)
removedrate.pre=c(removedrate.pre,removedrate)
confirmed.pre=c(confirmed.pre,E_t)
inhospitals.pre=c(inhospitals.pre,inhospitals)
}
Z.1=as.Date(T)+t-1
# Continue rolling forward until the predicted in-hospital count drops below
# 1: that day is the second zero point Z.2.
while (inhospitals.pre[t]>1 ){
t=t+1
infectionrate=infectionrate.pre[t-1]*InfectionRateVelocity
removedrate=removedrate.pre[t-1]*RemovedRateVelocity
R_t=1+infectionrate-removedrate
inhospitals=inhospitals.pre[t-1]*R_t
infectionrate.pre=c(infectionrate.pre,infectionrate)
removedrate.pre=c(removedrate.pre,removedrate)
inhospitals.pre=c(inhospitals.pre,inhospitals)
}
Z.2=as.Date(T)+t-1
# The second turning point T.2 is the predicted peak of in-hospital cases.
# If the peak is already at the start (index 1), it has passed: report NA.
if (which.max(inhospitals.pre)>1){
T.2=as.Date(T)+which.max(inhospitals.pre)-1
}else T.2=NA
prediction<-data.frame("removedrate.pre"=removedrate.pre,"inhospitals.pre"=inhospitals.pre,"T.2"=T.2,"Z.1"=Z.1,"Z.2"=Z.2)
return(prediction)
}
totalPrediction<-function(wd,M,T){
#=====================================
# Integrate Iconicfun(), CalculateVelocity() and Prediction(), and handle the
# counterintuitive special case where the predicted removed rate exceeds 1
# by re-estimating RemovedRateVelocity from an earlier window.
#
# Args:
#   wd: data frame with a date column and four variables, i.e.
#       the cumulative confirmed cases up to the given day t,
#       the daily confirmed cases at day t,
#       the daily recovered ones and the daily deaths at day t.
#   M: the length of the trailing time window (days).
#   T: the chosen beginning time,
#      which must be in the format "%y-%m-%d" as a character.
#
# Returns:
#   The data frame produced by Prediction(): predicted removedrate and
#   inhospitals paths plus T.2, Z.1, Z.2.
#=====================================
iconic=Iconicfun(wd)
date=iconic$date
confirmed=iconic$confirmed
recovered=iconic$recovered
deaths=iconic$deaths
inhospitals=iconic$inhospitals
infectionrate=iconic$infectionrate
removedrate=iconic$removedrate
f=data.frame(date=date,
confirmed=confirmed,
inhospitals=inhospitals,
infectionrate=infectionrate,
removedrate=removedrate)
f$date=as.Date(f$date)
velocity=CalculateVelocity(date, confirmed, inhospitals, infectionrate, removedrate, M, T)
InfectionRateVelocity=as.numeric(velocity["InfectionRateVelocity"])
RemovedRateVelocity=as.numeric(velocity["RemovedRateVelocity"])
T.removed=as.numeric(velocity["T.removed"])
M.removed=as.numeric(velocity["M.removed"])
prediction=Prediction(date, confirmed, inhospitals, infectionrate, removedrate, InfectionRateVelocity, RemovedRateVelocity, T)
removedrate.pre=prediction$removedrate.pre
# A removed rate > 1 at the end of the predicted path is impossible in
# reality.  While that happens, step the removal window back one day,
# re-run the same window-correction search used in CalculateVelocity(),
# recompute RemovedRateVelocity and predict again.
while(removedrate.pre[length(removedrate.pre)]>1 & T.removed>=1){
T.removed=T.removed-1
while (f$removedrate[T.removed-M.removed+1]>=f$removedrate[T.removed] | f$removedrate[T.removed-M.removed+1]==0){
M.removed=M.removed-1
if(M.removed>1) next
else{
T.removed=T.removed-1
M.removed=M
if(T.removed>=0) next
else {
stop("The completion rate heaven't increase yet.")
}
}
}
RemovedRateVelocity=(f$removedrate[T.removed]/f$removedrate[T.removed-M.removed+1])^(1/(M.removed-1))
prediction=Prediction(date, confirmed, inhospitals, infectionrate, removedrate, InfectionRateVelocity, RemovedRateVelocity, T)
removedrate.pre=prediction$removedrate.pre
}
return(prediction)
}
#================================================
# A simple example with time window M = 5.
# Users can change the beginning time T and the time window M.
# BUG FIX: the original script referenced M without ever defining it;
# define the time window explicitly before the loop.
M <- 5
filepath <- "Please input your filepath here"
filepath <- edit(filepath)  # interactively prompt the user for the CSV path
wd <- read.csv(filepath)
# One row per candidate beginning time: columns hold T.2, Z.1, Z.2.
result <- matrix(0, ncol = 3, nrow = 32)
for (i in 1:32) {
  print(i)
  T <- as.character(as.Date("2020-01-29") + i - 1)
  print(T)
  x <- totalPrediction(wd, M, T)
  # totalPrediction() returns a data.frame whose columns 3:5 are the
  # milepost moments T.2, Z.1 and Z.2 (repeated on every row).
  result[i, 1] <- as.character(x[1, 3])
  result[i, 2] <- as.character(x[1, 4])
  result[i, 3] <- as.character(x[1, 5])
}
|
243ce38baf30070d839e99e96ea21419ebb5e17b
|
6f6d83b98d6fa5964ea0b684d4d829022ba4867e
|
/man/expand_word.Rd
|
1a0b55b979b49bc293e5d47b109e3cf8f387e131
|
[
"MIT"
] |
permissive
|
rmsharp/wordPuzzle
|
186d3ab49d8836f8849a1fdd3b469f07db4929c9
|
733449bbc3b91a8dd82d5d4e4034dab11bec9fd3
|
refs/heads/master
| 2020-05-09T20:35:24.804214
| 2019-04-21T23:26:54
| 2019-04-21T23:26:54
| 181,413,004
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 423
|
rd
|
expand_word.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/expand_word.R
\name{expand_word}
\alias{expand_word}
\title{Returns a list with each character from the word in successive list elements}
\usage{
expand_word(word)
}
\arguments{
\item{word}{a 1-element character vector containing a word to be expanded}
}
\description{
Returns a list with each character from the word in successive list elements
}
|
13c182788063b3523a78a5f231d5b057cebca099
|
f281f08b82846459b3bfd53546e1abda60082a67
|
/rsrc/generate_pathway.R
|
5c44af1b2b4b172c42e172f0758b3330f91157de
|
[
"Apache-2.0"
] |
permissive
|
MadFace/MadFace
|
07de49a31cb5e0a5b5c9a6c6c3a8fe3410545dee
|
dad6df197bc1ad330863f0f84da4d97dfb7a3b7d
|
refs/heads/master
| 2021-08-14T18:02:46.020078
| 2017-11-16T11:22:18
| 2017-11-16T11:22:18
| 108,241,990
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,232
|
r
|
generate_pathway.R
|
library(igraph)
library(rjson)
## Pixel dimensions of the overall pathway plot.
WIDTH <- 2048
HEIGHT <- 2048
## Plot parameters for the overall pathway graph (small vertices).
plot.params.1 <- as.list(NULL)
plot.params.1[["vertex.label.cex"]] <- 1
plot.params.1[["vertex.size"]] <- 3
## Pixel dimensions of each per-URL sub-pathway plot.
SUB.WIDTH <- 480
SUB.HEIGHT <- 480
## Plot parameters for the sub-pathway graphs (large vertices).
plot.params.2 <- as.list(NULL)
plot.params.2[["vertex.label.cex"]] <- 1
plot.params.2[["vertex.size"]] <- 30
##-----------------------------------------------------------
##
## Functions
##
##-----------------------------------------------------------
#----------------------------------
# create layered structure
#----------------------------------
#----------------------------------
# Build a two-layer graph layout: unique edge sources are scattered randomly
# in a top band of the [-1,1]x[-1,1] square and unique destinations in a
# bottom band.  Band borders are derived from the source/destination ratio,
# clamped by a margin so neither band collapses.
#
# Args:
#   layout: two-column (from, to) edge matrix or data frame.
# Returns: a matrix with one (x, y) row per vertex, sources first then
#   destinations, in unique() order.  NOTE(review): rows added via
#   rbind(l, pos) carry the row name "pos"; callers appear to ignore row
#   names, but preserve this if refactoring.
#----------------------------------
layout.layered <- function(layout){
input_graph <- as.matrix(layout)
uniq_src = unique(input_graph[,1])
uniq_dst = unique(input_graph[,2])
## layer ratio H:L = num_Hlayer:num_Llayer
## NOTE(review): comment originally said 10% but the margin used is 0.15.
mergin_ratio = 0.15
num_Hlayer = length(uniq_src)
num_Llayer = length(uniq_dst)
L_ratio = num_Llayer / (num_Hlayer + num_Llayer)
## Choose band borders; clamp when the lower layer would be too large or
## too small relative to the margin.
if (L_ratio > (1-mergin_ratio)) {
H_border = 2 * (L_ratio - mergin_ratio) - 1
L_border = 2 * (L_ratio - 2 * (mergin_ratio)) - 1
} else if (L_ratio < mergin_ratio) {
H_border = 2 * (L_ratio + 2*(mergin_ratio)) - 1
L_border = 2 * (L_ratio + mergin_ratio) - 1
} else {
H_border = 2 * (L_ratio + mergin_ratio) - 1
L_border = 2 * (L_ratio) - 1
}
##H_border = 0.5
##L_border = -0.5
##message(H_border, ", ", L_border)
## Random x in [-1,1]; y in the top band [H_border, 1] for sources.
## NOTE(review): seq(1, length(uniq_src)) iterates c(1, 0) when there are
## no sources -- assumes a non-empty edge list; confirm at call sites.
l = c()
for (i in seq(1, length(uniq_src))) {
pos= c(runif(1,-1,1), runif(1,H_border,1))
if (is.null(l)){
l = pos
} else {
l = rbind(l,pos)
}
}
## Random x in [-1,1]; y in the bottom band [-1, L_border] for destinations.
for (i in seq(1, length(uniq_dst))) {
pos= c(runif(1,-1,1), runif(1,-1,L_border))
l = rbind(l,pos)
}
return(l)
}
## Collect the vertex names of an edge list: the unique sources (column 1)
## followed by the unique destinations (column 2), as a one-column
## character data frame suitable for igraph's `vertices` argument.
get.vers <- function(graph.data.matrix) {
  sources <- as.character(unique(graph.data.matrix[, 1]))
  targets <- as.character(unique(graph.data.matrix[, 2]))
  data.frame(c(sources, targets), stringsAsFactors = FALSE)
}
## Append one (service -> url) edge per service to the edge matrix.
##
## Args:
##   graph.matrix: two-column character matrix of (from, to) edges, or NULL.
##   url.name: name of the monitored URL (edge destination).
##   services: list of services; each element carries a $name (edge source).
##   servers: unused here; kept so existing call sites remain valid.
## Returns: the edge matrix with one new row per service (unchanged input
##   when `services` is empty).
append.graph.matrix <- function(graph.matrix, url.name, services, servers){
  ## seq_along() instead of 1:length(): an empty service list then adds no
  ## rows, whereas 1:0 would iterate over c(1, 0) and fail on services[[0]].
  for (service.id in seq_along(services)){
    service.name <- services[[service.id]]$name
    graph.matrix <- rbind(graph.matrix, c(service.name, url.name))
  }
  return(graph.matrix)
}
## Build a simplified directed igraph object from a two-column edge matrix.
## Duplicate edges are collapsed; self-loops are kept intentionally.
generate.graph <- function(graph.matrix){
  edge.frame <- data.frame(from = graph.matrix[, 1], to = graph.matrix[, 2])
  pathway <- graph.data.frame(edge.frame, directed = TRUE,
                              vertices = get.vers(edge.frame))
  simplify(pathway, remove.loops = FALSE)
}
## Produce a two-layer (source band / destination band) layout for the
## given (from, to) edge matrix; thin wrapper around layout.layered().
generate.layout <- function(graph.matrix){
  layout.layered(graph.matrix)
}
## Draw a pathway graph.
##
## Args:
##   graph.obj: an igraph graph to plot.
##   layout: vertex coordinate matrix; when NULL a Fruchterman-Reingold
##           layout is computed and normalised to [-1,1]x[-1,1].
##   plot.params: list carrying "vertex.label.cex" and "vertex.size"
##                (defaults to the file-level plot.params.1).
##   main: plot title.
## Side effect: renders onto the currently active graphics device.
plot.pathway <- function(graph.obj, layout=NULL, plot.params=plot.params.1, main=""){
if (is.null(layout)){
layout <- layout.fruchterman.reingold(graph.obj)
layout <- layout.norm(layout, -1,1, -1,1)
}
plot(graph.obj, layout=layout, main=main,
edge.color="#555555",
vertex.label=V(graph.obj)$name, vertex.label.cex=plot.params[["vertex.label.cex"]], vertex.size=plot.params[["vertex.size"]],
xlim=c(-1,1), ylim=c(-1,1), rescale=TRUE)
}
##-----------------------------------------------------------
##
## Main: read the monitoring-URL and server JSON configs given on the
## command line, plot one sub-pathway per captured URL plus one overall
## pathway, and persist the graph objects as .robj files.
##
##-----------------------------------------------------------
## Command line analyzer: expects
##   1) monitoring URLs JSON, 2) servers JSON, 3) robj output dir,
##   4) plot output dir.
command.args <- commandArgs(trailingOnly = TRUE)
monitoring.urls.json <- command.args[1]
servers.json <- command.args[2]
robj.dir <-command.args[3]
plot.dir <- command.args[4]
robj.file.pathway <- paste(c(robj.dir, "/overall_pathway.robj"), collapse="")
message("monitoring.urls.json = ", monitoring.urls.json)
message("servers.json = ", servers.json)
message("robj.dir = ", robj.dir)
message("robj.file.pathway = ", robj.file.pathway)
message("plot.dir = ", plot.dir)
message("Reading [", monitoring.urls.json, "] ...")
monitoring.urls <- fromJSON(file=monitoring.urls.json)
message("Reading [", servers.json, "] ...")
servers <- fromJSON(file=servers.json)
## Loop over monitoring pages, accumulating all edges in graph.matrix.
graph.matrix <- c()
for (level in 1:length(monitoring.urls)){
## Loop over the URLs of this level.
for (url.id in 1:length(monitoring.urls[[level]]$urls)){
is.captured <- monitoring.urls[[level]]$urls[[url.id]]$capture
## Only URLs flagged "capture" get a sub-pathway plot and robj.
if (is.captured){
sub.graph.matrix <- c()
file.prefix <- monitoring.urls[[level]]$urls[[url.id]]$file_prefix
url.name <- monitoring.urls[[level]]$urls[[url.id]]$name
services <- monitoring.urls[[level]]$urls[[url.id]]$services
## Setting R objects
## NOTE(review): robj.file2 is built and logged but never written to.
robj.file2 <- paste(c(robj.dir, "/", file.prefix, "__bcp.robj"), collapse="")
robj.file3 <- paste(c(robj.dir, "/", file.prefix, "__pathway.robj"), collapse="")
message("robj.file2 = ", robj.file2)
message("robj.file3 = ", robj.file3)
## Generate from/to matrix (accumulated across all URLs)
graph.matrix <- append.graph.matrix(graph.matrix, url.name, services, servers)
## Generate from/to sub matrix (this URL only)
sub.graph.matrix <- append.graph.matrix(sub.graph.matrix, url.name, services, servers)
sub.pathway.obj <- generate.graph(sub.graph.matrix)
layout <- generate.layout(sub.graph.matrix)
plot.file <- paste(c(plot.dir, "/", file.prefix, ".png"), collapse="")
message("Plotting [", plot.file, "] ...")
png(filename = plot.file, width = SUB.WIDTH, height = SUB.HEIGHT)
plot.pathway(sub.pathway.obj, layout, plot.params=plot.params.2)
dev.off()
## saving robj
message("Saving [", robj.file3, "] ...")
save(file=robj.file3, sub.pathway.obj, sub.graph.matrix)
}
}
}
##----------------------------------------
## Generating plots
##----------------------------------------
## Standard plot for Analysis: the overall pathway across every URL.
overall.pathway.obj <- generate.graph(graph.matrix)
## Calculating probability or normalized votes ?
plot.file <- paste(c(plot.dir, "/overall_pathway.png"), collapse="")
message("Plotting [", plot.file, "] ...")
png(filename = plot.file, width = WIDTH, height = HEIGHT)
main <- "Relational map of monitoring and system"
plot.pathway(overall.pathway.obj, layout=NULL, main=main, plot.params.1)
dev.off()
## saving robj
message("Saving [", robj.file.pathway, "] ...")
save(file=robj.file.pathway, overall.pathway.obj, graph.matrix)
|
9644d8c71ee87209934884961acc4992963448a8
|
907af44f17d7246e7fb2b967adddb937aa021efb
|
/man/fslmean.Rd
|
6870e10115de92a714b272fc7ba716590852fe23
|
[] |
no_license
|
muschellij2/fslr
|
7a011ee50cfda346f44ef0167a0cb52420f67e59
|
53276dfb7920de666b4846d9d8fb05f05aad4704
|
refs/heads/master
| 2022-09-21T07:20:18.002654
| 2022-08-25T14:45:12
| 2022-08-25T14:45:12
| 18,305,477
| 38
| 23
| null | 2019-01-10T20:57:47
| 2014-03-31T19:35:03
|
R
|
UTF-8
|
R
| false
| true
| 659
|
rd
|
fslmean.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fslmean.R
\name{fslmean}
\alias{fslmean}
\title{Image Mean}
\usage{
fslmean(img, nonzero = FALSE, verbose = TRUE, ts = FALSE)
}
\arguments{
\item{img}{Object of class nifti, or path of file}
\item{nonzero}{(logical) Should the statistic be taken over non-zero voxels}
\item{verbose}{(logical) print out command before running}
\item{ts}{(logical) is the series a timeseries (4D), invoking \code{-t}
option}
}
\value{
A vector, unless the \code{ts} option is invoked, in which case a matrix
}
\description{
Estimates Mean of Image from FSL
}
\note{
This uses option -m or -M in \code{\link{fslstats}}
}
|
0fae7e6dfad3aab7201e875d84ed54810734f9cc
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/koRpus/examples/strain.Rd.R
|
985d2ce5252669295a9fb6fd7384ab66839ae798
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 192
|
r
|
strain.Rd.R
|
# Extracted example stub for koRpus::strain (Strain Index readability).
library(koRpus)
### Name: strain
### Title: Readability: Strain Index
### Aliases: strain
### Keywords: readability
### ** Examples
# The example is wrapped in "Not run" because it needs a tagged text object.
## Not run:
##D strain(tagged.text)
## End(Not run)
|
4c972272e219996d50298ae8df46d0c1a7d31be0
|
d198cb229e2dff928df9a23b682f7297d0770491
|
/mGFLMi.R
|
ec689f7070acde919a8b9e54916b51d583c42eed
|
[] |
no_license
|
aaron-scheffler/CARR-GFLM
|
42ade1cd7e99e30b28390fec13e99a660b9f3d21
|
f9f863d6244c5a98c8623fc23092c12f231b80a2
|
refs/heads/main
| 2023-08-24T14:40:49.544678
| 2021-09-24T23:44:36
| 2021-09-24T23:44:36
| 410,129,161
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,393
|
r
|
mGFLMi.R
|
mGFLMi <- function(X, # data.frame in long format with five labeled columns (described below)
# and row length equal to the length of the vectorized region-referenced
# functional predictors across all subjects
# DATA.FRAME COLUMNS:
# ID: subject ID
# x: region-referenced functional predictor
# reg: regional argument
# func: functional argument
# covar: covariate argument
y, # outcome (vector)
w, # functional domain (vector)
Basisw, # functional basis (matrix)
Pw, # functional penalty (matrix)
dist, # distribution of outcome (character)
pvar, # proportion of variation (scalar in [0, 1])
kbasis, # degrees of freedom for region-referenced mean surface smoothing, c(functional)
weights # implement weighted smoothing for region-referenced mean surface (1 = yes, 0 = no), for binomial outcomes only
){
#############################################################################
## Description: fits multivariate generalized functional linear model with age interaction (m-GFLMi)
## described in "Covariate-Adjusted Region-Referenced Generalized Functional Linear Model for EEG Data"
## by Scheffler et al. (2019).
## Inputs: see above
## Returns: list()
## rm: mgcv model fit (list)
## fit: fitted values returned from mgcv model (vector)
## betaHat: regression function (matrix)
## betaHati: regression function for interaction term (matrix)
## V: right singular vectors of design matrix (matrix)
## regMod: region-referenced smooth models (list)
## regMean: region-referenced smooths (list)
## m-GFLMi Outline:
## 0. Calculate summary variables and format data
## 1. Mean center subject-specific functional predictors
## 2. Construct design matrix
## 3. Construct penalty matrix
## 4. Fit m-GFLMi using mgcv
## 5. Construct regression functions
#############################################################################
# Install missing packages
# NOTE(review): installing packages from inside a model-fitting function is
# a global side effect; consider moving this to package/script setup.
list.of.packages <- c("mgcv", "splines", "data.table", "ppls", "pracma")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if(length(new.packages)) install.packages(new.packages)
# Load packages
library(data.table)
library(splines)
library(mgcv)
library(ppls)
library(pracma)
library(Matrix)
#############################################################################
# 0. Calculate summary variables and format data
#############################################################################
print("O. Calculate summary variables and format data...")
# Summary variables
lw <- length(w) # length of functional domain
by <- w[2] - w[1] # width of functional grid points (assumes an equally spaced grid)
n <- length(unique(X$ID)) # sample size
R <- length(unique(X$reg)) # total regions
ai <- unique(X[, c("ID", "covar")]) # covariate values by subject
ai[, 2] <- ai[order(ai[, 1]), 2] # order covariate values by subject
# Data formatting
y = y[order(unique(X$ID))]
X = data.table(X) # convert data matrix to data.table format
X = X[order(ID, reg, func)] # order by subject, region, frequency
# Weights for smoothing by group: upweight the minority class so the mean
# surface is not dominated by the majority class (binomial outcomes only).
wts <- rep(1, n)
if(dist == "binomial"){
if(weights == 1){
wts[y == as.numeric(names(table(y))[as.numeric(which.min(table(y)))])] <- max(1/(table(y)[1]/table(y)[2]), 1/(table(y)[2]/table(y)[1]))
}
}
wts <- rep(wts, each = lw)
#############################################################################
# 1. Mean center subject-specific functional predictors
#############################################################################
print("1. Mean center subject-specific functional predictors...")
regMean <- matrix(list(), nrow = R)
regMod <- matrix(list(), nrow = R)
newData <- data.frame(w)
colnames(newData) <- c("func")
# Per region: fit a penalized-spline mean curve, subtract its fitted values
# from each subject's predictor, and store the smoothed mean for output.
for(r in 1:R){
regMod[[r]] <- gam(x ~ te(func, k = kbasis, bs = "ps", m = 2), data = X[which(X$reg == r), ], weights = wts)
X$x[which(X$reg == r)] <- X$x[which(X$reg == r)] - regMod[[r]]$fitted.values
regMean[[r]] <- matrix(predict(regMod[[r]], newdata = newData), nrow = lw)
}
#############################################################################
# 2. Construct design matrix
#############################################################################
print("2. Construct design matrix...")
# Form basis design matrix
B <- Basisw # form basis
# Form design matrix for each region
Xmat <- matrix(list(), nrow = R) # subject-specific functional predictors
Zc <- matrix(list(), nrow = R) # functional main effects design matrix
Zci <- matrix(list(), nrow = R) # functional main effects*covariate design matrix
for(r in 1:R){
Xmat[[r]] <- t(matrix(X$x[which(X$reg == r)], nrow = lw)) # form matrix with subject-specific functional predictor in each row
Zc[[r]] <- by * Xmat[[r]] %*% B # numerical integration (Riemann sum) of functional predictor and basis
Zci[[r]] <- by * Xmat[[r]] %*% B # numerical integration of functional predictor and basis (for interaction term)
for(i in 1:n){ # form interaction term between subject-specific functional predictor and age
Zci[[r]][i, ] <- Zci[[r]][i, ] * as.numeric(ai[i,2])
}
}
# Form an overall design matrix
Zc <- matrix(unlist(Zc), nrow = n)
Zci <- matrix(unlist(Zci), nrow = n)
Zc <- cbind(Zc, Zci) # concatenate main effects and interaction term
# Reduce dimension of design matrix via SVD, keeping components that
# cumulatively explain less than pvar of the total variation.
svdZc <- svd(Zc) # perform SVD
V <- svdZc$v[, cumsum(svdZc$d^2 / sum(svdZc$d^2)) < pvar] # extract right singular vectors
ZVc <- Zc %*% V # reduced dimension design matrix
modDat <- list(ZVc = ZVc, a = as.numeric(unlist(ai[,2]))) # store design matrix in list
#############################################################################
# 3. Construct penalty matrix
#############################################################################
print("3. Construct penalty matrix...")
PP <- matrix(list(), nrow = R) # main effect penalty matrix
PPi <- matrix(list(), nrow = R) # interaction term penalty matrix
PV <- matrix(list(), nrow = R) # main effect penalty matrix adjusted for SVD
PVi <- matrix(list(), nrow = R) # interaction term penalty matrix adjusted for SVD
db <- dim(Basisw)[2]
# Per region: place the functional penalty Pw in that region's diagonal
# block (main-effect half for PP, interaction half for PPi), then rotate
# each block penalty into the reduced SVD coordinates.
for(r in 1:R){
PP[[r]] <- matrix(0, nrow = db * R, ncol = db * R) # form penalty matrix
PP[[r]][(1 + db * (r - 1)):((db) + db * (r - 1)), (1 + db * (r - 1)):((db) + db * (r - 1))] <- Pw
PP[[r]] <- bdiag(PP[[r]], matrix(0, nrow = db * R, ncol = db * R))
PV[[r]] <- as.matrix(t(V) %*% PP[[r]] %*% V) # adjust penalty matrices to account for dimension reduction via SVD
PPi[[r]] <- matrix(0, nrow = db * R, ncol = db * R) # form penalty matrix
PPi[[r]][(1 + db * (r - 1)):((db) + db * (r - 1)), (1 + db * (r - 1)):((db) + db * (r - 1))] <- Pw
PPi[[r]] <- bdiag(matrix(0, nrow = db * R, ncol = db * R), PPi[[r]])
PVi[[r]] <- as.matrix(t(V) %*% PPi[[r]] %*% V) # adjust penalty matrices to account for dimension reduction via SVD
}
PP <- list(ZVc = c(PV, PVi)) # store penalty matrix in list
#############################################################################
# 4. Fit m-GFLMi using mgcv
#############################################################################
print("4. Fit m-GFLMi using mgcv...")
rm <- gam(y ~ 1 + a + ZVc, data = modDat, paraPen = PP, family = dist, method = "REML")
#############################################################################
# 5. Construct the regression function
#############################################################################
print("5. Construct the regression function...")
coefs <- c(unlist(rm$coefficients))[-1] # extract model coefficients (excluding intercept)
Tbasis = Basisw # construct tensor basis
db <- dim(Basisw)[2]
# Map the reduced coefficients back through V and the basis, one region at
# a time: first half of the rotated coefficients are main effects, second
# half the covariate-interaction effects.
betaHat <- matrix(NA, nrow = length(w), ncol = R) # organize regression function by regions
for(r in 1:R){
betaHat[, r] <- Tbasis %*% (V %*% coefs[-1])[(1 + db * (r - 1)):((db) + db * (r - 1))]
}
betaHati <- matrix(NA, nrow = length(w), ncol = R) # organize regression function by regions
for(r in 1:R){
betaHati[, r] <- Tbasis %*% (V %*% coefs[-1])[((1 + db * R) + db * (r - 1)):((db * R + db) + db * (r - 1))]
}
output <- list(rm, # m-GFLM model (list)
rm$fitted.values, # fitted values (vector)
betaHat, # main effect regression function (matrix)
betaHati, # interaction term regression function (matrix)
V, # right singular vectors explaining pvar of the total variation (matrix)
regMod, # region-referenced smooth models (list)
regMean # region-referenced smooths (list)
)
names(output) <- c("rm","fit", "betaHat", "betaHati", "V", "regMod", "regMean")
return(output)
}
|
b3ee61111d1be9cb63be40d8fd9b783852fc8c90
|
cd206a99a134a388c7ab086b2135f4a0f487f300
|
/runme.R
|
17715676cdf841f5f2328a59d61b7599c03507b0
|
[] |
no_license
|
jai-somai-lulla/diabetesPrediction
|
67163cd35a3fd02258cd042650873e5bee657afc
|
f75d8b578df18266c30fa5bc15093e97b6c639e0
|
refs/heads/master
| 2020-04-02T14:22:49.340145
| 2018-11-01T15:53:51
| 2018-11-01T15:53:51
| 154,522,028
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,498
|
r
|
runme.R
|
# Diabetes prediction CLI: trains (or loads) a naive Bayes model on the Pima
# diabetes data and, given 8 feature values as command-line arguments,
# prints the predicted class probability.
library("e1071")
args <- commandArgs(TRUE)
# NOTE(review): g1/g2 are parsed but never used outside the commented print.
g1 <- as.double(args[1])
g2 <- as.double(args[2])
#print((g1+g2)[1])
# Train and cache the model on first run; reuse the cached model afterwards.
if(!file.exists("naive.rda")) {
library(caret)
#NAIVE BAYES BLINDX 0.6510417
raw=read.csv("diabetes.csv")
#without cata 0.7292
# NOTE(review): Outcome is converted with sapply(as.factor) here AND again
# below -- the second call is redundant.
raw$Outcome=sapply(raw$Outcome,as.factor)
# Zeros in columns 2-8 are physiologically impossible; treat them as NA
# so they can be imputed.
for(i in 2:8){
(raw[which(raw[,i]==0),i]=NA)
}
raw$Outcome=sapply(raw$Outcome,as.factor)
# Impute the NAs with caret's bagged-tree imputation.
bagged=preProcess(raw,method="bagImpute")
bagged=predict(bagged,raw)
raw=bagged
# 75/25 train/test split; note the split is not seeded, so the cached model
# varies between first runs.
index=sample(nrow(raw),nrow(raw)*0.75)
train=raw[index,]
test=raw[-index,]
model=naiveBayes(Outcome~.,train)
pred=predict(model,test)
tx=table(test$Outcome,pred)
#print(confusionMatrix(tx))
save(tx, file = "naiveacc.rda")
save(model, file = "naive.rda")
}else{
load("naive.rda")
load("naiveacc.rda")
#print(tx)
#print("Load Successful")
}
# Predict for the 8 feature values supplied on the command line.
if (length(args)==8) {
raw=read.csv("diabetes.csv")
x = as.data.frame(t(as.numeric(as.vector(args))))
names(x)=names(raw[,(1:8)])
ans=predict(model,x)
prob=predict(model,x,type="raw")
# if(ans==1){
# print(paste("Positive ",ans," <br />","Probablity Positive==>",prob[1,1]," <br /> Probablity Negative==>",prob[1,2],"<br />"))
# }else{
# print(paste("Negative ",ans,"<br />","Probablity Positive==>",prob[1,1]," <br /> Probablity Negative==>",prob[1,2],"<br />"))
# }
#print(paste("Probablity Positive==>",prob[1,1]," <br /> Probablity Negative==>",prob[1,2],"<br />"))
# NOTE(review): prob[1,1] is the probability of the FIRST factor level of
# Outcome (presumably "0" = negative), yet the commented labels treat it as
# "positive" -- confirm which class the caller expects.
print(prob[1,1])
}else{
print("Insufficinct Input")
}
|
99fd31ba35af5d59819b164d85d27a01b0355c6f
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/Boom/examples/timeseries.boxplot.Rd.R
|
73fc8261ccb65e2958688d486b7bfbf529f3e956
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 375
|
r
|
timeseries.boxplot.Rd.R
|
# Extracted example for Boom::TimeSeriesBoxplot.
library(Boom)
### Name: TimeSeriesBoxplot
### Title: Time Series Boxplots
### Aliases: TimeSeriesBoxplot
### Keywords: hplot
### ** Examples
x <- t(matrix(rnorm(1000 * 100, 1:100, 1:100), nrow=100))
## x has 1000 rows, and 100 columns. Column i is N(i, i^2) noise.
# Weekly dates centred on 2010-01-01 (50 weeks before through 49 after).
time <- as.Date("2010-01-01", format = "%Y-%m-%d") + (0:99 - 50)*7
TimeSeriesBoxplot(x, time)
|
03c0c6cf1474d09a80f72fb1e946954db36df36b
|
295b502d7e367edfa0ee4017f1a7b6a4135211d3
|
/R/ELMBJ.R
|
c6d4637249337b7db7336e41cbfc68394d46c524
|
[] |
no_license
|
whcsu/SurvELM
|
e5b09b504af20ad5e322c687504090cf19689cb9
|
c9297f6bd29ff3448e84d1420aeed1215e611ba9
|
refs/heads/master
| 2021-05-11T02:52:36.034952
| 2020-01-28T08:57:32
| 2020-01-28T08:57:32
| 117,897,717
| 10
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,725
|
r
|
ELMBJ.R
|
##' A Kernel Extreme Learning Machine Using the Buckley-James estimator
##' @title SurvELM ELMBJ
##' @param x The covariates(predictor variables) of training data.
##' @param y Survival time and censored status of training data. Must be a Surv \code{survival} object
##' @param Regularization_coefficient Ridge or Tikhonov regularization parameter. Default value for \code{\link{ELMBJEN}} is 10000. It need be set by the user here when using a single base ELM survival model. Also known as \eqn{C} in the ELM paper.
##' @param kerneltype Type of kernel matrix. kerneltype=1,a RBF kernel;kerneltype=2 , a linear kernel;kerneltype=3 ,a polynomial kernel;kerneltype=4, a sigmoid kernel.
##' @param Kernel_para Parameters for different types of kernels. A single value for kerneltype=1 or 2. A vector for kerneltype=3 or 4.
##' @return List of returned values
##' \tabular{ll}{
##' \code{trainMSE} \tab Mean Square Error(MSE) on training data. \cr
##' \code{newy} \tab Estimated survival times of training data by the Buckley-James estimator. \cr
##' \code{outputWeight} \tab Weights of the output layer in ELM. \cr
##' }
##' @seealso \code{\link{ELMBJEN}}
##' @author Hong Wang
##' @references
##' \itemize{
##' \item Hong Wang et al (2017). A Survival Ensemble of Extreme Learning Machine. Applied Intelligence, DOI:10.1007/s10489-017-1063-4.
##' }
##' @examples
##' set.seed(123)
##' require(SurvELM)
##' require(survival)
##' #Lung DATA
##' data(lung)
##' lung=na.omit(lung)
##' lung[,3]=lung[,3]-1
##' n=dim(lung)[1]
##' L=sample(1:n,ceiling(n*0.5))
##' trset<-lung[L,]
##' teset<-lung[-L,]
##' rii=c(2,3)
##' #A kernel ELM base model
##' kerelmsurv=ELMBJ(trset[,-rii],Surv(trset[,rii[1]],trset[,rii[2]]))
##' #The traing MSE
##' tr_mse=kerelmsurv$trainMSE
##' #New survival times imputed for training data
##' y_impute=kerelmsurv$newy
##' @export
ELMBJ <- function(x,y, Regularization_coefficient, kerneltype=2,Kernel_para=c(2,1))
{
if(missing(Regularization_coefficient)) Regularization_coefficient=10000
# Last column of the Surv object is the censoring status; first is time.
ny <- ncol(y)
status <- y[, ny]
survtime = y[, 1L]
# Impute censored survival times with the Buckley-James estimator.
newy = bjimpute(y = survtime, cen = status, x = x,inibeta = NULL)
c = Regularization_coefficient
# Kernel (Gram) matrix of the training covariates.
omega_train = kernmat(x,kerneltype, Kernel_para,NULL)
# Ridge-regularized kernel ELM solution for the output-layer weights.
outputWeight = solve(omega_train + diag(rep(1, nrow(x)))/c) %*% newy
# Training-set predictions.
ypre = omega_train %*% outputWeight
# NOTE(review): despite the name, this is sqrt(sum of squared errors)
# against the raw (possibly censored) survival times, not a mean -- confirm
# whether a true MSE/RMSE was intended.
trainMSE = sqrt(sum((ypre - survtime)^2))
fit <- list()
fit$trainMSE=trainMSE
fit$newy=newy
fit$outputWeight=outputWeight
# Training inputs and kernel settings are kept so predict methods can
# rebuild the cross-kernel for new data.
fit$trainx=x
fit$kerneltype=kerneltype
fit$Kernel_para=Kernel_para
class(fit) <- "ELMBJ"
fit
}
|
91bb16620a07e67b847111508c5e85f63f5c38c3
|
815debc2788802e65b9eaebc4f43471e69e3794d
|
/sdal19148496-report-dmml1 2/Bank/knn/bank_knn_sample_train.R
|
cb4d2e0ff031802d7a311fd2be99082c7b37de73
|
[] |
no_license
|
sobil-dalal/DMML1
|
9429f5f9221146f3eb8145e930f220dd1b749eca
|
4d2900f4c983f1530638a12fa3cbea255d8b1ad3
|
refs/heads/master
| 2023-03-14T06:03:40.688885
| 2021-02-26T02:42:45
| 2021-02-26T02:42:45
| 294,698,664
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,508
|
r
|
bank_knn_sample_train.R
|
library(caret)

# --- Data partition -------------------------------------------------------
# 80/20 stratified split on the target bank$y (column 21 of the raw data).
# NOTE(review): assumes `bank` (raw) and `bank_n` (normalized features)
# already exist in the workspace -- confirm against the preprocessing script.
indx <- createDataPartition(bank$y, p = 0.8, list = FALSE)
bank_train <- bank_n[indx, ]
bank_test <- bank_n[-indx, ]
# Class labels for the train and test partitions.
bank_train_labels <- bank[indx, 21]
bank_test_labels <- bank[-indx, 21]

# --- STEPS 3-5: train, evaluate and tune k-NN -----------------------------
# Rule-of-thumb starting k: sqrt(number of observations), rounded, and kept
# odd to avoid tie votes in this 2-class problem.
# (The original used a magrittr pipe here without loading magrittr.)
round(sqrt(length(bank$y)))  # 203

# Fit class::knn for a given k and print its confusion matrix.
# The original script repeated this block verbatim once per k value; it is
# factored into a single helper here. Returns the predictions invisibly.
evaluate_knn <- function(k) {
  pred <- class::knn(train = bank_train, test = bank_test,
                     cl = bank_train_labels, k = k)
  print(paste("Model : k =", k))
  print(confusionMatrix(data = pred, reference = bank_test_labels))
  invisible(pred)
}

# All k values explored in the original analysis (odd values only, in the
# same order the original tried them).
k_values <- c(203, 151, 101, 75, 51, 31, 21, 11,
              seq(9, 1, by = -2),
              seq(19, 13, by = -2),
              seq(29, 23, by = -2))
for (k in k_values) {
  evaluate_knn(k)
}

# Summary of previously observed results (kappa / sensitivity):
# - Inbuilt normalization (scale): best kappa = 0.4744, sens = 0.44289,
#   accuracy = 0.908 at k = 9
# - Manual normalization:          best kappa = 0.3052, sens = 0.28125,
#   accuracy = 0.8879 at k = 3
# - Numeric-only features:         best kappa = 0.532,  sens = 0.50539
#   at k = 19  <- BEST OVERALL
# Sample output for the best model (numeric-only, k = 19):
"
[1] 19
Confusion Matrix and Statistics
Reference
Prediction Yes No
Yes 469 226
No 459 7083
Accuracy : 0.9168
95% CI : (0.9107, 0.9227)
No Information Rate : 0.8873
P-Value [Acc > NIR] : < 2.2e-16
Kappa : 0.5329
Mcnemar's Test P-Value : < 2.2e-16
Sensitivity : 0.50539
Specificity : 0.96908
Pos Pred Value : 0.67482
Neg Pred Value : 0.93914
Prevalence : 0.11266
Detection Rate : 0.05694
Detection Prevalence : 0.08438
Balanced Accuracy : 0.73723
'Positive' Class : Yes
"
|
0ae46438dbd43b330036d44294e51730270d3af4
|
2ec442671c9de078bb2ea441b7905caa875883b6
|
/man/chk4aiii_missing_pct.Rd
|
4eb1c97d0c13ceaabd87feb09212f665355758a6
|
[
"MIT"
] |
permissive
|
axmedmaxamuud/HighFrequencyChecks
|
ae39427ace2880bae98001b5f955a95c10a9e3f4
|
d84d7e80cb0b6805330a3d21fa6ff360b62530c7
|
refs/heads/master
| 2021-05-17T23:35:38.027268
| 2019-05-14T11:36:48
| 2019-05-14T11:36:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 876
|
rd
|
chk4aiii_missing_pct.Rd
|
\name{chk4aiii_missing_pct}
\alias{chk4aiii_missing_pct}
\title{
Report the percentage of missing values (NA) per fields
}
\description{
This function provides a report showing the percentage of missing values (NA) for each field.
This report can be global (all the surveys) or displayed for each enumerator ID.
}
\usage{
chk4aiii_missing_pct(ds,
enumeratorID,
enumeratorcheck)
}
\arguments{
\item{ds}{
dataset as a data.frame object
}
\item{enumeratorID}{
name as a string of the field in the dataset where the enumerator ID is stored
}
\item{enumeratorcheck}{
specify if the report has to be displayed for each enumerator or not as a boolean (TRUE/FALSE)
}
}
\value{
\item{logf }{the report}
}
\author{
Yannick Pascaud
}
\examples{
df<-sample_dataset
eid<-"enumerator_id"
ec<-FALSE
chk4aiii_missing_pct(df, eid, ec)
}
|
615f5b58f033504df2a5f88e9cef80427cb4ca01
|
18beba89bd528840d3aab7a171fa671c5ac0cf3a
|
/man/Download_DNAmethylation.Rd
|
2f7f7ebf1bb93ba323bd4e3707e330276d1b09a8
|
[] |
no_license
|
mpru/BIMEGA
|
8d748401ad29f252c9c87b6ec04bca2d185d9a62
|
6b445dc7581a2b78aae559b34c382a2f74d1391f
|
refs/heads/master
| 2021-01-22T17:33:56.718268
| 2016-06-19T04:21:29
| 2016-06-19T04:21:29
| 61,449,411
| 0
| 0
| null | 2016-06-18T20:55:01
| 2016-06-18T19:34:43
|
R
|
UTF-8
|
R
| false
| true
| 1,689
|
rd
|
Download_DNAmethylation.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Download_Preprocess.R
\name{Download_DNAmethylation}
\alias{Download_DNAmethylation}
\title{The Download_DNAmethylation function}
\usage{
Download_DNAmethylation(CancerSite, TargetDirectory, downloadData = TRUE)
}
\arguments{
\item{CancerSite}{character of length 1 with TCGA cancer code.}
\item{TargetDirectory}{character with directory where a folder for downloaded files will be created.}
\item{downloadData}{logical indicating if data should be downloaded (default: TRUE). If false, the url of the desired data is returned.}
}
\value{
list with paths to downloaded files for both 27k and 450k methylation data.
}
\description{
Downloads DNA methylation data from TCGA.
}
\examples{
\dontrun{
# Optional register cluster to run in parallel
library(doParallel)
cl <- makeCluster(5)
registerDoParallel(cl)
# Methylation data for ovarian cancer
cancerSite <- "OV"
targetDirectory <- paste0(getwd(), "/")
# Downloading methylation data
METdirectories <- Download_DNAmethylation(cancerSite, targetDirectory, TRUE)
# Processing methylation data
METProcessedData <- Preprocess_DNAmethylation(cancerSite, METdirectories)
# Saving methylation processed data
saveRDS(METProcessedData, file = paste0(targetDirectory, "MET_", cancerSite, "_Processed.rds"))
# Clustering methylation data
res <- ClusterProbes(METProcessedData[[1]], METProcessedData[[2]])
# Saving methylation clustered data
toSave <- list(METcancer = res[[1]], METnormal = res[[2]], ProbeMapping = res$ProbeMapping)
saveRDS(toSave, file = paste0(targetDirectory, "MET_", cancerSite, "_Clustered.rds"))
stopCluster(cl)
}
}
\keyword{download}
|
86147312fc62a69efd743a0eb463a267798bab54
|
9b2aa890ed7f5d87af800c61d7ec5ab8a07ffb66
|
/numeros/perfecto.R
|
7b0ad062158a72cda1dcf0c068ca00dd2aae1e9a
|
[] |
no_license
|
jjdeharo/General
|
65b1456a5ef849d50a86318d825ac81bdf497a8e
|
99a7b4616f029c1b1d14427b1dd0ba5fcf785457
|
refs/heads/master
| 2021-01-23T08:39:44.975976
| 2015-04-11T11:07:16
| 2015-04-11T11:07:16
| 32,942,956
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 739
|
r
|
perfecto.R
|
# Find the first n perfect numbers
# A perfect number is a natural number equal to the sum of its proper positive divisors, excluding itself
perfecto <- function(n = 4) {
  # Scan upwards from 6 until n perfect numbers have been collected.
  # A number is perfect when tieneamigo() maps it back onto itself.
  found <- 0
  candidate <- 5
  amigos <- vector()
  while (found < n) {
    candidate <- candidate + 1
    if (candidate == tieneamigo(candidate)) {
      found <- found + 1
      amigos[found] <- candidate
      # Progress indicator: how many perfect numbers are still missing.
      cat("\rFaltan:", n - found, "\r")
    }
  }
  return(amigos)
}
# Return the "amicable partner" of a: the sum b of a's proper divisors,
# provided the sum of b's proper divisors gives back a (for a perfect
# number, b == a). Returns FALSE otherwise.
# Fixes vs original: partial argument matching (`p = T`) spelled out as
# `propios = TRUE`, and `T`/`F` literals replaced with TRUE/FALSE.
tieneamigo <- function(a) {
  b <- sum(divisores(a, propios = TRUE))
  if (sum(divisores(b, propios = TRUE)) == a) {
    return(b)
  } else {
    return(FALSE)
  }
}
# All positive divisors of n (its absolute value is used for negative n).
# With propios = TRUE the number itself is excluded (proper divisors).
# Fixes vs original: `F` default replaced with FALSE; `1:(n-1)` / `1:n`
# replaced with seq_len(), which previously produced NA-polluted results
# for n == 0 or n == -1 (e.g. 1:0 counts down).
divisores <- function(n, propios = FALSE) {
  if (n < 0) {
    n <- -n
  }
  if (n == 0) {
    # 0 has no well-defined divisor set here; return an empty vector
    # instead of the NA garbage the original produced.
    return(integer(0))
  }
  if (n == 1) {
    return(1)
  }
  candidates <- if (propios) seq_len(n - 1) else seq_len(n)
  candidates[n %% candidates == 0]
}
|
c40f60e285966a19adc8805a9ce26861ffcb964e
|
6cc2ba52d7fc77cb9c105397d85b32b8ca90e00a
|
/Tecan/auth/google_auth_functions.R
|
9a55909a8aacfcb452cda45190361f0b549b8375
|
[] |
no_license
|
Ploulack/HB
|
dd8abea825a1fc653d14062ab8481d3ed9eaca1e
|
9f8fb6fcbdad2b341bcd39bd9256d5ed5e2ab4b2
|
refs/heads/master
| 2021-09-15T09:04:23.999913
| 2018-04-06T16:15:44
| 2018-04-06T16:15:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,320
|
r
|
google_auth_functions.R
|
# Exchange a one-time Google OAuth2 authorization code for an httr Token2.0.
# The returned token wraps the access/refresh credentials so downstream
# httr calls can use and refresh it; on-disk caching is disabled.
get_token <- function(auth_code, client_id, client_secret, redirect_uri,
                      scope_list) {
  # OAuth app descriptor for the Google endpoint.
  google_app <- httr::oauth_app("google", key = client_id,
                                secret = client_secret)

  # POST the authorization code to Google's token endpoint.
  resp <- httr::POST(
    "https://accounts.google.com/o/oauth2/token",
    body = list(code = auth_code, client_id = client_id,
                client_secret = client_secret, redirect_uri = redirect_uri,
                grant_type = "authorization_code"),
    verbose = TRUE
  )
  stop_for_content_type(resp, "application/json; charset=utf-8")
  creds <- httr::content(resp, type = "application/json")

  # Wrap the raw credentials in a formal httr Token2.0 object.
  httr::Token2.0$new(
    app = google_app,
    endpoint = httr::oauth_endpoints("google"),
    credentials = list(access_token = creds$access_token,
                       token_type = creds$token_type,
                       expires_in = creds$expires_in,
                       refresh_token = creds$refresh_token),
    params = list(scope = scope_list, type = NULL, use_oob = FALSE,
                  as_header = TRUE),
    cache_path = FALSE
  )
}
# Abort with an informative error unless the response's Content-Type header
# exactly matches `expected`; returns NULL invisibly on success.
# Fix vs original: a response with no Content-Type header made the `if`
# condition logical(0) and failed with the opaque "argument is of length
# zero" error -- that case is now reported explicitly.
stop_for_content_type <- function(req, expected) {
  actual <- req$headers$`Content-Type`
  if (is.null(actual)) {
    stop(
      sprintf("Expected content-type:\n%s\nbut the response has no Content-Type header",
              expected),
      call. = FALSE
    )
  }
  if (!identical(actual, expected)) {
    stop(
      sprintf(
        paste0("Expected content-type:\n%s",
               "\n",
               "Actual content-type:\n%s"),
        expected, actual
      ),
      call. = FALSE
    )
  }
  invisible(NULL)
}
# Reconstruct the URL a Shiny session was opened with from its clientData
# (protocol, host, optional :port, optional path). Returns NULL when no
# session is available.
shiny_get_url <- function(session) {
  if (is.null(session)) {
    return(NULL)
  }
  client <- session$clientData
  host <- client$url_hostname
  port <- client$url_port
  path <- client$url_pathname
  # Port and path segments are omitted when empty / root.
  paste0(
    client$url_protocol,
    "//",
    host,
    if (port != "") paste0(":", port),
    if (path != "/") path
  )
}
|
78e2ca80e9cc284ec379aaed560dfcad7c57ec20
|
094905a6ed952725fdb49115e33d529811f0be74
|
/inst/scripts/SampleAnnotationExample55Data.R
|
c609024990beed2b1ef5005534201fb0596c4cae
|
[
"MIT"
] |
permissive
|
isglobal-brge/methylclock
|
48122d60cd68e16b13f2606f291cf80d3524ef6e
|
6a1a333889db9e3d2c10adbea362fa2bb3b82f41
|
refs/heads/master
| 2023-06-25T08:19:29.515722
| 2022-07-11T14:11:23
| 2022-07-11T14:11:23
| 175,777,273
| 29
| 18
|
MIT
| 2021-03-23T09:15:45
| 2019-03-15T08:15:22
|
C++
|
UTF-8
|
R
| false
| false
| 704
|
r
|
SampleAnnotationExample55Data.R
|
# Data was obtained from
#
# https://horvath.genetics.ucla.edu/html/dnamage/
#
# and refers to :
#
# Horvath S (2013) DNA methylation age of human tissues and cell types.
# Genome Biology.2013, 14:R115.
# DOI: 10.1186/10.1186/gb-2013-14-10-r115 PMID: 24138928
#
# Gibbs, W. Biomarkers and ageing: The clock-watcher.
# Nature 508, 168–170 (2014).
# https://doi.org/10.1038/508168a
#
# File contains an example data based on the Illumina DNA Infinium platform
# and can be downloaded :
# NOTE: the next statement fetches the example CSV over the network at
# run time, so it requires an internet connection.
MethylationDataExample55 <-
  read.csv("https://horvath.genetics.ucla.edu/html/dnamage/MethylationDataExample55.csv")
|
a433a787c61b650b91d057fafa4a86b5c19060f1
|
e40e988267966490d147c73824cc5f44e4878c41
|
/R/cor_tests.R
|
7b222983d2739489635d1dfd2da3792f1d03003e
|
[] |
no_license
|
MiRoVaGo/P_plus_E
|
5061f378d831c0a3d9a7b80fca98e866a4629126
|
5341281e1b5b43ce623384085a8f7c6e04bc3727
|
refs/heads/main
| 2023-04-07T17:58:49.937304
| 2022-11-07T12:40:13
| 2022-11-07T12:40:13
| 410,815,948
| 1
| 0
| null | 2022-03-15T09:09:34
| 2021-09-27T09:10:31
|
R
|
UTF-8
|
R
| false
| false
| 2,993
|
r
|
cor_tests.R
|
# Required libraries
library(dplyr)
library(data.table)
library(readr)

# Read one reanalysis CSV (columns Year, T, P, E) and add the total water
# flux PpE = P + E as a new column.
load_pe <- function(path) {
  read_csv(path) %>% as.data.table() %>% .[, PpE := P + E]
}

# Convert T to an absolute anomaly and P, E, PpE to percentage anomalies,
# all relative to the 1981-2010 baseline (Year > 1980 & Year < 2011).
# Baseline means are computed before any column is overwritten, matching
# the original script. Modifies dt by reference and also returns it.
# (The original repeated this block verbatim for each of the 4 products.)
to_anomalies <- function(dt) {
  base <- dt[Year > 1980 & Year < 2011]
  m_T <- mean(base$T)
  m_P <- mean(base$P)
  m_E <- mean(base$E)
  m_PpE <- mean(base$PpE)
  dt[, PpE := 100 * (PpE - m_PpE) / m_PpE
     ][, P := 100 * (P - m_P) / m_P
     ][, E := 100 * (E - m_E) / m_E
     ][, T := T - m_T]
}

# Load each reanalysis product and convert to anomalies.
global_20cr <- to_anomalies(load_pe("./../data/20cr.csv"))
global_era20 <- to_anomalies(load_pe("./../data/era20.csv"))
global_era5 <- to_anomalies(load_pe("./../data/era5.csv"))
global_ncep <- to_anomalies(load_pe("./../data/ncep.csv"))

# Linear trend of each anomaly (PpE, P, E) against temperature, per product.
# print() is used explicitly: bare summary() only autoprints interactively.
datasets <- list(`20cr` = global_20cr, era20 = global_era20,
                 era5 = global_era5, ncep = global_ncep)
for (response in c("PpE", "P", "E")) {
  for (name in names(datasets)) {
    cat("\n====", response, "~ T --", name, "====\n")
    print(summary(lm(reformulate("T", response), datasets[[name]])))
  }
}
|
7eea755fa991b9b155e67a95110b53a9c7fb108e
|
57fd5e509ed5d9204d76789e9b6cfee18dcb52ab
|
/simulate_tournament.R
|
21f977ebc9b31f542ad8f20e24c48c55456ed850
|
[] |
no_license
|
drewlanenga/jackboot-firebase
|
8495d81db0d529bfe96ef639f73aa3cfbb21a1fc
|
28992680e18c92a2e6978b093fc31672766c75df
|
refs/heads/master
| 2021-01-02T23:07:18.759552
| 2014-02-04T17:52:50
| 2014-02-04T17:52:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,196
|
r
|
simulate_tournament.R
|
source('load.R')
#
# functions to help with the 'simulation'
#
# Draw n.sim simulated coefficient values: a Student-t sample (df degrees
# of freedom) scaled by the coefficient's standard error and shifted to its
# point estimate x.bar.
simulate.from.lm <- function(n.sim, x.bar, std.error, df)
{
  t.draws <- rt(n.sim, df)
  x.bar + std.error * t.draws
}
# Simulate n.sim point totals for a team, given its fitted regression
# coefficients (point estimate .x, standard error .se, residual df) and the
# opponent's offensive/defensive ratings. `loc` names the location dummy
# (e.g. 'homeN'); when its coefficient is NA the location term is dropped.
# RNG draws happen in the same order as the original: intercept, opp_or,
# opp_dr, then location.
predict.game <- function(team.coefficients, opp_or, opp_dr, loc, n.sim = 10000)
{
  draw <- function(x.bar, std.error) {
    simulate.from.lm(n.sim, x.bar, std.error, team.coefficients$df)
  }

  b0 <- draw(team.coefficients$int.x, team.coefficients$int.se)
  b.or <- draw(team.coefficients$opp_or.x, team.coefficients$opp_or.se)
  b.dr <- draw(team.coefficients$opp_dr.x, team.coefficients$opp_dr.se)

  loc.x <- team.coefficients[[paste(loc, '.x', sep = '')]]
  if (is.na(loc.x)) {
    b.loc <- 0
  } else {
    b.loc <- draw(loc.x, team.coefficients[[paste(loc, '.se', sep = '')]])
  }

  b0 + (opp_or * b.or) + (opp_dr * b.dr) + b.loc
}
#
# Probability that team1 beats team2 for each entry in the sample submission.
#
# Bug fixes relative to the original:
#  * ratings.team2 and coefficients.team1 looked up the wrong team id
#  * team2 was predicted with an undefined `team.coefficients` object and
#    against its own ratings instead of team1's
#  * write.csv() has no col.names argument (it only produced a warning)
#
submission <- sample.submission
for (i in seq_len(nrow(sample.submission)))
{
  print(i)
  # id format: "<season>_<teamid1>_<teamid2>"
  ids <- strsplit(sample.submission$id[i], '_')[[1]]
  season.letter <- ids[1]
  team.ids <- as.numeric(ids[2:3])

  ratings.team1 <- ratings.spread[ ratings.spread$season == season.letter & ratings.spread$teamid == team.ids[1], ]
  ratings.team2 <- ratings.spread[ ratings.spread$season == season.letter & ratings.spread$teamid == team.ids[2], ]
  coefficients.team1 <- coefficients[ coefficients$season == season.letter & coefficients$teamid == team.ids[1], ]
  coefficients.team2 <- coefficients[ coefficients$season == season.letter & coefficients$teamid == team.ids[2], ]

  # Simulate each team's points against the opponent's ratings at a
  # neutral site ('homeN').
  # NOTE(review): `n.sim` is expected to be defined by load.R -- confirm.
  team1 <- predict.game(coefficients.team1, ratings.team2$off, ratings.team2$def, 'homeN', n.sim)
  team2 <- predict.game(coefficients.team2, ratings.team1$off, ratings.team1$def, 'homeN', n.sim)

  # P(team1 wins) = share of simulations where team1 outscores team2.
  submission[i, 2] <- mean(team1 > team2)
}
write.csv(submission, "data/output/submission.csv", row.names = FALSE)
|
e9ab2cdd8bf8b003e485aecfcd0ce7f3a48636ca
|
2275a7fa3595c07ad52f6a92e535ec4491c95ec4
|
/rcodes/3ldgn_compara_med.r
|
87cbb65097b60e4076921270f3d27f8cf1fa6505
|
[] |
no_license
|
AlfonsBC/Statistical-methods
|
777b021ecf0af50999b8d614eaf3c801abec71d2
|
c0c5676cb26587c0210527c945db98bac2bbf224
|
refs/heads/main
| 2023-03-22T02:50:14.758038
| 2021-03-17T03:47:38
| 2021-03-17T03:47:38
| 345,008,708
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,293
|
r
|
3ldgn_compara_med.r
|
# Working directory (left commented out; set locally as needed)
#setwd("/Volumes/GoogleDrive/Mi unidad/MET_ESTAD/7DATOS")
# required libraries
library("data.table")
library(ggplot2)
# The data are downloaded from
# https://datos.cdmx.gob.mx/dataset/base-covid-sinave
# (the variable dictionary is also available there)
#################
# Read the data #
#################
# fread the SINAVE extract; system.time(...)[3] keeps the elapsed time.
# NOTE(review): the path is machine-specific -- adjust locally.
tt <- system.time(BD <- as.data.frame(fread("C:/Users/pc/Desktop/Data Science IIMAS/6th Semester/6.- ME/5. Datos/Pandemia/sisver_public.csv",
                                            showProgress = TRUE,
                                            sep = ",", header = TRUE,
                                            na.strings= "")))[3]
tt/60
# Object size
print(object.size(BD),units="Gb")
# Filter: Mexico City residents with a confirmed SARS-CoV-2 result
BD1 <- BD[BD$entresi == "CIUDAD DE MEXICO" & BD$resdefin == "SARS-CoV-2", ]
# DEATHS AND HOSPITALIZATIONS (0/1 indicator columns)
BD1$DEF <- 1*!is.na(BD1$fecdef)
BD1$HOSP <- 1*(BD1$tipacien == "HOSPITALIZADO")
tb1 <- table(BD1$DEF)
tb2 <- table(BD1$HOSP)
round(100*tb1/sum(tb1), 1)
round(100*tb2/sum(tb2), 1)
##############################################
# Weak Law of Large Numbers (LDGN) -- mean   #
##############################################
# Chebyshev-style sample sizes n = 1 / (alfa * k^2) for a grid of k.
alfa <- 0.05
k <- seq(0.05, 3, length.out = 20)
n <- round(1/(alfa*(k^2)), 0)
ss <- data.frame(k = k, n = n)
plot(ss, type = "l")
j <- 3
mu <- mean(BD1$edad, na.rm = TRUE)
nn <- ss[j,2]
m <- 10000
x_bar <- rep(NA, m)
# Monte Carlo: m sample means of size nn drawn with replacement from age.
for (t in 1:m) {
  x_bar[t] <- mean(sample(BD1$edad, nn, replace = TRUE), na.rm = TRUE)
}
ss
sigma <- sd(BD1$edad, na.rm = TRUE)
kk <- ss[j,1]
# Does the LDGN bound hold?
sum(abs(x_bar - mu) < kk*sigma)/m >= 0.95
#
# The LDGN gives a very conservative bound!
#
##################################################
# Weak Law of Large Numbers (LDGN) -- proportion #
##################################################
# Worst-case variance 0.25 for a proportion: n = 0.25 / (alfa * eps^2).
alfa <- 0.05
eps <- seq(0.05, 0.5, length.out = 20)
n <- round(0.25/(alfa*(eps^2)), 0)
ss <- data.frame(eps = eps, n = n)
plot(ss, type = "l", lwd = 2)
j <- 2
p <- mean(BD1$DEF)
nn <- ss[j,2]
m <- 10000
p_hat <- rep(NA, m)
# Monte Carlo: m sample proportions of size nn of the death indicator.
for (t in 1:m) {
  p_hat[t] <- mean(sample(BD1$DEF, nn, replace = TRUE), na.rm = TRUE)
}
ss
sigma <- sd(BD1$DEF, na.rm = TRUE)
kk <- ss[j,1]
# Does the LDGN bound hold?
sum(abs(p_hat - p) < kk*sigma)/m >= 0.95
#
# The LDGN gives a very conservative bound!
#
#######################
# COMPARISON OF MEANS #
#######################
# AGE, BY DEATH OUTCOME (Chebyshev-style interval per group)
bar_w <- tapply(BD1$edad, BD1$DEF, mean, na.rm = TRUE)
s_w <- tapply(BD1$edad, BD1$DEF, sd, na.rm = TRUE)
n_w <- tapply(BD1$edad, BD1$DEF, length)
alfa <- 0.05
kk <- 1/sqrt(n_w*alfa)
lim_inf <- bar_w - kk*s_w
lim_sup <- bar_w + kk*s_w
data.frame(mu = bar_w, lim_inf = lim_inf, lim_sup = lim_sup,
           row.names = c("NO FALLECEN", "FALLECEN"))
# PERCENTAGE HOSPITALIZED PER BOROUGH (alcaldia)
p_hat <- 100*tapply(BD1$HOSP, BD1$mpioresi, mean, na.rm = TRUE)
s_p <- 100*tapply(BD1$HOSP, BD1$mpioresi, sd, na.rm = TRUE)
n_p <- tapply(BD1$HOSP, BD1$mpioresi, length)
kk <- 1/sqrt(n_p*alfa)
res <- data.frame(alcaldia = substr(names(p_hat), 1, 5),
                  p_hat = as.numeric(p_hat),
                  lim_inf = as.numeric(p_hat - kk*s_p),
                  lim_sup = as.numeric(p_hat + kk*s_p),
                  stringsAsFactors = FALSE)
# Point estimate with interval bars per borough.
ggplot(res, aes(x = alcaldia, y = p_hat, group = alcaldia)) +
  geom_point(size = 2) +
  geom_errorbar(aes(ymin = lim_inf, ymax = lim_sup)) +
  theme_bw()
|
bfd3b1314b324788e4246af3a512eaf0cee5b138
|
5bb106daabc909357fb7fdf3940a755d635a81ee
|
/data-raw/fluff.R
|
1dec0b058284f19040cb9ee633a04c26b8423d67
|
[] |
no_license
|
alketh/codeword
|
b7abcead4c9c3f67d89b244603b9d4f3b7485659
|
bb42607c17c5d80e757f49b2b5a5da5cacd89e6b
|
refs/heads/master
| 2021-05-09T11:02:35.902491
| 2018-01-26T01:33:32
| 2018-01-26T01:33:32
| 118,981,599
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 113
|
r
|
fluff.R
|
# Snapshot the text of data-raw/agg-data.R (one character element per line)
# into data/dummy_script.rda under the name `dummy_script`.
xxx <- readLines("data-raw/agg-data.R")
dummy_script <- xxx
save(dummy_script, file = "data/dummy_script.rda")
|
19724e030bae5a828319acd5cf07631556739a45
|
8eaf931982f7e38b1a5fd934f2da31383edb459f
|
/global.R
|
eda23ed2fdceb9adbacb176d84ea92e3f56f660a
|
[
"MIT"
] |
permissive
|
alexdum/roclib
|
a1b0356ab43b287ffcbba3c9a518f9f4f229c3c9
|
e82696ce5e6437eebebc5e885ecdb82fa2c79edd
|
refs/heads/main
| 2023-08-29T17:13:57.795073
| 2021-11-15T20:55:28
| 2021-11-15T20:55:28
| 369,631,626
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,339
|
r
|
global.R
|
# Attach all packages the app needs, silencing their startup messages.
suppressPackageStartupMessages({
  library(leaflet)
  library(shinythemes)
  library(shinyjs)
  library(shiny)
  library(shinydashboard)
  library(dplyr)
  library(ggplot2)
  library(ggridges)
  library(viridis)
  library(plotly)
  library(rgdal, quietly = T)
  library(sf)
  library(RColorBrewer)
  library(raster)
  library(png)
  library(shinyWidgets)
  library(shinycssloaders)
  library(ggspatial)
  library(markdown)
  library(ncdf4)
})
# Helper modules shared by ui.R / server.R.
source("utils/utils.R")
source("utils/grapsh_funct.R")
source("utils/calc_func.R")
source("utils/map_func.R")
# One-off preprocessing kept for reference (its output is already baked
# into season+anual_mean_models.rds):
#date1 <- readRDS("www/data/tabs/season+anual_mean_models.rds")
### compute annual values from the seasonal ones
#date11 <- date1%>% group_by(Lon,Lat,model,scen,param)%>% summarise(value= ifelse(param=="precAdjust",sum(value),mean(value)))%>%
#  mutate(season = "anual")
#res <-bind_rows(date1, date11)
#saveRDS(res,"www/data/tabs/season+anual_mean_models.rds")
#
# Color breaks/palettes kept for reference:
# brks.p<-c(50,100.0,150.0,200.0,250.0,300.0,350.0,400.0,450.0,500.0,550,600)
# cols.p <- c("#ffffd9",brewer.pal(9,"GnBu"), "#023858","#081d58","#810f7c","#88419d")
# brks<-c(-6.0,-4.0,-2.0,0.0,2.0,4.0,6.0,8.0,10.0,12.0,14.0,16.0,18.0,20.0,22.0,24.0,26,28.0,30.0,32.0,34.0,36.0)
#cols <- c("#8c6bb1","#9ecae1","#deebf7","#ffffe5",brewer.pal(9,"YlOrRd"),rev(brewer.pal(9,"PuRd")))
## County boundary layer: computed once, then the code above was commented out.
# Options for Spinner
options(spinner.color = "#0275D8", spinner.color.background = "#ffffff", spinner.size = 2)
# Static map layers: counties (reprojected to WGS84), neighbouring
# countries, sea mask and the ANM logo.
judete <- read_sf("www/data/shp/counties.shp") %>% st_transform(4326)
ctrs <- read_sf("www/data/shp/countries.shp")
sea <- read_sf("www/data/shp/sea.shp")
logo <- readPNG("www/png/sigla_anm.png")
# Read the data for the "Details" explore view.
shape_uat <- readRDS(file = "www/data/shp/uat_ro.rds")
shape_region <- readRDS(file = "www/data/shp/region_ro.rds")
shape_county <- readRDS(file = "www/data/shp/county_ro.rds")
start_county <- readRDS("www/data/tabs/anomalies/variables/county_anomalies_annual_prAdjust_rcp45_1971_2100.rds")
# Initial county layer shown by leafletProxy at app start.
start_county <- shape_county %>% right_join(start_county$changes, by = c("code" = "name"))
start_county$values <- start_county$mean_2021_2050
# next click on polygons for graphs
# https://community.rstudio.com/t/shiny-leaflet-link-map-polygons-to-reactive-plotly-graphs/40527/2
|
7db9d021432539ae3b0352228cbcdfddde707607
|
bfce76dad46a2b28a1eb2ae3622c62becd3e5d52
|
/NapaPest toxEval Data Prep Code.R
|
94c1c354bb311f25b26e1bab721f3a53e64cb337
|
[] |
no_license
|
jadeealy14/FRI_R_Work
|
467bee3bdbe97783852139f485591ccbf5f620db
|
27a31da4f92bca4a5922a1bf738cd5abb29e3e61
|
refs/heads/main
| 2023-03-29T00:29:24.423120
| 2021-04-02T04:20:30
| 2021-04-02T04:20:30
| 342,083,939
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,169
|
r
|
NapaPest toxEval Data Prep Code.R
|
# Join pesticide results with site metadata on the USGS site number.
# NOTE(review): assumes NapaResultsCAS and NapaPestSites already exist in
# the workspace -- confirm against the upstream prep script.
NapaResultsCAS <- merge(NapaResultsCAS, NapaPestSites, by= "SITE_NO", no.dups = TRUE)
##------ toxEval "Data" sheet: SiteID / Sample Date / CAS / Value -------
data <- NapaResultsCAS[, colnames(NapaResultsCAS) == "SITE_NO" | colnames(NapaResultsCAS) == "SAMPLE_START_DT" | colnames(NapaResultsCAS) == "CAS.Number" | colnames(NapaResultsCAS) == "RESULT_VA" ]
data <- data[,c("SITE_NO", "SAMPLE_START_DT", "CAS.Number", "RESULT_VA")]
colnames(data) <- c("SiteID", "Sample Date", "CAS", "Value")
# Site numbers must be character; the "T" prefix preserves leading zeros
# when the CSV is opened in Excel.
data$SiteID <- as.character(data$SiteID)
str(data$SiteID)
data$SiteID <- paste0('T', data$SiteID)
View(data)
##----- toxEval "Chemicals" sheet: Class / CAS ------
chemicals <- NapaResultsCAS[, colnames(NapaResultsCAS) == "PARM_SEQ_GRP_NM" | colnames(NapaResultsCAS) == "CAS.Number"]
colnames(chemicals) <- c("Class", "CAS")
View(chemicals)
##----- toxEval "Sites" sheet ----
sites <- NapaResultsCAS[, colnames(NapaResultsCAS) == "SITE_NO" | colnames(NapaResultsCAS) == "DEC_LONG_VA"| colnames(NapaResultsCAS) == "DEC_LAT_VA" | colnames(NapaResultsCAS) == "RSQA_STUDY.x"]
# Duplicate column 1 so the site number serves as both SiteID and Short Name.
sites <- sites[, c(1,1,2,3,4)]
colnames(sites) <- c("SiteID", "Short Name", "site_grouping", "dec_lat", "dec_lon")
# (To drop an accidentally added column: sites$SITEID <- NULL)
# Convert IDs and names from numeric to character before export.
sites$SiteID <- as.character(sites$SiteID)
str(sites$SiteID)
#sites$SiteID <- paste0('T', sites$SiteID)
## Optional "T" prefix for SiteID -- as.character() alone was sufficient here.
View(sites)
sites$`Short Name` <- as.character(sites$`Short Name`)
str(sites$`Short Name`)
##----- export the three sheets as CSV -----
write.csv(data, "NapaToxEval_data.csv")
write.csv(chemicals, "NapaToxEval_chemicals.csv")
write.csv(sites, "NapaToxEval_sites.csv")
##----- load data into toxEval -----
library(toxEval)
# NOTE(review): setwd() with an absolute path only works on this machine.
setwd("/Users/jadeealy/R work/toxEval sheets/")
library(readxl)
read_excel("NapaToxEvalData+Chemicals+Sites.xlsx")
path_to_tox <- system.file("extdata", package="toxEval")
tox_list <- create_toxEval("NapaToxEvalData+Chemicals+Sites.xlsx")
getwd()
##---- benchmark screening and plots -------
ACC <- get_ACC(tox_list$chem_info$CAS)
ACC <- remove_flags(ACC)
cleaned_ep <- clean_endPoint_info(end_point_info)
filtered_ep <- filter_groups(cleaned_ep,
                             groupCol = "intended_target_family",
                             assays = c("ATG","BSK", "NVS", "OT", "TOX21",
                                        "CEETOX", "APR", "CLD", "TANGUAY",
                                        "NHEERL_PADILLA","NCCT_SIMMONS", "ACEA"),
                             remove_groups = c("Background Measurement",
                                               "Undefined"))
chemical_summary <- get_chemical_summary(tox_list, ACC, filtered_ep)
plot_tox_boxplots(chemical_summary, "Biological")
plot_tox_stacks(chemical_summary,
                chem_site = tox_list$chem_site,
                category = "Biological")
plot_tox_endpoints(chemical_summary, top_num = 10,
                   category = "Biological",
                   filterBy = "Cell Cycle")
make_tox_map(chemical_summary,
             chem_site = tox_list$chem_site,
             category = "Biological")
plot_tox_heatmap(chemical_summary,
                 chem_site = tox_list$chem_site,
                 category = "Biological")
str(data)
# Example str(data) output:
#'data.frame': 80 obs. of 4 variables:
# $ SiteID : chr "T11456500" "T11456500" "T11456500" "T11456500" ...
#$ Sample Date: chr "4/18/2017 3:50:00 PM" "5/2/2017 4:10:00 PM" "4/11/2017 5:20:00 PM" "4/25/2017 8:30:00 PM" ...
#$ CAS : chr "138261-41-3" "138261-41-3" "1071-83-6" "138261-41-3" ...
#$ Value : num 16 16 0.02 16 0.02 0.02 7 16 7 0.02 ...
|
21f443562cf00274b950fee91b8cf5f4ea01ef89
|
1ab3fe36ec133cb90fcfc4071c15b37edc1d1c79
|
/Seccion 11 - Conexiones por doquier - Análisis de Redes Sociales/155 - Las matrices de adyacencia y listas de aristas de un grafo.R
|
ef0b83900e3a947ec17ae9ed2b6acacde7a6b14f
|
[] |
no_license
|
achiola/r
|
419d182bd6ec546af4ef0dc10b7b59678ada561b
|
08a5c2b78b58193d7fdbbf0fa612c52ec21df925
|
refs/heads/master
| 2020-07-06T14:24:46.427229
| 2020-01-07T21:43:38
| 2020-01-07T21:43:38
| 203,048,278
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
R
| false
| false
| 785
|
r
|
155 - Las matrices de adyacencia y listas de aristas de un grafo.R
|
# NOTE(review): install.packages() inside a script reinstalls on every run --
# consider guarding with requireNamespace(); kept as-is.
install.packages("Matrix")
library(Matrix)
load("Seccion 11 - Conexiones por doquier - Análisis de Redes Sociales/meetup-hiking.Rdata")
unique(users$user_id)
unique(users$group_id)
# Sparse group-membership incidence matrix: rows = groups, cols = users.
group_membership <- sparseMatrix(users$group_id, users$user_id, x=T)
# Adjacency matrix between users:
# entry (A, B) = number of groups users A and B share
# (e.g. if users A and B share 3 groups, the intersection stores 3).
adjacency <- t(group_membership) %*% group_membership
summary(adjacency)
# Build an edge list (i, j, x) from the sparse triplet summary.
user_edgelist <- as.data.frame(summary(adjacency))
summary(user_edgelist)
# The matrix is symmetric, so keep only the upper triangle (i < j).
user_edgelist.upper <- user_edgelist[user_edgelist$i < user_edgelist$j,]
save(user_edgelist.upper, file="Seccion 11 - Conexiones por doquier - Análisis de Redes Sociales/meetup-hiking-edgelist.Rdata")
|
71bb2a04a964607398472d3d0fa94e77f7a629fb
|
31f3d6031b5ac2310317b72a20ef8f2c29d55049
|
/r/src/background/plot.bg.r
|
4a64035815ce2900c615b455df27e65c29815c8f
|
[] |
no_license
|
uataq/X-STILT
|
638c3c76e6e396c0939c85656a53eb20a1eaba74
|
eaa7cfabfc13569a9e598c90593f6418bc9113d5
|
refs/heads/master
| 2023-07-22T03:14:50.992219
| 2023-07-14T16:40:55
| 2023-07-14T16:40:55
| 128,477,511
| 12
| 5
| null | 2023-06-19T22:57:03
| 2018-04-06T22:46:36
|
R
|
UTF-8
|
R
| false
| false
| 1,550
|
r
|
plot.bg.r
|
# Render the background-plume figure(s) for each background side found in
# `bg_df` (or only `bg_side` when given) and write them to disk via ggsave().
# All plotting is delegated to plot.urban.plume(); this wrapper only loops
# over sides and handles file naming. Side effect: writes PNG files.
plot.bg = function(site, site_lon, site_lat, sensor, sensor_gas, recp_box,
                   recp_info, sel_traj, densf, obs_df, plm_df, intersectTF,
                   bg_df, bg_side = NA, bg_deg, bin_deg, map, td, picname,
                   font.size, pp_fn = NULL) {

  # decide which background side(s) to draw
  sides = unique(bg_df$bg.side)
  print(sides)
  if (!is.na(bg_side)) sides = bg_side

  fig_w = 9
  fig_h = 9

  for (side in sides) {

    res = plot.urban.plume(site, site_lon, site_lat, sensor, sensor_gas,
                           recp_box, recp_info, sel_traj, densf, obs_df,
                           plm_df, intersectTF, bg_df, side, bg_deg,
                           bin_deg, map, td, font.size, pp_fn)

    if ( 'list' %in% class(res) ) {
      # list result carries a "delta" figure plus the "total" figure
      delta_name = gsub('forward_plume', 'forward_plume_delta', picname)
      delta_name = gsub('.png', paste0('_', side, '.png'), delta_name)
      ggsave(res$delta, filename = delta_name, width = fig_w, height = fig_h)
      total_fig = res$total
    } else {
      total_fig = res
    }

    # NOTE(review): every side writes the same `picname`, so a later side
    # overwrites an earlier one — confirm this is intended.
    ggsave(total_fig, filename = picname, width = fig_w, height = fig_h)
  } # end for
} # end of function
# Disabled diagnostic block (flip to TRUE to enable): when the plume
# intersects the background region, draw a latitude series and stack it
# under the forward-plume map. Fix: `FALSE` instead of reassignable `F`.
if (FALSE) {
  # if there is an intersection, plot latitude series
  l1 = plot.bg.3d(site, timestr, obs_df, bg_df)

  # merge map of forward plume and latitude series, DW, 10/30/2018
  pl = ggarrange(plotlist = list(p1, l1), heights = c(2, 1), nrow = 2, labels = c('a)', 'b)'))
}
|
2940d3bb3127c04ab54941968cadfdbaa11177c8
|
6ad337e2b26380a4ebf1ac301bb3e8aff19b846b
|
/R/aCTR.R
|
d569dabf790ad9a91fe06dc0790990232d3b7eb9
|
[] |
no_license
|
kaseyriver11/k3d3
|
2824f2c078c2f0ba0659333b0bd68909442c4270
|
85c21f7725f6afe06a95d773716ddadff4386622
|
refs/heads/master
| 2020-12-29T02:44:20.607587
| 2017-06-04T22:56:11
| 2017-06-04T22:56:11
| 38,123,059
| 5
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,963
|
r
|
aCTR.R
|
#' D3 Visualizations: Collapsible Tree
#'
#' Creates a collapsible tree given an appropriate json file.
#'
#' @param data the json file being used for the visualizations.
#' @param width width for the graph's frame area (in pixels) - default is 500.
#' @param height height for the graph's frame area (in pixels) - default is 300.
#' @param HT what is the maximum number of leaves in a column
#' @param WD what is the maximum depth of the tree
#' @param maxChar what is the maximum number of characters in a nodes text
#' @param maxsize what is the root nodes size
#' @param minimum_distance minimum spacing between nodes - default is 21
#' @param top_bar text shown in the widget's top bar - default lists the column names
#' @param color1 the color of the circles which still have children - default "lightsteelblue"
#' @param color2 the fill of circles whose children are already shown, or that do not have children - default "#fff"
#' @param color3 the color of the outside of the circles - default "steelblue"
#' @param color4 the color of the lines connecting the circles - default "#ccc"
#'
#' @examples
#' \dontrun{
#' # load in an appropriate json file.
#' # Such as \url{https://gist.github.com/mbostock/1093025#file-flare-json}
#' # we will call this json.json
#' aCTR(json.json) # This should reproduce the Mike Bostock's Example
#' aCTR(json.json, color1 = "blue", color2 = "red", color3 = "green", color4 = "black")
#' # Here we change around the colors of the visualization.
#' }
#'
#' @source
#' D3.js was created by Michael Bostock. See \url{http://d3js.org/}
#'
#' @import htmlwidgets
#'
#' @export
aCTR <- function(data,
                 width = 500,
                 height = 300,
                 HT = 20,
                 WD = 6,
                 maxChar = 50,
                 maxsize = 20,
                 minimum_distance = 21,
                 top_bar = 'BRAND, SEGMENT, BASE SIZE, SUB1, SUB2, SUB3',
                 color1 = "lightsteelblue",
                 color2 = "#fff",
                 color3 = "steelblue",
                 color4 = "#ccc")
{
  # bundle the display options passed to the JavaScript side
  options = list(
    width = width,
    height = height,
    HT = HT,
    WD = WD,
    maxChar = maxChar,
    maxsize = maxsize,
    minimum_distance = minimum_distance,
    top_bar = top_bar,
    color1 = color1,
    color2 = color2,
    color3 = color3,
    color4 = color4
  )
  # create widget
  htmlwidgets::createWidget(
    name = "aCTR",
    x = list(data = data, options = options),
    width = width,
    height = height,
    htmlwidgets::sizingPolicy(padding = 0, browser.fill = TRUE),
    package = "k3d3"
  )
}
#' @rdname k3d3-shiny
#' @export
aCTROutput <- function(outputId, width = "100%", height = "500px") {
  # Shiny UI placeholder for an aCTR widget rendered by arenderCTR().
  shinyWidgetOutput(outputId, "aCTR", width, height, package = "k3d3")
}
#' @rdname k3d3-shiny
#' @export
arenderCTR <- function(expr, env = parent.frame(), quoted = FALSE) {
  # Capture the unevaluated expression unless the caller already quoted it.
  if (!quoted) {
    expr <- substitute(expr)
  }
  shinyRenderWidget(expr, aCTROutput, env, quoted = TRUE)
}
|
b1211a5e2d56162e92a2259e31af17fcc5576d21
|
6f7403d41fe5f3cf5bddfd3cdd78f38131098ac2
|
/pca_wine-v2.R
|
07134be6b990162e51c12f2dbe15d76ab519eb97
|
[] |
no_license
|
nirajpjaiswal/R_With_ML
|
9490777f57ec8e11e2482bfee43e8c95ecd4396f
|
1aa5e8450212b29d28b649b9745d3528b8211073
|
refs/heads/main
| 2023-05-06T15:38:44.515907
| 2021-05-27T07:13:12
| 2021-05-27T07:13:12
| 356,593,420
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,303
|
r
|
pca_wine-v2.R
|
# PCA
# dataset: wine
# method: SVD (singular value decomposition)
# Use this for class demo
# Workflow: scale features -> PCA -> keep 2 PCs -> SVM classifier -> plot.
library(caTools)
library(e1071)
library(caret)
# NOTE(review): absolute Windows path — adjust before running on another machine.
path="F:/aegis/4 ml/dataset/unsupervised/pca/wine.csv"
wine=read.csv(path)
View(wine)
# y-variable (customer_segment) categorises customers based on various parameters
# for a given wine, predict to which customer segment this wine has to be recommended
# how to plot all these variables in a graph to show the segmentation
# use PCA to extract the 2 most (new) important features that can explain the maximum variation in the dataset
# prediction region and prediction boundary can then be viewed from this reduced dimensions
# these newly extracted features are called PRINCIPAL COMPONENTS
length(colnames(wine))
# feature scaling using the minmax()
# Min-max normalisation: linearly rescale a numeric vector onto [0, 1].
# Behaviour matches the original one-liner: a constant vector yields NaN
# (max == min) and NA values propagate (no na.rm).
# Fix: drop the redundant return() and scan the vector once via range()
# instead of separate min()/max() calls.
minmax = function(x) {
  rng = range(x)
  (x - rng[1]) / (rng[2] - rng[1])
}
# Earlier variant kept for reference: scale only the feature columns.
#pos = grep('Customer_Segment', colnames(wine))
#wine_scale = wine[,c(1:pos-1)]
#wine_scale=as.data.frame(lapply(wine_scale,minmax))
#wine_scale[pos]=wine[pos]
#View(wine_scale)
# scale the dataset
# NOTE(review): this min-max scales every column, then restores the original
# (unscaled) target column afterwards.
winescale=as.data.frame(lapply(wine,minmax))
winescale$Customer_Segment=wine$Customer_Segment
View(winescale)
# column index of the target, used to exclude it from the PCA below
pos = grep('Customer_Segment', colnames(winescale))
pos
# apply the PCA
# --------------
# PCA on the scaled features only (target column excluded via -pos).
pca=prcomp(winescale[-pos])
# pca
summ = summary(pca)
# look under "porportion of variance" to get the % of variance explained
print(summ)
# transpose the importance table so each PC becomes a row
res1=t(data.frame(summ$importance))
View(res1)
expl_var = res1[,'Proportion of Variance']
# explained variation
# expl_var = (summ$sdev^2)/sum( (summ$sdev)^2)
screeplot(pca,col="brown",main="Principal Components")
# Yes, rotation (orthogonal) is necessary because it maximizes the difference between variance captured by the component. This makes the components easier to interpret. Not to forget, that's the motive of doing PCA where, we aim to select fewer components (than features) which can explain the maximum variance in the data set. By doing rotation, the relative location of the components doesn't change, it only changes the actual coordinates of the points.
# If we don't rotate the components, the effect of PCA will diminish and we'll have to select more number of components to explain variance in the data set.
# Bar-chart versions of the scree plot (ggplot2 is attached via caret).
df = data.frame(PC= paste0("PC",1:13), var_explained=expl_var)
df$PC=factor(df$PC, levels=paste0("PC",1:13))
df
str(df)
ggplot(df,aes(x=PC,y=var_explained)) +
  geom_col(size=1,fill="white", colour="blue") +
  labs(title = "Scree Plot")
ggplot(df,aes(x=PC,y=var_explained)) +
  geom_bar(stat="identity", colour="black",fill="violet") +
  labs(title = "Scree Plot")
# build the PCA
# Keep only the first two principal components as features, plus the target.
wine_pca = as.data.frame(pca$x)
wine_pca = wine_pca[c(1,2)]
wine_pca$Customer_Segment = wine$Customer_Segment
View(wine_pca)
# ---------------------------------------------
# from here, build any classification model
# ----------------------------------------------
# shuffle the dataset
# NOTE(review): no set.seed(), so the shuffle/split is not reproducible.
wine_pca = wine_pca[order(sample(seq(1,nrow(wine_pca)))),]
View(wine_pca)
# split the dataset into train and test
split=sample.split(wine_pca$Customer_Segment,SplitRatio = 0.8)
train=subset(wine_pca,split==TRUE)
test=subset(wine_pca,split==FALSE)
nrow(wine_pca); nrow(train); nrow(test)
View(train)
View(test)
# build an SVM
model=svm(Customer_Segment~., data=train, kernel='linear',
          type='C-classification')
prediction = predict(model,test[-3])
confusionMatrix(as.factor(test$Customer_Segment), as.factor(prediction))
# visualize the results
# ----------------------
# Decision-region plot over a fine PC1/PC2 grid coloured by predicted class.
# install.packages("ElemStatLearn")
# NOTE(review): ElemStatLearn was archived from CRAN — may need to install
# from the archive; confirm availability.
library(ElemStatLearn)
set=train
X1=seq(min(set[,1])-1,max(set[,1])+1, by=0.1)
X2=seq(min(set[,2])-1,max(set[,2])+1, by=0.1)
grid_set = expand.grid(X1,X2)
colnames(grid_set)=c('PC1','PC2')
y_grid = predict(model,newdata = grid_set)
length(y_grid)
plot(set[,-3],
     main="SVM Classification",
     xlab='PC1', ylab='PC2',
     xlim=range(X1), ylim=range(X2))
# decision boundary (note: `add=T` uses the reassignable T; prefer TRUE)
contour(X1,X2,matrix(as.numeric(y_grid),length(X1),length(X2)),add=T)
points(grid_set,pch='.',col=ifelse(y_grid==2,'deepskyblue', ifelse(y_grid==1, 'springgreen3','tomato') ))
points(set,pch=21,bg=ifelse(set[,3]==2, 'blue3', ifelse(set[,3]==1, 'green4','red3')))
|
4321060a7f249e15443b1f3b820e9a5bd10b7d9f
|
706aa50f561d7f8ebd0cb266e53e30d316f546bc
|
/code/Make_Figure_2.R
|
afdfaa4baffebb52d2ad8d84f21c6033cecb6b0a
|
[] |
no_license
|
willbrugger/vaccine_reevaluation
|
623b18c32edf54a6771eab3a398e7e3c90320bfc
|
15523c63c1e157796c83c10e2bb6cb68e960b4f0
|
refs/heads/main
| 2023-07-11T10:32:29.881584
| 2021-08-20T22:16:24
| 2021-08-20T22:16:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,069
|
r
|
Make_Figure_2.R
|
# Figure 2: US infant mortality rate (male/female) vs HepB vaccination rate,
# 1993-2019, plus Spearman correlation tests.
library(tidyverse)
library(readxl)
# IMR Data
# NOTE(review): read_xlsx()/read_xls() cannot read remote URLs, and these
# GitHub "blob" links return HTML, not the workbook — download the raw file
# to a local path first (e.g. download.file() to tempfile()).
imr_estimates = read_xlsx("https://github.com/tkmika/BIO465_Vaccine/blob/main/data/UNIGME-2020-Country-Sex-specific_U5MR-CMR-and-IMR.xlsx", sheet=2)
# Columns 4-33 are male IMR by year; f-prefixed columns 34-63 are female IMR.
imr = tibble(imr_estimates) %>% rename('Country_code' = 'Child Mortality Estimates', 'Country' = 2, 'Uncertainty_bounds' = 3, '1990' = 4,
                                       '1991' = 5, '1992' = 6, '1993' = 7, '1994' = 8, '1995' = 9, '1996' = 10, '1997' = 11, '1998' = 12, '1999' = 13,
                                       '2000' = 14, '2001' = 15, '2002' = 16, '2003' = 17, '2004' = 18, '2005' = 19, '2006' = 20,
                                       '2007' = 21, '2008' = 22, '2009' = 23, '2010' = 24, '2011' = 25, '2012' = 26, '2013' = 27,
                                       '2014' = 28, '2015' = 29, '2016' = 30, '2017' = 31, '2018' = 32, '2019' = 33,
                                       'f1990' = 34, 'f1991' = 35, 'f1992' = 36, 'f1993' = 37, 'f1994' = 38, 'f1995' = 39, 'f1996' = 40,
                                       'f1997' = 41, 'f1998' = 42, 'f1999' = 43, 'f2000' = 44, 'f2001' = 45, 'f2002' = 46, 'f2003' = 47,
                                       'f2004' = 48, 'f2005' = 49, 'f2006' = 50, 'f2007'= 51, 'f2008' = 52, 'f2009' = 53, 'f2010' = 54,
                                       'f2011' = 55, 'f2012' = 56, 'f2013' = 57, 'f2014' = 58, 'f2015' = 59, 'f2016' = 60, 'f2017' = 61,
                                       'f2018' = 62, 'f2019' = 63)
# drop header/aggregate rows, keep only the median estimate per country
imr = imr[-c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10),]
imr = filter(imr, Uncertainty_bounds == 'Median')
# HebB Data
hepBdata = read_xls('https://github.com/tkmika/BIO465_Vaccine/blob/main/data/HepBdata.xls')
hepBdata = tibble(hepBdata)
hepBdata = select(hepBdata, Country, '1993':'2019')
# Male IMR vs HepB %
male_imr = select(imr, 'Country', '1993':'2019')
imr_all = pivot_longer(male_imr, cols='1993':'2019', names_to = 'Year', values_to = 'IMR')
hepB_all = pivot_longer(hepBdata, cols='2019':'1993', names_to = 'Year', values_to = 'HepB')
all_data = left_join(hepB_all, imr_all, by = c('Country', 'Year')) %>% na.omit(all_data)
us = filter(all_data, Country == 'United States of America')
# NOTE(review): `label` inside geom_point() is unused by that geom — confirm intended.
ggplot(us) + geom_text(aes(x = HepB, y = IMR, label = Year), hjust = 1.25, vjust = 1, check_overlap = TRUE) + geom_point(aes(x = HepB, y = IMR, label = Year)) +
  theme_bw(base_size = 16) + labs(x = "1-year-old children vaccinated for HepB (%)", y = 'Male Infant Mortality Rate (deaths/1000)') +
  scale_x_continuous(expand = c(0,0), lim = c(0, 102)) + scale_y_continuous(expand = c(0,0), lim = c(4.74, 9.5))
# Female IMR vs HepB %
female_imr = select(imr, 'Country', 'f1993':'f2019')
female_imr = rename(female_imr, '1993'= 'f1993', '1994' = 'f1994', '1995' = 'f1995', '1996' = 'f1996', '1997' = 'f1997',
                    '1998' = 'f1998', '1999' = 'f1999', '2000' = 'f2000', '2001' = 'f2001', '2002' = 'f2002', '2003' = 'f2003',
                    '2004' = 'f2004', '2005' = 'f2005', '2006'='f2006', '2007' = 'f2007', '2008' = 'f2008', '2009' = 'f2009',
                    '2010' = 'f2010', '2011' = 'f2011', '2012' = 'f2012', '2013' = 'f2013', '2014' = 'f2014', '2015' = 'f2015',
                    '2016' = 'f2016', '2017' = 'f2017', '2018' = 'f2018', '2019' = 'f2019')
female_imr_all = pivot_longer(female_imr, cols='1993':'2019', names_to = 'Year', values_to = 'IMR')
all_f = left_join(hepB_all, female_imr_all, by = c('Country', 'Year')) %>% na.omit(all_f)
usa = filter(all_f, Country == 'United States of America')
ggplot(usa) + geom_text(aes(x = HepB, y = IMR, label = Year), hjust = 1.25, vjust = 1, check_overlap = TRUE) + geom_point(aes(x = HepB, y = IMR, label = Year)) +
  theme_bw(base_size = 16) + labs(x = "1-year-old children vaccinated for HepB (%)", y = 'Female Infant Mortality Rate (deaths/1000)') +
  scale_x_continuous(expand = c(0,0), lim = c(0, 102)) + scale_y_continuous(expand = c(0,0), lim = c(4.75, 9.5))
# Statistical Test
cor.test(us$HepB, us$IMR, method='spearman', exact = FALSE)
cor.test(usa$HepB, usa$IMR, method = 'spearman', exact = FALSE)
|
4cf0686bdc812235e33f09b633a0c7498721d709
|
3be35f6e9bf55ed92efb3d0cdcf2ebd36b931b4d
|
/man/eigen.test.Rd
|
6adf4d6b639575c5b625413ce20e3b641d2244a9
|
[] |
no_license
|
cran/vcvComp
|
9097272287ff319bded4b49819afd05de3fbc511
|
d965eb36cde4192dd4ff540c9850a7bfa7eac1ef
|
refs/heads/master
| 2020-12-22T23:04:28.707405
| 2020-12-17T08:00:02
| 2020-12-17T08:00:02
| 236,956,908
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,825
|
rd
|
eigen.test.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/eigen.test.R
\name{eigen.test}
\alias{eigen.test}
\title{Difference test for successive relative eigenvalues}
\usage{
eigen.test(n, relValues)
}
\arguments{
\item{n}{the sample size(s), given as a number or a vector of length 2}
\item{relValues}{a vector of relative eigenvalues}
}
\value{
The P-values for the test of difference between successive eigenvalues
}
\description{
Tests the difference between two successive relative eigenvalues
}
\examples{
# Data matrix of 2D landmark coordinates
data("Tropheus.IK.coord")
coords <- which(names(Tropheus.IK.coord) == "X1"):which(names(Tropheus.IK.coord) == "Y19")
proc.coord <- as.matrix(Tropheus.IK.coord[coords])
# Data reduction
phen.pca <- prcomp(proc.coord, rank. = 5, tol = sqrt(.Machine$double.eps))
pc.scores <- phen.pca$x
# Covariance matrix of each population
S.phen.pop <- cov.group(pc.scores, groups = Tropheus.IK.coord$POP.ID)
# Relative PCA = relative eigenanalysis between 2 covariance matrices
# (population IKA1 relative to IKS5)
relEigen.a1s5 <- relative.eigen(S.phen.pop[, , "IKA1"], S.phen.pop[, , "IKS5"])
# Test of the difference between 2 successive eigenvalues
# of the covariance matrix of IKA1 relative to IKS5
n_ika1 <- length(which(Tropheus.IK.coord$POP.ID == "IKA1")) # sample size for IKA1
n_iks5 <- length(which(Tropheus.IK.coord$POP.ID == "IKS5")) # sample size for IKS5
eigen.test(n = c(n_ika1, n_iks5), relValues = relEigen.a1s5$relValues)
}
\references{
Mardia KV, Kent JT, Bibby JM (1979)
\emph{Multivariate analysis}. Academic Press, London.
}
\seealso{
\code{\link{relative.eigen}} for the computation of relative eigenvalues,
\code{\link[stats:Chisquare]{pchisq}} for Chi-squared distribution
}
|
e4ca17e1cd544b6cff9cee9a1e28cf3227c46899
|
493583c405b9e6267b25b7db400ee32f18ae092f
|
/inst/doc/do/ALB.BULK.R
|
3ec30eb171b066cbe06ec4ef0bca6ffb6888fe3d
|
[
"MIT"
] |
permissive
|
dbescond/iloData
|
69a3e2b78b3d868799384c1dd085b1e1e87c44cd
|
c4060433fd0b7025e82ca3b0a213bf00c62b2325
|
refs/heads/master
| 2021-01-21T19:54:33.877674
| 2018-07-05T11:30:47
| 2018-07-05T11:30:47
| 92,175,594
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,137
|
r
|
ALB.BULK.R
|
#############################################################################
# Program to prepare Short-term indicators.
# Short term indicators
# Author: David Bescond ILO / Department of statistics
# Date: April 2016. last update May 2017
#############################################################################
# Setup: clear the Windows temp directory, attach dependencies, move to the
# country bulk folder, and read the mapping workbook (valid files only).
Target <- "ALB"
init_time <- Sys.time()
# NOTE(review): hard-coded Windows path; deletes every dotted file in C:\temp.
cleanTemp <- list.files('C:\\temp\\') %>% as_data_frame %>% filter(value %>% str_detect('\\.'))
if(nrow(cleanTemp) > 0) {for (i in 1:nrow(cleanTemp)){unlink(paste0('C:\\temp\\', cleanTemp$value[i]))} }
require(Ariane,quietly =TRUE)
require(lubridate, quietly =TRUE)
require(readxl,quietly =TRUE)
setwd(paste0(ilo:::path$data, '/',Target,'/BULK/'))
# clear any proxy settings so direct downloads work
Sys.setenv(http_proxy="") #
Sys.setenv(https_proxy="")
Sys.setenv(ftp_proxy="") #
INPUT <- paste0(ilo:::path$data, '/',Target,'/BULK/input/')
# "File" sheet lists source files (validated rows only); "Definition" sheet
# holds the national-to-ILO code mapping.
Mapping_File <- read_excel(paste0('./ReadME_',Target,'.xlsx'), sheet="File", guess_max = 1000) %>% filter(IsValidate %in% 'Yes')
Mapping_Definition <- read_excel(paste0('./ReadME_',Target,'.xlsx'), sheet="Definition", guess_max = 21474836)
# STEP 1 Download, open, CLEAN UP AND REDUCE ORIGINAL FILE
# Scrape the first four workbook links from the INSTAT LFS page with a
# headless PhantomJS browser, download them, then reshape each workbook into
# a long (Sex/Age/Indicator, Time, Value) table saved as .Rdata.
{
require(RSelenium)  ######## 1.7.1 only
pJS <- phantom()
shell('java -jar C:/R/library/RSelenium/bin/selenium-server-standalone.jar', wait = FALSE)
remDr <- remoteDriver(browserName = 'phantomjs')
remDr$open()
remDr$navigate('http://www.instat.gov.al/en/themes/labour-market-and-education/employment-and-unemployment-from-lfs/#tab2' )
Sys.sleep(3)
remDr$getTitle()[[1]]
webElem <- remDr$findElement(value = '//a[@href = "#tab2"]')
webElem$clickElement()
# grab the download href of each of the first four table rows in tab2
firstlink <- remDr$findElement(value = '//div[@id = "tab2"]')$findChildElement("css selector", value = 'tbody')$findChildElement("css selector", value = 'tr')$findChildElements("css selector", value = 'td.icon')[[1]]$findChildElement("css selector", value = 'a')$getElementAttribute('href') %>% unlist
download.file(firstlink, paste0('./input/', basename(firstlink)), mode = 'wb')
Sys.sleep(2)
secondlink <- remDr$findElement(value = '//div[@id = "tab2"]')$findChildElement("css selector", value = 'tbody')$findChildElements("css selector", value = 'tr')[[2]]$findChildElements("css selector", value = 'td.icon')[[1]]$findChildElement("css selector", value = 'a')$getElementAttribute('href') %>% unlist
download.file(secondlink, paste0('./input/', basename(secondlink)), mode = 'wb')
Sys.sleep(2)
thirdlink <- remDr$findElement(value = '//div[@id = "tab2"]')$findChildElement("css selector", value = 'tbody')$findChildElements("css selector", value = 'tr')[[3]]$findChildElements("css selector", value = 'td.icon')[[1]]$findChildElement("css selector", value = 'a')$getElementAttribute('href') %>% unlist
download.file(thirdlink, paste0('./input/', basename(thirdlink)), mode = 'wb')
Sys.sleep(2)
fourthlink <- remDr$findElement(value = '//div[@id = "tab2"]')$findChildElement("css selector", value = 'tbody')$findChildElements("css selector", value = 'tr')[[4]]$findChildElements("css selector", value = 'td.icon')[[1]]$findChildElement("css selector", value = 'a')$getElementAttribute('href') %>% unlist
download.file(fourthlink, paste0('./input/', basename(fourthlink)), mode = 'wb')
invisible(gc(reset = TRUE))
# shut down the browser and the Selenium server
remDr$close()
invisible(try(remDr$closeServer(), silent = TRUE))
pJS$stop()
rm(pJS)
for (i in 1:length(Mapping_File$NAME)){
# files 1-3: Sex/Age layout with quarterly columns
if(i %in% 1:3){
X <- readxl:::read_excel(paste0('./input/', Mapping_File$NAME[i], '.xlsx'), skip=5)%>%
		select(-c(1,2)) %>%
		gather(Time, Value, -Sex, -Age) %>%
		fill(Age) %>%
		mutate(Time = paste0(str_sub(Time,-4,-1), str_sub(Time,-7,-6)))
}
# file 4: indicator layout; values in thousands, age embedded in label
if(i %in% 4){
X <- readxl:::read_excel(paste0('./input/', Mapping_File$NAME[i], '.xlsx'), skip=3)%>%
		select(-c(1)) %>%
		rename(Indicator = X__2) %>%
		gather(Time, Value, -Indicator) %>%
		mutate(Time = paste0(str_sub(Time,-4,-1), 'Q',str_sub(Time,-7,-7))) %>%
		filter(!Indicator %in% 'of which') %>%
		mutate(
				Value = Value / 1000,
				Indicator = gsub('15 years old and over', '15-++ years', Indicator),
				Age = str_sub(Indicator, -11, -1),
				Indicator = ifelse(nchar(Indicator)> 12, str_sub(Indicator, 1, -13), NA),
				Age = gsub('15-++ years', '15 years old and over', Age, fixed = TRUE)) %>%
		fill(Indicator)
}
save(X, file = paste0('./input/', Mapping_File$NAME[i], '.Rdata'))
unlink(paste0('./input/', Mapping_File$NAME[i], '_COLNAMES.csv'))
rm(X)
invisible(gc(reset = TRUE))
}
}
# STEP 2 MAP to ILO CODE
# For each source file: expand the mapping sheet's ';'-separated national
# labels into full key combinations, match them against the concatenated
# national keys in the data, then aggregate and re-split into ILO code columns.
for (i in 1:length(Mapping_File$NAME)){
print(Mapping_File$NAME[i])
load(paste0(INPUT,Mapping_File$NAME[i],".Rdata"))
# get mapping frame File should be filled and File name correspond to Mapping_File ID
REF_MAPPING <- Mapping_Definition %>% filter(!File %in% NA, File %in% Mapping_File$ID[i]) %>% select(-File)
# reduce mapping frame to columns ILO KEY + columns available on the dataset
REF_MAPPING <- REF_MAPPING %>%
			select(contains('_Code')) %>%
			bind_cols(REF_MAPPING %>% select_(.dots = colnames(X)[!colnames(X)%in% c('Time','Value') ]))
# split columns to avail mapping redondancy
# SplitCol <- Mapping_File$SplitCol[i]
# if(!is.na(SplitCol)){
# SplitCol <- str_split(SplitCol, ' = ') %>% unlist
# SplitCol[1] <- gsub(' ', '.', SplitCol[1], fixed = TRUE)
# ref <- str_split(unique(REF_MAPPING[,SplitCol[[1]]]), ';') %>% unlist
# MAP <- NULL
# for ( j in seq_along(ref)){
# MAP <- bind_rows(MAP,
# bind_cols(REF_MAPPING %>% select(-contains(SplitCol[1])), data_frame(pass = 1:nrow(REF_MAPPING), ToChange = ref[j]))
# )
# }
# REF_MAPPING <- MAP %>% select(-pass)
## map sex
# test <- try(
# REF_MAPPING <- REF_MAPPING %>% mutate(ToChangeCode = mapvalues(ToChange, c('Both sexes','Female','Male'), c('SEX_T','SEX_F','SEX_M'), warn_missing = FALSE))
# , silent = TRUE )
# colnames(REF_MAPPING)[colnames(REF_MAPPING) %in% 'ToChange'] <- SplitCol[1]
# colnames(REF_MAPPING)[colnames(REF_MAPPING) %in% 'ToChangeCode'] <- SplitCol[2]
# } else {
# REF_MAPPING <- REF_MAPPING %>% mutate(Sex_Code = 'SEX_T')
# }
# rm(SplitCol)
#create ilo key of ref_mapping
ref_key_ilo <- REF_MAPPING %>% slice(1) %>% select(contains('_Code')) %>% colnames
REF_MAPPING <- REF_MAPPING %>% unite_('KEY_ILO', ref_key_ilo , remove = TRUE, sep = '/')
ref_key_ilo <- paste(ref_key_ilo, collapse = '/')
# clean
REF_MAPPING <- REF_MAPPING %>% mutate_all(funs(gsub('&','&', ., fixed = TRUE)))
#create key of X in national language
ref_key_nat <- X %>% slice(1) %>% select(-Time, -Value) %>% colnames
X <- X %>% unite_('KEY_NAT', ref_key_nat , remove = TRUE, sep = '/')
ref <- c('KEY_ILO', ref_key_nat)
REF_MAPPING <- REF_MAPPING %>% select_(.dots = ref)
# REF_MAPPING <- REF_MAPPING %>% rename(SEX = By.gender)
My_list <- vector("list", length(2:ncol(REF_MAPPING)))
MY_NEW <- X %>% mutate(KEY_ILO = as.character(NA))
rm(X)
invisible(gc(reset = TRUE))
MY_MATRIX <- NULL
j <- 1
# for each ILO key, expand the ';'-separated national labels into the
# cartesian product of combined keys, then collect matching data rows
for (j in 1:nrow(REF_MAPPING)){
MY_NEW$KEY_ILO <- {REF_MAPPING %>% slice(j) %>% select(KEY_ILO) %>% as.character}
for (k in 2:ncol(REF_MAPPING)){
My_list[[k]] <- levels(as.factor(unlist(strsplit(REF_MAPPING[j,colnames(REF_MAPPING)[k]] %>% as.character,";"))))
}
My_REF <- My_list[[2]]
if(ncol(REF_MAPPING)>2){
for(k in 3:ncol(REF_MAPPING)){
My_REF <- paste(sort(rep(My_REF,length(My_list[[k]]))),My_list[[k]],sep="/")
}
}
MY_MATRIX <-bind_rows(MY_MATRIX,
MY_NEW[MY_NEW$KEY_NAT%in%My_REF,colnames(MY_NEW)%in%c("KEY_NAT","KEY_ILO","Time","Value")])
}
invisible(gc(reset = TRUE))
######################### NEXT STEP
# aggregate mapped rows by ILO key/time, then split the key back into the
# individual ILO code columns and attach collection/country/source codes
X <- MY_MATRIX %>%
		mutate(Value = as.numeric(Value)) %>%
		select(-KEY_NAT) %>%
		group_by(KEY_ILO, Time) %>%
		summarise(Value = sum(Value, na.rm = TRUE)) %>%
		ungroup %>%
		rename(ID = KEY_ILO) %>%
		mutate( Collection_Code = Mapping_File$Collection_Code[i],
				Country_Code = Mapping_File$Country_Code[i],
				Source_Code = Mapping_File$Source_Code[i]) %>%
		separate(ID, stringr::str_split(ref_key_ilo, '/') %>% unlist, remove = FALSE, sep = '/') %>%
		select(-ID) %>%
		mutate(Value = as.numeric(Value))
rm(My_REF,MY_MATRIX,MY_NEW,REF_MAPPING, ref_key_ilo, ref_key_nat)
save(X,file = paste(INPUT,Mapping_File$ID[i],".Rdata",sep=""))
rm(X)
invisible(gc(reset = TRUE))
print(Mapping_File$ID[i])
}
# STEP 3 Combined BY COUNTRY and manage exception
# Stack all mapped files, convert to the ilostat column layout, keep data
# from 2014 onward, then write one .Rdata per source prefix plus the loader
# manifest (FileToLoad.csv).
for (i in 1:length(Mapping_File$ID)){
print(Mapping_File$ID[i])
load(paste(INPUT,Mapping_File$ID[i],".Rdata",sep=""))
X <- X %>% mutate_all(as.character)
if(i==1) Y <- X else Y <- bind_rows(Y,X)
rm(X)
invisible(gc(reset = TRUE))
}
# distinct 2-letter source prefixes, used to split the output files below
REF <- levels(as.factor(substr(Y$Source_Code,1,2)))
Y <- Y %>% # converge to ilostat format
		as.tbl %>%
		mutate( obs_status = as.character(NA),
				note_source = 'R1:3903', # add tag Bulk
				obs_value = as.numeric(Value)) %>%
		select( collection = Collection_Code,
				ref_area = Country_Code,
				source = Source_Code,
				indicator = Indicator_Code,
				sex = Sex_Code,
				classif1 = Classif1_Code,
				classif2 = Classif2_Code,
				time = Time,
				obs_value ,
				obs_status,
				freq_code = Notes_Frequency_Code,
				note_classif = Notes_Classif_Code,
				note_indicator = Notes_Indicator_Code,
				note_source
				) %>%
		mutate_all(funs(mapvalues(.,c('XXX_XXX_XXX', 'NaN', '', ' ', 'NA'), c(NA, NA, NA, NA, NA), warn_missing = FALSE))) %>%
		filter(as.numeric(str_sub(time,1,4)) > 2013) %>%
		mutate(obs_status = ifelse(time %in% '2014Q1', 'B', obs_status))
for (i in 1:length(REF)){
X <- Y %>% filter(substr(source,1,2)%in%REF[i])
save(X,file = paste(getwd(),'/output/',Target,'_',REF[i],".Rdata",sep=""))
rm(X)
invisible(gc(reset = TRUE))
}
REF <- cbind(PATH = paste0(getwd(), '/output/',Target,'_',REF,".Rdata"),ID = NA, Types ="NSO_ilostat", REF = Target)
# add historical data
write.csv(REF,paste("./FileToLoad.csv",sep=""),row.names = FALSE,na="")
final_time <- Sys.time(); final_time - init_time
rm(list=ls(all=TRUE))
invisible(gc(reset = TRUE))
q(save = "no", status = 0, runLast = FALSE)
|
0f72f5b49ca718af0054350029bd3309cb1de05f
|
9620db4a06584153b0176b8e2022bef8a7b6ed05
|
/analysis/exploratory/exploratory-analysis_v3.R
|
8de6f106e7a259a2a22496a7aedde97427ecc4c0
|
[] |
no_license
|
UCRCSI/blog_housing
|
de57be8d95b50c110870bf4ccd589dcdf970befc
|
66bcd87dd18c2f3745f2bbb8cc8f55e598008d2a
|
refs/heads/master
| 2020-06-10T23:19:50.061668
| 2020-03-19T20:59:01
| 2020-03-19T20:59:01
| 193,786,315
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 22,718
|
r
|
exploratory-analysis_v3.R
|
# Load relevant libraries
library(tidyverse)
library(survey)
library(srvyr)
library(forcats)
library(reshape2)
library(openxlsx)
library(rlang)
library(tidycensus)

# Loading and cleaning data -----------------------------------------------
## Load data
# AHS 2017 flat file, pre-processed to CSV (relative path from project root).
housing <- read.csv("raw data/ahs2017_flat_r.csv")

## Clean data
# relabeling HH race and ethnicity levels
levels(housing$HHRACE3) <- c("HH AIAN", "HH Asian", "HH Black", "HH NHPI", "HH White")
levels(housing$HHSPAN2) <- c("HH Hispanic or Latinx", "HH Not Hispanic or Latinx")
levels(housing$RACEETH) <- c("HH AIAN", "HH Asian", "HH Black", "HH NHPI", "HH White NH")

# relabeling column variables
# NOTE(review): positional level replacement — assumes a fixed level order
# in the raw file; confirm against the AHS codebook.
levels(housing$TENURE)[2:3] <- c("Owner", "Renter")
levels(housing$HUDSUB)[1:3] <- c("Other renter", "Public housing", "Voucher recipient")
levels(housing$RENTCNTRL)[1:2] <- c("No rent control", "Rent control")
levels(housing$DBMISSRENT)[1:2] <- c("Not missed rent", "Missed rent")

# converting to characters
housing$HUDINCLIM_80_FLAG <- as.factor(as.character(housing$HUDINCLIM_80_FLAG))
housing$HUDINCLIM_50_FLAG <- as.factor(as.character(housing$HUDINCLIM_50_FLAG))
housing$HUDINCLIM_30_FLAG <- as.factor(as.character(housing$HUDINCLIM_30_FLAG))

# converting NAs to (Missing)
# Identify variable groups by name pattern so they can be excluded from the
# blanket factor conversion below.
var.races <- colnames(housing)[grepl("^RACE(_|\\d+$)", names(housing))]
var.span <- colnames(housing)[grepl("^SPAN\\d+$", names(housing))]
var.flag <- colnames(housing)[grepl("FLAG$", names(housing))]
var.num <- colnames(housing)[grepl("^NUM", names(housing))]
var.rating <- colnames(housing)[grepl("^RATING", names(housing))]
# NOTE(review): leading '+' has no operand in this regex — likely intended
# "POV" or "POV$"; confirm it matches the poverty columns as expected.
var.pov <- colnames(housing)[grepl("+POV", names(housing))]

col_factor <- colnames(housing)[!(colnames(housing) %in%
                                    c(var.races, var.span, var.num, var.rating,
                                      var.flag, var.pov, "X.1", "X", "HHAGE",
                                      "HINCP", "FINCP", "TOTHCAMT", "WEIGHT"))]
housing[,col_factor] <- lapply(housing[,col_factor], function(x) fct_explicit_na(x)) %>% as.data.frame
# Housing discrimination by regional family income ---------------------------------
## sample-info
# NOTE(review): these two calls use `housing_weighted`, `race.label`,
# `race.defs`, `span.label`, and `span.defs` before they are defined later
# in this script — running top-to-bottom will fail; confirm intended order.
finc.byrace <- apply_by_defs_two(housing_weighted, "twoway_median",
                                 race.label, race.defs, "FINCP")
finc.byspan <- apply_by_defs_two(housing_weighted, "twoway_median",
                                 span.label, span.defs, "FINCP")

## reading in ACS median family income tables by geography
# NOTE(review): API key redacted — never commit secrets; load it from an
# environment variable instead (e.g. Sys.getenv("CENSUS_API_KEY")).
# census_api_key("<REDACTED>", install = TRUE)
fincome_avg_division <- get_acs(table = "B19113", year = 2017, survey = "acs1",
                                geography = "division")
# 30% / 80% of the division-level median family income (AMI-style limits)
fincome_avg_division <- fincome_avg_division %>% mutate(inclim_30 = 0.30 * estimate,
                                                        inclim_80 = 0.80 * estimate)
# strip the trailing " Division" suffix from the division name for joining
fincome_avg_division$DIVISION <- substr(fincome_avg_division$NAME, 1,
                                        nchar(fincome_avg_division$NAME)-9)

# merging housing data with ACS median family income data
housing <- merge(housing, fincome_avg_division, by = "DIVISION", all.x = TRUE)
housing <- housing %>% mutate(REGINCLIM_80_FLAG = as.factor(ifelse(FINCP < inclim_80, 1, 0)),
                              REGINCLIM_30_FLAG = as.factor(ifelse(FINCP < inclim_30, 1, 0)))

# comparing HUD federal level income limits with regional income limits (ACS data)
## most lenient standard
summary(housing$HUDINCLIM_80_FLAG)
summary(housing$REGINCLIM_80_FLAG)

## strictest standard
summary(housing$HUDINCLIM_30_FLAG)
summary(housing$REGINCLIM_30_FLAG)

# Weighting data ----------------------------------------------------------
# Wrap the data in a survey design object so srvyr verbs apply AHS weights.
housing_weighted <- housing %>% as_survey_design(ids = 1, weight = WEIGHT)
# Defining functions ------------------------------------------------------
# Weighted totals of `df` (an srvyr survey design) for each level of the
# grouping variable named in `group_var` (a string). Rows whose group value
# is "(Missing)" are dropped before summarising.
totals_by_variable <- function(df, group_var) {
  grp <- sym(group_var)
  result <- df %>%
    filter((!!grp) != "(Missing)") %>%
    group_by(!!grp) %>%
    summarize(n = survey_total())
  result
}
# Weighted proportions of `df` (an srvyr survey design) for each level of
# the grouping variable named in `group_var` (a string). "(Missing)" levels
# are excluded before summarising.
prop_by_variable <- function(df, group_var) {
  grp <- sym(group_var)
  result <- df %>%
    filter((!!grp) != "(Missing)") %>%
    group_by(!!grp) %>%
    summarize(race_prop = survey_mean())
  result
}
# Generates a two-way weighted proportion table for the grouping variables
# named in `group_var1` and `group_var2` (strings), with proportions taken
# within levels of group_var1. Returns a wide data frame with one column per
# group_var2 level plus matching "*_se" standard-error columns.
# NOTE(review): dcast's formula uses eval(parse(text = ...)) to reference
# the columns by name — fragile; consider reshape2's formula interface or
# tidyr::pivot_wider() instead.
twoway_prop <- function(df, group_var1, group_var2) {
  sym.group_var1 <- sym(group_var1)
  sym.group_var2 <- sym(group_var2)
  # drop "(Missing)"/NA in either grouping column before summarising
  df %>% filter(((!!sym.group_var1) != "(Missing)" &
                   !(is.na((!!sym.group_var1)))) &
                  ((!!sym.group_var2) != "(Missing)" &
                     !(is.na((!!sym.group_var2))))) %>%
    group_by((!!sym.group_var1), (!!sym.group_var2)) %>%
    summarize(prop = survey_mean()) -> tmp
  # cast proportions and their standard errors to wide form separately
  tmp2 <- dcast(tmp, eval(parse(text = group_var1)) ~
                  eval(parse(text = group_var2)), value.var = "prop")
  tmp3 <- dcast(tmp, eval(parse(text = group_var1)) ~
                  eval(parse(text = group_var2)), value.var = "prop_se")
  colnames(tmp3) <- paste(colnames(tmp2), "_se", sep = "")
  n.col <- ncol(tmp2)
  # bind the SE columns (minus the duplicated id column) onto the estimates
  final <- cbind(tmp2, tmp3[,c(2:n.col)])
  return (final)
}
# Weighted median of the column named in `med_var` within each level of the
# column named in `group_var` (both strings). Rows with "(Missing)" or NA in
# either column are excluded first.
twoway_median <- function(df, group_var, med_var) {
  grp <- sym(group_var)
  med <- sym(med_var)
  df %>%
    filter((!!grp) != "(Missing)", !is.na(!!grp),
           (!!med) != "(Missing)", !is.na(!!med)) %>%
    group_by(!!grp) %>%
    summarize(median_income = survey_median(!!med))
}
# Apply the one-way summary function named `fun.name` (e.g.
# "totals_by_variable") to `df` once per grouping variable in `def.var`,
# relabel each result's first column to `dem.name`, and row-bind the pieces
# into a single data frame.
#
# df       data (e.g. srvyr survey design) passed through to the function
# fun.name name of a function with signature (df, group_var)
# dem.name label for the harmonised first column of the output
# def.var  character vector of grouping-variable names
#
# Fixes: seq_along() instead of 1:n (1:0 loops on c(1, 0) for empty input),
# preallocated result list, and no shadowing of base::ls.
apply_by_defs_one <- function(df, fun.name, dem.name, def.var) {
  FUN <- match.fun(fun.name)
  pieces <- vector("list", length(def.var))
  for (i in seq_along(def.var)) {
    by_def <- FUN(df, def.var[i])
    colnames(by_def)[1] <- dem.name
    pieces[[i]] <- by_def
  }
  do.call("rbind", pieces)
}
# Apply the two-grouping-variable summary function named `fun.name` to each
# alternative definition in `def.var` (crossed with the fixed second
# grouping variable `group.var2`), rename each result's first column to
# `dem.name`, and stack the results row-wise.
#
# IMPROVED: seq_along() instead of 1:n (safe for empty def.var), lapply()
# instead of an index loop, accumulator no longer shadows base::ls, and the
# header comment no longer wrongly claims "one variable at a time".
apply_by_defs_two <- function(df, fun.name, dem.name, def.var, group.var2) {
  FUN <- match.fun(fun.name)
  pieces <- lapply(seq_along(def.var), function(i) {
    by_def <- FUN(df, def.var[i], group.var2)
    colnames(by_def)[1] <- dem.name
    by_def
  })
  do.call("rbind", pieces)
}
# Like twoway_prop(), but first restricts rows to those satisfying
# `criteria`, an R expression supplied as a string (e.g. the HUD income
# limit flag conditions defined below). Returns only the wide proportion
# table (no standard-error columns).
#
# SECURITY NOTE: `criteria` is evaluated with eval(parse(...)); only pass
# trusted, hard-coded expression strings — never user input.
# IMPROVED: the dcast() casting formula is built with as.formula() instead
# of eval(parse(text = ...)).
twoway_prop_criteria <- function(df, group_var1, group_var2, criteria) {
  gv1 <- sym(group_var1)
  gv2 <- sym(group_var2)
  long <- df %>%
    filter(eval(parse(text = criteria)) &
             ((!!gv1) != "(Missing)" & !is.na(!!gv1)) &
             ((!!gv2) != "(Missing)" & !is.na(!!gv2))) %>%
    group_by((!!gv1), (!!gv2)) %>%
    summarize(prop = survey_mean())
  dcast(long, as.formula(paste(group_var1, "~", group_var2)),
        value.var = "prop")
}
# Apply the criteria-filtered two-way summary function named `fun.name` to
# each alternative definition in `def.var` (crossed with `group.var2` and
# restricted to rows satisfying `criteria`), rename each result's first
# column to `dem.name`, and stack the results row-wise.
#
# IMPROVED: seq_along() instead of 1:n (safe for empty def.var), lapply()
# instead of an index loop, and no accumulator shadowing base::ls.
apply_by_defs_two_criteria <- function(df, fun.name, dem.name, def.var,
                                       group.var2, criteria) {
  FUN <- match.fun(fun.name)
  pieces <- lapply(seq_along(def.var), function(i) {
    by_def <- FUN(df, def.var[i], group.var2, criteria)
    colnames(by_def)[1] <- dem.name
    by_def
  })
  do.call("rbind", pieces)
}
# # applies function `totals_by_variable` to demographic grouping variables
# apply_by_definitions <- function(df, dem.name, def.var) {
# n <- length(def.var)
# ls <- list()
# for (i in 1:n) {
# by_def <- totals_by_variable(df, sym(def.var[i]))
# colnames(by_def)[1] <- dem.name
# ls[[i]] <- by_def
# }
# tmp <- do.call("rbind", ls)
# return(tmp)
# }
# totals_by_variable <- function(df, group_var) {
# group_var <- enquo(group_var)
# df %>% filter((!!group_var) != "(Missing)") %>%
# group_by(!!group_var) %>% summarize(n = survey_total()) -> tmp
# return (tmp)
# }
# race_household <- totals_by_variable(housing_weighted, HOUSEHOLDRACE)
# Setup and definitions ---------------------------------------------------
## Setting up Excel workbooks
# Two openxlsx workbooks: `excelfile` for the exploratory analysis and
# `excelfile_hud` for the HUD-specific analysis; both are saved at the end
# of their respective sections.
excelfile <- createWorkbook()
excelfile_hud <- createWorkbook()
## Race definitions and labels
# Each *.defs vector holds two alternative grouping variables for the same
# demographic concept; results from both definitions are stacked under the
# single *.label column name by the apply_by_defs_* helpers.
race.defs <- c("HOUSEHOLDRACE", "HHRACE3")
race.label <- "Household race"
span.defs <- c("HOUSEHOLDSPAN", "HHSPAN2")
span.label <- "Household span"
raceeth.defs <- c("HOUSEHOLDRACEETH", "RACEETH")
raceeth.label <- "Household raceeth"
# Sample-info sheets ------------------------------------------------------
# Weighted sample counts for each demographic definition, one worksheet per
# demographic concept.
# NOTE(review): `housing_weighted` is presumably a srvyr survey-design
# object created earlier in this project — confirm before running.
## Sheet 1: sample-info-race ----------------------------------------------
tmp <- apply_by_defs_one(housing_weighted, "totals_by_variable",
race.label, race.defs)
addWorksheet(wb = excelfile, sheetName = "sample-info-race", gridLines = TRUE)
writeData(wb = excelfile, sheet = "sample-info-race",
x = tmp, startCol = 1, startRow = 1)
## Sheet 2: sample-info-span ----------------------------------------------
tmp <- apply_by_defs_one(housing_weighted, "totals_by_variable",
span.label, span.defs)
addWorksheet(wb = excelfile, sheetName = "sample-info-span", gridLines = TRUE)
writeData(wb = excelfile, sheet = "sample-info-span",
x = tmp, startCol = 1, startRow = 1)
# write.csv(tmp, "sample-info-span.csv")
## Sheet 3: sample-info-raceeth -------------------------------------------
tmp <- apply_by_defs_one(housing_weighted, "totals_by_variable",
raceeth.label, raceeth.defs)
addWorksheet(wb = excelfile, sheetName = "sample-info-raceeth", gridLines = TRUE)
writeData(wb = excelfile, sheet = "sample-info-raceeth",
x = tmp, startCol = 1, startRow = 1)
# write.csv(tmp, "sample-info-raceeth.csv")
# -----
# DELETE THIS
# write.csv(tmp, "sample-info-race.csv")
# race_household <- totals_by_variable(housing_weighted, sym("HOUSEHOLDRACE"))
# race_HH <- totals_by_variable(housing_weighted, HHRACE3)
# tmp <- list(race_household, race_HH)
# colnames(race_household)[1] <- colnames(race_HH)[1] <- "Household race"
# do.call("rbind", tmp)
# rbind(tmp)
# totals_by_variable <- function(df, group_var) {
# df %>% group_by_(.dots = lazyeval::lazy(group_var)) %>% summarize(n = survey_total()) -> tmp
# return (tmp)
# }
# Sheet 4: Analysis --------------------------------------------------------
# Builds the column groups for the analysis worksheets: overall proportions
# plus two-way tables of each outcome by household race and by span, run
# across both variable definitions via apply_by_defs_*.
# Columns:
## Racial proportions ------------------------------------------------------
race_prop <- apply_by_defs_one(housing_weighted, "prop_by_variable",
race.label, race.defs)
span_prop <- apply_by_defs_one(housing_weighted, "prop_by_variable",
span.label, span.defs)
## Geography (Census division) ---------------------------------------------
division.byrace <- apply_by_defs_two(housing_weighted, "twoway_prop",
race.label, race.defs, "DIVISION")
division.byspan <- apply_by_defs_two(housing_weighted, "twoway_prop",
span.label, span.defs, "DIVISION")
## Median income -----------------------------------------------------------
inc.byrace <- apply_by_defs_two(housing_weighted, "twoway_median",
race.label, race.defs, "HINCP")
inc.byspan <- apply_by_defs_two(housing_weighted, "twoway_median",
span.label, span.defs, "HINCP")
## Income as % of poverty level --------------------------------------------
pov.byrace <- apply_by_defs_two(housing_weighted, "twoway_median",
race.label, race.defs, "PERPOVLVL")
pov.byspan <- apply_by_defs_two(housing_weighted, "twoway_median",
span.label, span.defs, "PERPOVLVL")
## Tenure ------------------------------------------------------------------
tenure.byrace <- apply_by_defs_two(housing_weighted, "twoway_prop",
race.label, race.defs, "TENURE")
tenure.byspan <- apply_by_defs_two(housing_weighted, "twoway_prop",
span.label, span.defs, "TENURE")
## Housing assistance ------------------------------------------------------
### HUDSUB
hudsub.byrace <- apply_by_defs_two(housing_weighted, "twoway_prop",
race.label, race.defs, "HUDSUB")
hudsub.byspan <- apply_by_defs_two(housing_weighted, "twoway_prop",
span.label, span.defs, "HUDSUB")
### RENTCNTRL
rentcntrl.byrace <- apply_by_defs_two(housing_weighted, "twoway_prop",
race.label, race.defs, "RENTCNTRL")
rentcntrl.byspan <- apply_by_defs_two(housing_weighted, "twoway_prop",
span.label, span.defs, "RENTCNTRL")
## Housing delinquency -----------------------------------------------------
### DBMISSRENT
# NOTE(review): missrent.byrace / missrent.byspan are reassigned later in
# the miss-rent section; the worksheet written below uses these versions.
missrent.byrace <- apply_by_defs_two(housing_weighted, "twoway_prop",
race.label, race.defs, "DBMISSRENT")
missrent.byspan <- apply_by_defs_two(housing_weighted, "twoway_prop",
span.label, span.defs, "DBMISSRENT")
# Writing tables into Excel -----------------------------------------------
# cbind drops each table's first (label) column with [,-1] because all
# tables share the same row order produced by apply_by_defs_*.
## by race
all_race_stats <- cbind(race_prop, inc.byrace[,-1], pov.byrace[,-1], tenure.byrace[,-1],
hudsub.byrace[,-1], rentcntrl.byrace[,-1], missrent.byrace[,-1],
division.byrace[,-1])
addWorksheet(wb = excelfile, sheetName = "analysis-byrace", gridLines = TRUE)
writeData(wb = excelfile, sheet = "analysis-byrace", x = all_race_stats,
startCol = 1, startRow = 1)
# write.csv(all_race_stats, "analysis-byrace.csv")
## by span eth
all_span_stats <- cbind(span_prop, inc.byspan[,-1], pov.byspan[,-1], tenure.byspan[,-1],
hudsub.byspan[,-1], rentcntrl.byspan[,-1], missrent.byspan[,-1],
division.byspan[,-1])
addWorksheet(wb = excelfile, sheetName = "analysis-byspan", gridLines = TRUE)
writeData(wb = excelfile, sheet = "analysis-byspan", x = all_span_stats,
startCol = 1, startRow = 1)
# write.csv(all_span_stats, "analysis-byspan.csv")
## write into Excel sheet
# Persists every worksheet added to `excelfile` above.
openxlsx::saveWorkbook(excelfile, "csv files/exploratory-analysis.xlsx", overwrite = TRUE)
# Federal housing assistance discrimination -------------------------------
# create qualifying criteria variable
# function that creates tables but without _se
## filter(HOUSEHOLDRACE & CRITERIA) %>% group_by()
# Criteria strings are evaluated inside twoway_prop_criteria() via
# eval(parse(...)); each keeps only households flagged as under the given
# HUD income limit (80% / 50% / 30%).
criteria_80 <- "HUDINCLIM_80_FLAG == '1' & !(is.na(HUDINCLIM_80_FLAG))"
criteria_50 <- "HUDINCLIM_50_FLAG == '1' & !(is.na(HUDINCLIM_50_FLAG))"
criteria_30 <- "HUDINCLIM_30_FLAG == '1' & !(is.na(HUDINCLIM_30_FLAG))"
# Federal income limit proportions
## 80% income limit
prop.inclim80.byrace <- apply_by_defs_two(housing_weighted, "twoway_prop",
race.label, race.defs, "HUDINCLIM_80_FLAG")
prop.inclim80.byspan <- apply_by_defs_two(housing_weighted, "twoway_prop",
span.label, span.defs, "HUDINCLIM_80_FLAG")
# NOTE(review): renaming columns 2:3 to c(">80%", "<80%") assumes the flag
# levels come back in the order 0 then 1 — confirm against the data.
colnames(prop.inclim80.byrace)[2:3] <-
colnames(prop.inclim80.byspan)[2:3] <- c(">80%", "<80%")
## 50% income limit
prop.inclim50.byrace <- apply_by_defs_two(housing_weighted, "twoway_prop",
race.label, race.defs, "HUDINCLIM_50_FLAG")
prop.inclim50.byspan <- apply_by_defs_two(housing_weighted, "twoway_prop",
span.label, span.defs, "HUDINCLIM_50_FLAG")
colnames(prop.inclim50.byrace)[2:3] <-
colnames(prop.inclim50.byspan)[2:3] <- c(">50%", "<50%")
## 30% income limit
prop.inclim30.byrace <-apply_by_defs_two(housing_weighted, "twoway_prop",
race.label, race.defs, "HUDINCLIM_30_FLAG")
prop.inclim30.byspan <-apply_by_defs_two(housing_weighted, "twoway_prop",
span.label, span.defs, "HUDINCLIM_30_FLAG")
colnames(prop.inclim30.byrace)[2:3] <-
colnames(prop.inclim30.byspan)[2:3] <- c(">30%", "<30%")
# Combine the label column plus the two proportion columns from each income
# limit into a single summary table per demographic.
info.byrace <- cbind(prop.inclim80.byrace[,1:3],
prop.inclim50.byrace[,2:3],
prop.inclim30.byrace[,2:3])
info.byspan <- cbind(prop.inclim80.byspan[,1:3],
prop.inclim50.byspan[,2:3],
prop.inclim30.byspan[,2:3])
# Write to Excel sheet
addWorksheet(wb = excelfile_hud, sheetName = "sample-info-byrace", gridLines = TRUE)
writeData(wb = excelfile_hud, sheet = "sample-info-byrace",
x = info.byrace, startCol = 1, startRow = 1)
addWorksheet(wb = excelfile_hud, sheetName = "sample-info-byspan", gridLines = TRUE)
writeData(wb = excelfile_hud, sheet = "sample-info-byspan",
x = info.byspan, startCol = 1, startRow = 1)
# Analysis by income limits ------------------------------------------------
# HUDSUB proportions among income-limit-qualifying households, computed
# separately under the 80% / 50% / 30% HUD income limit criteria; the
# value columns get a "_80"/"_50"/"_30" suffix before being combined.
## 80% income limit
huddiscrim.byrace.80 <- apply_by_defs_two_criteria(housing_weighted, "twoway_prop_criteria",
race.label, race.defs, "HUDSUB", criteria_80)
huddiscrim.byspan.80 <- apply_by_defs_two_criteria(housing_weighted, "twoway_prop_criteria",
span.label, span.defs, "HUDSUB", criteria_80)
colnames(huddiscrim.byrace.80)[2:4] <- colnames(huddiscrim.byspan.80)[2:4] <-
paste(colnames(huddiscrim.byrace.80)[2:4], "_80", sep = "")
## 50% income limit
huddiscrim.byrace.50 <- apply_by_defs_two_criteria(housing_weighted, "twoway_prop_criteria",
race.label, race.defs, "HUDSUB", criteria_50)
huddiscrim.byspan.50 <- apply_by_defs_two_criteria(housing_weighted, "twoway_prop_criteria",
span.label, span.defs, "HUDSUB", criteria_50)
colnames(huddiscrim.byrace.50)[2:4] <- colnames(huddiscrim.byspan.50)[2:4] <-
paste(colnames(huddiscrim.byrace.50)[2:4], "_50", sep = "")
## 30% income limit
huddiscrim.byrace.30 <- apply_by_defs_two_criteria(housing_weighted, "twoway_prop_criteria",
race.label, race.defs, "HUDSUB", criteria_30)
huddiscrim.byspan.30 <- apply_by_defs_two_criteria(housing_weighted, "twoway_prop_criteria",
span.label, span.defs, "HUDSUB", criteria_30)
colnames(huddiscrim.byrace.30)[2:4] <- colnames(huddiscrim.byspan.30)[2:4] <-
paste(colnames(huddiscrim.byrace.30)[2:4], "_30", sep = "")
# Write to Excel sheet
huddiscrim.byrace <- cbind(huddiscrim.byrace.80, huddiscrim.byrace.50[,-1],
huddiscrim.byrace.30[,-1])
huddiscrim.byspan <- cbind(huddiscrim.byspan.80, huddiscrim.byspan.50[,-1],
huddiscrim.byspan.30[,-1])
addWorksheet(wb = excelfile_hud, sheetName = "huddiscrim-byrace", gridLines = TRUE)
writeData(wb = excelfile_hud, sheet = "huddiscrim-byrace",
x = huddiscrim.byrace, startCol = 1, startRow = 1)
addWorksheet(wb = excelfile_hud, sheetName = "huddiscrim-byspan", gridLines = TRUE)
writeData(wb = excelfile_hud, sheet = "huddiscrim-byspan",
x = huddiscrim.byspan, startCol = 1, startRow = 1)
# Analysis by miss rent ---------------------------------------------------
# HUDSUB proportions among households that missed rent AND fall under each
# HUD income limit (80% / 50% / 30%).
criteria_MR_80 <- "DBMISSRENT == 'Missed rent' & HUDINCLIM_80_FLAG == '1' & !(is.na(HUDINCLIM_80_FLAG))"
criteria_MR_50 <- "DBMISSRENT == 'Missed rent' & HUDINCLIM_50_FLAG == '1' & !(is.na(HUDINCLIM_50_FLAG))"
criteria_MR_30 <- "DBMISSRENT == 'Missed rent' & HUDINCLIM_30_FLAG == '1' & !(is.na(HUDINCLIM_30_FLAG))"
## 80% income limit
mr.byrace.80 <- apply_by_defs_two_criteria(housing_weighted, "twoway_prop_criteria",
                                           race.label, race.defs, "HUDSUB", criteria_MR_80)
mr.byspan.80 <- apply_by_defs_two_criteria(housing_weighted, "twoway_prop_criteria",
                                           span.label, span.defs, "HUDSUB", criteria_MR_80)
colnames(mr.byrace.80)[2:4] <- colnames(mr.byspan.80)[2:4] <-
  paste(colnames(mr.byrace.80)[2:4], "_80", sep = "")
## 50% income limit
# BUG FIX: previously passed criteria_50 (income limit only), which silently
# dropped the missed-rent condition; now uses criteria_MR_50 as intended.
mr.byrace.50 <- apply_by_defs_two_criteria(housing_weighted, "twoway_prop_criteria",
                                           race.label, race.defs, "HUDSUB", criteria_MR_50)
mr.byspan.50 <- apply_by_defs_two_criteria(housing_weighted, "twoway_prop_criteria",
                                           span.label, span.defs, "HUDSUB", criteria_MR_50)
colnames(mr.byrace.50)[2:4] <- colnames(mr.byspan.50)[2:4] <-
  paste(colnames(mr.byrace.50)[2:4], "_50", sep = "")
## 30% income limit
# BUG FIX: likewise uses criteria_MR_30 instead of criteria_30.
mr.byrace.30 <- apply_by_defs_two_criteria(housing_weighted, "twoway_prop_criteria",
                                           race.label, race.defs, "HUDSUB", criteria_MR_30)
mr.byspan.30 <- apply_by_defs_two_criteria(housing_weighted, "twoway_prop_criteria",
                                           span.label, span.defs, "HUDSUB", criteria_MR_30)
colnames(mr.byrace.30)[2:4] <- colnames(mr.byspan.30)[2:4] <-
  paste(colnames(mr.byrace.30)[2:4], "_30", sep = "")
# Write to Excel sheet
# NOTE(review): these assignments overwrite the earlier missrent.byrace /
# missrent.byspan tables built in the Analysis section; the analysis
# worksheets were already written before this point, so no data is lost,
# but consider distinct names to avoid confusion.
missrent.byrace <- cbind(mr.byrace.80, mr.byrace.50[,-1], mr.byrace.30[,-1])
missrent.byspan <- cbind(mr.byspan.80, mr.byspan.50[,-1], mr.byspan.30[,-1])
addWorksheet(wb = excelfile_hud, sheetName = "missrent-byrace", gridLines = TRUE)
writeData(wb = excelfile_hud, sheet = "missrent-byrace",
x = missrent.byrace, startCol = 1, startRow = 1)
addWorksheet(wb = excelfile_hud, sheetName = "missrent-byspan", gridLines = TRUE)
writeData(wb = excelfile_hud, sheet = "missrent-byspan",
x = missrent.byspan, startCol = 1, startRow = 1)
## write into Excel
openxlsx::saveWorkbook(excelfile_hud, "csv files/hud-analysis.xlsx", overwrite = TRUE)
# NOTE(review): the lines below look like interactive scratch work — at this
# point `tmp` holds the last sample-info table, which has no DIVISION, NAME
# or inclim_30 columns; presumably they reference a different `tmp` from an
# interactive session. Verify or remove.
summary(tmp$DIVISION)
tmp$NAME <- as.factor(tmp$NAME)
summary(tmp$NAME)
summary(tmp$inclim_30)
# match and fill in table
# have HUDINCLIM_30 and HUDINCLIM_80 flags
# same cross references
|
5f63f64b6618330e6c629c22664a215f7baaa420
|
ef6622052965084d42588ee7b9c75d029e54392f
|
/man/soccerPitchBG.Rd
|
a019d72caece9685be25dbe4cacee596b6db0102
|
[] |
no_license
|
cRistiancec/soccermatics
|
a1f10a8602b003feef44f62eba9d423e26b99b12
|
184e2072c45359c374d9aac9580263e7237aa4b9
|
refs/heads/master
| 2020-04-04T22:18:31.005372
| 2018-11-04T18:14:06
| 2018-11-04T18:14:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,338
|
rd
|
soccerPitchBG.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/soccerPitchBG.R
\name{soccerPitchBG}
\alias{soccerPitchBG}
\title{Plot a soccer pitch ggplot object}
\usage{
soccerPitchBG(lengthPitch = 105, widthPitch = 68, fillPitch = "white",
colPitch = "grey60", grass = FALSE, arrow = c("none", "r", "l"),
arrow_col = "default", lwd = 0.5, title = NULL, subtitle = NULL)
}
\arguments{
\item{lengthPitch, widthPitch}{length and width of pitch in metres}
\item{fillPitch, colPitch}{pitch fill and line colour}
\item{grass}{if \code{TRUE}, uses a more realistic pitch}
\item{arrow}{optional, adds arrow showing team attack direction as right (\code{'r'}) or left (\code{'l'})}
\item{arrow_col}{colour of attack direction arrow}
\item{lwd}{numeric, pitch line width}
\item{title, subtitle}{optional, adds title and subtitle to plot}
}
\value{
a ggplot object
}
\description{
Draws a soccer pitch as a ggplot object for the purpose of adding layers such as player positions, player trajectories, etc.
}
\examples{
# get x,y-coords of player #9 during first 10 minutes
data(tromso)
dd <- subset(tromso, id == 9)[1:1200,]
# draw player path on pitch
soccerPitchBG(grass = TRUE) +
geom_path(data = dd, aes(x, y))
}
\seealso{
\code{\link{soccerPitchFG}} for adding soccer pitch lines to an existing ggplot object
}
|
88be5e054424989755756c8d1874f46303f66482
|
08481da2b6d3690aa157a161f9df284c802a5177
|
/R/create_pamdata.R
|
f2ad33cbf629b4bd6e206f442f6c7d598eb39721
|
[
"MIT"
] |
permissive
|
brgordon17/coralclass
|
605dfedaaaf48dfd4ad589b6aaf3c7d0bfc44603
|
18de22b48a3bf0cff99c2c82bb206d92d5a53058
|
refs/heads/master
| 2020-06-24T11:01:51.980043
| 2020-06-15T11:02:49
| 2020-06-15T11:02:49
| 198,944,888
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,318
|
r
|
create_pamdata.R
|
#' Create pamdata.
#'
#' \code{create_pamdata()} reproduces the mean FvFm data.
#'
#' This function reproduces the PAM data that was collected in this experiment.
#'
#' @param path The path where the .csv is located
#' @param saverda Logical indicating if a .rda file should be saved to /data
#'
#' @return Returns a dataframe of class tbl_df
#'
#' @author Benjamin R. Gordon
#'
#' @seealso
#' \code{\link[tibble]{tibble}}
#'
#' @export
#'
create_pamdata <- function(path = "./data-raw/pamdata.csv",
                           saverda = TRUE) {
  # Load the raw PAM measurements; zeros in the file are treated as missing.
  raw <- readr::read_csv(path, na = "0")
  # Keep only the variables of interest and fix the treatment-class order.
  pamdata <- dplyr::select(raw, day, class, FvFm)
  class_levels <- c("control", "eCO2", "eT", "eCO2eT")
  pamdata$class <- factor(pamdata$class, levels = class_levels)
  # Optionally persist the tidy data set for the package (object must be
  # named `pamdata` so the .rda loads under that name).
  if (saverda) {
    save(pamdata, file = "./data/pamdata.rda", compress = "bzip2")
  }
  pamdata
}
## Data documentation ----------------------------------------------------------
#' Mean daily PAM data
#'
#' A dataset containing the mean PAM measurement for each class on the days recorded
#'
#' @format A tibble with 28 rows and 3 variables:
#' \describe{
#' \item{day}{day of measurement}
#' \item{FvFm}{Photosynthetic yield}
#' \item{class}{treatment class label}
#' }
#' @source Benjamin R. Gordon
"pamdata"
|
638f5598b70568894e9e55ee7e536a3e4ab0a2f9
|
1ed87c596958af5205fe6efe481d97f456e1fae6
|
/rExamples/customization/charis.R
|
863e8dd909016cc7ee0b41e28bf95390c90b6d0c
|
[] |
no_license
|
aaronxhill/dataviz14f
|
1530a3d16803c3e49d0f940dde687da6ebe3b6f5
|
290187d53b1e88bcf255c23dd2ba8e3af7294ea2
|
refs/heads/master
| 2020-03-30T19:02:34.451718
| 2014-11-15T00:33:58
| 2014-11-15T00:33:58
| 23,426,895
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,674
|
r
|
charis.R
|
library(ggplot2)
library(grid)
# NOTE(review): fpath is defined but never used below — png() writes fname
# to the current working directory instead.
fpath <- "/Users/aaron/classes/dataviz14f/rExamples/customization"
##### Charis #####
fname <- "charis.png"
# original code:
# Faceted scatterplot of sepal (gold) vs petal (blue) dimensions, one panel
# per species, legend suppressed.
ggplot(iris) +
geom_point(aes(x=Sepal.Length, y=Sepal.Width, shape=Species), color = "DarkGoldenrod1", alpha=.4) +
geom_point(aes(x=Petal.Length, y=Petal.Width, shape=Species), color = "SlateBlue2", alpha=.4) +
theme(legend.position="none") +
facet_grid(. ~ Species) +
coord_fixed() +
scale_shape_manual(values=c(15, 16, 17)) +
xlab("Length in cm") + ylab("Width in cm") +
ggtitle("Height to Width Ratios \nin Iris Petals and Sepals Divided by Species")
# Unfaceted version of the plot above (all species in one panel).
# BUG FIX: ggtitle() previously sat on its own line after the plot
# expression had ended, so the title was evaluated and discarded — it is
# now chained onto the plot with `+`.
ggplot(iris) +
  geom_point(aes(x=Sepal.Length, y=Sepal.Width, shape=Species), color = "DarkGoldenrod1", alpha=.4) +
  geom_point(aes(x=Petal.Length, y=Petal.Width, shape=Species), color = "SlateBlue2", alpha=.4) +
  coord_fixed() +
  scale_shape_manual(values=c(15, 16, 17)) +
  theme(legend.position="none") +
  xlab("Length in cm") +
  ylab("Width in cm") +
  ggtitle("Height to Width Ratios in Iris Petals and Sepals")
## suggested modifications
# `a`: faceted version, used as the bottom panel of the composite figure.
a <- ggplot(iris) +
geom_point(aes(x=Sepal.Length, y=Sepal.Width, shape=Species), color = "DarkGoldenrod1", alpha=.4) +
geom_point(aes(x=Petal.Length, y=Petal.Width, shape=Species), color = "SlateBlue2", alpha=.4) +
theme(legend.position="none") +
facet_grid(. ~ Species) +
xlab("Length in cm") +
ylab("Width in cm") +
scale_y_continuous(expand = c(0,0)) +
ggtitle("Height to Width Ratios in Iris Petals and Sepals Divided by Species")
# `b`: unfaceted version, used as the middle panel.
b <- ggplot(iris) +
geom_point(aes(x=Sepal.Length, y=Sepal.Width, shape=Species), color = "DarkGoldenrod1", alpha=.4) +
geom_point(aes(x=Petal.Length, y=Petal.Width, shape=Species), color = "SlateBlue2", alpha=.4) +
theme(legend.position="none") +
xlab("Length in cm") +
ylab("Width in cm") +
scale_y_continuous(expand = c(0,0)) +
ggtitle("Height to Width Ratios in Iris Petals and Sepals")
# Preview the 3x3 grid layout used for the composite figure.
grid.show.layout(grid.layout(3, 3, widths = unit(c(1, 1, 1), c("null", "null", "null")),
heights = c(3, 1, 1), c("lines", "null", "null")))
# Render the composite PNG: title row, plot `b` (rows x cols chosen below),
# plot `a` across the bottom, and a hand-drawn legend in the free space.
png(fname, width=1200, height=1200, res=144)
pushViewport(viewport(layout=grid.layout(3, 3, widths = unit(c(1, 1, 1), c("null", "null", "null")),
heights = c(3, 1, 1), c("lines", "null", "null"))))
# Helper: viewport positioned at row x, column y of the pushed layout.
vplayout <- function(x,y)
viewport(layout.pos.row = x, layout.pos.col = y)
print(b, vp=vplayout(2, 1:2))
print(a, vp=vplayout(3, 1:3))
# Figure title plus manually placed legend text (npc coordinates).
grid.text(label="Trends in Height to Width Ratios in Iris Parts", x=0.5, y=0.95, gp=gpar(fontsize=20, fontface="bold"))
grid.text("Species", x = 0.75, y=0.85, just="left", gp = gpar(fontsize=12, fontface="bold"))
grid.text("Setosa", x = 0.8, y=0.81, just="left", gp = gpar(fontsize=10))
grid.text("Versicolor", x = 0.8, y=0.76, just="left", gp = gpar(fontsize=10))
grid.text("Virginica", x = 0.8, y=0.71, just="left", gp = gpar(fontsize=10))
grid.text("Iris Part", x = 0.75, y=0.65, just="left", gp = gpar(fontsize=12, fontface="bold"))
grid.text("Sepal", x = 0.8, y=0.61, just="left", gp = gpar(fontsize=10))
grid.text("Petal", x = 0.8, y=0.56, just="left", gp = gpar(fontsize=10))
# draw the shapes for the legend
# x and y are the coordinates for drawing a triangle, referenced in "grid.path"
grid.rect(x = 0.78, y=0.81, width=0.01, height=0.01, gp=gpar(fill="black"))
grid.circle(x=0.78, y=0.76, r=0.005, gp=gpar(fill="black"))
x <- c(.775, .780, .785)
y <- c(.705, .715, .705)
grid.path(x, y, gp=gpar(fill="black"))
grid.rect(x = 0.78, y=0.61, width=0.01, height=0.01, gp=gpar(col="white", fill="DarkGoldenrod1"))
grid.rect(x = 0.78, y=0.56, width=0.01, height=0.01, gp=gpar(col="white", fill="SlateBlue2"))
dev.off()
|
99f6dc680c73e7042b37f61dbbe481e7db7323b8
|
67a6f1af8a7e28e3e64f123ce48fff3017364094
|
/man/writeEnrichment.Rd
|
6ca72a127cda5047eaef1e679d019715f248917c
|
[] |
no_license
|
tastanlab/NoRCE
|
3257f0af8da9cbff2152313edc9df9e3b2b4dd1c
|
e8779cec9bdece0e71a8f85389259a8d1714465f
|
refs/heads/master
| 2020-05-28T14:17:11.643250
| 2019-05-26T12:11:24
| 2019-05-26T12:11:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,196
|
rd
|
writeEnrichment.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{writeEnrichment}
\alias{writeEnrichment}
\title{Write the tabular form of the pathway or GO term enrichment results}
\usage{
writeEnrichment(mrnaObject, fileName, sept = "\\t", type = "pAdjust",
n)
}
\arguments{
\item{mrnaObject}{Object of the enrichment result}
\item{fileName}{File name of the txt file}
\item{sept}{File separator, by default, it is tab('\\t')}
\item{type}{Draw the dot plot according to the p-value or adjusted p-value ("pvalue", "pAdjust"). Default value is "pAdjust".}
\item{n}{Number of top GO terms or pathways to write, selected after ordering by \code{type} (smallest p-values first)}
}
\value{
Text file of the enrichment results in a tabular format
}
\description{
Write the tabular form of the pathway or GO term enrichment results
}
\examples{
data(brain_disorder_ncRNA)
ncGO <- geneGOEnricher(gene = brain_disorder_ncRNA, hg='hg19', near=TRUE, genetype = 'Ensembl_gene', pAdjust = "none")
writeEnrichment(mrnaObject=ncGO, fileName = "test.txt")
writeEnrichment(mrnaObject=ncGO, fileName = "test.txt", n=4)
writeEnrichment(mrnaObject=ncGO,fileName = "test.txt", type = "pvalue",n=4)
}
|
f56cf4db666b4555f1d89397df8eed1ffa6f766f
|
12ab6551a0f4088005d556799ae5dea6e1b9c596
|
/simulations/simulation_vre.R
|
dbae066eb4fdcb07ba4efedd4d0f4a215b9d1b2c
|
[] |
no_license
|
jl3859/causal_mlm
|
f0a25c1a7663bd15022876de7aa5bc2b5226df70
|
64b7da1df9efcf1de56e4816c0c900bfac7fceb2
|
refs/heads/master
| 2020-09-19T13:30:22.046712
| 2020-01-06T22:15:18
| 2020-01-06T22:15:18
| 224,231,011
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,006
|
r
|
simulation_vre.R
|
library(lfe)
library(lme4)
library(lmerTest)
# Source data generation process functions
# NOTE(review): re_dat_function() and (presumably) inv.logit come from
# dgp_script.R — confirm both are defined there.
source("dgp_script.R")
# Set iteration number for simulations
iter <- 1000
# Set seed for reproducibility
set.seed(2123)
# RANDOM EFFECTS VIOLATION SIMULATION #################################################################################
## call random effects dgp and store data frame
# 40 classes of 25 students (per re_dat_function's arguments).
re_dat <- re_dat_function(40, 25)
## store SATE and generate data frame for models
# SATE = sample average treatment effect over potential outcomes Y1 - Y0.
SATE_re <- mean(re_dat$Y1) - mean(re_dat$Y0)
model_stu_re <- re_dat %>% select(Y_stud, Z_stud, yearstea, teach.edu, avgtest, minority, parent.edu,
fam.income, freelunch, dist.school.hour, gender, pretest, classid)
#### Initial Run ######################################################################################################
# One fit of each of the three estimators on the base data set; bias is the
# treatment coefficient minus SATE_re.
# Random Effects Violation Linear Regression
# Column 13 (classid) is dropped so lm() ignores class structure entirely.
lr.re <- lm(Y_stud ~., data = model_stu_re[,-13])
# Random Effects Violation - Linear Regression Model Bias
(lr.re.bias <- lr.re$coefficients[2] - SATE_re)
# Random Effects Violation w/ Fixed Effects
# felm() absorbs classid as a fixed effect after the `|`.
fixed.re <- lfe::felm(model_stu_re$Y_stud ~ model_stu_re$Z_stud + model_stu_re$minority + model_stu_re$parent.edu +
model_stu_re$fam.income + model_stu_re$freelunch + model_stu_re$dist.school.hour +
model_stu_re$gender + model_stu_re$pretest | model_stu_re$classid)
#Random Effects Violation - Fixed Effects Model Bias
(fe.re.bias <- fixed.re$coefficients[1] - SATE_re)
#Random Effects Violation w/ Random Effects
# lmer() with a random intercept per classid; this initial fit also includes
# the teacher-level covariates (yearstea, avgtest, teach.edu).
random.re <- lmerTest::lmer(Y_stud ~ Z_stud + minority + parent.edu + fam.income + freelunch + dist.school.hour +
gender + pretest + yearstea + avgtest + teach.edu + (1 | classid), data = model_stu_re)
#Random Effects Violation - Random Effects Model Bias
(re.re.bias <- summary(random.re)$coefficients[2,1] - SATE_re)
#### REV Randomization Distribution ###################################################################################
# Holds the covariates/potential outcomes fixed and re-randomizes treatment
# each iteration; rows are filled in triples (lr, fixed, random), so the
# results frame is preallocated with iter*3 rows.
# initialize results data frame
vre_rd_sim <- data.frame(type = rep(NA, iter*3),
coef = rep(NA, iter*3),
conf_int_low = rep(NA, iter*3),
conf_int_high = rep(NA, iter*3))
# initialize count number for results data frame rows
count <- 1
for(i in 1:iter){
# Randomization Distribution - randomize treatment
base_data <- re_dat
N <- nrow(base_data)
X_stud <- rnorm(N, 0, 1)
# Treatment probability bounded in (0.05, 0.95): scaled logit with log(19).
prob_stud <- inv.logit((X_stud/max(abs(X_stud))) * log(19))
Z_stud <- rbinom(N, 1, prob = prob_stud)
base_data$Z_stud <- Z_stud
# Observed outcome is the potential outcome matching assigned treatment.
base_data$Y_stud <- ifelse(Z_stud == 1, base_data$Y1, base_data$Y0)
base_data_model <- base_data %>% select(Y_stud, Z_stud, yearstea, teach.edu, avgtest, minority, parent.edu,
fam.income, freelunch, dist.school.hour, gender, pretest, classid)
# Linear Regression Sim
j <- count
vre_rd_sim[j,1] <- "lr"
lr_vre_sim <- lm(Y_stud ~., data = base_data_model[,-13])
vre_rd_sim[j,2] <- lr_vre_sim$coefficients[2]
vre_rd_sim[j,3] <- confint(lr_vre_sim, 'Z_stud', level = .95)[1,1]
vre_rd_sim[j,4] <- confint(lr_vre_sim, 'Z_stud', level = .95)[1,2]
# Fixed Effect Sim
j <- count+1
vre_rd_sim[j,1] <- "fixed"
fixed_vre_sim <- lfe::felm(base_data_model$Y_stud ~ base_data_model$Z_stud + base_data_model$minority +
base_data_model$parent.edu + base_data_model$fam.income + base_data_model$freelunch +
base_data_model$dist.school.hour + base_data_model$gender + base_data_model$pretest |
base_data_model$classid)
vre_rd_sim[j,2] <- fixed_vre_sim$coefficients[1]
vre_rd_sim[j,3] <- confint(fixed_vre_sim, 'base_data_model$Z_stud', level = .95)[1,1]
vre_rd_sim[j,4] <- confint(fixed_vre_sim, 'base_data_model$Z_stud', level = .95)[1,2]
# Random Effect Sim
# NOTE(review): unlike the initial run above, this lmer() omits the
# teacher covariates (yearstea, avgtest, teach.edu) — confirm intended.
j <- count+2
vre_rd_sim[j,1] <- "random"
random_vre_sim <- lmerTest::lmer(Y_stud ~ Z_stud + minority + parent.edu + fam.income + freelunch + dist.school.hour +
gender + pretest + (1 | classid), data = base_data_model)
vre_rd_sim[j,2] <- summary(random_vre_sim)$coefficients[2,1]
vre_rd_sim[j,3] <- confint(random_vre_sim, 'Z_stud', level = .95)[1,1]
vre_rd_sim[j,4] <- confint(random_vre_sim, 'Z_stud', level = .95)[1,2]
count <- j+1
print(i)
}
# add random effects violation SATE to results data frame
# SATE is constant across iterations here because covariates and potential
# outcomes are fixed; only treatment assignment varies.
vre_rd_sim$SATE <- rep(SATE_re, nrow(vre_rd_sim))
vre_rd_sim$bias <- vre_rd_sim$coef - vre_rd_sim$SATE
vre_lr_rd_sim <- vre_rd_sim %>% filter(type == "lr")
vre_fixed_rd_sim <- vre_rd_sim %>% filter(type == "fixed")
vre_random_rd_sim <- vre_rd_sim %>% filter(type == "random")
write_csv(vre_lr_rd_sim, "output/vre_lr_rd_sim.csv")
write_csv(vre_fixed_rd_sim, "output/vre_fixed_rd_sim.csv")
write_csv(vre_random_rd_sim, "output/vre_random_rd_sim.csv")
write_csv(vre_rd_sim, "output/vre_full_rd_sim.csv")
#### REV Sampling Distribution ########################################################################################
# Redraws an entirely new data set each iteration (covariates, potential
# outcomes, and treatment all vary), so SATE is recomputed per iteration
# and stored alongside each estimate.
# initialize results data frame
vre_sd_sim <- data.frame(type = rep(NA, iter*3),
coef = rep(NA, iter*3),
conf_int_low = rep(NA, iter*3),
conf_int_high = rep(NA, iter*3),
SATE = rep(NA, iter*3))
# initialize count number for results data frame rows
count <- 1
for(i in 1:iter){
#Sampling Distribution - resample data
base_data <- re_dat_function(40, 25)
SATE_sim <- mean(base_data$Y1) - mean(base_data$Y0)
base_data_model <- base_data %>% select(Y_stud, Z_stud, yearstea, teach.edu, avgtest, minority, parent.edu,
fam.income, freelunch, dist.school.hour, gender, pretest, classid)
#Linear Regression Sim
j <- count
vre_sd_sim[j,1] <- "lr"
lr_vre_sim <- lm(Y_stud ~., data = base_data_model[,-13])
vre_sd_sim[j,2] <- lr_vre_sim$coefficients[2]
vre_sd_sim[j,3] <- confint(lr_vre_sim, 'Z_stud', level = .95)[1,1]
vre_sd_sim[j,4] <- confint(lr_vre_sim, 'Z_stud', level = .95)[1,2]
vre_sd_sim[j,5] <- SATE_sim
#Fixed Effect Sim
j <- count+1
vre_sd_sim[j,1] <- "fixed"
fixed_vre_sim <- lfe::felm(base_data_model$Y_stud ~ base_data_model$Z_stud + base_data_model$minority +
base_data_model$parent.edu + base_data_model$fam.income + base_data_model$freelunch +
base_data_model$dist.school.hour + base_data_model$gender + base_data_model$pretest |
base_data_model$classid)
vre_sd_sim[j,2] <- fixed_vre_sim$coefficients[1]
vre_sd_sim[j,3] <- confint(fixed_vre_sim, 'base_data_model$Z_stud', level = .95)[1,1]
vre_sd_sim[j,4] <- confint(fixed_vre_sim, 'base_data_model$Z_stud', level = .95)[1,2]
vre_sd_sim[j,5] <- SATE_sim
#Random Effect Sim
# NOTE(review): teacher covariates are omitted here as well — see the
# randomization-distribution loop.
j <- count+2
vre_sd_sim[j,1] <- "random"
random_vre_sim <- lmerTest::lmer(Y_stud ~ Z_stud + minority + parent.edu + fam.income + freelunch + dist.school.hour +
gender + pretest + (1 | classid), data = base_data_model)
vre_sd_sim[j,2] <- summary(random_vre_sim)$coefficients[2,1]
vre_sd_sim[j,3] <- confint(random_vre_sim, 'Z_stud', level = .95)[1,1]
vre_sd_sim[j,4] <- confint(random_vre_sim, 'Z_stud', level = .95)[1,2]
vre_sd_sim[j,5] <- SATE_sim
count <- j+1
print(i)
}
# Per-row bias against the iteration-specific SATE, then split by estimator
# and persist.
vre_sd_sim$bias <- vre_sd_sim$coef - vre_sd_sim$SATE
# Create separate dataframes for the different models
vre_lr_sd_sim <- vre_sd_sim %>% filter(type == "lr")
vre_fixed_sd_sim <- vre_sd_sim %>% filter(type == "fixed")
vre_random_sd_sim <- vre_sd_sim %>% filter(type == "random")
# BUG FIX: output filenames previously read "vre_lr_d_sim.csv" and
# "vre_random_d_sim.csv", inconsistent with the "_sd_sim" naming used for
# the fixed-effects and full outputs; corrected to the _sd_sim convention.
write_csv(vre_lr_sd_sim, "output/vre_lr_sd_sim.csv")
write_csv(vre_fixed_sd_sim, "output/vre_fixed_sd_sim.csv")
write_csv(vre_random_sd_sim, "output/vre_random_sd_sim.csv")
write_csv(vre_sd_sim, "output/vre_full_sd_sim.csv")
|
f8cb3fb1448cfafda15fba8719025791fe95ab15
|
91f62e042ef580971bf2d17f8817ac12bda51df5
|
/feb 3 lab2-first part.R
|
76f95f6747a3642dcb46064a37fa747986abfcc2
|
[] |
no_license
|
Yuewangluisa/DataAnalyticsSpring2020
|
ab5a08e3ce194203df292477bfc842a73fe61bd4
|
06e817d3497074e86088ef4d401f515f64ea1e3b
|
refs/heads/master
| 2020-12-20T01:54:12.194770
| 2020-04-21T20:41:10
| 2020-04-21T20:41:10
| 235,923,843
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,689
|
r
|
feb 3 lab2-first part.R
|
# Interactive lab script: simple and multiple regression on a user-chosen
# CSV, then three equivalent dplyr workflows on mtcars.
# NOTE(review): attach() is generally discouraged (masks the search path);
# column references below (Income, Immigrant, ...) rely on it.
multivariate <-read.csv(file.choose(),header=TRUE)
attach(multivariate)
names(multivariate)
multivariate
plot(Income,Immigrant,main='Scatterplot')
plot(Immigrant,Homeowners)
help(lm)
# Simple regression: Homeowners on Immigrant, with fitted line overlaid.
mm<-lm(Homeowners~Immigrant)
mm
plot(Immigrant,Homeowners)
abline(mm)
abline(mm,col=2,lwd=3)
summary(mm)
attributes(mm)
mm$coefficients
# Derived predictors: homeownership rate and population density.
HP<- Homeowners/Population
PD<-Population/area
# Multiple regression (reuses the name mm, clobbering the simple model).
mm<-lm(Immigrant~Income+Population+HP+PD)
summary(mm)
cm<-coef(mm)
cm
library(dplyr)
#method 1
# Nested function calls: filter -> sample -> arrange, innermost first.
df_mtcars <- mtcars
head(df_mtcars)
filter(df_mtcars, mpg > 20)
sample_n(filter(df_mtcars, mpg > 20), 10)
arrange( sample_n(filter(df_mtcars, mpg >20), 10) ,desc(mpg))
results_mpg <- arrange( sample_n(filter(df_mtcars, mpg >20), 10) ,desc(mpg))
results_mpg
#method 2
# Intermediate variables (note: a1 is computed but unused; a2 samples from
# the unfiltered data, so methods 2 and 1 are not equivalent).
a1<-filter(df_mtcars, mpg > 20)
a2<-sample_n(df_mtcars,5)
results_mpg_des<-arrange(a2,desc(mpg))
results_mpg_des
#method 3
# magrittr pipe chaining the same three steps.
df_mtcars%>%filter(mpg>20)%>%sample_n(10)%>%arrange(desc(mpg))
results<-df_mtcars%>%filter(mpg>20)%>%sample_n(10)%>%arrange(desc(mpg))
results
# Bar graphs with ggplot2 / gcookbook (R Graphics Cookbook exercises).
# NOTE(review): install.packages() runs on every execution of the
# script; normally this is done once interactively, not in the script.
install.packages('gcookbook')
library(gcookbook)
library(ggplot2)
ggplot(pg_mean, aes(x=group, y=weight)) + geom_bar(stat = "identity")
BOD
# Time is numeric (continuous)
str(BOD)
ggplot(BOD, aes(x=Time, y=demand)) + geom_bar(stat = "identity")
# Convert Time to a discrete (categorical) variable with factor() function.
ggplot(BOD, aes(x=factor(Time), y=demand)) + geom_bar(stat = "identity")
# change the color of the bars and add an outline to the bars
# NOTE: In ggplot2, the default is to use the British spelling, colour, instead of
# the American spelling, color.
ggplot(pg_mean, aes(x=group, y=weight)) +geom_bar(stat = "identity", fill="lightblue", colour = "red")
ggplot(BOD, aes(x=factor(Time), y=demand)) +geom_bar(stat = "identity", fill="orange", colour = "red")
# Grouping Bars Together
# You want to group bars together by a second variable.
# In this example we’ll use the cabbage_exp data set, which has two categorical variables,
# Cultivar and Date, and one continuous variable, Weight:
library(gcookbook) # For the data set
library(ggplot2)
cabbage_exp
# We’ll map Date to the x position and map Cultivar to the fill color
# ggplot(cabbage_exp, aes(x=Date, y=Weight, fill=Cultivar)) + geom_bar(position = "dodge")
ggplot(cabbage_exp, aes(x=Date, fill=Cultivar)) + geom_bar(position = "dodge")
library(gcookbook) # For the data set
ggplot(cabbage_exp, aes(x=Date, y=Weight, fill=Cultivar)) +
geom_bar(stat="identity")
# Making Bar Graph of Counts
ggplot(diamonds, aes(x=cut)) +geom_bar() # this is equivalent to using geom_bar(stat="bin")
# The diamonds data set has 53,940 rows, each of which represents information about one
data("diamonds")
diamonds
# In this example, the variable on the x-axis is discrete. If we use a continuous variable on
# the x-axis, we’ll get a histogram
ggplot(diamonds,aes(x=carat)) + geom_bar()
# It turns out that in this case, the result is the same as if we had used geom_histogram() instead of geom_bar()
ggplot(diamonds, aes(x=carat)) + geom_histogram()
# Using Colors in Bar Graphs. Now we want to use different colors for the bars in our bar graph
# We can do this by using the "fill" aesthetic.
# We’ll use the uspopchange data set for this example. It contains the percentage change
# in population for the US states from 2000 to 2010. We’ll take the top 10 fastest-growing
# states and graph their percentage change.
# We’ll also color the bars by region (Northeast,South, North Central, or West)
# Taking Top 10 States
library(gcookbook) # for the dataset
ups <- subset(uspopchange, rank(Change)>40)
ups
# Now we can make the graph, mapping Region to fill
ggplot(ups, aes(x=Abb, y= Change, fill=Region)) + geom_bar(stat = "identity")
# Do an Experiment with followings ... :=)
ggplot(ups, aes(x=Abb, y=Change, fill=Region)) +geom_bin2d()
ggplot(ups, aes(x=Abb, y=Change, fill=Region)) + geom_col()
# The default colors aren’t very appealing, so you may want to set them, using
# scale_fill_brewer() or scale_fill_manual().
ggplot(ups, aes(x=reorder(Abb,Change), y=Change, fill=Region)) + geom_bar(stat = "identity", colour= "red") +
scale_fill_manual(values=c("#669933", "#FFCC66")) + xlab("US-States")
ggplot(ups, aes(x=reorder(Abb,Change), y=Change, fill=Region)) + geom_bar(stat = "identity", color = "purple") +
scale_fill_manual(values=c("#224455","#DDCC33"))
# Coloring Negative and Positive Bars Differently
# You want to use different colors for negative and positive-valued bars.
library(gcookbook)
# FIX: the filter must compare the `Source` column with `==`; the
# original `subset(climate, source="Berkeley" & Year >= 1900)` passed a
# single named argument (never evaluated), so NO rows were filtered out.
csub <- subset(climate, Source == "Berkeley" & Year >= 1900)
csub
# flag non-negative anomalies; used to colour the bars below
csub$pos <- csub$Anomaly10y >=0
csub
ggplot(csub, aes(x=Year, y=Anomaly10y, fill= pos)) + geom_bar(stat = "identity", position = "identity")
# changing the color with scale_fill_manual
# (guide=FALSE is deprecated in ggplot2 >= 3.3.4 in favour of
# guide="none"; kept as-is for compatibility with this lab's version)
ggplot(csub, aes(x=Year, y=Anomaly10y, fill=pos)) + geom_bar(stat="identity", colour="black", size=0.25) +
  scale_fill_manual(values=c("#CCEEFF", "#FFDDDD"), guide=FALSE)
# Adjusting Bar Width and Spacing
# You want to adjust the width of bars and the spacing between them.
# To make the bars narrower or wider, set width in geom_bar(). The default value is 0.9;
# larger values make the bars wider, and smaller values make the bars narrower
library(gcookbook) # for the dataset
ggplot(pg_mean, aes(x=group, y=weight)) +geom_bar(stat="identity")
# Narrow Bars
ggplot(pg_mean, aes(x=group, y=weight)) +geom_bar(stat="identity", width = 0.5)
# Wider bars, maximum width = 1
ggplot(pg_mean, aes(x=group, y=weight)) +geom_bar(stat = "identity", width = 0.95)
# Different bar widths
ggplot(cabbage_exp, aes(x=Date, y= Weight, fill=Cultivar)) + geom_bar(stat = "identity", width = 0.5, position = "dodge")
ggplot(cabbage_exp, aes(x=Date, y=Weight, fill=Cultivar)) + geom_bar(stat = "identity", width = 0.5, position = position_dodge(0.7))
# Making a Stacked Bar Graph
library(gcookbook) # for the dataset
ggplot(cabbage_exp, aes(x=Date, y=Weight, fill=Cultivar)) + geom_bar(stat = "identity")
cabbage_exp
# reverse the legend order so it matches the stacking order
ggplot(cabbage_exp, aes(x= Date, y= Weight, fill=Cultivar)) + geom_bar(stat = "identity") + guides(fill=guide_legend(reverse = TRUE))
# Adding Labels to your Graphs
library(gcookbook) # For the data set
ggplot(cabbage_exp, aes(x=interaction(Date,Cultivar), y=Weight)) +geom_bar(stat = "identity") + geom_text(aes(label=Weight),vjust=1.5,colour="white")
library(ggplot2)
# Adjust y limits to be a little higher
ggplot(cabbage_exp, aes(x=interaction(Date, Cultivar), y=Weight)) +
geom_bar(stat="identity") +
geom_text(aes(label=Weight), vjust=-0.2) +
ylim(0, max(cabbage_exp$Weight) * 1.05)
# Map y positions slightly above bar top - y range of plot will auto-adjust
ggplot(cabbage_exp, aes(x=interaction(Date, Cultivar), y=Weight)) +
geom_bar(stat="identity") +
geom_text(aes(y=Weight+0.1, label=Weight))
# labels inside dodged (grouped) bars
ggplot(cabbage_exp, aes(x=Date, y=Weight, fill=Cultivar)) +
geom_bar(stat="identity", position="dodge") +
geom_text(aes(label=Weight), vjust=1.5, colour="white",
position=position_dodge(.9), size=3)
# make a Cleveland dot plot
# The simplest way to create a dot plot is to use the geom_point() function
library(gcookbook) # For the data set
tophit <- tophitters2001[1:25,] # take top 25 top hitters
tophit
ggplot(tophit, aes(x=avg, y=name)) + geom_point()
tophit[,c("name","lg","avg")]
# sort players by batting average on the y axis
ggplot(tophit, aes(x=avg, y= reorder(name,avg))) + geom_point(size=3, colour="red") +
  theme_bw() +
  theme(panel.grid.major.x = element_blank(),
        panel.grid.minor.x = element_blank(),
        panel.grid.major.y = element_line(colour ="grey60",linetype="dashed"))
ggplot(tophit, aes(x=avg, y=reorder(name,avg))) + geom_point(size=2.5, colour="blue") +
  theme_classic() +
  theme(panel.grid.major.x = element_blank(),
        panel.grid.minor.x = element_blank(),
        # FIX: linetype must be a quoted string; the bare name `twodash`
        # raised "object 'twodash' not found" when this plot was drawn
        panel.grid.major.y = element_line(colour = "grey60", linetype = "twodash"))
# Get the names sorted by lg, then by avg
nameorder <- tophit$name[order(tophit$lg, tophit$avg)]
# Turn the name into factor, with levels in the order of nameorder
tophit$name <- factor(tophit$name, levels = nameorder)
ggplot(tophit, aes(x=avg, y=name)) +
  geom_segment(aes(yend=name), xend=0, colour="grey70")+
  geom_point(size=3, aes(colour=lg)) +
  scale_color_brewer(palette="Set1", limits=c("NL","AL")) +
  theme_bw() +
  theme(panel.grid.major.y = element_blank(),
        legend.position = c(1,0.55),
        legend.justification = c(1,0.5))
# facet by league instead of using a legend
# (guide=FALSE is deprecated in ggplot2 >= 3.3.4 in favour of
# guide="none"; kept for compatibility with this lab's version)
ggplot(tophit, aes(x=avg, y=name)) +
  geom_segment(aes(yend=name), xend=0, colour="grey40") +
  geom_point(size=3, aes(colour=lg)) +
  scale_color_brewer(palette="Set1", limits=c("NL","AL"), guide=FALSE) +
  theme_bw() +
  theme(panel.grid.major.y = element_blank()) +
  facet_grid(lg ~ ., scales = "free_y", space="free_y")
|
26658946f1b91d4c58716d656934aedf627966f7
|
95a2abbc422cf8d569c2e4d8098df052e053ebd6
|
/R/occurrence_images.R
|
4e9b827d4a69421488272e329ae26ca375818cc7
|
[] |
no_license
|
AtlasOfLivingAustralia/ALA4R
|
215b3cfd7fefc5af360f15ad8d8573b7c993da27
|
d048d7ecb22a16932d9474278957ba45a8763e0d
|
refs/heads/master
| 2021-09-29T18:19:06.842268
| 2021-09-13T05:35:10
| 2021-09-13T05:35:10
| 24,315,261
| 39
| 13
| null | 2021-03-30T01:58:19
| 2014-09-22T05:20:49
|
R
|
UTF-8
|
R
| false
| false
| 3,676
|
r
|
occurrence_images.R
|
#' Find images using occurrence ids
#'
#' @references \itemize{
#' \item Associated ALA web service for image search counts:
#' \url{https://images.ala.org.au/ws#/Search/search}
#' }
#' @param occ_id character: IDs of occurrences as single sring or vector of
#' strings
#' @param fq string: (optional) character string or vector of strings,
#' specifying filters to be applied to the original query. These are of the
#' form "INDEXEDFIELD:VALUE" e.g. "kingdom:Fungi".
#' @param download logical: if TRUE download all images and add location to
#' dataframe
#' @param download_path string: (optional) filepath to download images to.
#' If not given and download param is TRUE, will create an images
#' folder
#' @param sounds logical (optional) Image search also returns sound files.
#' Ignored unless explicitly requested.
#' @param verbose logical: show additional progress information?
#' [default is set by ala_config()]
#' @return Data frame of image results
#'
#' @examples
#' \dontrun{
#' ## Download all images for an occurrence with a CC BY-NC 4.0 licence
#' occurrence_image_search(id = "d201f3e0-3e1d-47f1-94ce-9fc226cbc5ec",
#' fq = "recognisedLicence:CC BY-NC 4.0",
#' download = TRUE)
#' }
#' @export occurrence_images
occurrence_images <- function(occ_id, fq, download = FALSE, download_path,
sounds = FALSE,
verbose = ala_config()$verbose) {
# Fetch image metadata for a vector of ALA occurrence ids, optionally
# downloading the media files. Returns one combined data frame of
# image records (one row per image across all occurrence ids).
assert_that(is.flag(verbose))
assert_that(is.flag(sounds))
assert_that(is.flag(download))
# the image-service base URL must be configured before any query
if (is.null(getOption("ALA4R_server_config")$base_url_images) ||
getOption("ALA4R_server_config")$base_url_images == "") {
stop("No URL to the image database has been configured: see base_url_images
in ", getOption("ALA4R_server_config")$config_function)
}
if (missing(occ_id)) {
stop("Please provide a list of occurrence ids to retrieve images for")
}
assert_that(is.character(occ_id))
# optional filter queries become repeated "fq" query parameters
if (!missing(fq)) {
assert_that(is.character(fq))
check_fq(fq, type = "images")
fq <- as.list(fq)
names(fq) <- rep("fq", length(fq))
}
else {
fq <- NULL
}
# one search request per occurrence id; results are row-bound together
image_data <- data.table::rbindlist(lapply(occ_id, function(z) {
this_query <- list()
this_query$q <- paste0("occurrenceID:", '"', z, '"')
if (!is.null(fq)) {
this_query <- c(this_query, fq)
}
this_url <- build_url_from_parts(
getOption("ALA4R_server_config")$base_url_images,
c("ws", "/", "search"),
query = this_query)
data <- cached_get(URLencode(this_url), type = "json", verbose = verbose)
# if no images are found for any given occurrence id, print a warning
if (data$totalImageCount == 0) {
message(paste0("No images were found for occurrence id ", z))
}
df <- as.data.frame(data$images, stringsAsFactors = FALSE)
# throttle API calls so ALA server is not overloaded
Sys.sleep(1)
return(df)
}), fill = TRUE)
# nothing came back for any id: optionally warn, return empty result
if (length(image_data) == 0) {
if (ala_config()$warn_on_empty) {
warning("No images were found for any of the occurrence ids provided")
}
return(image_data)
}
# NOTE(review): `data` computed here is never used afterwards -- both
# download_images() and the return value use the unfiltered image_data,
# so the sounds filter below appears to be dead code. Also,
# `image_data$images` indexes a column named "images" inside the
# combined frame; confirm against the image-service JSON layout.
if (!sounds) {
data <- image_data$images[image_data$images$fileType == "image", ]
}
else {
data <- image_data$images
}
if (download) {
# default download location: ./media under the working directory
if (missing(download_path)) {
message(sprintf("No download path specified.
Media will be downloaded in %s",
file.path(getwd(), "media")))
download_path <- file.path(getwd(), "media")
}
download_images(data = image_data, media_dir = download_path,
verbose = verbose, sounds = sounds)
}
return(image_data)
}
|
9d1611eae5a18408a0ae0880bad48b635fa5de0c
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/PanelCount/examples/CRE_SS.Rd.R
|
c96030546d905c1bf912ebe1696bd99bc324ce88
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 399
|
r
|
CRE_SS.Rd.R
|
# Extracted example code for PanelCount::CRE_SS -- a sample selection
# model with correlated random effects -- run on the bundled retweet
# data set `rt`.
library(PanelCount)
### Name: CRE_SS
### Title: A Sample Selection Model with Correlated Random Effects
### Aliases: CRE_SS
### ** Examples
## No test:
data(rt)
# Note: estimation may take up 10~15 minutes
# first formula: selection equation (isRetweet); second formula:
# outcome equation (num.words); both with tweet fixed effects,
# grouped by user id.
est = CRE_SS(isRetweet~fans+tweets+as.factor(tweet.id),
num.words~fans+tweets+as.factor(tweet.id),
id=rt$user.id, data=rt)
## End(No test)
|
b52db7b89556ec99b0996158ca673ce4a9c2ce9d
|
e070a2dfc51711762288679a10750ac1df139341
|
/source/paper/MGFigures/utilities.R
|
2552466d44caea82e8720ee82a30fe102623a2bc
|
[
"MIT"
] |
permissive
|
hakyimlab/MetaXcan-Postprocess
|
abfd03cc9dc4a577032589a94d6902b4cb4201d7
|
a65e36f29c6af412471cab36e31dabb325a93999
|
refs/heads/master
| 2020-05-21T13:30:17.256593
| 2017-05-11T19:02:08
| 2017-05-11T19:02:08
| 53,421,284
| 1
| 2
| null | 2017-05-11T19:02:09
| 2016-03-08T15:05:00
|
Python
|
UTF-8
|
R
| false
| false
| 1,052
|
r
|
utilities.R
|
library(stringi)
simpleCap <- function(x) {
  # Capitalise the first letter of every space-separated word in a
  # single string, leaving the rest of each word untouched.
  words <- strsplit(x, " ")[[1]]
  first_letters <- toupper(substring(words, 1, 1))
  remainders <- substring(words, 2)
  paste0(first_letters, remainders, collapse = " ")
}
build_allele_key <- function(data.frame) {
  # Build one canonical allele key per row: concatenate ref_allele and
  # eff_allele, then sort the characters of each key (stringi locale
  # order) so that e.g. "AG" and "GA" yield the same key.
  keys <- sprintf("%s%s", data.frame$ref_allele, data.frame$eff_allele)
  canonicalise <- function(chars) {
    stri_c(chars[stri_order(chars)], collapse = "")
  }
  per_key_chars <- stri_split_boundaries(keys, type = "character")
  vapply(per_key_chars, canonicalise, "")
}
split_into_lines <- function(strings, size) {
  # Wrap underscore-delimited labels onto multiple lines so that no
  # line exceeds `size` characters.
  #
  # FIX: the `size` argument was previously ignored -- the wrap
  # threshold was hard-coded to 19 -- so callers passing any other
  # size silently got 19-character wrapping.
  #
  # strings: character vector of labels whose parts are joined by "_".
  # size:    maximum number of characters per output line.
  # Returns a named list mapping each unique input label to its
  # wrapped form (single-part labels are never broken).
  u <- unique(strings)
  l <- list()
  ppaste <- function(a, b, p) {
    # Append component `b` to accumulator `a`, re-inserting the "_"
    # separator when `a` is non-empty; `p` is a suffix ("\n" to break
    # the line, "" to continue it).
    if (nchar(a) > 0) {
      a <- paste0(a, "_")
    }
    paste0(a, b, p)
  }
  for (word in u) {
    comps <- strsplit(word, "_")[[1]]
    line <- ""
    buff <- ""
    for (comp in comps) {
      if (nchar(buff) + nchar(comp) > size && length(comps) > 1) {
        line <- ppaste(line, buff, "\n")
        buff <- comp
      } else {
        buff <- ppaste(buff, comp, "")
      }
    }
    line <- ppaste(line, buff, "")
    l[[word]] <- line
  }
  l
}
|
7560834e03412d1fb56b48b42107d18fd7690abd
|
00b153ac44ca3fd122d6b478a40ad3b9a9e7cd27
|
/FISTULA/R-SCRIPTS/Patient311143_Hemodynamics.R
|
dac306317746dc77a2956982ea5b116d2e9d5418
|
[] |
no_license
|
RosamariaTricarico/PROJECTS
|
27e53e14e802fde6cdab2d36ea565ad0dc95666f
|
b77c086b57ca13dc092e389d8b89f5625e09c634
|
refs/heads/master
| 2020-04-21T10:17:55.394177
| 2019-03-16T13:40:23
| 2019-03-16T13:40:23
| 169,481,853
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,107
|
r
|
Patient311143_Hemodynamics.R
|
# Fold-change plots of fistula-vein hemodynamic metrics for one patient
# at 6 weeks and 6 months relative to baseline.
# NOTE(review): this script uses filter() (dplyr) and xyplot() (lattice)
# without library() calls -- both must be attached before sourcing.
Hemodynamics <- read.csv(file="Hemodynamics_Fistula-Vein.csv", head=TRUE, sep=",")
View(Hemodynamics)
Patient1 <- filter(Hemodynamics, Patient == "031143 HJ")
#head(Patient1)
View(Patient1)
# one subset per scan session
Baseline <- filter(Patient1, Scan == 1)
Week6 <- filter(Patient1, Scan == 2)
Month6 <- filter(Patient1, Scan == 3)
n <- dim(Baseline);
m <- (n[1]);
# columns 5:9 are assumed to be area, WSSave, WSSmax, WSSGave, OSI --
# TODO confirm against the CSV header
FoldChange_Week6 <- Week6[5:9]/Baseline[5:9]
FoldChange_Month6 <- Month6[5:9]/Baseline[5:9]
# column 4 is assumed to be the vein position d.mm -- TODO confirm
xdim <- head(Patient1[4:4],m)
#View(xdim)
Patient1_FoldChange_Week6 = cbind(xdim, FoldChange_Week6);
Patient1_FoldChange_Month6 = cbind(xdim, FoldChange_Month6);
#--Define plot titles:
lab.area <- "Area"
lab.WSSave <- "Average of WSS"
lab.WSSmax <- "Maximum WSS"
lab.WSSGave <- "Average of WSS gradient"
lab.OSI <- "Oscillatory Shear Index"
#--Custom strip function: draws the panel header with a custom
#  background colour and one of the labels above per panel.
my.strip <- function(which.given, which.panel, ...) {
strip.labels <- c(lab.area, lab.WSSave, lab.WSSmax, lab.WSSGave, lab.OSI)
panel.rect(0, 0, 1, 1, col="#ffe5cc", border=1)
panel.text(x=0.5, y=0.5, adj=c(0.5, 0.55), cex=0.95,
lab=strip.labels[which.panel[which.given]])
}
#settings for graph title style (currently unused -- the par.settings
#argument below is commented out)
my.settings <- list(
par.main.text = list(font = 2, # make it bold
just = "left",
x = grid::unit(5, "mm")),
par.sub.text = list(font = 0.5,
just = "left",
x = grid::unit(5, "mm"))
)
#Week 6 fold change graph
# NOTE(review): the title says "Patient 331143" -- apparent typo for
# 31143; it is a runtime string, so left unchanged here.
pdf(file="Patient31143_FoldChange_Week6.pdf")
xyplot(area + WSSave + WSSmax + WSSGave + OSI ~ d.mm, data = Patient1_FoldChange_Week6, scales=list(y="free", rot=0),type = "l",
grid = TRUE, strip=my.strip, outer=TRUE, layout=c(1, 5, 1), xlab="Vein Length [mm]", ylab="",
main="Patient 331143 - 6 weeks fold change from baseline")
#par.settings=my.settings, main="Patient 31129 - 6 weeks fold change from baseline",
#sub="Source: Hemodynamic data-set")
dev.off()
#Month 6 fold change graph
pdf(file="Patient31143_FoldChange_Month6.pdf")
xyplot(area + WSSave + WSSmax + WSSGave + OSI ~ d.mm, data = Patient1_FoldChange_Month6, scales=list(y="free", rot=0),type = "l",
grid = TRUE, strip=my.strip, outer=TRUE, layout=c(1, 5, 1), xlab="Vein Length [mm]", ylab="",
main="Patient 31143 - 6 months fold change from baseline")
dev.off()
#Creating the combined graph
# NOTE(review): the `= 0` initialisation is dead -- the next line
# reassigns Patient1_FoldChange unconditionally.
Patient1_FoldChange = 0;
Patient1_FoldChange = rbind(Patient1_FoldChange_Week6, Patient1_FoldChange_Month6);
#View(Patient1_FoldChange)
Scan <- tail(Patient1[3:3],(n[1]*2));
Patient1_FoldChange = cbind(Scan,Patient1_FoldChange);
pdf(file="Patient31143_FoldChange.pdf")
xyplot(area + WSSave + WSSmax + WSSGave + OSI ~ d.mm, data = Patient1_FoldChange, groups = Scan, scales=list(y="free", rot=0),type = "l",
grid = TRUE, strip=my.strip, outer=TRUE, layout=c(1, 5, 1), xlab="Vein Length [mm]", ylab="",
key=list(text=list(c("6 weeks", "6 months")),
title="Patient 31143 - Fold change from baseline",
col=c("deepskyblue3", "deeppink"), lty=c(1, 1),
columns=2, cex=0.7,
lines=TRUE))
dev.off()
|
a0d106f4bf8871020d0fc23249fb9b42bacb9414
|
c2a6015d964e0a004fa4ac9c59df8aed039cc4fc
|
/man/is.nr.Rd
|
220e2791d192dd7bace5255cf7f9ebd0194e2575
|
[] |
no_license
|
cran/ufs
|
27083e54b6e4c89f802c4de9218dbbd7c7d4260d
|
74bcfb60160bced552d79d301b739bb965d1a156
|
refs/heads/master
| 2023-06-23T09:48:11.331297
| 2023-06-09T15:30:03
| 2023-06-09T15:30:03
| 145,907,951
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 533
|
rd
|
is.nr.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/is.nr.R
\name{is.nr}
\alias{is.nr}
\title{\code{NULL} and \code{NA} 'proof' checking of whether something is a number}
\usage{
is.nr(x)
}
\arguments{
\item{x}{The value or vector to check.}
}
\value{
TRUE or FALSE.
}
\description{
Convenience function that returns TRUE if the argument is not null, not NA,
and is.numeric.
}
\examples{
is.nr(8); ### Returns TRUE
is.nr(NULL); ### Returns FALSE
is.nr(NA); ### Returns FALSE
}
|
428ce186d2da223abe87f69ebb4aaf45e0c94f77
|
676663a4c9cad2ceba8561a8b03a60a7d382f721
|
/script/ref-youtube-library.R
|
bc1805db37c32f3a03d9de539fd91c4488334cbe
|
[
"MIT"
] |
permissive
|
minhyukyang/text-mining-using-r
|
d099ade222f6ed5e71d18f61b919b6a440af65b3
|
a72d1e4a6b7975cb072f7783f450bea847520860
|
refs/heads/main
| 2023-01-06T16:42:24.273862
| 2020-11-08T11:36:20
| 2020-11-08T11:36:20
| 305,714,231
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,299
|
r
|
ref-youtube-library.R
|
# iscrape 1.0.1
# FUNCTIONS
#' @title Get user page
#' @description Gets the user page
#' @param username A character denoting a valid instagram username.
#' @return Returns an httr webpage object text or NA.
#' @details
#' If the username is not valid, it does not return a webpage,
#' NA is returned with a warning.
#' @examples
#' # get page for username: instagram
#' pu <- get_page_user("instagram")
#' @seealso \code{\link{get_count_post}}, \code{\link{get_count_follower}}, \code{\link{get_count_following}}
#' @importFrom httr GET content http_error
#' @export
#'
get_page_user <- function(username = NULL) {
  # Fetch the raw HTML of an Instagram profile page as text.
  # On request failure, warns and returns NA; in both cases the result
  # carries the username in its "names" attribute.
  if (is.null(username)) stop("Input is empty.")
  if (!is.character(username)) stop("Input must be a character datatype.")
  if (length(username) > 1) stop("Input must be of length one.")
  profile_url <- paste0("https://www.instagram.com/", username, "/")
  response <- httr::GET(profile_url)
  if (httr::http_error(response)) {
    warning(paste0("Username ", username, " did not return a valid web page."))
    result <- NA
  } else {
    result <- httr::content(response, as = "text", encoding = "UTF-8")
  }
  attr(result, "names") <- username
  return(result)
}
#' @title Get post count from a user page
#' @description Parses a text userpage and extracts post count
#' @param userpage A user page. An output from \code{\link{get_page_user}}.
#' @return Returns an integer follower count or NA.
#' @details
#' If the parsing fails, NA is returned with a warning.
#' @examples
#' pu <- get_page_user("instagram")
#' cf <- get_count_post(pu)
#' @importFrom stringr str_extract str_replace str_detect
#' @export
#'
get_count_post <- function(userpage = NULL) {
  # Parse the post count out of a raw profile page (the output of
  # get_page_user()), trying two page formats in turn, and convert
  # "k"/"m" suffixed figures to plain integers.
  if (is.null(userpage)) stop("Input is empty.")
  # method 1: human-readable "<n> Posts" text
  hit <- stringr::str_extract(userpage, '[0-9,.km]+[ ]{1,}Posts')
  hit <- stringr::str_replace(hit, '[ ]{1,}Posts', "")
  count_value <- stringr::str_replace(hit, ",", "")
  # method 2: embedded GraphQL JSON field
  if (is.na(count_value) | count_value == "") {
    json_hit <- stringr::str_extract(userpage, 'edge_owner_to_timeline_media[:{"count]+[0-9]+')
    count_value <- stringr::str_extract(json_hit, '[0-9]+')
  }
  # convert abbreviated figures, e.g. "1.5k" -> 1500
  if (!is.na(count_value)) {
    if (count_value != "") {
      multiplier <- 1
      if (stringr::str_detect(count_value, "k")) multiplier <- 1000
      if (stringr::str_detect(count_value, "m")) multiplier <- 1000000
      count_value <- stringr::str_replace(count_value, "k|m", "")
      # one decimal place: digits are 10x too large once the dot is dropped
      if (stringr::str_detect(count_value, "[.]")) multiplier <- multiplier / 10
      count_value <- as.integer(stringr::str_replace(count_value, "[.]", ""))
      count_value <- count_value * multiplier
    }
  }
  if (is.na(count_value) | count_value == "") {
    warning(paste0("Userpage did not return a post count. Instagram format may have changed."))
    count_value <- NA
  }
  attr(count_value, "names") <- attr(userpage, "names")
  return(count_value)
}
#' @title Get follower count from a user page
#' @description Parses a text userpage and extracts follower count
#' @param userpage A user page. An output from \code{\link{get_page_user}}.
#' @return Returns an integer follower count or NA.
#' @details
#' If the parsing fails, NA is returned with a warning.
#' @examples
#' pu <- get_page_user("instagram")
#' cf <- get_count_follower(pu)
#' @importFrom stringr str_replace str_extract str_detect
#' @export
#'
get_count_follower <- function(userpage=NULL) {
  # Parse the follower count out of a raw profile page (the output of
  # get_page_user()), trying three page formats in turn, and convert
  # "k"/"m" suffixed figures to plain integers.
  #
  # FIX: method 1 and method 3 called str_replace()/str_extract()
  # without the stringr:: prefix used everywhere else in this file;
  # those bare calls only worked when stringr happened to be attached.
  if(is.null(userpage)) stop("Input is empty.")
  # method 1: human-readable "<n> Followers" text
  count_value <- stringr::str_extract(userpage,'[0-9,.km]+[ ]{1,}Followers') %>%
    stringr::str_replace('[ ]{1,}Followers',"") %>% stringr::str_replace(",","")
  # method 2: schema.org interaction-count JSON
  if(is.na(count_value) | count_value=="") {
    count_value <- stringr::str_extract(stringr::str_extract(userpage,'userInteractionCount":"[0-9]+'),'[0-9]+')
  }
  # method 3: embedded GraphQL JSON field
  if(is.na(count_value) | count_value=="") {
    count_value <- stringr::str_extract(stringr::str_extract(userpage,'edge_followed_by[{count":]+[0-9]+\\}'),'[0-9]+')
  }
  # convert abbreviated figures, e.g. "1.5k" -> 1500
  if(!is.na(count_value)) {
    if(count_value!="") {
      mlpy <- 1
      if(stringr::str_detect(count_value,"k")) mlpy <- 1000
      if(stringr::str_detect(count_value,"m")) mlpy <- 1000000
      count_value <- count_value %>% stringr::str_replace("k|m","")
      # one decimal place: digits are 10x too large once the dot is dropped
      if(stringr::str_detect(count_value,"[.]")) mlpy <- mlpy/10
      count_value <- count_value %>% stringr::str_replace("[.]","") %>% as.integer()
      count_value <- count_value*mlpy
    }
  }
  if(is.na(count_value) | count_value=="") {
    warning(paste0("Userpage did not return a follower count. Instagram format may have changed."))
    count_value <- NA
  }
  attr(count_value,"names") <- attr(userpage,"names")
  return(count_value)
}
#' @title Get following count from a user page
#' @description Parses a text userpage and extracts following count
#' @param userpage A user page. An output from \code{\link{get_page_user}}.
#' @return Returns an integer following count or NA.
#' @details
#' If the parsing fails, NA is returned with a warning.
#' @examples
#' pu <- get_page_user("instagram")
#' cf <- get_count_following(pu)
#' @importFrom stringr str_replace str_extract str_detect
#' @export
#'
get_count_following <- function(userpage = NULL) {
  # Parse the following count out of a raw profile page (the output of
  # get_page_user()), trying two page formats in turn, and convert
  # "k"/"m" suffixed figures to plain integers.
  if (is.null(userpage)) stop("Input is empty.")
  # method 1: human-readable "<n> Following" text
  hit <- stringr::str_extract(userpage, '[0-9,.km]+[ ]{1,}Following')
  hit <- stringr::str_replace(hit, '[ ]{1,}Following', "")
  count_value <- stringr::str_replace(hit, ",", "")
  # method 2: embedded GraphQL JSON field
  if (is.na(count_value) | count_value == "") {
    json_hit <- stringr::str_extract(userpage, 'edge_follow[:{"count]+[0-9]+')
    count_value <- stringr::str_extract(json_hit, '[0-9]+')
  }
  # convert abbreviated figures, e.g. "1.5k" -> 1500
  if (!is.na(count_value)) {
    if (count_value != "") {
      multiplier <- 1
      if (stringr::str_detect(count_value, "k")) multiplier <- 1000
      if (stringr::str_detect(count_value, "m")) multiplier <- 1000000
      count_value <- stringr::str_replace(count_value, "k|m", "")
      # one decimal place: digits are 10x too large once the dot is dropped
      if (stringr::str_detect(count_value, "[.]")) multiplier <- multiplier / 10
      count_value <- as.integer(stringr::str_replace(count_value, "[.]", ""))
      count_value <- count_value * multiplier
    }
  }
  if (is.na(count_value) | count_value == "") {
    warning(paste0("Userpage did not return a following count. Instagram format may have changed."))
    count_value <- NA
  }
  attr(count_value, "names") <- attr(userpage, "names")
  return(count_value)
}
#' @title Get user page info
#' @description Gets the three metrics from a user page
#' @param username A character vector of one or more valid instagram usernames.
#' @return Returns a dataframe with post counts, follower count and following count.
#' @details
#' If the username is not valid, an empty data.frame is returned
#' @examples
#' # get page for username: instagram
#' pu <- get_page_info("instagram")
#' @seealso \code{\link{get_count_post}}, \code{\link{get_count_follower}}, \code{\link{get_count_following}}
#' @importFrom stringr str_replace str_extract str_detect
#' @importFrom httr content GET http_error
#' @importFrom dplyr bind_rows
#' @export
#'
get_page_info <- function(username = NULL) {
  # Retrieve post/follower/following counts for one or more usernames,
  # returning one data-frame row per username. Usernames whose pages
  # fail to load produce a row of NA counts (with a warning).
  if (is.null(username)) stop("Input is empty.")
  if (!is.character(username)) stop("Input must be a character datatype.")
  fetch_one <- function(username) {
    webpage <- httr::GET(paste0("https://www.instagram.com/", username, "/"))
    if (httr::http_error(webpage)) {
      warning(paste0("Username ", username, " did not return a valid web page."))
      return(data.frame(username = username,
                        posts = NA,
                        followers = NA,
                        following = NA,
                        stringsAsFactors = FALSE))
    }
    pu <- httr::content(webpage, as = "text", encoding = "UTF-8")
    data.frame(username = username,
               posts = get_count_post(pu),
               followers = get_count_follower(pu),
               following = get_count_following(pu),
               stringsAsFactors = FALSE)
  }
  dplyr::bind_rows(lapply(username, fetch_one))
}
#' @title Get hashtag page
#' @description Gets the hashtag page
#' @param hashtag A character denoting a valid instagram hashtag.
#' @return Returns an httr webpage object as text or NA.
#' @details
#' If the tag is not valid and/or if it does not return a webpage,
#' NA is returned with a warning.
#' @examples
#' # get page for hashtag: instagram
#' un <- get_page_hashtag("instagram")
#' @seealso \code{\link{get_count_hashtag}}
#' @importFrom httr content GET http_error
#' @export
#'
get_page_hashtag <- function(hashtag = NULL) {
  # Fetch the raw HTML of an Instagram hashtag explore page as text.
  # On request failure, warns and returns NA; in both cases the result
  # carries the hashtag in its "names" attribute.
  if (is.null(hashtag)) stop("Input is empty.")
  if (!is.character(hashtag)) stop("Input must be a character datatype.")
  if (length(hashtag) > 1) stop("Input must be of length one.")
  tag_url <- paste0("https://www.instagram.com/explore/tags/", hashtag, "/")
  response <- httr::GET(tag_url)
  if (httr::http_error(response)) {
    warning(paste0("Hashtag ", hashtag, " did not return a valid web page."))
    result <- NA
  } else {
    result <- httr::content(response, as = "text", encoding = "UTF-8")
  }
  attr(result, "names") <- hashtag
  return(result)
}
#' @title Get hashtag count from a hashtagpage
#' @description Parses a text hashtagpage and extracts hashtag count
#' @param hashtagpage A hashtagpage. An output from \code{\link{get_page_hashtag}}.
#' @return Returns a numeric follower count or NA.
#' @details
#' If the parsing fails, NA is returned with a warning.
#' @examples
#' ph <- get_page_hashtag("instagram")
#' ch <- get_count_hashtag(ph)
#' @importFrom stringr str_replace str_extract str_detect
#' @export
#'
get_count_hashtag <- function(hashtagpage = NULL) {
  # Parse the media count out of a raw hashtag page (the output of
  # get_page_hashtag()), trying two page formats in turn, and convert
  # "k"/"m" suffixed figures to plain integers.
  if (is.null(hashtagpage)) stop("Input is empty.")
  # method 1: human-readable "<n> Posts" text
  hit <- stringr::str_extract(hashtagpage, '[0-9,.km]+[ ]{1,}Posts')
  hit <- stringr::str_replace(hit, '[ ]{1,}Posts', "")
  count_value <- stringr::str_replace(hit, ",", "")
  # method 2: embedded GraphQL JSON field
  if (is.na(count_value) | count_value == "") {
    json_hit <- stringr::str_extract(hashtagpage, 'edge_hashtag_to_media[:{"count]+[0-9]+')
    count_value <- stringr::str_extract(json_hit, '[0-9]+')
  }
  # convert abbreviated figures, e.g. "1.5k" -> 1500
  if (!is.na(count_value)) {
    if (count_value != "") {
      multiplier <- 1
      if (stringr::str_detect(count_value, "k")) multiplier <- 1000
      if (stringr::str_detect(count_value, "m")) multiplier <- 1000000
      count_value <- stringr::str_replace(count_value, "k|m", "")
      # one decimal place: digits are 10x too large once the dot is dropped
      if (stringr::str_detect(count_value, "[.]")) multiplier <- multiplier / 10
      count_value <- as.integer(stringr::str_replace(count_value, "[.]", ""))
      count_value <- count_value * multiplier
    }
  }
  if (is.na(count_value) | count_value == "") {
    warning(paste0("Hashtag page did not return a hashtag count. Instagram format may have changed."))
    count_value <- NA
  }
  attr(count_value, "names") <- attr(hashtagpage, "names")
  return(count_value)
}
# --- Ad-hoc usage / scratch testing of the scraper helpers above ---
pu <- get_page_user("inha_univ")
get_count_post(pu)
get_count_follower(pu)
get_count_following(pu)
# get_page_info(c("instagram","travelandleisure","minimalism"))
get_page_info("inha_univ")
# get webpage
ph <- get_page_hashtag("inha_univ")
get_count_hashtag(ph)
# fetch the hashtag page directly
webpage <- httr::GET(paste0("https://www.instagram.com/explore/tags/","inha_univ","/"))
# NOTE(review): html_nodes() is an rvest function that is never loaded
# in this file, and it is applied to an httr response object without
# read_html() first -- this line likely errors as written.
webpage %>% html_nodes("div.C4VMK > span")
# NOTE(review): res_cl is not defined anywhere in this file; this
# snippet appears to be pasted in from another script.
link_tmp <- res_cl %>%
read_html() %>%
html_nodes("a.subject_fixed") %>%
html_attr("href") %>%
unique()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.