blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b9b748ee437a7e1f0ac51bd0cc78fe0db5e98019
|
f6f96b6095fdba1ab68adfcc4565849cc7982d8c
|
/R/mvBM.getRate.R
|
4c769aa548141231f83cf79de7be0218635dd82e
|
[
"MIT"
] |
permissive
|
aniwaniuk/evomap
|
d0b51a1fb208bef813647b40063e14b0aac71834
|
dfa7dfdc560d1fd04414dffedab7b6be765d8175
|
refs/heads/master
| 2020-04-15T20:25:04.527658
| 2018-05-29T14:03:25
| 2018-05-29T14:03:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 957
|
r
|
mvBM.getRate.R
|
#' Lineage-specific rate estimation using multiple variance Brownian motion
#'
#' Computes a lineage-specific rate estimate by scaling the MCMC distribution
#' of sigma2 by the ratio of rescaled to original branch lengths over the
#' selected edges, and returning its mean.
#' @param tree an object of class "phylo".
#' @param tree_mvBM an object of class "phylo". The rescaled tree from an mvBM procedure.
#' @param branches vector listing the branch numbers ('edge' numbers) for which the mvBM rate should be computed
#' @param sigma2Distr the MCMC distribution of sigma2 from an MCMC mvBM procedure
#' @return mvBM rate estimate (mean of the scaled sigma2 distribution)
#' @references Smaers, Mongle & Kandler (2016) A multiple variance Brownian motion framework for estimating variable rates and inferring ancestral states. Biological Journal of the Linnean Society. 118 (1): 78-94.
#' @examples
#' # see https://smaerslab.com/software/
#' @export
mvBM.getRate <- function(tree, tree_mvBM, branches, sigma2Distr) {
  # Ratio of total rescaled branch length to total original branch length
  # over the edges of interest; > 1 implies an elevated rate on this lineage.
  length.ratio <- sum(tree_mvBM$edge.length[branches]) / sum(tree$edge.length[branches])
  mean(sigma2Distr * length.ratio)
}
|
3af61ae4692d1dff65690d963171d2662755ccc0
|
daccbc095ccb9be61622399c2cfa3c3319aafbe0
|
/R/refine.R
|
cf9bbd617eaa8344808ccfc2a90df4b0bc9d998f
|
[] |
no_license
|
menghaomiao/aitr
|
c2199837ef5e125b73838233779fc997aa8e3cd3
|
6cfb60c0ae63ef7dd43b3f8c0f78293c1eeea5bb
|
refs/heads/master
| 2022-11-05T21:13:50.866225
| 2020-06-18T23:14:03
| 2020-06-18T23:14:03
| 110,192,891
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 186
|
r
|
refine.R
|
# Threshold a score matrix into a 0/1 rule matrix.
# Entries with inner >= delta become 1; any row left all-zero is retried
# with the relaxed cutoff -delta so that every row can select something.
refine <- function(inner, delta) {
  rule <- matrix(0, nrow(inner), ncol(inner))
  rule[inner >= delta] <- 1
  empty.rows <- rowSums(rule) == 0
  if (any(empty.rows)) {
    # Relaxed pass for rows that selected nothing at the strict threshold.
    rule[empty.rows, ][inner[empty.rows, ] >= -delta] <- 1
  }
  return(rule)
}
|
edece57f55f61686348bfe86cb37f40af03bf02c
|
9f972d4bde1195b867fde81e4726c1bbaf562bd4
|
/man/rss_varbvsr_iter_naive_reference.Rd
|
49ab93baa76150096511ed483b8f4a1e3b8fc523
|
[] |
no_license
|
MoisesExpositoAlonso/rssr
|
d2d10ff3ef417d7b979b0d5f1cc4c0c55a7a6305
|
c9a076bc7a3d36835eaa73a0b34cee1cf7a13657
|
refs/heads/master
| 2021-01-19T00:19:34.344067
| 2017-03-31T21:38:35
| 2017-03-31T21:38:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 696
|
rd
|
rss_varbvsr_iter_naive_reference.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{rss_varbvsr_iter_naive_reference}
\alias{rss_varbvsr_iter_naive_reference}
\title{Single update of RSS with variational method
This function is a very close translation of the original implementation of RSS. It is kept here for testing purposes
It performs a single update}
\usage{
rss_varbvsr_iter_naive_reference(SiRiS, sigma_beta, logodds, betahat, se,
alpha0, mu0, SiRiSr0, reverse)
}
\description{
Single update of RSS with variational method
This function is a very close translation of the original implementation of RSS. It is kept here for testing purposes
It performs a single update
}
|
890b0ec01dea6f0284d55056e1b6eeac5564e3c5
|
1ff0f0217347e7ec30167a5524ffb8260e49e823
|
/man/readCounts.Rd
|
9c527f9f8328185eb95caddce53406d03ba29b13
|
[] |
no_license
|
vaofford/amplican
|
0ee096b58585ceb24c6e451872af2a2fd87b2de6
|
7774dda136bdd3dd78c6c8c1f596195b847f77f3
|
refs/heads/master
| 2020-09-15T08:21:02.149838
| 2019-06-06T18:33:47
| 2019-06-06T18:33:47
| 223,392,406
| 0
| 0
| null | 2019-11-22T11:48:36
| 2019-11-22T11:48:35
| null |
UTF-8
|
R
| false
| true
| 545
|
rd
|
readCounts.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AlignmentsExperimentSet-class.R
\name{readCounts}
\alias{readCounts}
\title{Alignments for forward reads.}
\usage{
readCounts(x)
}
\arguments{
\item{x}{(AlignmentsExperimentSet)}
}
\value{
(listOrNULL)
}
\description{
Get read counts for forward reads.
}
\examples{
file_path <- system.file("extdata", "results", "alignments",
"AlignmentsExperimentSet.rds", package = "amplican")
aln <- readRDS(file_path)
readCounts(aln)
}
\keyword{internal}
|
f3e55e462172f1e55bf092cacef1a05a02ea8d8d
|
a63fbd84fbc4aafb8d602adb36773f42991d0007
|
/data-raw/readDataWithLoc.R
|
fdb87b3628529e07a8ad1a0786b0a04bba8966e0
|
[
"LicenseRef-scancode-public-domain",
"CC0-1.0"
] |
permissive
|
DIDSR/mitoticFigureCounts
|
768dc5b5c6ceaa9cf3841dc546b9ea5061b83f1f
|
ed9886cac4e3c928ee543d5d3eec0777bc883eb7
|
refs/heads/master
| 2022-10-31T10:08:57.717486
| 2022-10-23T20:50:46
| 2022-10-23T20:50:46
| 214,683,089
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,275
|
r
|
readDataWithLoc.R
|
# Build the mitoticFigureCounts package datasets from the raw study
# spreadsheet (5 readers x 5 modalities x 157 candidate mitotic figures).
library(xlsx)
library(iMRMC)
# * Creating `data-raw`. ####
# * Adding `data-raw` to `.Rbuildignore`.
# Next:
# * Add data creation scripts in data-raw
# * Use usethis::use_data() to add data to package
# Create usethis::use_data_raw()
# Open and read source data file ####
# We know that the study has 5 participants and 157 candidate mitotic figures
nReaders <- 5
readers <- c("observer.1", "observer.2", "observer.3", "observer.4", "observer.5")
nCases <- 157
cases <- 1:157
nModalities <- 5
modalities <- c("scanner.A", "scanner.B", "scanner.C", "scanner.D", "microscope")
# The source data file is an excel file with 10 sheets:
# one set of 5 sheets for each scanner and
# one set of 5 sheets for each reader.
# The data is redundant across these two sets
fileName <- file.path("data-raw", "mskcc20180627withLoc.xlsx")
# Read each sheet into different data frames
# (only the by-scanner set is used; the by-reader set is redundant, see above)
df.scanner.A <- read.xlsx(fileName, sheetIndex = 1)
df.scanner.B <- read.xlsx(fileName, sheetIndex = 2)
df.scanner.C <- read.xlsx(fileName, sheetIndex = 3)
df.scanner.D <- read.xlsx(fileName, sheetIndex = 4)
df.microscope <- read.xlsx(fileName, sheetIndex = 5)
# df.observer.1 <- read.xlsx(fileName, sheetIndex = 6)
# df.observer.2 <- read.xlsx(fileName, sheetIndex = 7)
# df.observer.3 <- read.xlsx(fileName, sheetIndex = 8)
# df.observer.4 <- read.xlsx(fileName, sheetIndex = 9)
# df.observer.5 <- read.xlsx(fileName, sheetIndex = 10)
masterRawWithLoc <- list(
df.scanner.A = df.scanner.A,
df.scanner.B = df.scanner.B,
df.scanner.C = df.scanner.C,
df.scanner.D = df.scanner.D,
df.microscope = df.microscope
# df.observer.1 = df.observer.1,
# df.observer.2 = df.observer.2,
# df.observer.3 = df.observer.3,
# df.observer.4 = df.observer.4,
# df.observer.5 = df.observer.5
)
# Check the truth across all data frames
# browser() drops into the interactive debugger if any modality's
# Ground.truth column disagrees with scanner A's.
if (!all(df.scanner.A$Ground.truth == df.scanner.B$Ground.truth)) browser()
if (!all(df.scanner.A$Ground.truth == df.scanner.C$Ground.truth)) browser()
if (!all(df.scanner.A$Ground.truth == df.scanner.D$Ground.truth)) browser()
if (!all(df.scanner.A$Ground.truth == df.microscope$Ground.truth)) browser()
# if (!all(df.scanner.A$Ground.truth == df.observer.1$Ground.truth)) browser()
# if (!all(df.scanner.A$Ground.truth == df.observer.2$Ground.truth)) browser()
# if (!all(df.scanner.A$Ground.truth == df.observer.3$Ground.truth)) browser()
# if (!all(df.scanner.A$Ground.truth == df.observer.4$Ground.truth)) browser()
# if (!all(df.scanner.A$Ground.truth == df.observer.5$Ground.truth)) browser()
# Concatenate the list of data frames to create one master data frame ####
# Tag each modality's data frame with its modalityID, then stack all of them
# once. do.call(rbind, ...) avoids growing dfMaster block-by-block in a loop
# (quadratic copying), and seq_len(nModalities) replaces the hard-coded 1:5.
dfMaster <- do.call(rbind, lapply(seq_len(nModalities), function(iModality) {
  df.current <- masterRawWithLoc[[iModality]]
  df.current$modalityID <- modalities[iModality]
  df.current
}))
# Plain 1..n row names, as produced by the original incremental rbind.
rownames(dfMaster) <- NULL
# Rename columns (misspellings in the source spreadsheet)
dfMaster <- iMRMC::renameCol(dfMaster, "figure..", "targetID")
dfMaster <- iMRMC::renameCol(dfMaster, "ROI_ID", "roiID")
dfMaster <- iMRMC::renameCol(dfMaster, "Obeserver.1", "observer.1")
dfMaster <- iMRMC::renameCol(dfMaster, "Obeserver.2", "observer.2")
dfMaster <- iMRMC::renameCol(dfMaster, "Obeserver.3", "observer.3")
dfMaster <- iMRMC::renameCol(dfMaster, "Obeserver.4", "observer.4")
dfMaster <- iMRMC::renameCol(dfMaster, "Obeserver.5", "observer.5")
dfMaster <- iMRMC::renameCol(dfMaster, "Ground.truth", "truth")
# Make targetID a factor
dfMaster$targetID <- factor(dfMaster$targetID)
dfMaster$modalityID <- factor(dfMaster$modalityID)
# dfClassify: dfMaster includes rows corresponding to ROIs with no marks ####
# If there are no marks, then there are no candidates to classify.
# These rows need to be deleted ... "by hand"
# (targetIDs 77 and 114 are the two mark-free ROIs in this data set)
dfClassify <- dfMaster
dfClassify <- dfClassify[dfClassify$targetID != 77, ]
dfClassify <- dfClassify[dfClassify$targetID != 114, ]
# Re-factor so the dropped targetIDs are removed from the factor levels.
dfClassify$targetID <- factor(dfClassify$targetID)
# dfCountROI: Create df of counts per ROI and modality: including five readers and one truth ####
# Split the data by ROI and modality, then reduce each piece to one row of
# per-reader mark counts. Rows are built in a list and stacked once with
# do.call(rbind, ...), avoiding the quadratic rbind-in-loop of the original.
dfMasterSplitByROIandModality <- split(dfMaster, list(dfMaster$roiID, dfMaster$modalityID))
dfCountROI <- do.call(rbind, lapply(dfMasterSplitByROIandModality, function(df.current) {
  data.frame(
    wsiName = df.current[1, "wsiName"],
    roiID = df.current[1, "roiID"],
    modalityID = df.current[1, "modalityID"],
    observer.1 = sum(df.current[, "observer.1"]),
    observer.2 = sum(df.current[, "observer.2"]),
    observer.3 = sum(df.current[, "observer.3"]),
    observer.4 = sum(df.current[, "observer.4"]),
    observer.5 = sum(df.current[, "observer.5"]),
    truth = sum(df.current[, "truth"])
  )
}))
# Plain 1..n row names, matching the original incremental rbind result.
rownames(dfCountROI) <- NULL
# dfCountWSI: Create df of counts per WSI and modality: including five readers and one truth ####
# Split the ROI-level counts by WSI and modality and sum each piece into one
# row per (wsiName, modalityID). As above, rows are built in a list and
# stacked once instead of rbind-ing inside the loop.
dfCountROIsplitByWSI <- split(dfCountROI, list(dfCountROI$wsiName, dfCountROI$modalityID))
dfCountWSI <- do.call(rbind, lapply(dfCountROIsplitByWSI, function(df.current) {
  data.frame(
    wsiName = df.current[1, "wsiName"],
    modalityID = df.current[1, "modalityID"],
    observer.1 = sum(df.current[, "observer.1"]),
    observer.2 = sum(df.current[, "observer.2"]),
    observer.3 = sum(df.current[, "observer.3"]),
    observer.4 = sum(df.current[, "observer.4"]),
    observer.5 = sum(df.current[, "observer.5"]),
    truth = sum(df.current[, "truth"])
  )
}))
# Plain 1..n row names, matching the original incremental rbind result.
rownames(dfCountWSI) <- NULL
# Save data ####
# Export each table under a date-stamped name, both as package data
# (usethis::use_data -> data/*.rda) and as a CSV copy in data/.
dfClassify20180627 = dfClassify
dfCountWSI20180627 = dfCountWSI
dfCountROI20180627 = dfCountROI
usethis::use_data(dfClassify20180627, overwrite = TRUE)
usethis::use_data(dfCountWSI20180627, overwrite = TRUE)
usethis::use_data(dfCountROI20180627, overwrite = TRUE)
write.csv(dfClassify20180627, row.names = FALSE, file.path("data", "dfClassify20180627.csv"))
write.csv(dfCountWSI20180627, row.names = FALSE, file.path("data", "dfCountWSI20180627.csv"))
write.csv(dfCountROI20180627, row.names = FALSE, file.path("data", "dfCountROI20180627.csv"))
|
90cf09b5823ad0c4ce6999ccfeccb1a02d065997
|
47c5a1669bfc7483e3a7ad49809ba75d5bfc382e
|
/R/test.R
|
45d62b3cbd403bdd1bdaf4cb382a56d0d5d4f891
|
[] |
no_license
|
tdhock/inlinedocs
|
3ea8d46ece49cc9153b4cdea3a39d05de9861d1f
|
3519557c0f9ae79ff45a64835206845df7042072
|
refs/heads/master
| 2023-09-04T11:03:59.266286
| 2023-08-29T23:06:34
| 2023-08-29T23:06:34
| 20,446,785
| 2
| 2
| null | 2019-08-21T19:58:23
| 2014-06-03T14:50:10
|
R
|
UTF-8
|
R
| false
| false
| 4,871
|
r
|
test.R
|
test.file <- function
### Check an R code file with inlinedocs to see if the
### extract.docs.file parser accurately extracts all the code inside!
### The code file should contain a variable .result which is the
### documentation list that you should get when you apply
### extract.docs.file to the file. We check for identity of elements
### of elements of the list, so the order of elements should not
### matter, and thus this should be a good robust unit test.
(f,
### File name of R code file with inlinedocs to parse and check.
CRAN.checks=TRUE,
### try to make a package and run CRAN checks?
verbose=FALSE
### Show output?
){
##seealso<< \code{\link{save.test.result}}
## sys.source() evaluates the file in its own environment so the file's
## .result/.parsers variables do not leak into the caller's workspace.
e <- new.env()
suppressWarnings(sys.source(f,e))
## these are the items to check for, in no particular order
.result <- e$.result
parsers <- e$.parsers
result <- extract.docs.file(f, parsers, verbose=verbose)
for(FUN in names(.result)){
fun <- result[[FUN]]
.fun <- .result[[FUN]]
## first check to make sure all the stored items are there
for(N in names(.fun)){
.res <- .fun[[N]]
res <- fun[[N]]
## NOTE(review): is.na(res) inside || assumes res/.res are scalars; a
## length > 1 extracted item would error here under R >= 4.3 — confirm.
if(is.null(res) || is.na(res) || is.na(.res) || .res!=res){
cat(
"\n-----\n",res,"\n-----\nin ",FUN,
"$",N,", expected:\n-----\n",.res,"\n-----\n",
sep="")
stop("docs mismatch in ",f)
}
}
## now check and see if there are no additional items!
additional <- !names(fun)%in%names(.fun)
show <- fun[additional] ##ignore NULL extracted items
show <- show[!sapply(show,is.null)]
## the auto-generated "definition" entry is always allowed to be extra
not.def <- show[names(show) != "definition"]
if(length(not.def)){
cat("\n")
print(not.def)
stop("extracted some unexpected docs!")
}
}
## make sure there are no unexpected outer lists
not.expected <- names(result)[!names(result)%in%names(.result)]
if(length(not.expected)){
print(not.expected)
stop("extracted some unexpected documentation objects!")
}
## finally make a package using this file and see if it passes
## without warnings TDH 27 May 2011 added !interactive() since
## recursive calling R CMD check seems to systematically
## fail... ERROR: startup.Rs not found. This file is usually copied
## to the check directory and read as a .Rprofile, as done in
## tools:::.runPackageTests ... is this a bug in R? Anyway for now
## let's just not run the R CMD check.
## A .dontcheck variable in the test file opts that file out of R CMD check.
if(CRAN.checks && is.null(e$.dontcheck)){
make.package.and.check(f,parsers,verbose)
}
if(verbose)cat("\n")
}
make.package.and.check <- function
### Assemble some R code into a package and process it using R CMD
### check, stopping with an error if the check resulted in any errors
### or warnings.
(f, ##<< R code file name from which we will make a package
parsers=default.parsers,
### Parsers to use to make the package documentation.
verbose=TRUE
### print the check command line?
){
## Package name = file name without its .r/.R extension.
pkgname <- sub("[.][rR]$","",basename(f))
pkgdir <- file.path(tempdir(),pkgname)
if(file.exists(pkgdir))unlink(pkgdir,recursive=TRUE)
rdir <- file.path(pkgdir,"R")
if(verbose)print(rdir)
dir.create(rdir,recursive=TRUE)
## Reuse the skeleton DESCRIPTION/NAMESPACE shipped in inst/silly.
sillydir <- system.file("silly",package="inlinedocs")
tocopy <- file.path(sillydir,c("DESCRIPTION","NAMESPACE"))
file.copy(tocopy,pkgdir)
## Strip the test harness's .parsers assignment lines before packaging.
f.lines.in <- readLines(f)
f.lines.out <- grep("^[.]parsers", f.lines.in, invert=TRUE, value=TRUE)
writeLines(f.lines.out, file.path(rdir, "code.R"))
package.skeleton.dx(pkgdir,parsers)
cmd <- sprintf("%s CMD check --as-cran %s",
file.path(R.home("bin"), "R"),
pkgdir)
if(verbose)cat(cmd,"\n")
checkLines <- system(cmd, intern=TRUE, ignore.stderr=!verbose)
## Keep only the diagnostic lines, then drop known false positives below.
all.warnLines <- grep("WARNING|ERROR|NOTE",checkLines,value=TRUE)
ignore.lines <- c( # false positives.
##Status: 1 WARNING, 2 NOTEs
"Status",
##* checking R code for possible problems ... NOTE
"possible problems",
##* checking for code which exercises the package ... WARNING
"exercises",
##* checking DESCRIPTION meta-information ... NOTE
"meta-information",
##* checking CRAN incoming feasibility ... NOTE
"incoming feasibility")
ignore.regex <- paste(ignore.lines, collapse="|")
badLines <- grep(ignore.regex, all.warnLines, value=TRUE, invert=TRUE)
if(length(badLines)>0){
cat(paste(checkLines, collapse="\n"), "\n")
print(badLines)
stop("ERROR/WARNING/NOTE encountered in package check!")
}
}
save.test.result <- function
### For unit tests, this is an easy way of getting a text
### representation of the list result of extract.docs.file.
(f
### R code file with inlinedocs to process with extract.docs.file.
 ){
  .result <- extract.docs.file(f)
  ## dump() serializes .result to a temp file so it handles all the quoting.
  dump.file <- tempfile()
  dump(".result", dump.file, control=NULL)
  dumped <- readLines(dump.file)
  ## Double the backslash of \dots so the dumped text survives re-parsing.
  escaped <- gsub("\\dots", "\\\\dots", dumped, fixed=TRUE)
  cat(paste(escaped, "\n"))
}
|
d3afac544eb38a1b90431264df83b2bc54ea3c16
|
d4bf2f6857dc7b227ad321658e5d3a3dc12371f3
|
/Recommenders/Data_Exploration.R
|
96c5ac9187c9c5a94e8031347c1a5481dac4fd5e
|
[] |
no_license
|
DInoAtGit/ALS
|
942595ea7d4429cabc1cca251600a02bc20a91c5
|
980a14584fbd47bb8cb057297b3d9c17f1d7034c
|
refs/heads/master
| 2023-01-02T23:26:25.244365
| 2020-10-28T15:14:42
| 2020-10-28T15:14:42
| 282,937,029
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,026
|
r
|
Data_Exploration.R
|
# Exploratory analysis of deck-view activity and construction of a
# user x deck view-count matrix for collaborative-filtering recommenders.
#Load packages
pacman::p_load(tm,slam,topicmodels,SnowballC,wordcloud,RColorBrewer,tidyverse, caret, corrplot, broom, ggpubr, MASS,relaimpo, car, e1071,interplot,caTools,lubridate,date,stringi,ROCR,IRdisplay,knitr,data.table,dplyr,RColorBrewer)
pacman::p_load(recosystem,softImpute,reshape2)
pacman::p_load(BiocManager,MCRestimate)
pacman::p_load(recommenderlab,stringr)
#Update R
# pacman::p_load(installr)
# updateR()
#Set the directory
# NOTE(review): absolute Windows path — this script only runs as-is on the
# author's machine.
setwd("C:\\Dino\\Git\\ILS\\ILS\\Data")
#Load data
#pacman::p_load(R.utils)
#gunzip("views_model.gz", remove=FALSE)
activity_data = read.csv("views_model")
#Explore
head(activity_data, 4)
dim(activity_data)
str(activity_data)
#Factorize
cols = c('action', 'country','lang_code','role_id','client_type')
activity_data[,cols] = lapply(activity_data[,cols], factor)
#Convert to Date
# First 10 characters of the timestamp are the YYYY-MM-DD date part.
activity_data$user_since_d = substr(activity_data$user_action_timestamp, start = 1, stop = 10)
activity_data$user_since_d = as.Date(activity_data$user_since_d, "%Y-%m-%d")
#Unique Roles
unique(activity_data$role_id)
#Roles and rows
barplot(table(activity_data$role_id, useNA = "ifany"))
#Lang and rows
barplot(table(activity_data$lang_code, useNA = "ifany"))
#Countr and rows
barplot(table(activity_data$country, useNA = "ifany"))
#Extract IR data & refactor
activity_data_t = activity_data[activity_data$country == 'IR',]
str(activity_data_t);dim(activity_data_t)
# Re-factor to drop levels that do not occur in the IR subset.
activity_data_t[,cols] = lapply(activity_data_t[,cols], factor)
barplot(table(activity_data_t$lang_code, useNA = "ifany"))
barplot(table(activity_data_t$role_id, useNA = "ifany"))
dim(activity_data_t); length(unique(activity_data_t$user_id)); length(unique(activity_data_t$deck_id))
head(activity_data_t,4)
#Stream View Count per user per deck
deck_view = activity_data_t %>%
group_by(user_id, deck_id) %>%
summarise(vc=n())
deck_view = as.data.frame(deck_view)
head(deck_view,4)
table(deck_view$vc);nrow(deck_view[deck_view$vc == 1,])
barplot(table(deck_view$vc, useNA = "ifany"))
deck_view[deck_view$user_id == "0008a603",]; activity_data_t[activity_data_t$user_id == "0008a603",]
deck_view[deck_view$deck_id == "08b4f6d3",]
length(unique(deck_view$user_id)); length(unique(deck_view$deck_id))
deck_view[deck_view$vc == 8,]
deck_view$user_id = as.factor(deck_view$user_id)
deck_view$deck_id = as.factor(deck_view$deck_id)
str(deck_view);dim(deck_view)
#Avg views per user
deck_view %>% group_by(user_id) %>% summarise(avg_vc = mean(vc)) %>%
ggplot(aes(avg_vc)) + geom_histogram()
#Avg views per deck
deck_view %>% group_by(deck_id) %>% summarise(avg_vc = mean(vc)) %>%
ggplot(aes(avg_vc)) + geom_histogram()
#CF Matrix - Using Barry's lib
# NOTE(review): another machine-specific absolute path; the sourced file
# provides getitemsimsmatrix, getrecommendations_*, predictCF, avgMAE, etc.
source("C:\\Dino\\NUS\\Sem2\\RS\\Workshop Files\\day1\\CF-demolib-v3.R")
#Check matrix size
as.numeric(length(unique(deck_view$user_id)))*as.numeric(length(unique(deck_view$deck_id)))/1000000 # to show the size of the ratings matrix if explicitly created (in millions)
memory.limit() # to see the current memory limit you have in MBytes
# Pivot long (user, deck, vc) triples into a wide user x deck matrix.
users = acast(deck_view, user_id ~ deck_id, value.var = "vc")
dim(users)
users[1:10,1:15]
#users[is.na(users)] = 0 #Replace NA with 0
users[users[,"08b4f6d3"] == 2]
#build similarity matrix on users - euclidean similarity for item-item
itemsimsE = getitemsimsmatrix(users, simfun=euclidsim);
itemsimsE[1:10,1:10]
# get recommendations
LoginUser = "00f76716"
# Cold Start: decide how to seed recommendations for a user who may not
# appear in the IR activity subset.
# NOTE(review): the original had a stray extra closing brace and `else`
# clauses starting on their own top-level line, neither of which parses in an
# R script; the blocks are rebraced below with the same (mostly placeholder)
# logic left intact.
if (length(activity_data_t[activity_data_t$user_id == LoginUser,]$user_id) > 0) {
  targetuser = LoginUser # Regular
} else {
  if (length(activity_data[activity_data$user_id == LoginUser,]$user_id) > 0) {
    # Get the role_id
  } else {
  }
}
if (length(activity_data[activity_data$user_id == LoginUser,]$user_id) > 0) {
  # Get best user based on role_id from user master (user master is not available yet)
} else {
}
# Recommendation experiments for the logged-in user ####
target = users[LoginUser,]
#You may be interested in (Similar Items) - Covers longtail as it doesn't scope to role.
getrecommendations_II(target, itemsimsE, topN=10)
#People also viewed (Similar Users)
# Most recent activity date for the target user, then the role they held on
# that date; used to restrict the candidate neighbour set to the same role.
target_latest_active_d = activity_data_t %>% filter(user_id == LoginUser) %>% summarise(latest=max(user_since_d))
target_latest_role = max(as.integer(activity_data_t[activity_data_t$user_id == LoginUser & activity_data_t$user_since_d == as.Date(target_latest_active_d$latest), ]$role_id))
r_users = users[rownames(users) %in% unique(activity_data_t[activity_data_t$role_id == target_latest_role, "user_id"]),]
getrecommendations_UU(target, r_users, simfun=euclidsim, topN =10) ## - - Limited to role..
getrecommendations_UU(target, users, simfun=euclidsim, topN =10) ## - - Coveres whole activity
#Testing the approach - Euclidean is Best for both UU and II
fillrate(users)
unlist_users = unlist(users)
hist(unlist_users) #Histo of Views. Most viewed 1.
likethreshold_m =1 #Viewed atleast once
likerate_m = length(which(unlist_users>=likethreshold_m))/length(unlist_users) ; cat("% of decks that are viewed=",likerate_m*100,"%")
#Get correlation matrix between users
cor(t(users))
#Correlation without NAs
cor(t(users), use = "pairwise.complete.obs")
# get recommendations for U2 (results with pearson shd be: 3.35 (night), 2.83 (lady), 2.53 (luck))
users[1:2,1:10]
target_u = users["00f76716",]
getrecommendations_UU(target_u, users, simfun=pearsonsim)
#Try various similarity functions
itemsims_u = cor(users, use = "pairwise.complete.obs"); itemsims_u[1:10, 1:10] #Similarity Correlation Matrix (Pairwaise is actually calculate mean only for the matching rows)
itemsimsP_u = getitemsimsmatrix(users, simfun = pearsonsim); itemsimsP_u[1:10, 1:10] #Pearson similarity matrix
itemsimsC_u = getitemsimsmatrix(users, simfun = cosinesim); itemsimsC_u[1:10, 1:10] #Cosin similarity matrix
itemsimsE_u = getitemsimsmatrix(users, simfun = euclidsim); itemsimsE_u[1:10, 1:10] #Euclid similarity matrix
normalizedusers = sweep(users, 1, rowMeans(users, na.rm = TRUE)) #1 means row and 2 means column
itemsimsC_uN = getitemsimsmatrix(normalizedusers, simfun = cosinesim); itemsimsC_uN[1:10, 1:10] #Normalized Cosin
itemsimsE_uN = getitemsimsmatrix(users, simfun = euclidsimF); itemsimsE_uN[1:10, 1:10] #Euclid without square-root distance
#Getrecommendations Item-Item using various similarity functions
getrecommendations_II(target_u, itemsims_u) # using vanilla similarity, based on correlation
getrecommendations_II(target_u, itemsimsP_u) # using Pearson cofficient similarity, based on correlation
getrecommendations_II(target_u, itemsimsC_u) # using Cosine similarity, based on correlation
getrecommendations_II(target_u, itemsimsC_uN) # using Cosine Normalized similarity, based on correlation
getrecommendations_II(target_u, itemsimsE_u) # using Euclid similarity, based on correlation
getrecommendations_II(target_u, itemsimsE_uN) # using Euclid Normalized similarity, based on correlation
#System evalution
# Hold out a random sample of users as the test set; the rest train.
numtestusers = 10
test_users_names = sample(rownames(users), min(numtestusers, nrow(users))); test_users_names
train_users_names = setdiff(rownames(users), test_users_names); head(train_users_names,10)
train_users = users[train_users_names,]
test_users = users[test_users_names,]
nrow(users);nrow(train_users);nrow(test_users)
#Prediction using UU
preddeck = predictCF(test_users, train_users, numtestitems = 10, random = FALSE, simfun = pearsonsim);preddeck #Pearson
preddeck_c = predictCF(test_users, train_users, numtestitems = 10, random = FALSE, simfun = cosinesim);preddeck_c #Cosine
preddeck_e = predictCF(test_users, train_users, numtestitems = 10, random = FALSE, simfun = euclidsim);preddeck_e #Eucldin
#Evaluation / Confusion Matrix
cat("Avg UU-based MAEs { For Pearson: ", avgMAE(preddeck), " } , {For Cosine : ", avgMAE(preddeck_c), " } , {For Eucldin : ", avgMAE(preddeck_e), " }")
showCM(preddeck, like = 2)
#Prediction using II
itemsimsE_m_p = getitemsimsmatrix(train_users, simfun = euclidsim)
itemsimsC_m_p = getitemsimsmatrix(train_users, simfun = cosinesim)
itemsimsP_m_p = getitemsimsmatrix(train_users, simfun = pearsonsim)
preddeck_II_E = predictCF(test_users, itemsims = itemsimsE_m_p, numtestitems = 10, random = FALSE); preddeck_II_E
preddeck_II_C = predictCF(test_users, itemsims = itemsimsC_m_p, numtestitems = 10, random = FALSE); preddeck_II_C
preddeck_II_P = predictCF(test_users, itemsims = itemsimsP_m_p, numtestitems = 10, random = FALSE); preddeck_II_P
#Evaluation / Confusion Matrix
cat("Avg II-based MAEs { For Pearson: ", avgMAE(preddeck_II_P), " } , {For Cosine : ", avgMAE(preddeck_II_C), " } , {For Eucldin : ", avgMAE(preddeck_II_E), " }")
# RMSE of the item-item and user-user predictions.
# NOTE(review): the original computed sqrt(mean(d)^2), i.e. |mean error|,
# instead of sqrt(mean(d^2)) (true RMSE), and RMSE.UU reused the II
# predictions. Both fixed: UU now uses the Pearson UU predictions (preddeck).
RMSE.II <- sqrt(mean((preddeck_II_E$predictedrating - preddeck_II_E$truerating)^2))
RMSE.UU <- sqrt(mean((preddeck$predictedrating - preddeck$truerating)^2))
str(test_users)
|
90aee80f1040708a0c3a04e5f57ec7f46e8c142c
|
23b032127f268ff548a409598f34cd325698d77a
|
/code/Pcount_simulation.R
|
6d913b0a26d18efdda93f37a4ab43b66694446be
|
[] |
no_license
|
dlizcano/SeaUrchin
|
7cf54df1af6e51044b32fe122780d2747e188baf
|
ae68a4ea71b94be225eab06905e636169f929d4b
|
refs/heads/master
| 2016-09-06T10:14:52.557997
| 2015-09-16T19:34:25
| 2015-09-16T19:34:25
| 42,591,003
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 960
|
r
|
Pcount_simulation.R
|
# Simulate repeated-count survey data for an N-mixture (pcount) model.
set.seed(35) # reproducible draws
nSites <- 16
nVisits <- 4
x <- rnorm(nSites) # a site-level covariate
beta0 <- 0 # log-scale abundance intercept
beta1 <- 1 # covariate effect on log abundance
lambda <- exp(beta0 + beta1*x) # expected counts at each site
N <- rpois(nSites, lambda) # latent true abundance per site
p <- c(0.3, 0.6, 0.8, 0.5) # detection prob for each visit
# Observed counts: one binomial thinning of N per visit. Draws happen
# visit-by-visit in the same order as a loop, so the RNG stream is unchanged.
y <- vapply(p, function(p.visit) rbinom(nSites, N, p.visit), integer(nSites))
# Organize data into an unmarked frame: visit number is an observation-level
# covariate so detection can vary by visit.
visitMat <- matrix(as.character(1:nVisits), nSites, nVisits, byrow=TRUE)
umf <- unmarkedFramePCount(y=y, siteCovs=data.frame(x=x),
    obsCovs=list(visit=visitMat))
summary(umf)
# Fit a model: detection ~ visit (no intercept), abundance ~ x
fm1 <- pcount(~visit-1 ~ x, umf, K=50)
fm1
plogis(coef(fm1, type="det")) # Should be close to p
# Empirical Bayes estimation of random effects
(fm1re <- ranef(fm1))
# NOTE(review): the original read `site \%in\% 1:25` — an Rd-escape left in
# R source, which does not parse; fixed to the plain %in% operator.
plot(fm1re, subset=site %in% 1:25, xlim=c(-1,40))
sum(bup(fm1re)) # Estimated population size
sum(N) # Actual population size
|
c99982259837cedf558cbd614758d808273e1b95
|
49e905566ba104f056f36aca58bc18c428d1bacd
|
/R/document.R
|
3153580e08d9208fc67442d3ed5ad06a85d0fb00
|
[] |
no_license
|
jamiepg1/RGCCTUFFI
|
28391c502ef9e35217e7d6e1f87854101fed52a4
|
17d7d81e7738bf4a5feedd12445f1dbe6ef8df3a
|
refs/heads/master
| 2018-01-15T09:16:53.818651
| 2014-09-03T15:04:54
| 2014-09-03T15:04:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,519
|
r
|
document.R
|
# Enumerations - Values and individual variables and coercion methods. Constructors ?
# Use an abstract base class "RAutoDocumentation" which does not have any representation
# then introduce the now RAutoDocumentation. The intent is to allow RTUDocumentation
# have a common base class with the RAutoDocumentation.
# Class hierarchy: the abstract base carries no data; RAutoDocumentation is a
# list of Rd section texts; the three leaf classes tag what is documented.
setClass("RAbstractAutoDocumentation")
setClass("RAutoDocumentation", contains = c("list", "RAbstractAutoDocumentation"))
setClass("RClassDocumentation", contains = "RAutoDocumentation")
setClass("RFunctionDocumentation", contains = "RAutoDocumentation")
setClass("REnumDocumentation", contains = "RAutoDocumentation")
# RTUDocumentation: documentation for a whole translation unit — separate
# lists of class, function, and enum documentation objects.
setClass("RTUDocumentation",
representation(classes = "list", funcs = "list", enums = "list"),
contains = "RAbstractAutoDocumentation")
# Generic: write a documentation object to a file or connection.
setGeneric("toFile",
function(obj, file, ...)
standardGeneric("toFile"))
# Character method: open the named file, delegate to the connection method,
# and close the connection on exit.
setMethod("toFile", c(file = "character"),
function(obj, file, ...) {
con = file(file, "w")
ans = toFile(obj, con, ...)
on.exit(close(con))
ans
})
# Strip leading and trailing whitespace from each element of x.
# NOTE(review): the original pattern had no "+" quantifiers, so only ONE
# whitespace character was removed from each end — "  a " came back still
# padded, and all-blank strings never trimmed to "". That broke the
# nchar(trim(x)) > 0 emptiness filter in toFile,RAutoDocumentation.
trim =
function(x)
gsub("^[[:space:]]+|[[:space:]]+$", "", x)
setMethod("toFile", "RAutoDocumentation",
function(obj, file, ...) {
obj = obj[ sapply(obj, function(x) length(x) > 0 && nchar(trim(x)) > 0) ]
tmp = mapply(makeSection, names(obj), obj)
cat(unlist(tmp), sep = "\n\n", file = file)
})
setMethod("toFile", c("RTUDocumentation", "missing"),
function(obj, file, dir = "man", ...) {
funFiles = sprintf("%s%s%s.Rd", dir, .Platform$file.sep, names(obj@funcs))
classFiles = sprintf("%s%s%s_class.Rd", dir, .Platform$file.sep, names(obj@classes))
enumFiles = sprintf("%s%s%s_enum.Rd", dir, .Platform$file.sep, names(obj@enums))
mapply(toFile, obj@classes, classFiles)
mapply(toFile, obj@enums, enumFiles)
mapply(toFile, obj@funcs, funFiles)
c(funFiles, classFiles, enumFiles)
})
# Wrap section content in its Rd macro, e.g. makeSection("alias", "x")
# yields "\alias{x}". Vectorized over both arguments.
makeSection =
function(name, content)
{
  paste0("\\", name, "{", content, "}")
}
# Build the RClassDocumentation for a native data type `name` and its
# reference (pointer) class `refName`: assembles the Rd sections (name,
# aliases including all generated methods, title, description, usage,
# value, examples, keywords) as a named list.
documentClass =
function(def, name = def@name, refName = sprintf("%sPtr", name))
{
methodAliases = makeMethodAliases(name, refName)
aliases = c(sprintf("%s-class", c(name, refName)),
methodAliases)
txt = list()
txt$name = name
txt$'alias' = aliases
txt$'title' = sprintf("R classes to represent native data type %s as R object or reference to native type", name)
txt$'description' = sprintf("These classes and methods provide an R user with access to the native data type %s. We define an R version of this data structure with R-level fields, and also an R class that can reference an instance of the native data type. For this reference, we define methods that make the object act as if it were a list in R so that one can access fields via the usual subsetting operators. One can also coerce from one representation to an nother and create new objects via the %s constructor function.", name, name)
txt$'usage' = "\n"
txt$'value' = "The names methods returns a character vector. The constructor functions create objects of the class with the same name as the constructor. The $ operator returns an object as the same class as the field, of course."
txt$'examples' = "\n"
txt$'keyword' = c("programming", "interface")
# Wrap the section list in the S4 documentation class, keeping the names.
ans = new("RClassDocumentation", txt)
names(ans) = names(txt)
ans
}
# Build the Rd alias entries for class `name` and its pointer class
# `refName`: the two class names themselves, plus "-method" aliases for the
# $ accessor, the coercions between the value/pointer/externalptr forms,
# the constructor signatures, and names().
makeMethodAliases =
function(name, refName = sprintf("%sPtr", name))
{
  # $, coerce, constructor generic, constructor methods (ANY, Ref, externalptr)
  # generic constructor for refname, methods external ptr, missing
  sigs = c(paste0("$,", refName),
           paste0("coerce,", refName, ",", c(name, refName, "externalptr")),
           paste0("coerce,externalptr,", refName),
           paste0(name, ",", c("ANY", "externalptr", refName)),
           paste0(refName, ",", c("missing", "externalptr")),
           paste0("names,", refName))
  c(name, refName, paste0(sigs, "-method"))
}
# Build the RFunctionDocumentation for a native routine wrapper: assembles
# name/alias/title/description, generated argument docs (merged with any
# caller-supplied text in `.paramText`), the value section, and keywords.
# Extra named entries in `.paramText` become additional Rd sections.
documentFunction =
function(obj, name = obj$name, dll = NA, ..., .paramText = list(...))
{
# Placeholder: grouped documentation for several routines is not implemented.
if(class(obj) == "list" && is(obj[[1]], "ResolvedNativeRoutine")) {
# handle multiple functions, creating a single document for all of them.
}
# TRUE if any parameter can be modified in place by the native routine;
# this changes both the arguments and value sections below.
mutable = any(RGCCTUFFI:::mutableParams(obj))
txt = list()
txt$name = name
txt$alias = name
txt$title = sprintf("An interface to the native routine %s", name)
txt$description = sprintf("This function allows one to invoke the native routine %s from R, passing R values as arguments to the routine.", name)
txt$usage = ""
# Caller-supplied per-argument text, if any, is merged with the defaults.
argDocs = if("arguments" %in% names(.paramText)) .paramText[["arguments"]] else list()
txt$arguments = makeArgsDoc(obj$parameters, argDocs, mutable)
txt$value = makeValueDoc(obj, mutable)
txt$examples = ""
txt$keyword = c("programming", "interface")
# Take any supplied sections
args = .paramText
if(length(args)) {
# "arguments" was already consumed above; drop it before splicing the rest.
i = match("arguments", names(args))
if(!is.na(i))
args = args[ - i]
txt[names(args)] = args
}
ans = new("RFunctionDocumentation", txt)
names(ans) = names(txt)
ans
}
# Build the Rd \value text for a native routine: states the routine's R
# return class and, when any parameter is mutable, explains the optional
# list(value, inputs) return controlled by returnInputs.
makeValueDoc =
function(def, mutable = RGCCTUFFI:::mutableParams(def))
{
# value, mutable parameters and the callCIF parameters to avoid returning them.
sprintf("the native routine returns an object of class %s. %s",
getRTypeName(def$returnType),
if(any(mutable))
"if returnInputs is \\code{FALSE}, then this value is returned. Otherwise, this function returns a named list with 2 elements: 'value' as returned by the native routine, and 'inputs' which is a list containing all of the mutable parameters, i.e. pointers"
else "")
}
# Build the Rd \arguments section for a native routine's parameters.
# Default per-parameter text links to the parameter's class documentation;
# caller-supplied `docs` overrides defaults by name, by position, or by
# filling the gaps (NA/empty entries) when fully positional.
makeArgsDoc =
function(parms, docs = list(), hasMutables = FALSE)
{
#XXX Change names if necessary.
names(parms) = fixParamNames(parms)
defaults = lapply(parms, function(x) sprintf("an object of class \\code{\\link{%s-class}}", getRTypeName(x$type)))
#if(any(grepl("charPtr-class", unlist(defaults)))) recover()
if(length(docs) == 0) {
docs = defaults
} else {
if(length(names(docs))) {
# Named docs: overlay onto the defaults by parameter name.
i = match(names(docs), names(defaults))
defaults[i] = docs
docs = defaults
} else {
# correct
if(length(docs) < length(parms)) {
# Positional prefix: remaining parameters keep their default text.
defaults[seq(along = docs)] = docs
docs = defaults
} else {
# Full positional vector: backfill empty/NA entries with defaults.
# (which() returns positions, so any(i) is TRUE iff some entry matched.)
i = which(sapply(docs, function(x) length(x) == 0 || is.na(x)))
if(any(i))
docs[i] = defaults[i]
}
}
}
# Standard trailing arguments added to every generated wrapper.
docs[[".cif"]] = "the call interface object describing the signature of the native routine"
if(hasMutables)
docs[["returnInputs"]] = "a logical value or vector that is passed to \\code{\\link[Rffi]{callCIF}} in the Rffi package and controls whether arguments that might be modified by the routine are returned by the R function."
docs[["\\dots"]] = "additional parameters that are passed to \\code{\\link[Rffi]{callCIF}}"
paste(c("", sprintf("\\item{%s}{%s}", names(docs), docs)), collapse = "\n")
}
fixParamNames =
    # Guarantee syntactically usable parameter names: a completely unnamed
    # parameter list receives positional names (x1, x2, ...); any individual
    # name that is purely numeric is replaced by "x" + its position.
function(parms, parmNames = names(parms))
{
    if(length(parmNames) == 0)
        return(sprintf("x%d", seq_along(parms)))

    numericNames = grep("^[0-9]+$", parmNames)
    if(length(numericNames))
        parmNames[numericNames] = sprintf("x%d", numericNames)
    parmNames
}
documentEnum =
    #
    # Build an REnumDocumentation object for the C enumeration 'def'.
    # By default the documented name is the last element of def@name.
    # Note: duplicate enum value names across different enumerations can
    # still collide in the generated aliases.
    #
function(def, name = def@name[length(def@name)])
{
    doc = new("REnumDocumentation")
    values.name = sprintf("%sValues", name)
    # Aliases for the coerce methods from numbers/strings to the enum class.
    coercions = sprintf("coerce,%s,%s-method",
                        c("numeric", "integer", "character"), rep(name, 3))
    doc$name = values.name
    doc$alias = c(values.name, names(def@values), values.name, coercions)
    doc$title = sprintf("Enumeration values and class for %s", name)
    doc$description = sprintf("We represent the C-level enumerations for the type %s with a collection of R variables and an R class that allows us to coerce from numbers and strings to the enumeration type.", name)
    doc
}
genDocumentation =
    # Assemble documentation for an entire translation unit: one document per
    # wrapped routine (with optional per-routine parameter text supplied via
    # paramDocs, matched by routine name), per exposed type, and per
    # enumeration. Returns an RTUDocumentation object.
function(funcs, types, enums, paramDocs = list())
{
    ids = names(funcs)
    funcDocs = lapply(ids, function(id)
                          documentFunction(funcs[[id]], .paramText = paramDocs[[id]]))
    names(funcDocs) = ids
    new("RTUDocumentation",
        classes = lapply(types, documentClass),
        funcs = funcDocs,
        enums = lapply(enums, documentEnum))
}
|
032881e6cc05e79a1dffcee473c17718028cfab1
|
089612a894bea798afe72245c718f56eb3e0bae5
|
/plot1.R
|
7ae3ac030f0f51d902341e1363cef7878446bd8d
|
[] |
no_license
|
amb54/ExData_Plotting1
|
e89af308e8d580694f7b495568926973c466b728
|
8df8b6833acb1f0ff1834cdd1f399a32f1086e0d
|
refs/heads/master
| 2020-12-25T07:14:20.889931
| 2014-08-10T18:47:01
| 2014-08-10T18:47:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 800
|
r
|
plot1.R
|
##Read the data into R, and give the data frame column names.
## NOTE(review): skip=66637/nrow=2880 hard-code the row range for the two
## target days in this specific input file -- verify if the file changes.
data<-read.table("household_power_consumption.txt", sep=";",skip= 66637, nrow=2880)
## The header row is skipped above, so re-read just the first data row with
## header=TRUE to recover the column names and copy them onto 'data'.
cN<-read.table("household_power_consumption.txt", header=TRUE,sep=";", nrow=1)
colnames(data)<-colnames(cN)
##Add a new column to data with Date and Time combined as.POSIXlt
D<-data$Date
D_asDate<-as.Date(D,"%d/%m/%Y")
combDateTime<-paste(D_asDate,data$Time)
p<-as.POSIXlt(combDateTime)
data$DateTime<-p
##Create a histogram on the screen
library(datasets)  # NOTE(review): not used below; kept as-is
par(mfrow=c(1,1),mar=c(5,4,4,2))
hist(data$Global_active_power, main="Global Active Power", xlab="Global Active Power (kilowatts)",cex.lab=0.75, cex.axis=0.75,col="red")
##Copy to a PNG file with width 480 pixels and height 480 pixels
dev.copy(png,"plot1.png", width=480,height=480)
dev.off()
|
6ad031ac9db3eff88af2285e72907f14f2f431c4
|
d394b264f5e4d20df3bf03fe39d55281a1faaf71
|
/src/Initial_DESeq2_Analysis_v2.R
|
fe97bbd4f423a8b02bebab98c07150d858ee7525
|
[] |
no_license
|
ercanlab/RNAseq
|
86017f5a33b3049dea7176cff398eb1ea8aaa809
|
8c662697bba6a242f66b646701e6829a8801475c
|
refs/heads/master
| 2021-04-29T18:27:20.452257
| 2019-06-05T14:10:32
| 2019-06-05T14:10:32
| 121,694,005
| 1
| 0
| null | 2019-05-30T21:43:53
| 2018-02-15T23:07:16
|
R
|
UTF-8
|
R
| false
| false
| 17,291
|
r
|
Initial_DESeq2_Analysis_v2.R
|
################################################################################
## Performs standard DEseq analysis. Also saves DEseq results ##
## ##
## usage: Rscript Initial_DESeq2_Analysis.R YAML_CONFIG ##
## ##
## positional arguments: ##
## YAML_CONFIG ##
## An example of the configuration file is on Prince at: ##
## /scratch/cgsb/ercan/scripts/rna/slurm/config_deseq.yaml ##
## or on gdrive: ##
## https://drive.google.com/open?id=1HCEOuFQQsObFf5QVLvF3n0a-894Ts9Ze ##
## ##
################################################################################
################################################################################
## The package DESeq2 provides methods to test for differential expression by ##
## use of negative binomial generalized linear models ##
## Documentation for DESSeq2 is found at: ##
## https://bioconductor.org/packages/3.7/bioc/vignettes/DESeq2/inst/doc/DESeq2.html ##
################################################################################
## Load up required packages
library('stringr')
suppressMessages(library('DESeq2'))
library('yaml')
suppressMessages(library('gplots'))
library('RColorBrewer')
library("openxlsx")
suppressMessages(library("dplyr"))
####################################
##### Define utility functions #####
####################################
## Turn a vector of per-sample condition labels into unique column names of
## the form "<condition>_r<replicate>". The replicate counter restarts each
## time the condition label changes, so replicates of a condition are assumed
## to be contiguous in the input.
getColNames <- function(sampleCondition){
  out <- character(length(sampleCondition))
  prev <- ""
  rep_n <- 0
  for (i in seq_along(sampleCondition)){
    if (sampleCondition[i] != prev){
      # New condition encountered: restart the replicate counter.
      prev <- sampleCondition[i]
      rep_n <- 0
    }
    rep_n <- rep_n + 1
    out[i] <- paste0(prev, "_r", rep_n)
  }
  out
}
## Make a dataframe of rpkms
## Read one cufflinks FPKM tracking file per sample from 'dir', drop two
## specific genes, optionally keep only genes with FPKM > 1, and merge all
## samples into a single data frame (one column per sample; gene ids as
## rownames and also kept in a 'gene_id' column). Column names come from
## getColNames(sampleCondition).
make_fpkm_df <- function(dir, files, sampleCondition, to_threshold=FALSE){
  # todo
  # NOTE(review): g1/g2 are hard-coded gene ids excluded from every sample --
  # presumably known problem genes or spike-ins; confirm why these two.
  g1 <- 'T21D12.9'
  g2 <- 'F07C6.4'
  keep <- c('gene_id', 'FPKM')
  df = NULL
  for (i in seq_along(files)){
    path <- file.path(dir, files[i])
    fpkm <- read.csv(path, sep='\t')[keep]
    fpkm <- filter(fpkm,gene_id!=g1)
    fpkm <- filter(fpkm,gene_id!=g2)
    if (to_threshold){
      # Keep only expressed genes (FPKM > 1); the inner merge below then
      # retains only genes passing the threshold in every sample.
      fpkm <- fpkm[fpkm['FPKM'] > 1,]
    }
    if (is.null(df)){
      df <- fpkm
    } else {
      # suffixes disambiguate the otherwise-identical 'FPKM' column names.
      df <- merge(df, fpkm, by='gene_id', suffixes=c(files[i-1], files[i]))
    }
  }
  rownames(df) <- df$gene_id
  df <- df[, !(names(df) %in% 'gene_id')]
  names(df) <- getColNames(sampleCondition)
  df$gene_id<-rownames(df)
  return(df)
}
#This function does comparisons of gene expression between conditions using DEseq
# Run one pairwise DESeq2 comparison.
#   tableA/tableB   : sampleTable rows (sampleName/fileName/condition) for the
#                     two conditions; counts are re-read from the global
#                     'counts_dir' -- NOTE(review): global dependency.
#   conditionA/B    : the two condition labels; whichever is flagged as
#                     'control' in condition_type becomes the reference level,
#                     so fold changes read as treatment vs control.
#   condA.avg/condB.avg : currently unused; kept for interface compatibility.
# Returns the DESeq2 results object (alpha = 0.05).
do.DEseq<-function(tableA,tableB,conditionA,conditionB,condA.avg, condB.avg, condition_type){
  TABLE<-rbind(tableA, tableB)
  DDS<-DESeqDataSetFromHTSeqCount(sampleTable = TABLE, directory = counts_dir, design= ~ condition)
  # BUG FIX: filter on this dataset's own counts. The previous code used
  # counts(dds) -- the *global* all-conditions dataset -- whose row order
  # matches, but whose row sums include samples outside this comparison.
  filt.DDS <- DDS[ rowSums(counts(DDS)) > 1, ]
  if (condition_type[conditionA] == 'control'){
    filt.DDS$condition <- relevel(filt.DDS$condition, ref = conditionA)
  } else if (condition_type[conditionB] == 'control'){
    filt.DDS$condition <- relevel(filt.DDS$condition, ref = conditionB)
  }
  DDS<-DESeq(filt.DDS)
  DDS.res<-results(DDS, alpha=0.05)
  return(DDS.res)
}
#Modify data frame to give appropriate headers to filenames
# Append the three summary columns of one pairwise comparison (shrunken log2
# fold change, unadjusted log2 fold change, adjusted p-value) to the
# accumulating results data frame, prefixing each with the comparison name.
updatePairwiseDataFrame <- function(df, res, col.basename){
  new.cols <- list(
    ".log2.deseq" = res$log2FoldChange,
    ".log2"       = res$unadj.log2,
    ".log2.pval"  = res$padj
  )
  for (suffix in names(new.cols)) {
    df[paste0(col.basename, suffix)] <- new.cols[[suffix]]
  }
  return(df)
}
# Plot pairwise counts comparisons along with Rsquared value for every pairwise comparison
# For every pair of replicate columns in 'df' (normalized counts), draw a
# scatter plot annotated with the R-squared of a simple linear fit; diagonal
# panels just display the replicate name. All panels go to the PDF 'file'.
plotPairwiseCountData <- function(df, file){
  df<-as.data.frame(df)
  reps.list<-colnames(df)
  n_reps = length(reps.list)
  # BUG FIX: previously opened pdf(file=filepath), silently depending on a
  # global variable instead of the 'file' argument.
  pdf(file=file)
  # Close the device even if plotting fails part-way through.
  on.exit(dev.off(), add = TRUE)
  par(mfrow=(c(3,3)))
  par(mar=c(4,4,1,1))
  for(i in seq_len(n_reps)){
    x.data<-df[,reps.list[i]]
    for(j in seq_len(n_reps)){
      if(j!=i){
        y.data<-df[,reps.list[j]]
        # Shared, slightly padded axis limits so panels are comparable.
        plot_limits<-c(0,signif(range(df)[2]+(range(df)[2]/10), digits = 2))
        plot((x.data),(y.data),xlab=reps.list[i],ylab=reps.list[j],xlim=plot_limits,ylim=plot_limits) #you may want to change the x and y limits
        r2<-round(summary(lm(y.data ~ x.data))$r.squared,4)
        text(plot_limits[2]*(1.5/5),plot_limits[2]*(4/5),paste('r2=',r2))
      }else{
        # Diagonal panel: empty axes carrying just the replicate label.
        plot_limits<-c(0,signif(range(df)[2]+(range(df)[2]/10), digits = 2))
        plot(0,0,type='l',xlab='',ylab='',axes=F,xlim=plot_limits,ylim=plot_limits)
        text(x=plot_limits[2]/(1.9), y=plot_limits[2]/2,reps.list[i], cex=2)
      }
    }
  }
}
#y.data<-df[,reps.list[1]]
#round(summary(lm(y.data ~ x.data))$r.squared,4)
#Helps prepare dataframe for plotting and removes mitochondrial dna
# Convert results (rownames = gene sequence names) to a data frame joined to
# the chromosome annotation table, then drop MtDNA genes. Genes absent from
# the annotation table are silently dropped by the inner merge.
joinChromosome <- function(df, c_elegans_annots){
  df <- as.data.frame(df)
  df$gene.name <- rownames(df)
  merged <- merge(df, c_elegans_annots, by.x="gene.name", by.y="Sequence.Name.(Gene)")
  merged <- filter(merged, Chr.Name != "MtDNA")
  return(merged)
}
#Make a boxplot of log2FC for values of each chromosome
# One box per chromosome (MtDNA excluded by joinChromosome), with each
# chromosome's mean log2 fold change printed beneath its box. Written to the
# PDF named by 'file'.
boxPlotDESeqByChromosome <- function(df, file, c_elegans_annots, title){
  merged <- joinChromosome(df, c_elegans_annots)
  pdf(file=file)
  # Close the device even if plotting fails part-way through.
  on.exit(dev.off(), add = TRUE)
  ylow = min(merged$log2FoldChange, na.rm=TRUE)
  yhigh = max(merged$log2FoldChange, na.rm=TRUE)
  boxplot(log2FoldChange~Chr.Name,data=merged, main=title,
          xlab="Chromosome", ylab="Log2 Fold Change", ylim=c(ylow, yhigh))
  stats_by_chromosome <-
    merged %>%
    group_by(Chr.Name) %>%
    summarize(mean = mean(log2FoldChange, na.rm = TRUE), std=sd(log2FoldChange, na.rm = TRUE))
  # Formatted per-chromosome means; the std column is computed above but was
  # never displayed in the original, so the unused local was dropped.
  mean <- sapply(stats_by_chromosome$mean, sprintf, fmt="%.3f")
  text(c(1), y=0.9*ylow, labels=c("mean:"))
  # BUG FIX: write one label per chromosome instead of the hard-coded
  # seq(6), which assumed exactly six chromosomes.
  text(seq_along(mean), y=0.98*ylow, labels=mean)
}
#Scatterplot comparing gene expression. X and A are highlighted
# MA-style plot (base mean vs log2 fold change) of the DESeq results. The
# third column handed to plotMA normally flags significance; here it carries
# is_X with colSig='cyan', so X-chromosome genes get the highlight colour
# while autosomal genes stay black.
scatterPlotDeseq <- function(res, c_elegans_annots, file, title){
  merged <- joinChromosome(res, c_elegans_annots)
  merged$is_X <- merged$Chr.Name == "X"
  pdf(file=file)
  ylow <- min(merged$log2FoldChange, na.rm=TRUE)
  yhigh <- max(merged$log2FoldChange, na.rm=TRUE)
  plotMA(merged[c("baseMean", "log2FoldChange", "is_X")], main=title,
         xlab="Base mean", ylab="Log2 Fold Change", colSig='cyan',
         ylim=c(ylow, yhigh))
  legend('bottomright','groups',c("X genes","Autosomal genes"), pch = 16,
         col=c('cyan', 'black'),ncol=2,bty ="n")
  dev.off()
}
## Verify that the parsed YAML configuration carries every attribute this
## pipeline needs, then validate each 'infiles' entry in turn. Stops with an
## informative error when anything is absent.
validateConfig <- function(conf){
  required <- c(
    'experiment_title',
    'infiles',
    'c_elegans_wbid_to_gene',
    'c_elegans_annots',
    'nyuid',
    'mail',
    'sbatch_scripts'
  )
  absent <- setdiff(required, names(conf))
  if (length(absent) > 0){
    stop("Attributes missing from configuration: ", paste(absent, collapse="; "))
  }
  # Each infiles element is checked individually; invisible() keeps the
  # lapply result from printing at top level.
  invisible(lapply(conf$infiles, validateInfiles))
}
## Check that a single 'infiles' entry from the YAML config carries all
## required fields; stops with an informative error otherwise.
validateInfiles <- function(x){
  required <- c(
    "id",
    "fastq",
    "condition",
    "type"
  )
  absent <- setdiff(required, names(x))
  if (length(absent) > 0){
    stop("Attributes missing from element in infiles: ", paste(absent, collapse="; "))
  }
}
#Uses gene names from gtf file to extract out wormbase ID
# Map gene (sequence) names to WormBase IDs using a two-column
# gene<TAB>wbid mapping file; genes missing from the file yield NA.
# NOTE(review): despite the parameter name, 'c_elegans_annots' here is the
# path to the conf$c_elegans_wbid_to_gene mapping file (see the caller).
getGenesWbId <- function(c_elegans_annots, genes){
  gene.to.wbid<-read.table(file=c_elegans_annots,header=F,stringsAsFactors=F)
  colnames(gene.to.wbid)<-c('gene','wbid')
  relevant_genes_ix <- match(genes, gene.to.wbid$gene)
  # Implicit (invisible) return: wbid vector aligned with 'genes'.
  wbid<-gene.to.wbid$wbid[relevant_genes_ix]
}
## Read in annotation file and transform to remove duplicates subset to relevant
## information
# Load the C. elegans annotation workbook (xlsx) and reduce it to one row per
# gene sequence name with its WormBase ID and chromosome: keep only the three
# relevant columns, drop duplicate sequence names (first occurrence wins),
# then drop rows with any missing value.
getCelegansAnnotations <- function(file){
  c_elegans_annots <- read.xlsx(file)
  relevant_cols <- c("Gene.WB.ID", "Sequence.Name.(Gene)", "Chr.Name")
  c_elegans_annots <- c_elegans_annots[,relevant_cols]
  c_elegans_annots <- c_elegans_annots[!duplicated(c_elegans_annots[,"Sequence.Name.(Gene)"]),]
  c_elegans_annots <- c_elegans_annots[complete.cases(c_elegans_annots[,relevant_cols]),]
  return(c_elegans_annots)
}
# Two-sided one-sample t-test of each chromosome's mean log2 fold change
# against the grand mean of the autosomal (non-X, non-MtDNA) per-chromosome
# means.
#   df : data frame with at least 'Chr.Name' and 'log2FoldChange' columns.
# Returns a named list mapping each chromosome (MtDNA excluded) to a p-value.
pValueLogFoldChangeByChromosome <- function(df){
  chroms <- unique(df$Chr.Name)

  # Null value: average of the autosomal per-chromosome means.
  autosomes <- setdiff(chroms, c("MtDNA", "X"))
  null_mean <- mean(vapply(autosomes, function(chr) {
    mean(df[df$Chr.Name == chr, "log2FoldChange"], na.rm = TRUE)
  }, numeric(1)))

  res <- list()
  for (chr in setdiff(chroms, "MtDNA")){
    data <- df[df$Chr.Name == chr, "log2FoldChange"]
    # BUG FIX: use the number of non-missing observations so the standard
    # error and degrees of freedom are consistent with the na.rm mean/sd
    # (the previous version counted NAs in n, inflating the t statistic).
    n <- sum(!is.na(data))
    t <- (mean(data, na.rm = TRUE) - null_mean) / (sd(data, na.rm = TRUE) / sqrt(n))
    res[chr] <- 2 * pt(-abs(t), df = n - 1)
  }
  res
}
## Creates a list of input files and experimental metadata
# Collapse the YAML 'infiles' entries (one per fastq) into per-sample
# metadata: fastqs sharing an 'id' are one sample whose lane files were
# concatenated upstream, so its counts/fpkm filenames are the joined fastq
# basenames plus a fixed suffix. Returns a list with count_files, fpkm_files,
# conditions and types, ordered by first appearance of each id.
readInFiles <- function(infiles){
  idx = 1
  # Environment used as a hash map: id (as string) -> output position.
  id_to_idx = new.env()
  for (ele in infiles){
    id <- toString(ele$id)
    if (is.null(id_to_idx[[id]])){
      id_to_idx[[id]] <- idx
      idx = idx + 1
    }
  }
  num_files <- length(id_to_idx)
  bam_suffix <- "_counts.txt"
  fpkm_suffix <- "_cufflinks.genes.fpkm_tracking"
  files_by_id <- vector("list", num_files)
  conditions <- vector("list", num_files)
  types <- vector("list", num_files)
  for (ele in infiles){
    idx <- id_to_idx[[toString(ele$id)]]
    if (is.null(files_by_id[[idx]])){
      files_by_id[[idx]] <- list(ele$fastq)
    } else {
      # NOTE(review): this nests rather than appends (list(old, new)); the
      # unlist() below flattens it back into order -- confirm ordering holds
      # for ids with more than two fastqs.
      files_by_id[[idx]] <- list(files_by_id[[idx]], ele$fastq)
    }
    # Last entry for an id wins; condition/type are assumed identical across
    # an id's fastq entries.
    conditions[[idx]] <- ele$condition
    types[[idx]] <- ele$type
  }
  # Strip .fastq/.fastq.gz, join an id's basenames with "_", add the suffix.
  count_files <- unlist(lapply(files_by_id, function(x)
    paste0(paste0(str_replace_all(unlist(x), c(".fastq.gz"="",".fastq"="")), collapse="_"), bam_suffix)))
  fpkm_files <- unlist(lapply(files_by_id, function(x)
    paste0(paste0(str_replace_all(unlist(x), c(".fastq.gz"="",".fastq"="")), collapse="_"), fpkm_suffix)))
  return(list(
    "count_files"=count_files,
    "fpkm_files"=fpkm_files,
    "conditions"=unlist(conditions),
    "types"=unlist(types)
  ))
}
########################
##### Start script #####
########################
## Load in the arguments from command line (location of YAML file) ##
args <- commandArgs(trailingOnly = TRUE)
if (length(args)==0) {
  # BUG FIX: the message previously ended in a literal ".n" (typo for "\n").
  stop("At least one argument must be supplied (the deseq yaml config file).\n", call.=FALSE)
}
## HTSeq input - read in config file and check it has the right format
conf <- yaml.load_file(args[1])
validateConfig(conf)
## Define file paths to get to processed experimental files ##
ercan_rna <- "/scratch/cgsb/ercan/rna/deseq"
scratch_dir <- file.path(ercan_rna, conf$experiment_title)
# Checks to see if the experimental comparisons have been made before
if (dir.exists(scratch_dir)){
  stop(sprintf("the experiment title '%s' already exists. Change this title or remove the %s directory.", conf$experiment_title, scratch_dir))
}
## Define directories for input/output files
working_dir <- dirname(args[1])
fpkm_dir <- file.path(working_dir, "fpkm")
counts_dir <- file.path(working_dir, "counts")
deseq_dir <- file.path(working_dir, conf$experiment_title)
out_dir <- file.path(deseq_dir, "results")
dir.create(out_dir,showWarnings = FALSE, recursive = TRUE)
## Creates a list of input files and experimental metadata
inFiles_data <- readInFiles(conf$infiles)
conditions <- levels(factor(inFiles_data$conditions))
# BUG FIX: readInFiles returns 'types'; the old 'inFiles_data$type' only
# worked through $ partial matching.
sampleType <- inFiles_data$types
condition_type <- vector()
# Map each condition to its type (control/treatment), first occurrence wins.
for (i in seq_along(inFiles_data$conditions)){
  cond <- inFiles_data$conditions[i]
  if (!(cond %in% names(condition_type))) {condition_type[cond] = sampleType[i]}
}
sampleFiles <- inFiles_data$count_files
fpkm_files <- inFiles_data$fpkm_files
## Read in the HTseq outputs into dds format ##
sampleTable <- data.frame(sampleName=sampleFiles, fileName=sampleFiles, condition=inFiles_data$conditions)
dds <- DESeqDataSetFromHTSeqCount(sampleTable = sampleTable, directory = counts_dir, design= ~ condition)
## Filter out low/no count genes. Need more then 1 count per gene ##
filt.dds <- dds[ rowSums(counts(dds)) > 1, ]
print(paste0('Of the ', nrow(dds), ' genes, ', nrow(filt.dds), ' have >1 reads summed across conditions. ', (nrow(filt.dds)/nrow(dds))*100, '% of genes remain'))
## Get normalized count values ##
#DEseq works to estimate variability of samples and applies -ve binomial model
filt.dds<-DESeq(filt.dds)
#Extract out the normalized count value from the DEseq analysis object
# NOTE(review): assays(...)[["mu"]] are the model's fitted means, not
# counts(dds, normalized=TRUE) -- confirm this is the intended quantity.
normalized.count.data<-(assays(filt.dds)[["mu"]])
colnames(normalized.count.data)<-getColNames(inFiles_data$conditions)
normalized.count.data <- as.data.frame(normalized.count.data)
#Extract out gene names, WB gene names and chr for annotated genes
c_elegans_annots <- getCelegansAnnotations(conf$c_elegans_annots)
# Make heatmap plot of FPKM values of replicates based on spearman correlation
DATANAME <- paste0(conditions, collapse = 'vs')
# BUG FIX: 'sampleCondition' was never defined in this script; the
# per-sample condition vector lives in inFiles_data$conditions.
thresholded_fpkm_data <- make_fpkm_df(fpkm_dir, fpkm_files, inFiles_data$conditions, to_threshold = TRUE)
# Add in the Chr info and Wormbase ID
merged <- merge(thresholded_fpkm_data, c_elegans_annots, by.x="gene_id", by.y="Sequence.Name.(Gene)")
# Remove mitochondrial data
merged <- filter(merged, Chr.Name != "MtDNA")
not_for_plot <- c('Chr.Name', 'gene_id', 'Gene.WB.ID')
for (chr in unique(merged$Chr.Name)){
  #Set names and titles
  filepath <- file.path(out_dir, paste0(DATANAME,'.heatmap.spearman.thresholded.fpkm.', chr, '.pdf'))
  # BUG FIX: interpolate the comparison name; the old title embedded the
  # literal string "DATANAME".
  title <- paste0("Correlations of FPKM of Chr ", chr, " genes \n ", DATANAME)
  #pull out a single chromosome
  df<-filter(merged, Chr.Name == chr)
  #Drop uneeded annotation from plotting
  df <- df[, !(names(df) %in% not_for_plot)]
  #plot the correlations
  plotSpearmenHeatmap(df, filepath, title)
}
# BUG FIX: this line lacked its leading '#' and was a parse error:
# plot all chromosomes together
filepath <- file.path(out_dir, paste0(DATANAME,'.heatmap.spearman.thresholded.fpkm.all.pdf'))
title <- paste0("Correlations of FPKM values \n ", DATANAME)
# NOTE(review): plotSpearmenHeatmap is not defined in this file -- confirm
# it is sourced elsewhere in the pipeline.
plotSpearmenHeatmap(merged[, !(names(merged) %in% not_for_plot)], filepath, title)
## get average count data table ##
#Extract one condition at a time from normalized data, and then calculate mean
# and stdev for each gene under that condition
conditions_avg <- list()
for (cond in conditions){
  conditions_avg[[paste0(cond, '_mean')]] <- rowMeans(normalized.count.data[,inFiles_data$conditions == cond, drop=FALSE])
  conditions_avg[[paste0(cond, '_stdev')]] <- rowSds(data.matrix(normalized.count.data[,inFiles_data$conditions == cond, drop=FALSE]))
}
#Add gene names to average count data
# rowMeans preserves gene names, so the first element's names are the gene set.
genes <- names(conditions_avg[[1]])
wbid <- getGenesWbId(conf$c_elegans_wbid_to_gene, genes)
avg.count.data<-data.frame(wbid=wbid, conditions_avg)
#Save the normalized counts
filepath <- file.path(out_dir,paste0(paste0(conditions, collapse='', sep=c('vs','_')),'avg.count.data.txt'))
write.table(format(avg.count.data, digits=2, scientific=FALSE),file=filepath,row.names=T,col.names=T,quote=F,sep='\t')
#######################################
## plot replicates pairwise with Rsquared values ## CURRENTLY NOT PLOTTING. NEEDS SOME OPTIMISATION TO SCALE CORRECTLY ON A CASE BY CASE BASIS
#######################################
#filepath <- file.path(out_dir, paste0(DATANAME, '.replicates.counts.vs.counts.Rsq.pdf'))
#plotPairwiseCountData(normalized.count.data, filepath)
########################
## do pairwise comparisons ##
########################
#Generate a list with of all count data and its condition
condTables = list()
for (cond in conditions){
  condTables[[cond]] = sampleTable[sampleTable['condition']==cond,]
}
n_conditions <- length(conditions)
# NOTE(review): seq(n_conditions-1) misbehaves when only one condition is
# present (seq(0) is c(1, 0)); this loop assumes >= 2 conditions.
pairwise_res_df <- data.frame(row.names = genes)
for (i in seq(n_conditions-1)){
  for (j in seq(i+1,n_conditions)){
    if(i!=j){
      # NOTE(review): conditions_avg is interleaved (mean, stdev per
      # condition), so conditions_avg[[i]]/[[j]] do not index condition i/j;
      # harmless today only because do.DEseq ignores those two arguments.
      deseq.df <- do.DEseq(condTables[[i]], condTables[[j]],
                           conditions[[i]], conditions[[j]],
                           conditions_avg[[i]], conditions_avg[[j]], condition_type)
      basename <- paste0(conditions[[i]],'vs',conditions[[j]])
      filepath <- file.path(out_dir, paste0(basename,'.deseq.txt'))
      # NOTE(review): do.DEseq filters low-count genes, so deseq.df can have
      # fewer rows than 'wbid'/'pairwise_res_df' (built from all genes) --
      # confirm cbind and the column assignments below cannot mismatch.
      write.table(format(cbind(wbid,as.data.frame(deseq.df)), digits=2, scientific=FALSE),file=filepath,row.names=T,col.names=T,quote=F,sep='\t')
      #Modify data frame col names and minimise to just FC values and pvalues
      pairwise_res_df <- updatePairwiseDataFrame(pairwise_res_df, deseq.df, basename)
      filepath <- file.path(out_dir, paste0(basename,'.deseq.boxplot.by.chromosome.pdf'))
      boxPlotDESeqByChromosome(deseq.df, filepath, c_elegans_annots, "Log Fold Change By Chromosome")
      filepath <- file.path(out_dir, paste0(basename,'.deseq.scatterplot.pdf'))
      scatterPlotDeseq(deseq.df, c_elegans_annots, filepath, "Log Fold change vs expression")
    }
  }}
|
f5546cf35f18d836f9a61b7bc2000681bf49b69d
|
924b4fd06d01d1968fd09c26a27e0be9a4aaa289
|
/shiny_tabset_image&video.R
|
2a8a54e6d3e200949b7b2f65fcf26d1bf4277939
|
[] |
no_license
|
arunkumaarb/R
|
24b1b4ec2937885df826a349d15d02a107bfbae5
|
4dc98492e2a41ce86ddc37a75280eb1aa7f181f4
|
refs/heads/master
| 2021-09-10T02:11:49.208268
| 2018-03-20T16:55:21
| 2018-03-20T16:55:21
| 125,955,972
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,408
|
r
|
shiny_tabset_image&video.R
|
library(shiny)
library(shinydashboard)
# UI: a dropdown picking a grouping column of mtcars, plus a tabset with
# three static media tabs (image / local video / embedded YouTube) and three
# data tabs wired to the server outputs "mtcars", "summ" and "plot".
shinyUI(fluidPage(
  headerPanel(title="Shiny Tabset"),
  sidebarLayout(
    sidebarPanel(
      # Display-name -> column-name pairs; input$ngear carries the column name.
      selectInput("ngear","select number of gears",c("cylinders"="cyl","Transmission"="am","Gear"="gear"))
    ),
    mainPanel(
      tabsetPanel(type="tab",
        #To add image
        # NOTE(review): "R1.png"/"download.mp4" must live in the app's www/ directory.
        tabPanel("Image",tags$img(src="R1.png")),
        #To add Video
        tabPanel("Video",tags$video(src="download.mp4",width="500px",height="350px",type="video/mp4",controls="controls")),
        #To add youtube video get the embed link from youtube video share link
        tabPanel("Youtube",HTML('<iframe width="560" height="315" src="https://www.youtube.com/embed/Gzy_nCkn88U" frameborder="0" allow="autoplay; encrypted-media" allowfullscreen></iframe>')),
        tabPanel("Data",tableOutput("mtcars")),
        tabPanel("Summary",verbatimTextOutput("summ")),
        tabPanel("plot",plotOutput("plot"))
      )
    )
  )
))
library(shiny)
library(ggplot2)
# Server: table, summary and boxplot of mpg against the grouping column
# selected in the UI (input$ngear holds the mtcars column name).
shinyServer(function(input,output){
  output$mtcars = renderTable(
    mtcars[,c("mpg",input$ngear)]
  )
  output$summ = renderPrint({
    summary(mtcars[,c("mpg",input$ngear)])
  })
  output$plot = renderPlot({
    # BUG FIX: factor(input$ngear) turned the *name* of the selected column
    # into a one-level factor, producing a single box. Use the column's
    # values via the .data pronoun instead.
    ggplot(mtcars,aes(x=factor(.data[[input$ngear]]),y=mpg))+geom_boxplot()
  })
})
|
5a89985c40330111cfde4ede07ca98b505b2f861
|
8441bfe4d9012405140a9cd61fe0915cf5749f16
|
/HIVBackCalc/man/KCplwh.Rd
|
93222cd391575a76197b9ea6de93cd52ca81ea35
|
[] |
no_license
|
hivbackcalc/package1.0
|
6fc12d3e5545fcf2dd6d53a5a84ee200e394940d
|
404da82d5db67b6a5885693934ae014e2e3d3d57
|
refs/heads/master
| 2021-01-17T13:07:59.862352
| 2019-07-08T22:17:06
| 2019-07-08T22:17:06
| 30,552,375
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 745
|
rd
|
KCplwh.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{KCplwh}
\alias{KCplwh}
\title{MSM living with HIV in KC 2006-2012}
\format{A data frame with 7 rows and 5 columns:
\describe{
\item{Year}{Year of estimate}
\item{White}{MSM living with HIV, White race}
\item{Black}{MSM living with HIV, Black race}
\item{Hisp}{MSM living with HIV, Hispanic race}
\item{Total}{MSM living with HIV}
}}
\source{
Based on estimates from Public Health Seattle King County
}
\usage{
KCplwh
}
\description{
A dataset containing rough estimates of MSM PLWH in King County, by race.
Estimates are specified by year for unit tests but reflect a time-period
average.
}
\keyword{datasets}
|
9376b9e0743cc65bdde580badb00be8cbb2ba779
|
6aeef9dc9bfc68752cf482a1214de670232af593
|
/inst/isoscape example/4-predict_isoscape.R
|
9928242bca00bee07a9a6d2a3a3c79d23309e891
|
[] |
no_license
|
medusaGit/isoscatR
|
14138aaee6ac5d5c2a0d80de924b9b4e198bc4e6
|
c1080d66d2d10fda3858998290b2f98e2cefa71f
|
refs/heads/master
| 2021-01-21T07:30:29.346776
| 2015-03-04T16:17:52
| 2015-03-04T16:17:52
| 48,884,502
| 1
| 0
| null | 2016-01-01T17:28:15
| 2016-01-01T17:28:15
| null |
UTF-8
|
R
| false
| false
| 346
|
r
|
4-predict_isoscape.R
|
# Predict the isoscape at new locations from a previously fitted spatial
# linear model. NOTE(review): data_dir must already be defined by an earlier
# script in this numbered pipeline; the two .Rdata files are expected to
# provide 'pred_data', 'pred_rasts' and the fitted model 'l' -- confirm.
library(spBayes)
load(file = file.path(data_dir,"gnip_data.Rdata"))
load(file = file.path(data_dir,"splm_fit.Rdata"))
# Posterior prediction, thinning the MCMC: samples 10020..30000, every 20th.
p = spPredict(l, pred.coords = pred_data[,c("long", "lat")], pred.covars=pred_data, start=10020, end=30000, thin=20)
# One raster layer per retained posterior sample, shaped like pred_rasts[[1]].
r = brick( pred_rasts[[1]], nl = ncol(p$y.pred))
values(r) = p$y.pred
save(p,r,file="sp_lm_pred.Rdata")
|
cd3cd07d25007674a5d6385e107094c92fb29f2a
|
7959d755e90a965e9aae96c6dbd0488f9bbd0461
|
/R/ArrangeData.R
|
599660425e06a5cb048e7735ff79e2fd8bc9a82e
|
[] |
no_license
|
shineshen007/shine
|
0fa037b731eefc13d3a28edc5c9335876c867bb0
|
2e53e87a1099fefe16b995732197f9bb0f16738f
|
refs/heads/master
| 2023-01-24T20:55:17.881562
| 2023-01-18T13:52:33
| 2023-01-18T13:52:33
| 124,387,894
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 486
|
r
|
ArrangeData.R
|
#' @title ArrangeData
#' @description Read "peak.table.csv" from the working directory, drop
#'   redundant metadata columns, rename the m/z and retention-time columns,
#'   and write the result to "data for svr.csv" for downstream SVR use.
#' @details Column handling is positional (columns 3:4 and 6:10 are dropped;
#'   columns 2 and 3 of the result are renamed to 'mz' and 'rt'), so the
#'   input file must have the expected layout.
#' @author Shine Shen
#' \email{qq951633542@@163.com}
#' @return Called for its side effect of writing "data for svr.csv"; the
#'   value of write.csv (NULL) is returned invisibly.
#' @export
ArrangeData <- function(){
  data <- data.table::fread("peak.table.csv")
  data<- data.table::setDF(data)
  data<-data[,-c(3:4,6:10)]#remove redundancy columns
  colnames(data)[2] <- 'mz'
  colnames(data)[3] <- 'rt'
  write.csv(data,"data for svr.csv",row.names = F)
}
|
cf72e3929c5a852534bad9309bb5893ba4609bb8
|
c5824359870ca766c2684c7ff3abe956de472377
|
/Column/Make_Pair_Mapping_from_FactorInfo/Paired_to_Metadata.r
|
9dd7f4a89e3932fdfb1209570a32b2f18179b9d9
|
[] |
no_license
|
MedicineAndTheMicrobiome/AnalysisTools
|
ecb8d6fd4926b75744f515b84a070e31f953b375
|
8176ca29cb4c5cba9abfa0a0250378e1000b4630
|
refs/heads/master
| 2023-09-01T21:10:39.942961
| 2023-08-31T22:43:03
| 2023-08-31T22:43:03
| 64,432,395
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,730
|
r
|
Paired_to_Metadata.r
|
#!/usr/bin/env Rscript
###############################################################################
library('getopt');
options(useFancyQuotes=F);
# getopt specification: long name, short flag, argument requirement
# (1 = required argument, 2 = optional), type.
params=c(
	"paired_map_file", "p", 1, "character",
	"metadata_output", "o", 1, "character",
	"category_name", "c", 2, "character",
	"acolname", "a", 2, "character",
	"bcolname", "b", 2, "character"
	);
opt=getopt(spec=matrix(params, ncol=4, byrow=TRUE), debug=FALSE);
# Recover this script's own path from the --file= element of commandArgs.
script_name=unlist(strsplit(commandArgs(FALSE)[4],"=")[1])[2];
usage = paste(
	"\nUsage:\n", script_name, "\n",
	"	-p <paired map file>\n",
	"	-o <output metdatafile>\n",
	"	[-a <column name of sample ID A>]\n",
	"	[-b <column name of sample ID B>]\n",
	"	[-c <category name, default=Category>]\n",
	"\n",
	"This script will read in a paired map file and regenerate\n",
	"a metadata/factor file.\n",
	"\n",
	"The format of the paired map needs the following columns, unless the -a and -b options are specified:\n",
	"	1.) subject id\n",
	"	2.) first sample type's sample ids\n",
	"	3.) second sample type's sample ids\n",
	"\n", sep="");
# Both -p and -o are mandatory; print usage and exit otherwise.
if(
	!length(opt$paired_map_file) ||
	!length(opt$metadata_output)
){
	cat(usage);
	q(status=-1);
}
PairedMapFile=opt$paired_map_file;
OutputMetadataFile=opt$metadata_output;
# Optional flags fall back to defaults ("" means "not supplied" for a/b).
if(length(opt$category_name)){
	CategoryName=opt$category_name;
}else{
	CategoryName="Category";
}
if(length(opt$acolname)){
	Acolname=opt$acolname;
}else{
	Acolname="";
}
if(length(opt$bcolname)){
	Bcolname=opt$bcolname;
}else{
	Bcolname="";
}
cat("\n");
cat("Paired Map File: ", PairedMapFile, "\n", sep="");
cat("Output Metadata File: ", OutputMetadataFile, "\n", sep="");
cat("Category Name: ", CategoryName, "\n", sep="");
cat("\n");
if(Acolname!=""){
	cat("A Colname: ", Acolname, "\n");
}
if(Bcolname!=""){
	cat("B Colname: ", Bcolname, "\n");
}
##############################################################################
load_paired=function(fname, acn=NULL, bcn=NULL, sbj_idn=NULL){
	# acn, bcn and sbj_idn are the column names for the 3 columns expected
	# Read a tab-separated paired map and return a 2-column data frame
	# (sample A ids, sample B ids) with subject ids as rownames.
	#   fname   : path to the paired map file
	#   acn/bcn : optional names of the A/B sample-id columns; when NULL,
	#             columns 2 and 3 are used positionally.
	#   sbj_idn : optional name of the subject-id column; when NULL,
	#             column 1 is used.

	cat("Loading Paired Map...\n");
	table=data.frame(read.table(fname, sep="\t", header=TRUE,
		row.names=c(), check.names=FALSE, comment.char=""));

	# When A/B columns are named, rebuild the table as exactly
	# (subject, A, B) so the 3-column check below still applies.
	if(!is.null(acn) && !is.null(bcn)){
		table=cbind(table[,1], table[,c(acn, bcn), drop=F]);
	}

	if(ncol(table)!=3){
		cat("\n*************************************************************\n");
		cat("Error:  This script requires 3 columns in the paired file.\n");
		cat("	1.) subject id\n	2.) sample id A\n	3.) sample id B\n");
		cat("\n*************************************************************\n");
		cat("\n\n");
		quit(status=-1);
	}

	# Find subject ID column
	if(!is.null(sbj_idn)){
		subject_ids=table[,sbj_idn];
	}else{
		subject_ids=table[,1];
	}

	# Find A & B
	if(!is.null(acn) && !is.null(bcn)){
		table=table[, c(acn, bcn)];
	}else{
		# Assume row 2 and 3 are a and b, respectively
		table=table[,c(2,3)];
	}

	rownames(table)=subject_ids;
	return(table);
}
##############################################################################
# Load factors: both -a/-b supplied -> named columns; neither -> positional.
if(Acolname!="" && Bcolname!=""){
	paired_map=load_paired(PairedMapFile, Acolname, Bcolname);
}else if(Acolname=="" && Bcolname==""){
	paired_map=load_paired(PairedMapFile);
}else{
	# BUG FIX: previously this branch only cat()'d the message and fell
	# through to use an undefined 'paired_map'; abort instead.
	stop("Error: If A or B colname is specified, then both A & B must be specified.", call.=FALSE);
}
# paired_map: rownames = subject ids, column 1 = sample ids of type A,
# column 2 = sample ids of type B; column names are the two category labels.
subject_ids=rownames(paired_map);
pair_category=colnames(paired_map);
num_subjects=length(subject_ids);

cat("Num Subjects:", num_subjects, "\n", sep="");
cat("Pairing Category: \n");
print(pair_category);

a_samp_ids=as.character(paired_map[,1]);
b_samp_ids=as.character(paired_map[,2]);

# Subject-id vectors keyed by their sample ids (A block, then B block).
a_subj_ids=subject_ids;
b_subj_ids=subject_ids;
names(a_subj_ids)=a_samp_ids;
names(b_subj_ids)=b_samp_ids;

# Long-format metadata: one row per sample, A rows stacked above B rows.
metadata_out=matrix(NA, nrow=2*num_subjects, ncol=3);
colnames(metadata_out)=c(CategoryName, "SubjectID", "SampleID");
rownames(metadata_out)=c(a_samp_ids, b_samp_ids);

metadata_out[,CategoryName]=c(rep(pair_category[1], num_subjects), rep(pair_category[2], num_subjects));
metadata_out[,"SubjectID"]=c(a_subj_ids, b_subj_ids);
metadata_out[,"SampleID"]=c(a_samp_ids, b_samp_ids);

# Drop rows whose sample id is missing (subjects lacking one sample type),
# and put SampleID first for the output file.
cat("Removing NAs...\n");
nrows_orig=2*num_subjects;
not_na_ix=!is.na(metadata_out[,"SampleID"]);
metadata_out=metadata_out[not_na_ix, c("SampleID", CategoryName, "SubjectID")];
nrows_after_na_remove=nrow(metadata_out);
num_removed=nrows_orig-nrows_after_na_remove;
cat("Number of Rows Removed: ", num_removed, "\n");

cat("Writing New Factor File into: ", OutputMetadataFile, "\n");
fname=paste(OutputMetadataFile);
write.table(metadata_out, file=fname, row.names=F, append=F, quote=F, sep="\t");

###############################################################################

cat("Done.\n");
#dev.off();
print(warnings());
q(status=0);
|
4e7a364b8e4a54b911d1902e76cf91c21d5b3bb5
|
ce2435ac0d405cc80cfaddc02bb709ea7491a5d5
|
/Big Data Zacatecas/sesion6/tarea6WaffleCharts.R
|
4f68002d9f4343c081a755686a363aee3a8aca82
|
[
"CC0-1.0"
] |
permissive
|
pauEscarcia/BigData-Zacatecas
|
b9e4014ee1242522c04a46a8fd40badd809cfe7c
|
6ed59608d4583f8d0bdb5caa55c80f41a1c3844a
|
refs/heads/master
| 2021-01-10T05:25:26.429723
| 2016-03-14T03:18:03
| 2016-03-14T03:18:03
| 43,478,578
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 407
|
r
|
tarea6WaffleCharts.R
|
#Waffle charts
# Fixed-broadband subscriptions per 100 inhabitants, years 2004-2009.
all.scores <- read.csv("bafijaporcada100habitantes.csv")
all.scores
suscripciones <- all.scores[,"Suscripciones.100.h"]
# Rows 5:10 correspond to the years 2004-2009 in this file.
sus <- suscripciones[5:10]
sus
anio <- c("2004","2005","2006","2007","2008","2009")
# BUG FIX: 'total' was only built in commented-out code, so waffle() was
# called with an undefined variable. waffle() expects a *named* numeric
# vector of (rounded) part counts, named here by year for the legend.
total <- setNames(round(sus), anio)
waffle::waffle(total, rows=5, colors=rainbow(length(sus)), title="Banda Ancha Fija por cada 100 habitantes",
               xlab="Suscriptores")
|
4134da6c3e3538352a38345e4df6e745e331391d
|
51fdef26e2b65585f0200d90c2b25fe64444dcc3
|
/One_Proportion_Obama_Care_Fa16.R
|
677b3f6ddd80ca7cff08019f9a71334a2fbeff00
|
[] |
no_license
|
chenqi0805/Bayesian-Statistical-Methods
|
f2bfbec0d83281340cb6bd89b14acbb8c3d8a019
|
4385f862577fefab8681a7f729f789a6d47645f2
|
refs/heads/master
| 2021-01-22T23:26:47.165703
| 2017-03-21T00:08:06
| 2017-03-21T00:08:06
| 85,639,598
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,180
|
r
|
One_Proportion_Obama_Care_Fa16.R
|
# Bayesian Inferences for θ,
# a related parameter, and future data - A conjugate (Beta) prior analysis
# See pages 51-52 of the Text by CJBH for some of the R commands described below.
# Suppose observed data are summarized as
# 16 favored Obama Care, out of n=48 constituents polled
# NOTE: A sufficient statistic, T=number among n who favor Obama Care, exists in this
# sampling model.
# A priori, theta is believed to be within the interval [0.2 ,0.6] with high probability.
# Fit a prior density from the Beta(a,b) family to match this info - see lecture notes
# old way ( <- )to make assignments to objects in R
a.prior <- 9.2
b.prior <- 13.8
# Current way ( = ) to make assignments to objects in R
a.prior = 9.2
b.prior = 13.8
# you will find both = and <- used interchangeably in R-codes, etc.
# plotting p1(theta) - prior density
theta = seq(0,1,0.01) # Discretize theta using a grid of size 0.01
prr.theta = dbeta(theta,shape1=a.prior,shape2=b.prior)
#print(cbind(theta, prr.theta))
par(mfrow=c(2,2)) # making two rows and two columns for plots
plot(theta,prr.theta, type="l", main=paste("Beta(a=",a.prior,",b=",b.prior,") prior"),
xlab="theta",ylab="Prob",cex.main=0.8)
# Likelihood, p2(tstar|theta),of the actual data from Binomial(n, theta)
n = 48 ## number of people who were polled
tstar = 16 ## actual number who favored Obama Care
lik.theta = dbinom(tstar,size=n,prob=theta)
# Unnormalized Likelihood -kernel
lik.theta = (theta^tstar)*((1-theta)^(n-tstar))
plot(theta,lik.theta, type="l", col="blue", main="Binomial Likelihood",xlab="theta",ylab="",cex.main=0.8)
# Product of Likelihood and Prior, but only
# kernel or unnormalized posterior of theta
unnorm.post.theta = lik.theta*prr.theta
plot(theta,unnorm.post.theta,type="l",main="Likelihood*Prior", col="purple", cex.main=0.8)
# The Posterior is a Beta distribution due to the conjugate structure
# of the Beta prior and Binomial likelihood
# The parameters of the Beta posterior distribution
a.post = tstar + a.prior
b.post = (n-tstar)+ b.prior
a.post
b.post
post.theta = dbeta(theta,shape1=a.post, shape2=b.post)
plot(theta, post.theta, type="l", main=paste("Posterior: Beta distn (a=",
a.post,",b=",b.post,")"),col="red", cex.main=0.8)
# posterior by itself #
# A square region is needed for the next Plot
quartz(width=4,height=4,pointsize=8)
plot(theta, post.theta, type="l",main=paste("Posterior: Beta distn (a=",
a.post,",b=",b.post,")"),col="red", cex.main=0.8)
# Exact Posterior mean
post.mean = a.post/(a.post+b.post)
# Exact Posterior mode
post.mode = (a.post-1)/(a.post+b.post-2)
print(c("Post mean=", post.mean, "Post mode=", post.mode))
#
# Approximate inferences (the posterior, point estimates, 95% credible Interval, posterior
#prob)
# for theta by Direct Monte-Carlo Simulation - i.i.d sampler
# pseudo random sample of size N from the posterior distribution of theta
#
N = 1000
# Fix the seed in sampling the posterior
set.seed(12345)
sample.post.theta = rbeta(N,shape1=a.post,shape2=b.post)
# Approx. posterior dist of theta
# A square region is needed for the next Plot
quartz(width=4,height=4,pointsize=8)
hist(sample.post.theta, xlab = "theta", ylab = "frequency", main =
"Approximate posterior distribution of theta
using N=1000 draws from the posterior")
# Approximate posterior mean, variance of theta
MC.est.post.mean.theta = mean(sample.post.theta)
MC.est.post.var.theta = var(sample.post.theta)
print(c("MC est of Post mean=", MC.est.post.mean.theta, "MC est of Post variance=",
MC.est.post.var.theta))
# Approx. 95% credible Interval for theta
print(quantile(sample.post.theta,probs = c(0.025,0.975)))
# What is the approx. Posterior probability that theta < 0.50?
post.prob0.5 = mean((sample.post.theta < 0.5))
post.prob0.5
# Approximate inferences (posterior median, 95% credible Interval, posterior prob) for theta by numerical methods
# Posterior median
post.med = qbeta(0.5,shape1=a.post,shape2=b.post)
print(c("approx Post median=", post.med))
#95% credible Interval for theta
print(c(qbeta(0.025,shape1=a.post,shape2=b.post),
qbeta(0.975,shape1=a.post,shape2=b.post) ) )
# approx Posterior probability that theta < 0.50
pbeta(0.5,shape1=a.post,shape2=b.post)
# INFERENCE FOR TAU BEGINS
# Use the pseudo random sample of size N from the posterior distribution of theta
# to get a pseudo random sample of size N from the posterior distribution of tau,
# the odds of favoring Obama Care.
sample.post.tau = sample.post.theta/(1-sample.post.theta)
# A square region is needed for the next Plot
quartz(width=4,height=4,pointsize=8)
hist(sample.post.tau, xlab = "tau", ylab = "frequency", main =
"Approximate posterior distribution of tau
using N=1000 draws from the posterior")
#
# Approximate Bayesian Inferences for tau using by Monte-Carlo simulation -
# pseudo random sample of size N from the posterior distribution of tau
#
# A point estimate of tau
MC.approx.post.mean.tau = mean(sample.post.tau)
MC.approx.post.mean.tau
#95% credible Interval for tau
print(quantile(sample.post.tau,probs = c(0.025,0.975)))
# What is the (approximate) Posterior probability that tau >= 1.0?
post.prob1.0 = mean((sample.post.tau >= 1.0))
post.prob1.0
# Approximate predictive Bayesian inferences for W = # of people among the next 40 polled ># who favor Obama Care
# by Monte-Carlo simulation - pseudo random
# sample of size N2 from the posterior predictive distribution of W
m = 40
N2 = length(sample.post.theta)
set.seed(12345)
sample.pred.W = rbinom(n=N2,size=m,prob= sample.post.theta)
# A square region is needed for the next Plot
quartz(width=4,height=4,pointsize=8)
hist(sample.pred.W, xlab = "W = # of people among 40 who favor Obama Care ", ylab = "frequency", main =
"Approximate posterior predictive distribution of W
using N=1000 draws from the posterior predictive")
# A point estimate of W
MC.est.pred.mean.W = mean(sample.pred.W)
MC.est.pred.mean.W
# Approx. 95% posterior predictive Interval for W
print(quantile(sample.pred.W,probs = c(0.025,0.975)))
# What is the (approximate) posterior predictive probability that W >= 25?
pred.prob.25 = mean((sample.pred.W >= 25))
pred.prob.25
# Does the sampling model fit the data?
# check if tstar is or is not in the middle of the histogram of the predictive distribution of the
# future datum, W1 = # of people among the next 48 polled who would favor Obama Care
# Approximate predictive Bayesian inferences for W1 using a
# pseudo random sample of size N2 from the posterior predictive distribution of W1
m1 = 48
N2 = length(sample.post.theta)
set.seed(12345)
sample.pred.W1 = rbinom(n=N2,size=m1,prob= sample.post.theta)
# A square region is needed for the next Plot
quartz(width=4,height=4,pointsize=8)
hist(sample.pred.W1, xlab = "W1 = # of people among 48 who favor Obama Care", ylab = "frequency", main =
"Approximate posterior predictive distribution of W1
using N=1000 draws from the posterior predictive")
abline(v=tstar, lwd=3, col="red")
|
b753d1eb7dfb02a9ae5862f4c1fab5132ef27d0a
|
5c78cf64814e074824b1d9d676aaf88f887d509c
|
/man/se.Rd
|
d7ede50eb0820b16c6a2034f85d169584805976f
|
[] |
no_license
|
osoramirez/resumeRdesc
|
739969df42fb0d60ae1825cb92588449ed98ff80
|
f13df59da87bce98a6a96ed76b4b6b42212270bb
|
refs/heads/master
| 2020-04-24T00:25:59.367878
| 2019-02-19T23:23:17
| 2019-02-19T23:23:17
| 138,966,885
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 501
|
rd
|
se.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/se.R
\name{se}
\alias{se}
\title{A standard error}
\usage{
se(x)
}
\arguments{
\item{x}{is a numeric value, could be a a vector or data.frame}
\item{se}{get a standard error}
}
\value{
a data a standard error
}
\description{
The standard error (SE) of a statistic is the standard deviation of its sampling distribution.
}
\examples{
x<-rnorm(25,2,3)
se(x)
}
\author{
Oscar Ramirez Alan (\email{osoramirez@gmail.com}).
}
|
c23c992ddad2e029823de245e5e8ab8b2a62b4ba
|
e705fdc30047cff721ddd288cf38a5a55fcba2f4
|
/scripts/single dataset workflows/five months soupx.R
|
52bb2c814c96be34a6119d53a8cdf884d636a085
|
[] |
no_license
|
MillayLab/single-myonucleus
|
d5ef96f985a5638d788af5c9ec59e097a3e854d8
|
f7d977fd5e66be286e1a1bf85b1dda36ff6ae2fb
|
refs/heads/master
| 2022-12-28T17:54:53.048831
| 2020-10-12T14:53:06
| 2020-10-12T14:53:06
| 274,753,777
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,939
|
r
|
five months soupx.R
|
fivemonth_soupx <- CreateSeuratObject(counts = fivemonthcounts, project = "A", min.cells = 3, min.features = 200)
fivemonth_soupx[["percent.mt"]] <- PercentageFeatureSet(fivemonth_soupx, pattern = "^MT-")
VlnPlot(fivemonth_soupx, features = c("nFeature_RNA", "nCount_RNA"), ncol = 2)
featurescatterplot <- FeatureScatter(fivemonth_soupx, feature1 = "nCount_RNA", feature2 = "nFeature_RNA")
plot(featurescatterplot)
fivemonth_soupx <- subset(fivemonth_soupx, subset = nFeature_RNA > 200 & nFeature_RNA < 3200)
fivemonth_soupx <- NormalizeData(fivemonth_soupx)
fivemonth_soupx <- FindVariableFeatures(fivemonth_soupx, selection.method = "vst", nfeatures = 2000)
top10 <- head(VariableFeatures(fivemonth_soupx), 10)
plot1variable <- VariableFeaturePlot(fivemonth_soupx)
plot2variable <- LabelPoints(plot = plot1variable, points = top10, repel = TRUE)
CombinePlots(plots = list(plot1variable, plot2variable))
all.genes5mo <- rownames(fivemonth_soupx)
fivemonth_soupx <- ScaleData(fivemonth_soupx, features = all.genes5mo)
fivemonth_soupx <- RunPCA(fivemonth_soupx, features = VariableFeatures(object = fivemonth_soupx))
VizDimLoadings(fivemonth_soupx, dims = 1:2, reduction = "pca")
DimPlot(fivemonth_soupx, reduction = "pca")
DimHeatmap(fivemonth_soupx, dims = 1:15, cells = 500, balanced = TRUE)
fivemonth_soupx <- FindNeighbors(fivemonth_soupx, dims = 1:12)
fivemonth_soupx <- FindClusters(fivemonth_soupx, resolution = 0.5)
fivemonth_soupx <- RunUMAP(fivemonth_soupx, dims = 1:12)
#supplying dimensions for consistent graphs
fivemonth_soupx@reductions[["umap"]] <- fivemonth_reductions
DimPlot(fivemonth_soupx, reduction = "umap", label = TRUE)
fivemonth_soupx <- RenameIdents(fivemonth_soupx, "0" = "Type IIb Myonuclei", "1" = "Type IIx Myonuclei", "2" = "Type IIb Myonuclei #2", "3" = "Type IIx Myonuclei #2", "4" = "FAPs", "5" = "Endothelial Cells", "6" = "Myotendinous Junction", "7" = "Smooth Muscle", "8" = "Satellite Cells", "9" = "Immune Cells", "10" = "Smooth Muscle #2", "11" = "Neuromuscular Junction", "12" = "Tenocytes")
#feature expression plots in figure 1
FeaturePlot(fivemonth_soupx, features = c("Myh4"), pt.size = 2.5, cols = c("lightgrey", "red")) + NoAxes() + NoLegend() + ggtitle("")
FeaturePlot(fivemonth_soupx, features = c("Myh1"), pt.size = 2.5, cols = c("lightgrey", "red")) + NoAxes() + NoLegend() + ggtitle("")
FeaturePlot(fivemonth_soupx, features = c("Chrne"), pt.size = 2.5, cols = c("lightgrey", "red")) + NoAxes() + NoLegend() + ggtitle("")
FeaturePlot(fivemonth_soupx, features = c("Col22a1"), pt.size = 2.5, cols = c("lightgrey", "red")) + NoAxes() + NoLegend() + ggtitle("")
FeaturePlot(fivemonth_soupx, features = c("Pax7"), pt.size = 2.5, cols = c("lightgrey", "red")) + NoAxes() + NoLegend() + ggtitle("")
fivemonth_soupx <- RenameIdents(fivemonth_soupx, "Type IIb Myonuclei #2" = "Type IIb Myonuclei", "Type IIx Myonuclei #2" = "Type IIx Myonuclei")
VlnPlot(fivemonth_soupx, features = c("Myh4"), idents = c("Type IIb Myonuclei", "Type IIx Myonuclei", "Neuromuscular Junction", "Myotendinous Junction", "Satellite Cells"), pt.size = 0) + NoLegend()
VlnPlot(fivemonth_soupx, features = c("Myh1"), idents = c("Type IIb Myonuclei", "Type IIx Myonuclei", "Neuromuscular Junction", "Myotendinous Junction", "Satellite Cells"), pt.size = 0) + NoLegend()
VlnPlot(fivemonth_soupx, features = c("Chrne"), idents = c("Type IIb Myonuclei", "Type IIx Myonuclei", "Neuromuscular Junction", "Myotendinous Junction", "Satellite Cells"), pt.size = 0) + NoLegend()
VlnPlot(fivemonth_soupx, features = c("Col22a1"), idents = c("Type IIb Myonuclei", "Type IIx Myonuclei", "Neuromuscular Junction", "Myotendinous Junction", "Satellite Cells"), pt.size = 0) + NoLegend()
VlnPlot(fivemonth_soupx, features = c("Pax7"), idents = c("Type IIb Myonuclei", "Type IIx Myonuclei", "Neuromuscular Junction", "Myotendinous Junction", "Satellite Cells"), pt.size = 0) + NoLegend()
|
8b60ff9dbcd6f594ce7a9b10c1817a4747d86767
|
97f1e3e6e908a83489e4243268ba539316196176
|
/man/getANTsRData.Rd
|
7a25d07b30106181be359abfc139ec310242d3af
|
[
"Apache-2.0"
] |
permissive
|
ANTsX/ANTsRCore
|
1c3d1da3bea84859da7d18f54c34ae13d2af8619
|
8e234fd1363c0d618f9dc21c9566f3d5464655a2
|
refs/heads/master
| 2023-05-24T23:53:30.886217
| 2023-05-22T02:52:39
| 2023-05-22T02:52:39
| 83,897,912
| 8
| 22
| null | 2023-05-22T02:52:40
| 2017-03-04T14:09:48
|
C++
|
UTF-8
|
R
| false
| true
| 896
|
rd
|
getANTsRData.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getANTsRData.R
\name{getANTsRData}
\alias{getANTsRData}
\title{getANTsRData}
\usage{
getANTsRData(
fileid,
usefixedlocation = FALSE,
verbose = FALSE,
method = ifelse(Sys.info()["sysname"] == "Linux", "wget", "auto"),
quiet = FALSE
)
}
\arguments{
\item{fileid}{one of the permitted file ids or pass "show" to list all
valid possibilities. Note that most require internet access to download.}
\item{usefixedlocation}{directory to which you download files}
\item{verbose}{optional boolean}
\item{method}{Method to be used for downloading files,
passed to \code{\link{download.file}}}
\item{quiet}{If \code{TRUE}, suppress status messages
(if any), and the progress bar.}
}
\value{
filename string
}
\description{
Downloads antsr test data
}
\examples{
fi <- getANTsRData( "r16" )
}
\author{
Avants BB
}
|
8180343695e72a936ce06b0160b674f91ef0af67
|
4d9be777791f09cdf5c1dfb255c69e8a0cce3e80
|
/getVarCombs Function.R
|
7874fb25c252218f66319bc9dbd9427e1eea91b9
|
[
"MIT"
] |
permissive
|
NFSturm/utility_funs
|
d1f425fecb865b4ba66f778b21453c05e26eab1e
|
4fc69f3c181b39a5029fd160939818788b540f9b
|
refs/heads/master
| 2020-12-14T04:03:47.414152
| 2020-01-24T17:02:29
| 2020-01-24T17:02:29
| 234,632,134
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,085
|
r
|
getVarCombs Function.R
|
# Variable Combination Generator Function (getVarCombs)
# Inputs:
# df: A dataframe or similar structure (data.table does not work at the moment)
# y: Name of independent variable
getVarCombs <- function(df, y) {
if(typeof(df) != "list") stop("Input must be a dataframe or similar structure")
if(typeof(y) != "character") stop("Dependent variable must be specified as a character string")
df_vars <- df[!(names(df) %in% y)]
var_num <- length(names(df_vars))
vars <- names(df_vars)
getCombs <- function(vars, m) {
obj <- combn(vars, m)
out <- c()
for (combn in 1:ncol(obj)) {
vc <- paste0(obj[,combn], collapse = "+")
out <- append(out, vc)
}
formula <- c()
for (form in out) {
fm <- paste0(y, " ~ ", form)
formula <- append(formula, fm)
}
formula
}
all_combs <- c()
for (i in 1:var_num) {
combs <- getCombs(vars, m = i)
all_combs <- append(all_combs, combs)
}
all_combs
}
# Variable combinations must be turned into formula by using "formula('''Name of getVarComb-Output''')"
|
b1fef19b2bdb841cb2f39f1784389c212d1ee5d3
|
5f2469bb233cde73acef9c59371bf8f9db12c782
|
/ui.r
|
2907d2c7af6cfafe37e5065dbfce1c79b3624323
|
[] |
no_license
|
msolanoo/WineQuality
|
19be0c6cac89c010e8a1a8fd6d5b1f9a9b9cb06c
|
1f11553b63ea5d93d817fd58014de68994fdc217
|
refs/heads/master
| 2021-01-10T06:48:43.779985
| 2015-05-24T18:46:34
| 2015-05-24T18:46:34
| 36,185,865
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,283
|
r
|
ui.r
|
library(shiny)
filenames<-list.files(pattern="\\.csv$")
shinyUI(
navbarPage("Wine Quality",
tabPanel("Understanding Wine Quality",
h2("Wine Quality"),
hr(),
h3("This dataset is public available for research. The details are described in [Cortez et al., 2009].
Please include this citation if you plan to use this database:
P. Cortez, A. Cerdeira, F. Almeida, T. Matos and J. Reis.
Modeling wine preferences by data mining from physicochemical properties.
In Decision Support Systems, Elsevier, 47(4):547-553. ISSN: 0167-9236."),
helpText("Available at: [@Elsevier] http://dx.doi.org/10.1016/j.dss.2009.05.016",
" [Pre-press (pdf)] http://www3.dsi.uminho.pt/pcortez/winequality09.pdf",
" [bib] http://www3.dsi.uminho.pt/pcortez/dss09.bib."),
h3("Format"),
p("A data frame with 6497 observations on 11 variables."),
p(" 1. Title: Wine Quality"),
p(" 2. Sources
Created by: Paulo Cortez (Univ. Minho), Antonio Cerdeira, Fernando Almeida, Telmo Matos and Jose Reis (CVRVV) @ 2009"),
p(" 3. Past Usage:
P. Cortez, A. Cerdeira, F. Almeida, T. Matos and J. Reis.
Modeling wine preferences by data mining from physicochemical properties.
In Decision Support Systems, Elsevier, 47(4):547-553. ISSN: 0167-9236.
In the above reference, two datasets were created, using red and white wine samples.
The inputs include objective tests (e.g. PH values) and the output is based on sensory data
(median of at least 3 evaluations made by wine experts). Each expert graded the wine quality
between 0 (very bad) and 10 (very excellent). Several data mining methods were applied to model
these datasets under a regression approach. The support vector machine model achieved the
best results. Several metrics were computed: MAD, confusion matrix for a fixed error tolerance (T),
etc. Also, we plot the relative importances of the input variables (as measured by a sensitivity
analysis procedure). DISCLAIMER: Mario Solano has merged the 2 datasets into a single one for thi Shiny Project"),
p(" 4. Relevant Information:
The two datasets are related to red and white variants of the Portuguese Vinho Verde wine.
For more details, consult: http://www.vinhoverde.pt/en/ or the reference [Cortez et al., 2009].
Due to privacy and logistic issues, only physicochemical (inputs) and sensory (the output) variables
are available (e.g. there is no data about grape types, wine brand, wine selling price, etc.).
These datasets can be viewed as classification or regression tasks.
The classes are ordered and not balanced (e.g. there are munch more normal wines than
excellent or poor ones). Outlier detection algorithms could be used to detect the few excellent
or poor wines. Also, we are not sure if all input variables are relevant. So
it could be interesting to test feature selection methods. "),
p(" 5. Number of Instances: red wine - 1599; white wine - 4898.
"),
p(" 6. Number of Attributes: 11 + output attribute"),
p(" 7. Attribute information:
For more information, read [Cortez et al., 2009].
Input variables (based on physicochemical tests):
1 - fixed acidity
2 - volatile acidity
3 - citric acid
4 - residual sugar
5 - chlorides
6 - free sulfur dioxide
7 - total sulfur dioxide
8 - density
9 - pH
10 - sulphates
11 - alcohol
Output variable (based on sensory data):
12 - quality (score between 0 and 10)
13 - Wine Type (red or white"),
p(" 8. Missing Attribute Values: None"),
h3("Source"),
p("Available at: [@Elsevier] http://dx.doi.org/10.1016/j.dss.2009.05.016,
[Pre-press (pdf)] http://www3.dsi.uminho.pt/pcortez/winequality09.pdf,
[bib] http://www3.dsi.uminho.pt/pcortez/dss09.bib.")
),
tabPanel("How to use this app?",
h2("Follow these steps and you are good to go!"),
hr(),
h3("The App has four tabs"),
p("1 - Understanding Wine Quality: bring the user to the world of wine making"),
p("2 - How to use this app?"),
p("3 - Analyzing White and Red Wine: loads a datase set that allows user to use boxplots for each attributes vs Wine Type, and also run Linear Regressions againts the Wine Quality Label"),
p("4 - Source Code: Github repository containint the code to buil this Shiny App"),
helpText(""),
h2("Using the App to Analyze Wine Quality"),
h3("Left Navigation Tab"),
p("The drop down menu has the 11 available variables from the data set. Click on the drop down menu, to select 1 variable and it will automatically change the behavior of the boxplot and linear regression"),
h3("Main Panel Navigation Tab"),
hr(),
h3("Data Tab"),
p("Loads the winequality.csv file. Shows the first 20 rows as reference for the user"),
hr(),
h3("BoxPlot"),
p("Show a box plot of the variable selected in the drop down menu in Y vs the Wine Type in X. Wine Type = Red or White Wine"),
hr(),
h3("Regression Model"),
p("Generates a regression model of quality ~ variable selected in drop down menu")
),
tabPanel("Analyzing White and Red Wine",
fluidPage(
titlePanel("Wine Quality vs its production variables"),
sidebarLayout(
sidebarPanel(
selectInput("variable", "Variable:",
c("Fixed Acidity" = "fixed.acidity",
"Volatile Acidity" = "volatile.acidity",
"Citric Acid" = "citric.acid",
"Residual Sugar" = "residual.sugar",
"Chlorides" = "chlorides",
"Free Sulfur Dioxide" = "free.sulfur.dioxide",
"Total Sulfur Dioxide" = "total.sulfur.dioxide",
"Density" = "density",
"pH" = "pH",
"Sulphates" = "sulphates",
"Alcohol Output variable (based on sensory data)" = "alcohol"
))
),
mainPanel(
tabsetPanel(type = "tabs",
tabPanel("Data",tableOutput("data_table"),id="myTab"),
tabPanel("BoxPlot", h3(textOutput("caption")), plotOutput("QualityBoxPlot")),
tabPanel("Regression model", h3(textOutput("caption2")), plotOutput("QualityPlot")),
verbatimTextOutput("fit")
)
)
)
)
),
tabPanel("SourceCode",
p("Developing_Data_Products_Coursera_Part1"),
a("https://github.com/msolanoo/WineQuality")
)
)
)
|
05ecbba153ef9faf44d873c72b4833ee458d195a
|
3b21d51af3869a589e1eb96eae4fd756b3db6062
|
/funciones-tipo.R
|
21f30ae81cacfc30294ed482a3b1c9725cdec6f8
|
[] |
no_license
|
cristobalortizvilches/r4ds-cov
|
a515a9d98a6d76493e9fc1721234a2bccdccc698
|
b38115e71aeff96d7632156d4365dc6453a0d067
|
refs/heads/master
| 2023-08-15T00:20:46.342840
| 2021-09-14T20:04:06
| 2021-09-14T20:04:06
| 394,477,462
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 331
|
r
|
funciones-tipo.R
|
# Agregar nuevo vetor a df ------------------------------------------------
add.vect.df <- fuction(df, vect) { #indico los argumentos (inputs)
new.df <- cbind((df, vect)) #procesamiento de los inputs que me entrega un resultado
return(new.df) #indico a la función que devuelva el resultado (output)
}
|
2741973178f02b435731ca2d7bf2759697485dba
|
da3fef2b47b1a586192d8f291dfb11edb589db3b
|
/10X_V_Gene_Plotting/3a_make combined vh file_v1.2.R
|
25d4e99862bc577ddc1464ab23e373128d6eb0bf
|
[] |
no_license
|
RachelBonami/AHAB
|
d25f9d7df74ae9de9d7f530eb641f9032fb8d75d
|
b431041317d56c1e8a87936ccd6656ace9f2119a
|
refs/heads/main
| 2023-03-02T07:07:48.995486
| 2021-02-08T20:39:26
| 2021-02-08T20:39:26
| 303,472,507
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,606
|
r
|
3a_make combined vh file_v1.2.R
|
#Run the 2_vh... Rscript first to generate the input files needed here.
#This script combines all files into single summary file
#that includes VH family proportion and counts per sample.
#It also adds columns to use as grouping variables
#in ggplot and removes groups by sample for which n<10 sequences
#to avoid inappropriate skewing of the data.
#A summary table is output as a CSV file to show the count
#and frequency of all Vgenes combined per sample (file.id)
#and subset.dx. This allows you to determine if one sample
#is skewing the data set.
#Things you may need to revise are marked:
######ALL CAPS#########
library(tidyverse)
library(magrittr)
library(readr)
#error here if you try to run as one block of code with above
check.create.dir <- function(the.dir) {
if (!dir.exists(the.dir)) {
dir.create(the.dir, recursive = TRUE) }
}
######DEFINE DIR IN########
#this is where files come from
dir.in <- "~/desktop/10X_BCR_pipelines/output/vh/freq_summary/"
######DEFINE DIR OUT########
#this is where output files go
dir.out <- "~/desktop/10X_BCR_pipelines/output/vh/graphs"
directories <- c(dir.in, dir.out)
lapply(directories,
FUN = check.create.dir)
library(dplyr)
library(tidyr)
file.list <- list.files(dir.in, pattern = "*vh_fam_freq_sum",
full.names = TRUE)
file.list
#this puts all the data into one larger dataframe containing all samples
data.list <- lapply(file.list, read.csv)
smaller.df <- subset(do.call(rbind, data.list), select = -c(X))
##########NEED TO REPLACE SUBSET NAMES FOR YOUR SUBSET NAMES#############
########E.G. REPLACE "all" AND "cd21low" ACCORDINGLY
#this adds subset group id column
smaller.df$subset.group <- ifelse(grepl("all", smaller.df$file.id),
"all", "cd21low")
#if additional subset groups are present, could do the following:
#smaller.df$subset.group <- ifelse(grepl("all",
# smaller.df$file.id), "all",
# ifelse(grepl("cd21low", smaller.df$file.id),
# "cd21low", "other"))
########### EDIT SAMPLE METADATA BELOW #######################
#adding T1D vs. CTL and B cell subset grouping columns
library(stringr)
combined.vh.df <- smaller.df %>%
mutate(dx.group = case_when(
#all
smaller.df$file.id == "4025-RB-1_all_1" ~ "FDR",
smaller.df$file.id == "4025-RB-1_all_2" ~ "T1D",
smaller.df$file.id == "4025-RB-1_all_5" ~ "T1D",
smaller.df$file.id == "4025-RB-2_all_1" ~ "FDR",
smaller.df$file.id == "4025-RB-2_all_2" ~ "FDR",
smaller.df$file.id == "4025-RB-2_all_5" ~ "T1D",
smaller.df$file.id == "4025-RB-3_all_1" ~ "FDR",
smaller.df$file.id == "4025-RB-3_all_2" ~ "T1D",
smaller.df$file.id == "4025-RB-3_all_5" ~ "FDR",
smaller.df$file.id == "4025-RB-4_all_1" ~ "T1D",
smaller.df$file.id == "4025-RB-4_all_2" ~ "FDR",
smaller.df$file.id == "4025-RB-4_all_5" ~ "T1D",
smaller.df$file.id == "4025-RB-5_all_1" ~ "T1D",
smaller.df$file.id == "4025-RB-5_all_2" ~ "FDR",
smaller.df$file.id == "4025-RB-5_all_5" ~ "FDR",
smaller.df$file.id == "4025-RB-6_all_1" ~ "T1D",
smaller.df$file.id == "4025-RB-6_all_2" ~ "T1D",
smaller.df$file.id == "4025-RB-6_all_5" ~ "FDR",
#cd21low
smaller.df$file.id == "4025-RB-1_cd21low_1" ~ "FDR",
smaller.df$file.id == "4025-RB-1_cd21low_2" ~ "T1D",
smaller.df$file.id == "4025-RB-1_cd21low_5" ~ "T1D",
smaller.df$file.id == "4025-RB-2_cd21low_1" ~ "FDR",
smaller.df$file.id == "4025-RB-2_cd21low_2" ~ "FDR",
smaller.df$file.id == "4025-RB-2_cd21low_5" ~ "T1D",
smaller.df$file.id == "4025-RB-3_cd21low_1" ~ "FDR",
smaller.df$file.id == "4025-RB-3_cd21low_2" ~ "T1D",
smaller.df$file.id == "4025-RB-3_cd21low_5" ~ "FDR",
smaller.df$file.id == "4025-RB-4_cd21low_1" ~ "T1D",
smaller.df$file.id == "4025-RB-4_cd21low_2" ~ "FDR",
smaller.df$file.id == "4025-RB-4_cd21low_5" ~ "T1D",
smaller.df$file.id == "4025-RB-5_cd21low_1" ~ "T1D",
smaller.df$file.id == "4025-RB-5_cd21low_2" ~ "FDR",
smaller.df$file.id == "4025-RB-5_cd21low_5" ~ "FDR",
smaller.df$file.id == "4025-RB-6_cd21low_1" ~ "T1D",
smaller.df$file.id == "4025-RB-6_cd21low_2" ~ "T1D",
smaller.df$file.id == "4025-RB-6_cd21low_5" ~ "FDR"
)
)
#this creates a column with subset_dx as added column
combined.vh.df <- cbind(combined.vh.df,
data.frame(paste(combined.vh.df$subset.group,
combined.vh.df$dx_group, sep = "_")))
colnames(combined.vh.df) [6:7] <- c("dx.group", "subset.dx")
write.csv(combined.vh.df, file = paste(dir.out,
"/combined_vh_fam_freq.csv", sep = ""))
#data_agg <- aggregate(value ~ index, smaller_df, mean)
#combine VH_fam and subset_dx into single column
vh.fam.subset.dx <- (paste(combined.vh.df$vh.family,
combined.vh.df$subset.dx, sep = "_"))
combined.vh.fam.subset.dx.df <- cbind(vh.fam.subset.dx,
combined.vh.df)
#reorganize columns in df
combined.vh.fam.subset.dx.df <- combined.vh.fam.subset.dx.df[,c(2:8,1)]
smaller.df <- data.frame(combined.vh.fam.subset.dx.df[, 1:4],
combined.vh.fam.subset.dx.df[, 7])
#rename column in df
colnames(smaller.df) [5] <- c("subset.dx")
#eliminate groups in any sample for which n<10
smaller.df <- smaller.df %>%
group_by(file.id) %>%
filter(sum(count) >= 10)
#eliminating all or cd21low from file.id
smaller.df <- smaller.df %>%
#split by "_" and give arbitrary column names to each
#of 3 new columns that were added
separate(file.id, c("A", "B", "C"), "_") %>%
unite(file.id, A, C, sep = "_") %>%
#ditching column B
select(!(B)) %>%
mutate(file.id = as.factor(file.id))
write.csv(smaller.df, file = paste(dir.out,
"/filtered_combined_vh_fam_freq.csv",
sep = ""))
#summary table output of n per group by sample
BCR.count.df<- smaller.df %>%
group_by(subset.dx, file.id) %>%
summarise(counts = sum(count, na.rm = TRUE)) %>%
mutate(freq = counts / sum(counts))
write.csv(BCR.count.df, file = paste(dir.out,
"/VH_count_table_by_sample_subsetdx.csv",
sep = ""))
#reorganizing structure of df to make group comparisons easier
#by adding each as a separate factor in df
# Reshape the per-(subset, diagnosis) VH-family data from long to wide, compute
# per-group mean frequencies, and export the VH families whose mean frequency
# is higher in cd21low T1D than in cd21low FDR.
# Assumes `smaller.df` (long format with columns vh.family, subset.dx,
# frequency, count, file_fam) and `dir.out` are defined upstream — verify.
vh.fam.wide.df <- smaller.df %>%
  pivot_wider(
    names_from = subset.dx,
    values_from = c(frequency, count)
  ) %>%
  #include this call or the output is a tbl, dataframe, and something else
  data.frame()
######### RENAME SUBSET NAMES BELOW ACCORDINGLY FOR YOUR DATA ###########
#I left my subset names because i thought it was better than 1 vs. 2
#for you to follow what is happening and is also nice to have that
#defined in your output CSV files. You'll need to edit through the
#end of this code.
#Generating mean values by vh_gene per group (B cell subset)
#keep dplyr call here, it was screwing up without it
#library(dplyr)
# One mean-frequency table per subset/diagnosis combination, keyed by vh.family.
all.FDR.mean.freq <- vh.fam.wide.df %>%
  group_by(vh.family) %>%
  summarize(mean(frequency_all_FDR, na.rm = TRUE))
all.T1D.mean.freq <- vh.fam.wide.df %>%
  group_by(vh.family) %>%
  summarize(mean(frequency_all_T1D, na.rm = TRUE))
cd21low.FDR.mean.freq <- vh.fam.wide.df %>%
  group_by(vh.family) %>%
  summarize(mean(frequency_cd21low_FDR, na.rm = TRUE))
cd21low.T1D.mean.freq <- vh.fam.wide.df %>%
  group_by(vh.family) %>%
  summarize(mean(frequency_cd21low_T1D, na.rm = TRUE))
# Full outer joins so VH families missing from one group are kept (as NA).
mean.Vh.fam.freq <- merge(merge(merge(all.FDR.mean.freq,
                                      all.T1D.mean.freq,
                                      by = "vh.family", all = TRUE),
                                cd21low.FDR.mean.freq,
                                by = "vh.family", all = TRUE),
                          cd21low.T1D.mean.freq,
                          by = "vh.family", all = TRUE)
# Replace the auto-generated summarize() column names with readable ones.
colnames(mean.Vh.fam.freq) [2:5] <- c("all.FDR.mean",
                                      "all.T1D.mean",
                                      "cd21low.FDR.mean",
                                      "cd21low.T1D.mean")
#this pulls VH gene id's for which cd21low T1D > cd21low_FDR
#frequency is TRUE, and writes csv for use with ggplot2
cd21low.T1D.vh.plot.df <- subset(
  mean.Vh.fam.freq, cd21low.T1D.mean > cd21low.FDR.mean)
VH.plot.list <- data.frame(cd21low.T1D.vh.plot.df$vh.family)
#write.csv(mean.VH.gene.freq, file = paste(dir.out,
#                                   "/mean_vh_fam_freq_by_subset.csv",
#                                   sep = ""))
write.csv(VH.plot.list, file = paste(dir.out,
                                     "/vh_fam_plot_when_cd21low_T1D_increased.csv",
                                     sep = ""))
#-----------------------------------
#############STOP HERE##############
#not sure the following is helpful, but code works
# Derive per-sample ids by stripping the subset prefix from the file ids.
file.id.list <- c(as.character(unique(smaller.df$file_fam)))
sample.id <- file.id.list %>%
  str_remove("all_") %>%
  str_remove("cd21low_")
sample.id
uni.sample.id <- unique(sample.id)
uni.sample.id
# NOTE(review): diagnosis labels are hard-coded positionally and must line up
# with the order of uni.sample.id — fragile if the input samples change.
matched.dx.group.list <- c("FDR", "T1D", "T1D",
                           "FDR", "FDR", "T1D",
                           "FDR", "T1D", "FDR",
                           "T1D", "FDR", "T1D",
                           "T1D", "FDR", "FDR",
                           "T1D", "T1D", "FDR")
sample.id.dx.cat <- data.frame(cbind(uni.sample.id, matched.dx.group.list))
str(sample.id.dx.cat)
|
fd4c6714bf1eb50194fbf577fa982348bd508a31
|
ecbf6731fc0c9db0fab7c055e106db7a1a9efb2e
|
/man/neglogLik.Rd
|
5f9bdd36a14112514e195c1efc645997be28bfce
|
[] |
no_license
|
cran/PtProcess
|
462a88b5e203417b58af57a28b36d04d6092e5ba
|
9a28067100be5e04cf73a961881f82c624557142
|
refs/heads/master
| 2021-07-08T18:17:45.206871
| 2021-05-03T17:30:02
| 2021-05-03T17:30:02
| 17,681,633
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,791
|
rd
|
neglogLik.Rd
|
\name{neglogLik}
\alias{neglogLik}
\title{Negative Log-Likelihood}
\description{
Calculates the log-likelihood multiplied by negative one. It is in a format that can be used with the functions \code{\link[stats]{nlm}} and \code{\link[stats]{optim}}.
}
\usage{
neglogLik(params, object, pmap = NULL, SNOWcluster=NULL)
}
\arguments{
\item{params}{a vector of revised parameter values.}
\item{object}{an object of class \code{"\link{mpp}"}.}
\item{pmap}{a user provided function mapping the revised parameter values \code{params} into the appropriate locations in \code{object}. If \code{NULL} (default), an untransformed one to one mapping is used.}
\item{SNOWcluster}{an object of class \code{"cluster"} created by the package \pkg{parallel}; default is \code{NULL}. Enables parallel processing if not \code{NULL}. See \code{\link{logLik}} for further details.}
}
\value{
Value of the log-likelihood times negative one.
}
\details{
This function can be used with the two functions \code{\link{nlm}} and \code{\link{optim}} (see \dQuote{Examples} below) to maximise the likelihood function of a model specified in \code{object}. Both \code{\link{nlm}} and \code{\link{optim}} are \emph{minimisers}, hence the \dQuote{negative} log-likelihood. The topic \code{\link{distribution}} gives examples of their use in the relatively easy situation of fitting standard probability distributions to data assuming independence.
The maximisation of the model likelihood function can be restricted to be over a subset of the model parameters. Other parameters will then be fixed at the values stored in the model \code{object}. Let \eqn{\Theta_0}{Theta_0} denote the full model parameter space, and let \eqn{\Theta}{Theta} denote the parameter sub-space (\eqn{\Theta \subseteq \Theta_0}{Theta subseteq Theta_0}) over which the likelihood function is to be maximised. The argument \code{params} contains values in \eqn{\Theta}{Theta}, and \code{pmap} is assigned a function that maps these values into the full model parameter space \eqn{\Theta_0}{Theta_0}. See \dQuote{Examples} below.
The mapping function assigned to \code{pmap} can also be made to impose restrictions on the domain of the parameter space \eqn{\Theta}{Theta} so that the minimiser cannot jump to values such that \eqn{\Theta \not\subseteq \Theta_0}{Theta not subseteq Theta_0}. For example, if a particular parameter must be positive, one can work with a transformed parameter that can take any value on the real line, with the model parameter being the exponential of this transformed parameter. Similarly a modified logit like transform can be used to ensure that parameter values remain within a fixed interval with finite boundaries. Examples of these situations can be found in the topic \code{\link{distribution}} and the \dQuote{Examples} below.
}
\seealso{\code{\link[stats]{nlm}}, \code{\link[stats]{optim}}
}
\examples{
# SRM: magnitude is iid exponential with bvalue=1
# maximise exponential mark density too
TT <- c(0, 1000)
bvalue <- 1
params <- c(-2.5, 0.01, 0.8, bvalue*log(10))
x <- mpp(data=NULL,
gif=srm_gif,
marks=list(dexp_mark, rexp_mark),
params=params,
gmap=expression(params[1:3]),
mmap=expression(params[4]),
TT=TT)
x <- simulate(x, seed=5)
allmap <- function(y, p){
# map all parameters into model object
# transform exponential param so it is positive
y$params[1:3] <- p[1:3]
y$params[4] <- exp(p[4])
return(y)
}
params <- c(-2.5, 0.01, 0.8, log(bvalue*log(10)))
z <- nlm(neglogLik, params, object=x, pmap=allmap,
print.level=2, iterlim=500, typsize=abs(params))
print(z$estimate)
# these should be the same:
print(exp(z$estimate[4]))
print(1/mean(x$data$magnitude))
}
\keyword{optimize}
|
9ff6521ea3e159948d3d49bc266e2d96f55a136f
|
1ea4338bc1036eca930cecd6d9be4c97b48d6072
|
/TP1/Pruebas-Matias/analisis_usuarios.R
|
b8afb78ecc16d783ad974fb6e5b29a643f3167c0
|
[] |
no_license
|
blukitas/Tp-Data-Mining-2020
|
fd13ad48e1057b6e0336a8a7bca4c4f9d473eeb5
|
83f057ba25f9b39df3187f770f7b6049f5030a59
|
refs/heads/master
| 2022-11-17T08:23:22.890202
| 2020-07-16T23:16:22
| 2020-07-16T23:16:22
| 264,476,975
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 7,420
|
r
|
analisis_usuarios.R
|
# Exploratory analysis of Twitter user attributes (expects a data frame
# `df_users` to already be in scope): log10-transform the count variables,
# smooth them with equal-width binning (bin means), and filter the active &
# popular accounts.  Plot labels are intentionally kept in Spanish.
library(infotheo)
names(df_users)
# Distribution and scaling
# note: a small constant e is added to every variable to avoid log10(0)
# Without transformation (raw scale): postsXyear
summary(df_users$postsXyear)
plot(sort(df_users$postsXyear))
hist(df_users$postsXyear, xlab = "posteosXaño", ylab ="Frecuencia", main="Distribución de la variable \n SIN transformación")
boxplot(df_users$postsXyear)
e <- 0.000001
df_users$followers_friends_ratio_log <- log10(df_users$followers_friends_ratio + e)
df_users$postsXyear_log <- log10(df_users$postsXyear + e)
df_users$friends_count_log <-log10(df_users$friends_count + e)
df_users$followers_count_log <- log10(df_users$followers_count + e)
df_users$statuses_count_log <-log10(df_users$statuses_count + e)
# with log10 transformation
summary(df_users$postsXyear_log)
plot(df_users$postsXyear_log)
plot(sort(df_users$postsXyear_log), ylab = "Log10(posteosXaño)", xlab = "Observaciones", main="Actividad SIN discretizar")
hist(df_users$postsXyear_log, xlab = "Log10(posteosXaño)", ylab ="Frecuencia", main="Distribución de la variable \n CON transformación")
boxplot(df_users$postsXyear_log,ylab="log10(postsXyear)", main ="Actividad SIN discretizar")
# statuses_count
summary(df_users$statuses_count_log)
plot(sort(df_users$statuses_count_log))
hist(df_users$statuses_count_log)
boxplot(df_users$statuses_count_log)
# followers_count
summary(df_users$followers_count_log)
plot(sort(df_users$followers_count_log))
hist(df_users$followers_count_log)
boxplot(df_users$followers_count_log)
# friends_count
summary(df_users$friends_count_log)
plot(sort(df_users$friends_count_log))
hist(df_users$friends_count_log)
boxplot(df_users$friends_count_log)
# with log10 transformation
summary(df_users$followers_friends_ratio_log)
plot(df_users$followers_friends_ratio_log)
plot(sort(df_users$followers_friends_ratio_log))
hist(df_users$followers_friends_ratio_log, xlab = "Log10(posteosXaño)", ylab ="Frecuencia", main="Distribución de la variable \n CON transformación")
boxplot(df_users$followers_friends_ratio_log)
# correlation check without log10
#names(df_users)
#pairs(df_users[,c(3,6,7,8,9,10)])
#cor(df_users[,c(3,6,7,8,9,10)], use = 'complete.obs')
# with log10
#cor(df_users[,c(3,11,12,13,14,15)], use = 'complete.obs')
#pairs(df_users[,c(3,11,12,13,14,15)], use = 'complete.obs')
##### Binning eqwidth/eqfreq/Floor
# Floor
#df_users$followers_friends_ratio_log_s <- floor(df_users$followers_friends_ratio_log)
#df_users$postsXyear_log_s <- floor(df_users$postsXyear_log)
#df_users$friends_count_log_s <-floor(df_users$friends_count_log)
#df_users$followers_cont_log_s <- floor(df_users$followers_count_log)
#df_users$statuses_count_log_s <-floor(df_users$statuses_count_log)
# testing with df_users$postsXyear_log
#plot(sort(df_users$postsXyear_log) , type = "l", col="red", ylab = "postsXyear", xlab = "Observaciones", main = "Dato original vs suavizado_floor")
#lines(sort(df_users$postsXyear_log_s),type = "l", col="blue")
#legend("topleft", legend=c("Original", "Suavizado"), col=c("red", "blue"), lty=1)
# correlation with floor
#cor(df_users[,c(3,11,12,13,14,15)], use = 'complete.obs')
#pairs(df_users[,c(3,11,12,13,14,15)], use = 'complete.obs')
# binning
# eqwidth/eqfreq
#nbins<- sqrt(nrow(df_users))
# cube-root rule for the number of bins
nbins<- nrow(df_users) ^ (1/3)
#nbins
# Bug fix: `bin_eq_width` was referenced below but never created, so the first
# `bin_eq_width$... <- ...` assignment failed with "object not found".
# Pre-allocate one smoothed column per variable, full length, filled with NA.
bin_eq_width <- data.frame(
  suavizado_postsXyear_log = rep(NA_real_, nrow(df_users)),
  suavizado_followers_friends_ratio_log = rep(NA_real_, nrow(df_users))
)
# discretize() takes the attribute, the binning method and the number of bins
bin_eq_width_postsXyear_log <- discretize(df_users$postsXyear_log,disc="equalwidth", nbins = nbins)
# For each bin compute the mean and store it in the smoothed attribute
for(bin in 1:nbins){
  bin_eq_width$suavizado_postsXyear_log[bin_eq_width_postsXyear_log$X==bin] = mean(df_users$postsXyear_log[bin_eq_width_postsXyear_log$X==bin])
}
bin_eq_width_followers_friends_ratio_log <- discretize(df_users$followers_friends_ratio_log,disc="equalwidth", nbins = nbins)
for(bin in 1:nbins){
  bin_eq_width$suavizado_followers_friends_ratio_log[bin_eq_width_followers_friends_ratio_log$X==bin] = mean(df_users$followers_friends_ratio_log[ bin_eq_width_followers_friends_ratio_log$X==bin])
}
#View(bin_eq_width)
plot(sort(df_users$postsXyear_log) , type = "l", col="red", ylab = "log10(postsXyear)", xlab = "Observaciones", main = "Actividad")
#plot(sort(df_users$followers_friends_ratio_log) , type = "l", col="red", ylab = "log10(followers_friends_ratio)", xlab = "Observaciones", main = "Popularidad")
# Add the series of the bin-mean (smoothed) variable
lines(sort(bin_eq_width$suavizado_postsXyear_log),type = "l", col="blue")
legend("topleft", legend=c("Original", "Suavizado"), col=c("red", "blue"), lty=1)
#lines(sort(bin_eq_width$suavizado_followers_friends_ratio_log),type = "l", col="blue")
#legend("topleft", legend=c("Original", "Suavizado"), col=c("red", "blue"), lty=1)
df_users$followers_friends_ratio_log_s <- bin_eq_width$suavizado_followers_friends_ratio_log
df_users$postsXyear_log_s <- bin_eq_width$suavizado_postsXyear_log
#df_users$friends_count_log_s <-
#df_users$statuses_count_log_s <-
#df_users$followers_count_log_s <-
#summary(data)
plot(df_users$postsXyear_log_s)
plot(sort(df_users$postsXyear_log_s), ylab = "Log10(posteosXaño)", xlab = "Observaciones", main="Actividad discretizada \n en bins de igual ancho")
hist(df_users$postsXyear_log_s)
# Bug fix: the original main string contained the invalid escape "n\ " (a
# backslash-space), which is a parse error; the intended newline is restored.
boxplot(df_users$postsXyear_log_s, ylab="log10(postsXyear)", main ="Actividad discretizada \n en bins de igual ancho")
# correlation with the smoothed variables
#cor(df_users[,c(3,17,18,19,20,21)], use = 'complete.obs')
#pairs(df_users[,c(3,17,18,19,20,21)], use = 'complete.obs')
# redundant variables
# outliers: not used in the end, but tried
# drop the observations in the lowest bin of one of the variables
#data <- df_users
#min(df_users$followers_friends_ratio_log_s)
#data <- data[data$followers_friends_ratio_log_s>min(df_users$followers_friends_ratio_log_s),]
#nrow(data)
#nrow(df_users)
# filtering
sp <- ggplot(data=df_users, aes(x=postsXyear_log_s, y=followers_friends_ratio_log_s), xlab="Actividad (log10(postsXyear)", ylab="Popularidad (log10(followers/friends))") + geom_point()
# Add horizontal line at y = 3
sp + geom_hline(yintercept=3)
# Change line type and color
# (the original passed `color = "red"` twice to geom_hline; the duplicate
# named argument has been removed)
sp <- sp + geom_hline(yintercept=3, linetype="dashed", color = "red", size=1.5)
# Change line size
#sp <- sp + geom_hline(yintercept=3, linetype="dashed",
#color = "red", size=1.5)
sp + geom_vline(xintercept = 3)
# Change line type, color and size
sp <- sp + geom_vline(xintercept = 3, linetype="dashed",
                      color = "red", size=1.5)
sp <- sp +
  geom_point(colour="blue") +
  geom_point(data=df_users[df_users$postsXyear_log_s > 3 & df_users$followers_friends_ratio_log_s > 3,], aes(x=postsXyear_log_s, y=followers_friends_ratio_log_s), colour="black")
sp
plot(df_users$postsXyear_log_s, df_users$followers_friends_ratio_log_s, xlab="Actividad", ylab="Popularidad")
abline(h =3, untf = FALSE)
abline(v =3, untf = FALSE)
# by activity (x) and popularity (y)
data_filtrada_1 <- df_users[df_users$postsXyear_log_s > 3 & df_users$followers_friends_ratio_log_s > 3,]
casos_1 <- nrow(data_filtrada_1)
casos_1
# by tweet type
data_filtrada_2 <- data_filtrada_1[data_filtrada_1$tweet_type == "TW",]
nrow(data_filtrada_2)
# by presence of urls
data_filtrada_3 <- data_filtrada_2[as.numeric(data_filtrada_2$url_tweets_ratio) >= 1, ]
nrow(data_filtrada_3)
names(data_filtrada_3)
#view(data_filtrada_3)
data_filtrada_3[["screen_name"]]
|
384a052f98a6d56c34a7400b6c6edd9a35c5324e
|
026b4d56086e2e6709b08f0e922e363dd9776a2c
|
/R/MTA_pattern.R
|
40d041fc601bb50195ce7d353c65c30331f84fcc
|
[] |
no_license
|
chanw0/MTA
|
96f56493375905005e86064c9a20716a5f50e0ea
|
98de0cf6910c601363c9eb9fbe6f2db1e5f30cfd
|
refs/heads/master
| 2021-07-15T05:25:32.863236
| 2021-07-06T21:32:39
| 2021-07-06T21:32:39
| 230,279,409
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,904
|
r
|
MTA_pattern.R
|
############Extracted the dynamic trends from a group of subjects
MTA_pattern=function(x,M,proportion.explained, k,Laplacian.matrix,timevec,lambda1.set,
lambda2.set,lambda3.set)
{
N=dim(x)[1]; P=dim(x)[2];T=dim(x)[3];
if(is.null(timevec)) timevec=timevec=1:T
BS = create.bspline.basis(timevec, norder=4)
B = getbasismatrix(timevec, BS) #basis function
Omega = getbasispenalty(BS)
if(is.null(M)) {
ff=cc=NULL
explained.variance=NULL
xx=x
for(i in 1:50)
{
predict.res=MTA01(xx,k,Laplacian.matrix,timevec,lambda1.set,lambda2.set,lambda3.set)
c.updated=predict.res[[1]];f.updated=predict.res[[2]];
cc=rbind(cc,c.updated);ff=cbind(ff,f.updated)
predict.matrix=ff%*%cc%*%(t(B))
explained.variance=sum(predict.matrix^2)*N/(sum(x^2))
for(j in 1:N) xx[j,,]=xx[j,,]-predict.matrix
if (explained.variance>proportion.explained) break;}} else {
ff=cc=NULL
xx=x
for(i in 1:M)
{
predict.res=MTA01(xx,k,Laplacian.matrix,timevec,lambda1.set,lambda2.set,lambda3.set)
c.updated=predict.res[[1]];f.updated=predict.res[[2]];
cc=rbind(cc,c.updated);ff=cbind(ff,f.updated)
predict.matrix=ff%*%cc%*%(t(B))
for(j in 1:N) xx[j,,]=xx[j,,]-predict.matrix
}}
plot.data=data.frame(cbind(timevec,sapply(1:nrow(cc),function(x,cc,B){return(cc[x,]%*%t(B))},cc=cc,B=B)))
colnames(plot.data)=c("time",paste(sapply(1:nrow(cc), toOrdinal), "common trend"))
plot.data=melt(plot.data,id.vars = 1,variable.name ="factor",value.name = "Escore")
pc.plot=ggplot(plot.data, aes(time, Escore))+geom_point()+
# geom_smooth(se=FALSE)+
geom_line()+theme_bw()+ylab("Microbial common trend")+
facet_wrap(~factor,scales="free")
AA=list(pc.plot,ff,cc)
return(AA)
}
|
423d7dca86d28bb49f13c1a6f0a6f4a8becf6785
|
b3315fa1dfe0dfefff0213db814284d7288cdbd4
|
/2-otu_analysis/R-specaccum_cur.r
|
3921eaee3e712302bd5a584e62f441098fdaa42c
|
[] |
no_license
|
myshu2017-03-14/16S_analysis_pipline
|
dee8ab3fb9b8dba9af7882e0fa8eac68c58d173b
|
f8866d61e07f72a44bde744617c0f53ccb3d281a
|
refs/heads/master
| 2020-03-25T01:11:41.354516
| 2019-09-27T00:49:49
| 2019-09-27T00:49:49
| 143,225,718
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,343
|
r
|
R-specaccum_cur.r
|
#!/usr/bin/Rscript
# Plot a species (OTU) accumulation curve from an OTU table that carries a
# trailing taxonomy column.
# Usage: R-specaccum_cur.r -i <otu_table_with_taxonomy> -o <output_pdf>
library(getopt)
# get options, using the spec as defined by the enclosed list.
# we read the options from the default: commandArgs(TRUE).
#      character   logical   integer   double
spec = matrix(c(
  'input_table_file_with_taxa', 'i', 1, "character",
  'help' , 'h', 0, "logical",
  'output_file' , 'o' , 1, "character"
), byrow=TRUE, ncol=4);
opt = getopt(spec);
# if help was asked for print a friendly message
# and exit with a non-zero error code
if ( !is.null(opt$help) ) {
  cat(getopt(spec, usage=TRUE));
  q(status=1);
}
#if ( is.null(opt$legend_size ) ) { opt$legend_size = 7.5}
#if ( is.null(opt$x_size ) ) { opt$x_size = 5 }
#if ( is.null(opt$x_dirct ) ) { opt$x_dirct = 90 }
#if ( is.null(opt$group_size ) ) { opt$group_size = 1.5 }
#Load vegan library
library(vegan)
# Read the OTU table (rows = OTUs, columns = samples plus a final taxonomy
# column; first column holds the OTU ids)
#otu_table_file <-"D:/program/16S/test_data/otu_table_with_taxonomy.txt"
otu_table = read.delim(opt$input_table_file_with_taxa, row.names= 1, header=T, sep="\t",check.names = F)
# data <-t(otu_table)
# Drop the taxonomy column and transpose so rows = samples, columns = OTUs —
# the orientation vegan::specaccum() expects.
data <- t(otu_table[,-ncol(otu_table)])
# plot
# (opt$output_file is already a character scalar; the original wrapped it in
# a redundant single-argument paste() call)
pdf(opt$output_file)
sp1 <- specaccum(data, method="random")
plot(sp1, ci.type="poly", col="blue", lwd=2, ci.lty=0, ci.col="lightblue",xlab = "number of samples",ylab = "OTUs detected")
boxplot(sp1, col="yellow", add=TRUE, pch="+")
dev.off()
|
8a46ae9c821f63846e67121dd714f82a26a424a2
|
1e36964d5de4f8e472be681bad39fa0475d91491
|
/man/SDMXServiceProviders.Rd
|
6685355757541fc3a0e63c647947918adee494f3
|
[] |
no_license
|
cran/rsdmx
|
ea299980a1e9e72c547b2cca9496b613dcf0d37f
|
d6ee966a0a94c5cfa242a58137676a512dce8762
|
refs/heads/master
| 2023-09-01T03:53:25.208357
| 2023-08-28T13:00:02
| 2023-08-28T13:30:55
| 23,386,192
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,084
|
rd
|
SDMXServiceProviders.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Class-SDMXServiceProviders.R,
% R/SDMXServiceProviders-methods.R
\docType{class}
\name{SDMXServiceProviders}
\alias{SDMXServiceProviders}
\alias{SDMXServiceProviders-class}
\alias{SDMXServiceProviders,SDMXServiceProviders-method}
\title{Class "SDMXServiceProviders"}
\usage{
SDMXServiceProviders(providers)
}
\arguments{
\item{providers}{an object of class "list" (of \link{SDMXServiceProvider})
configured by default and/or at runtime in \pkg{rsdmx}}
}
\value{
an object of class "SDMXServiceProviders"
}
\description{
A class to wrap a list of SDMX service providers
}
\section{Slots}{
\describe{
\item{\code{providers}}{an object of class "list" (of \link{SDMXServiceProvider})
configured by default and/or at runtime in \pkg{rsdmx}}
}}
\section{Warning}{
this class is not useful in itself, but all SDMX non-abstract classes will
encapsulate it as slot, when parsing an SDMX-ML document.
}
\author{
Emmanuel Blondel, \email{emmanuel.blondel1@gmail.com}
}
|
fca60b299f14caa4b428032c6e0804d77a7107ba
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/tswge/examples/fig6.2nf.Rd.R
|
be116938c3715156aad2e9899a974d9f75a56c7c
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 167
|
r
|
fig6.2nf.Rd.R
|
# Auto-extracted example code from the tswge package's fig6.2nf.Rd help page.
library(tswge)
### Name: fig6.2nf
### Title: Data in Figure 6.2 without the forecasts
### Aliases: fig6.2nf
### Keywords: datasets
### ** Examples
# Load the fig6.2nf dataset into the workspace.
data(fig6.2nf)
|
78cddedb83a5ff61e3ede24937305ed47ff1209d
|
ebb2a6c304eff697a7a016cc64218ba507f2af27
|
/implementation/jd_ift_quadrature.r
|
be67d9c9c6ccb28667d8b3dfe38a38dcc9aebac0
|
[] |
no_license
|
Blunde1/it-ift
|
1a57c55d0f3b7106530e2b80e4cca4c82d1136eb
|
45014dd16118fb90fe490ebbdf4ec8f198ae9ba6
|
refs/heads/master
| 2021-08-24T02:15:14.548045
| 2017-12-07T16:08:14
| 2017-12-07T16:08:14
| 103,902,408
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,424
|
r
|
jd_ift_quadrature.r
|
# Estimation of jump-diffusion models via TMB using inverse-Fourier-transform
# quadrature.  Interactive analysis script: compiles the C++ template, fits a
# GBM and a Merton jump-diffusion (MJD), then plots transition densities.
# NOTE(review): two setwd() calls in a row — the second (when it succeeds)
# overrides the first; both are machine-specific paths.
setwd("C:/Users/Berent/Projects/it-ift/implementation")
setwd("~/Projects/UiS-Git/it-ift/implementation")
library(TMB)
# Compile and load the C++ objective function.
compile("jd_ift_quadrature.cpp")
dyn.load(dynlib("jd_ift_quadrature"))
# real data
real_data <- TRUE
if(real_data){
  library(Quandl)
  start_date <- "1950-01-01"; end_training <- "2017-01-01";
  test <- Quandl("BCB/UDJIAD1",trim_start=start_date)
  # NOTE(review): library(Quandl) is attached twice; the second call is redundant.
  library(Quandl)
  DJIA<-Quandl("BCB/UDJIAD1",trim_start=start_date, trim_end=end_training)
  #DJIA <- Quandl("BCB/UDJIAD1")
  # Reverse the rows so the series runs in chronological order.
  DJIA <- DJIA[rev(rownames(DJIA)),]
  plot(DJIA,type="l")
  log_price <- log(DJIA$Value)
  # Data list consumed by MakeADFun; dt = 1/250 (presumably daily data with
  # 250 trading days per year — confirm against the C++ template).
  data <- list(X=log_price, dt=1/250, process=2, scheme=1, jump=0, qiter=100, quadrature=2)
}
# Simulated data
simulate = TRUE
if(simulate){
  # NOTE(review): when both real_data and simulate are TRUE, this block
  # overwrites `data`, so the fits below use the simulated series.
  source("simulation/Simulation_GBM.R")
  set.seed(123)
  time = 100
  N=12*time
  mu = 0.1
  sigma = 0.2
  x0 = 1
  dt <- time / N
  seed = 123
  X <- GBM_process(time, N, mu, sigma, x0, seed)
  par_true <- c(mu,sigma)
  data <- list(X=log(X), dt=1/12, process=2, scheme=1, jump=0, qiter=100, quadrature=2)
  plot(X, type="l", main="Simulated GBM")
}
##### GBM Estimation ####
# Fit the pure-diffusion model with nlminb using TMB's gradient and Hessian.
par_diff <- c(kappa=0.1,sigma=0.2)
par_jump <- c()
param <- list(par = c(par_diff,par_jump))
obj <- MakeADFun(data, param)
opt <- nlminb(obj$par, obj$fn, obj$gr, obj$he, control=list(trace=1))
res <- sdreport(obj)
res
##### MJD Estimation ####
# Switch the template to the jump process and refit (gradient only, no Hessian).
data$process = 3
data$jump = 1
data$qiter = 180
par_jump <- c(30,0,0.05)
param <- list(par = c(par_diff, par_jump))
obj <- MakeADFun(data, param)
opt <- nlminb(obj$par, obj$fn, obj$gr, control=list(trace=1))
res <- sdreport(obj)
res
##### MJD TD Plotting #####
# Evaluate the one-step transition density on a grid of end points by calling
# the TMB objective (negative log-likelihood) for each candidate end point.
param <- list(par=c(0.4,log(0.3),log(10),-0.01,log(0.05))) # Actual master
# param$par <- c(0.55,log(0.2),log(18),-0.0063,log(0.4))
x0 <- 0; dt <- 1/250
data$X <- rep(x0,2)
data$process = 3
data$jump = 1
data$qiter = 180
data$dt = 1/250
# Matsuda multimodal settings
param <- list(par=c("r"=0.03,"sigma"=log(0.2),"lambda"=log(1),"mu"=-0.5,"nu"=log(0.1)))
data$dt <- 1/4
data$X <- rep(x0<-0, 2)
# NOTE(review): the second x_vals assignment overrides the first (narrower grid).
x_vals <- seq(-1.7,0.5,by=0.01)
x_vals <- seq(-0.12, 0.12, by=0.001)
y_vals <- numeric(length(x_vals))
for(i in 1:length(x_vals)){
  data$X <- c(x0,x_vals[i])
  obj <- MakeADFun(data, param)
  y_vals[i] <- exp(-obj$fn()) # obj returns nll
}
lines(x_vals,y_vals,col="blue")
plot(x_vals, y_vals, type="l") # bimodal works
dyn.unload(dynlib("jd_ift_quadrature"))
|
23390623b432d8fb80f743bfa7cafc96628f73df
|
57a607818308047a9c729a27afd112267556e5ce
|
/R/interaction.R
|
e167b3a0cc425c184dc303aaa4e9d710296f827d
|
[] |
no_license
|
oscarperpinan/pdcluster
|
bf16799943a4e75bd6c4f7811b268e4e02cb0cf5
|
db2c47535a5807ef9dc12670368fe40216c8cdd9
|
refs/heads/master
| 2021-01-02T08:56:24.038707
| 2018-02-18T10:11:17
| 2018-02-18T10:11:17
| 11,253,765
| 7
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,674
|
r
|
interaction.R
|
setGeneric('identifyPD', function(object, ...){standardGeneric('identifyPD')})
# Interactively identify points in the current lattice display of a PD object
# and return the corresponding rows of the data.
#   label:  column of object@data used to label clicked points (non-splom case)
#   column, row: which panel of the trellis layout to focus
#   pch, cex, col: graphical parameters for the identification markers
setMethod('identifyPD', signature=(object='PD'),
          definition=function(object, label='energy', column=1, row=1, pch=13, cex=0.6, col='darkgreen',...){
            trellis.focus('panel', column, row, ...)
            trellisType <- as.character(trellis.last.object()$call)[1]
            # Capture the selection *before* trellis.unfocus(); in the original
            # implementation trellis.unfocus() was the last expression, so the
            # selected rows were computed and then silently discarded.
            res <- if (trellisType=='splom'){
              idx <- panel.link.splom(pch=pch, cex=cex, col=col,...)
              object[idx,]
            } else {
              lbl=round(object@data[label], 1)
              idx <- panel.identify(label=lbl[,1], pch=pch, cex=cex, col=col,...)
              as.data.frame(object)[idx,]
            }
            trellis.unfocus()
            res
          }
          )
# Interactively select a polygonal region in the current lattice panel and
# highlight the points that fall inside it.  Vertices are clicked with the
# mouse; the polygon is closed when grid.locator() returns NULL (right-click /
# ESC depending on the device).  Returns (invisibly) a logical vector marking
# which panel points lie inside the selected region.
# NOTE(review): in.out() presumably comes from the mgcv package — confirm it
# is attached; layer()/drawLayer() are latticeExtra functions.
choosePoints <- function(...){
  trellis.focus('panel', 1, 1)
  # Pull the data plotted in the focused panel.
  x <- trellis.panelArgs()$x
  y <- trellis.panelArgs()$y
  xy <- xy.coords(x, y, recycle = TRUE)
  x <- xy$x
  y <- xy$y
  # Convert panel data to absolute "points" units so they can be compared with
  # the clicked locations below.
  px <- convertX(unit(x, "native"), "points", TRUE)
  py <- convertY(unit(y, "native"), "points", TRUE)
  pointsData <- cbind(px, py)
  border <- as.numeric()
  # Collect polygon vertices until the user stops (grid.locator() -> NULL).
  while (TRUE){
    ll <- grid.locator(unit='native')
    if (!is.null(ll)){
      # Mark each clicked vertex on the plot.
      lpoints(ll, col='red', cex=0.7, pch=3)
      lx <- convertX(unit(ll$x, 'native'), 'points', FALSE)
      ly <- convertY(unit(ll$y, 'native'), 'points', FALSE)
      border <- rbind(border, c(lx, ly))
    } else {
      break
    }
  }
  # Point-in-polygon test of the panel data against the clicked border.
  inside <- in.out(border, pointsData)
  dataInside <- data.frame(xin=x[inside], yin=y[inside])
  # Highlight the selected points.
  drawLayer(layer(panel.points(xin, yin, col='red', cex=0.4),
                  data=dataInside)
            )
  trellis.unfocus()
  # Final assignment: the function returns `inside` invisibly (the value of `<-`).
  result <- inside
}
|
db8a180dbbf7141661e0755d6bc169fc111ba441
|
493ffb86b0a2d34cde36418185a0dd8380179aa3
|
/R/sample_beetles.R
|
68425969e8f9fe97a6de420b3c59221b69edc491
|
[
"MIT"
] |
permissive
|
atyre2/tribolium
|
6b326598f5bc6c47c171ccbf1acbc1b78bb79d3d
|
60f7a83f5b9e0386b164f272e1501f9133a2ab51
|
refs/heads/main
| 2023-03-09T14:17:29.594069
| 2021-02-11T18:27:59
| 2021-02-25T15:35:20
| 338,089,053
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,301
|
r
|
sample_beetles.R
|
#' Sample beetles from a habitat by stage
#'
#' Draws binomial samples of each life stage, where each individual is caught
#' with probability \code{v/V} (the sampled fraction of the habitat volume).
#'
#' @param N vector of larvae, pupae, adult to draw sample from
#' @param V total volume of habitat in units of 20 g
#' @param n vector of number of samples to take from each stage; a single
#'   value is interpreted as sampling only the last stage (adults)
#' @param v volume of sample
#' @param replacement (logical) sample with or without replacement
#'
#' @return data frame with a column for each stage sampled, and a row for each sample.
#' @export
#'
#' @examples
#' sample_beetles(c(larvae = 75, pupae = 35, adults = 150), V = 5, n = 5, v = 0.15)
#' beetles <- iterate(parms = controlA, N0 = c(larvae = 75, pupae = 35, adults = 60), popfun = LPA_deter)
#' ## Not run:
#' samples <- purrr::map_dfr(1:20, function(t)sample_beetles(unlist(beetles[t, 9:11]), V = 1, n = c(0,0,5), v = 0.15), .id = "t")
#'
#' ## End(Not run)
sample_beetles <- function(N, V, n, v, replacement = TRUE){
  if (length(n) == 1){
    # assume only adults (the last stage) are sampled
    nn <- rep(0, length(N))
    nn[length(N)] <- n
    n <- nn
  }
  if(!replacement) stop("only sampling with replacement implemented")
  # One column per stage, one row per (potential) sample; stages with fewer
  # samples than max(n) keep NA in the unused rows.
  samples <- matrix(data = NA, nrow = max(n), ncol = length(N))
  for(i in seq_along(N)){
    # Each individual is captured independently with probability v/V.
    # seq_len() (not 1:n[i]) keeps this safe for any non-negative n[i].
    if (n[i] > 0) samples[seq_len(n[i]), i] <- rbinom(n[i], N[i], v/V)
  }
  # seq_len(max(n)) fixes the all-zero edge case: the original 1:max(n)
  # produced c(1, 0) when max(n) == 0 and data.frame() then errored.
  results <- data.frame(rep = seq_len(max(n)),
                        samples)
  names(results)[2:ncol(results)] <- names(N)
  results
}
|
37043786741230357607096cdbedf85371ff291f
|
c7a6c5249ffd79d262dbdbe42c9efaa313119a03
|
/Scripts/Figures/Figure S2.r
|
16a1e897aac830ef7dd36a3d13c9566507378607
|
[] |
no_license
|
YaojieLu/ESS-paper
|
30b9ea52e0a6bdb67bc21870b2416da7c53b909b
|
0ea34306e8a06a74e5d4a5d9e426f00b98787d28
|
refs/heads/master
| 2022-03-19T05:15:23.292379
| 2019-12-08T00:35:55
| 2019-12-08T00:35:55
| 106,539,077
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,323
|
r
|
Figure S2.r
|
# Figure S2: ESS stomatal behaviour converted to transpiration E and plotted
# against relative soil water s, with the dashed reference line DI = s/100.
# Relies on functions wLLf() and gswLf() defined in
# Scripts/Derived variables/SII-F.r and on the table Results/SII-DV.csv.
options(digits=22)
source("Scripts/Derived variables/SII-F.r")
data <- read.csv("Results/SII-DV.csv")
# Parameterization
LAI <- 3
Vcmax <- 50
cp <- 30
Km <- 703
Rd <- 1
a <- 1.6
nZ <- 0.5
p <- 43200
l <- 1.8e-5
VPD <- 0.02
pe <- -1.58*10^-3
b <- 4.38
kxmax <- 5
# NOTE(review): `c` and `d` shadow the base functions c()/density-related
# names for non-function uses; calls like c("w", "E") still resolve to base::c.
c <- 2.64
#d <- 3.54
# Unit-conversion factors used to turn conductance into daily transpiration.
h <- l*a*LAI/nZ*p
h2 <- l*LAI/nZ*p/1000
# Environmental conditions
ca <- 400
k <- 0.05
MAP <- 1825
gamma <- 1/((MAP/365/k)/1000)*nZ
pkx <- 0.5
d <- 5
# ESS
h3 <- 25
# NOTE(review): inside subset(), h3 and d resolve to columns of `data` if such
# columns exist, otherwise to the globals just defined — verify the CSV schema.
wLL <- subset(data, h3==25 & d==5, select="wL")[[1]]
wLLL <- wLLf(wLL)
# ESS stomatal conductance as a function of soil water, vectorized over w.
fL <- Vectorize(function(w)gswLf(w, wLL))
xL <- seq(wLLL, 1, by=(1-wLLL)/100)
yL <- fL(xL)
# Convert conductance to transpiration E = gs * VPD * h.
res <- data.frame(w=xL, E=yL*VPD*h)
colnames(res) <- c("w", "E")
# Figures
# NOTE(review): windows() only exists on Windows; use x11()/quartz() elsewhere.
windows(8, 6)
par(mgp=c(2.2, 1, 0), xaxs="i", yaxs="i", lwd=2, mar=c(3.3, 3.5, 0.9, 0.7), mfrow=c(1, 1))
# gs - w
plot(0, 0, type="n", xaxt="n", yaxt="n", xlab=NA, ylab=NA,
     xlim=c(0, 1), ylim=c(0, 0.04), cex.lab=1.3)
points(res, type = "l")
# Dashed reference line: DI(s) = s / 100.
f <- function(x)x/100
curve(f, 0, 1, lty = 2, add = T)
axis(1, xlim=c(0, 1), pos=0, lwd=2)
mtext(expression(italic(s)),side=1, line=2, cex=1.3)
axis(2, ylim=c(0, 0.4), pos=0, lwd=2)
mtext(expression(italic(E~or~DI~(day^-1))),side=2,line=1.8, cex=1.3)
box()
legend("topleft", c(expression(italic(E)), expression(italic(DI))), lty=c(1, 2))
dev.copy2pdf(file = "Figures/Figure S2.pdf")
|
9d8b83444223450e8680eea920f96921586ede06
|
4db2fca3393454228150cff9810407b03ce7e390
|
/runner.R
|
ea5b8c11c97390fda6eaaa66400703d03076f3e4
|
[] |
no_license
|
mozilla/glamvalid
|
336b8730ccc70119a293ca7601661eb75932cba1
|
737d6591d836fa21dd2c0b8491b9d0ecf62fa9e4
|
refs/heads/master
| 2023-08-31T08:23:01.262946
| 2020-07-08T16:33:58
| 2020-07-08T16:33:58
| 277,903,555
| 0
| 2
| null | 2020-11-19T23:24:03
| 2020-07-07T19:22:26
|
R
|
UTF-8
|
R
| false
| false
| 2,253
|
r
|
runner.R
|
# Entry point for the GLAM validation report generator.  Reads its entire
# configuration from environment variables, builds a SQL filter clause, then
# renders sitegen.Rmd into a content-addressed HTML file under /tmp.
source("libs.R")
basicConfig()
# Runtime configuration (all supplied via environment variables).
os <- Sys.getenv("OS")
channel <- Sys.getenv("CHANNEL")
date_start <- Sys.getenv("DATE_START")
date_end <- Sys.getenv("DATE_END")
build_start <- Sys.getenv("BUILD_START")
build_end <- Sys.getenv("BUILD_END")
major_ver <- Sys.getenv("MAJOR_VER")
histos <- Sys.getenv("HISTOS")
histo_path <- '/root/histo.txt'
## build the filter string
# Non-release channels filter on build id and derive the submission-date
# window from the build window (+7 days of trailing submissions); release
# filters on submission date directly and takes only sample_id = 42.
# Note: the trailing `if (...) ... else ""` is an extra argument to glue(),
# which concatenates its ... pieces before interpolating {major_ver}.
if(channel != 'release'){
    date_start <- strftime(as.Date(build_start,'%Y%m%d'),'%Y-%m-%d')
    date_end <- strftime(as.Date(build_end,'%Y%m%d')+7,'%Y-%m-%d')
    fil <- glue("
where normalized_channel = '{channel}'
and environment.system.os.name = '{os}'
and substr(application.build_id,1,8)>='{build_start}'
and substr(application.build_id,1,8)<='{build_end}'
and DATE(submission_timestamp)>='{date_start}'
and DATE(submission_timestamp)<='{date_end}'
", if(major_ver!="NG") "and substr(metadata.uri.app_version,1,2)='{major_ver}'" else "")
}else{
    fil <- glue("
where normalized_channel = '{channel}'
and environment.system.os.name = '{os}'
and DATE(submission_timestamp)>='{date_start}'
and DATE(submission_timestamp)<='{date_end}'
and sample_id=42
", if(major_ver!="NG") "and substr(metadata.uri.app_version,1,2)='{major_ver}'" else "")
}
# Fall back to the histogram list file when HISTOS is not set.
if(histos==""){
    histos <- paste(readLines(histo_path),collapse="\n")
}
loginfo(glue("os={os}, channel={channel}, date=({date_start},{date_end}), major_version={major_ver}"))
if(nchar(histos) > 0){
    loginfo("histograms")
    loginfo(histos)
    # Bundle the render parameters, save them to a temp file, and name the
    # output after the digest of the bundle so identical runs reuse one name.
    k <- list(clauz = fil, channel = channel, os=os, h = histos)
    tx <- tempfile(pattern='glam_')
    save(k, file=tx)
    nf <- sprintf("glam_%s.html",digest(k))
    loginfo(glue("saved variables to {tx}, output html will be written to /tmp/{nf}"))
    rmarkdown::render("./sitegen.Rmd",params=list(f = tx),
                      output_file=sprintf("/tmp/%s",nf))
    # (typo fix: "Otput" -> "Output"; message otherwise unchanged)
    loginfo(glue("Output html will be written to {nf} and if you specified --mount type=bind,source=\"$(pwd)\"/outputs,target=/tmp/ then look inside outputs"))
} else {
    # (grammar fix in the error message; condition and class unchanged)
    stop(glue("Histograms are empty: neither the HISTOS environment variable nor the file {histo_path} provided any"))
}
#payload.histograms.fx_session_restore_file_size_bytes
#payload.histograms.telemetry_compress
#payload.histograms.cycle_collector_worker_visited_ref_counted
|
4b140ab49bbad7e1863c93b87f4e2d1df4fa83c2
|
d48518ce86622333073b2cf6bbf040b5a149e483
|
/R/preprocess_macro.R
|
fd831b8e750c56b337dddad2844b56c6e6cfd118
|
[] |
no_license
|
gdario/sberbank
|
66c50072e7acdbaebd82c732e831d476b8e87777
|
c6c865f907efef3d5ec33a084d08cae34a92d47e
|
refs/heads/master
| 2020-12-30T18:02:17.860507
| 2017-06-05T14:34:30
| 2017-06-05T14:34:30
| 90,940,949
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,638
|
r
|
preprocess_macro.R
|
library(magrittr)
library(tidyverse)
source("R/clean_dataset.R")
match_timestamps <- function(ts1, ts2) {
  ## Align two timestamp vectors: returns a data frame with one row per
  ## timestamp present in BOTH inputs, carrying the positional index of
  ## that timestamp in each (idx_dataset for ts1, idx_macro for ts2).
  ##
  ## `data_frame()` is deprecated in modern dplyr; `tibble()` (loaded via
  ## tidyverse above) is the drop-in replacement.  The join key is made
  ## explicit (`by = "ts"`), which also silences the "Joining, by" message.
  df1 <- tibble(ts = ts1, idx_dataset = seq_along(ts1))
  df2 <- tibble(ts = ts2, idx_macro = seq_along(ts2))
  inner_join(df1, df2, by = "ts")
}
## Load the raw macro-economic table and split off its timestamp column,
## which is matched back to the train/test timestamps later in this script.
macro <- readr::read_csv("data/macro.csv.zip")
timestamp_macro <- macro$timestamp
macro %<>% select(-timestamp)  # magrittr compound pipe: macro <- select(macro, -timestamp)
###
fix_separator <- function(x) {
  ## Parse numbers written with a comma decimal separator (e.g. "3,14"):
  ## swap the first comma for a dot, then coerce to numeric.  Vectorized;
  ## unparseable entries become NA (with the usual coercion warning).
  normalized <- sub(",", ".", x, fixed = TRUE)
  as.numeric(normalized)
}
## Repair decimal separators in the three columns stored with "," decimals.
macro %<>% mutate(
child_on_acc_pre_school = fix_separator(child_on_acc_pre_school),
modern_education_share = fix_separator(modern_education_share),
old_education_build_share = fix_separator(old_education_build_share)
)
## From here on `macro` is handled as a plain numeric matrix.
macro <- as.matrix(macro)
### Discard the columns with more than 30% of NAs
perc_na <- apply(macro, 2, function(x) mean(is.na(x)))
idx_na <- perc_na < 0.3
macro <- macro[, idx_na]
### After applying findCorrelation we end up with a vector of names
### to remove. This is stored in data/remove_macro.txt
to_remove <- scan("data/remove_macro.txt", sep = "\t", what = "")
idx <- match(to_remove, colnames(macro))
## NOTE(review): if any name in to_remove is absent, match() yields NA and
## the negative indexing below errors -- confirm the file stays in sync.
macro <- macro[, -idx]
### Combine with the cleaned training and test sets
## Loads (at least) id_test, timestamp_train, timestamp_test, cleaned_train,
## cleaned_test into the workspace, as used below.
load("output/preprocess_train_and_test.RData")
## Align macro rows to each split's timestamps via the index lookup tables.
mapping_train <- match_timestamps(timestamp_train, timestamp_macro)
macro_train <- macro[mapping_train$idx_macro, ]
mapping_test <- match_timestamps(timestamp_test, timestamp_macro)
macro_test <- macro[mapping_test$idx_macro, ]
cleaned_train$macro <- macro_train
cleaned_test$macro <- macro_test
## Persist everything downstream steps need.
save(
id_test,
timestamp_train,
timestamp_test,
cleaned_train,
cleaned_test,
file = "output/preprocess_macro.RData"
)
|
d3cb61254abd381ab2b028200c79ab9ed4deb6d6
|
7b8b5630a5cef2a21428f97b2c5b26b0f63e3269
|
/tests/testthat.R
|
c42c70c7f58da0ee167145e6de519135b3b6d332
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
cells2numbers/migrationminer
|
eb257733c4999f9af57ce10f2faf051d1e0b82fa
|
c25c692615953c33b3d73430117129fea980bcdb
|
refs/heads/master
| 2021-01-23T07:34:53.509775
| 2019-04-29T17:45:18
| 2019-04-29T17:45:18
| 102,511,560
| 7
| 0
|
NOASSERTION
| 2019-04-09T16:13:56
| 2017-09-05T17:37:14
|
R
|
UTF-8
|
R
| false
| false
| 72
|
r
|
testthat.R
|
## testthat bootstrap: discovers and runs every test under tests/testthat/
## for the migrationminer package (standard R CMD check entry point).
library(testthat)
library(migrationminer)
test_check("migrationminer")
|
beb204bd3923dc7aee6362d099cffafe234b4672
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/diffrprojects/inst/testfiles/dist_mat_absolute/libFuzzer_dist_mat_absolute/dist_mat_absolute_valgrind_files/1609961094-test.R
|
f9b413992feee097cab12667b1470193a3d1c928
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 181
|
r
|
1609961094-test.R
|
## libFuzzer/valgrind reproduction case: replays one recorded integer input
## against the package-internal routine and prints the result's structure.
## NOTE(review): dist_mat_absolute is unexported (:::) from diffrprojects;
## the semantics of x/y are not visible from this file.
testlist <- list(x = c(618011183L, -1L, -1L), y = c(1869359146L, 1660944384L, 0L, 1944398335L, 16777215L))
result <- do.call(diffrprojects:::dist_mat_absolute,testlist)
str(result)
|
c91581b38b27f3030c493fa63f884bb895fedc88
|
42554442d39db2549f5b221adc3f4020ced752c7
|
/A07_dim3.r
|
00056aac8aeae109427f0572fbaabb36a3958985
|
[] |
no_license
|
aky3100/TestR2
|
ea2330ac2f54335a9e3564806834d992b5cca2f8
|
87d50fc5ff0abcfbc2b5d116cf2a8e90e5cd030b
|
refs/heads/master
| 2020-12-02T08:16:00.738077
| 2017-07-10T16:22:27
| 2017-07-10T16:22:27
| 96,657,166
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 333
|
r
|
A07_dim3.r
|
## Demo of lattice 3-D plots, written to plot7.pdf.
pdf(file = "plot7.pdf")
library(lattice)

## Surface z = x^2 + x*y over a 10 x 15 grid.
a <- 1:10
b <- 1:15
eg <- expand.grid(x = a, y = b)
eg$z <- eg$x^2 + eg$x * eg$y
## lattice plots must be print()ed explicitly so they also render when this
## script is source()d (top-level auto-printing does not happen then).
print(wireframe(z ~ x + y, eg))

## A helix rendered as a 3-D point cloud.
## (The original emitted this identical plot twice -- accidental duplication,
## removed here.)
t <- seq(-2 * pi, 2 * pi, length.out = 200)
print(cloud(z ~ x + y, data.frame(x = 3 * cos(t), y = 3 * sin(t), z = 2 * t)))

## Close the PDF device; without dev.off() the original left the device open,
## producing a truncated/corrupt plot7.pdf on exit.
dev.off()
|
1986ab9a295127fa779bff264df08b0004a34b44
|
608adcf47ef5c776429dfe2e555c20c0ef54547a
|
/R/H.Earth.solar.R
|
040788de4cc801c1dcd0592f7900bc3ff9415292
|
[] |
no_license
|
cran/widals
|
b722ad1e1e0938998461d8fe83e8b76437cbc031
|
c431b52c0455ad4568072220838b571bacc3b6ba
|
refs/heads/master
| 2021-05-15T01:43:27.321897
| 2019-12-07T21:20:02
| 2019-12-07T21:20:02
| 17,700,881
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 671
|
r
|
H.Earth.solar.R
|
H.Earth.solar <-
function(x, y, dateDate) { ######################
    ## Relative solar exposure ("surface area" factor) for latitudes `y`
    ## (degrees) on each date in `dateDate`.  Returns a list with one
    ## single-column matrix (named "surface.area") per date.
    ## NOTE(review): `x` (longitude?) is accepted but never used -- confirm.
    tau <- length(dateDate)
    ## Preallocate instead of growing list(); seq_len() below also makes the
    ## zero-length dateDate case return list() instead of erroring (the
    ## original's 1:tau loop broke when tau == 0).
    Hst.ls <- vector("list", tau)
    equinox <- strptime( "20110320", "%Y%m%d" )  # reference vernal equinox
    for(i in seq_len(tau)) {
        this.date <- dateDate[i]
        ## whole days elapsed since the reference equinox
        dfe <- as.integer( difftime(this.date, equinox, units="day") )
        ## solar declination (degrees), sinusoidal approximation over the year
        psi <- 23.5 * sin( 2*pi*dfe/365.25 )
        ## solar elevation angle (degrees) at latitude y, spherical law of cosines
        eta <- 90 - (360/(2*pi)) * acos( cos(2*pi*y/360) * cos(2*pi*psi/360) + sin(2*pi*y/360) * sin(2*pi*psi/360) )
        surface.area <- sin(2*pi*eta/360)
        # surface.area[ surface.area < 0 ] <- 0   # kept disabled, as in the original
        Hst.ls[[i]] <- cbind( surface.area )
    }
    return(Hst.ls)
}
|
585c79a0621339b52bbf4d99fbbbadd6e698ee73
|
f02aae99becc67d3ee700d4cdd205a1e55d5ade2
|
/testAlgo.R
|
279b8f2b9a1642ade9e88d875a61b567e3d77cb1
|
[] |
no_license
|
sohamsaha99/mdp
|
56cfc5ae958ef9df934cf8fdd9acdeb86aa5b702
|
df809031d8ca452fff74746749f9d9d041fc97f7
|
refs/heads/master
| 2023-04-16T05:16:39.424315
| 2021-04-30T08:01:01
| 2021-04-30T08:01:01
| 315,692,654
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,617
|
r
|
testAlgo.R
|
# Create Reward matrix
## Rows = discretised cart-pole states, columns = force levels (actions).
## NOTE(review): n_states, getState(), x_levels/x_nlevels and
## theta_levels/theta_nlevels are not defined in this file -- they must come
## from an earlier script; confirm before running standalone.
F_levels = c("zero", "negative_low", "positive_low", "negative_high", "positive_high")
F_values = c(0, -5, 5, -10, 10)
n_actions = length(F_levels)
Reward_matrix = matrix(0, nrow=n_states, ncol=n_actions)
for(i in 1:nrow(Reward_matrix)) {
v = getState(i)
## Penalise boundary states (cart position or pole angle at either extreme),
## reward the centred state, neutral otherwise.  The reward is constant
## across actions, i.e. it depends on the state only.
if((which(x_levels == v[, 1]) %in% c(1, x_nlevels)) | (which(theta_levels == v[, 3]) %in% c(1, theta_nlevels))) {
# if((which(theta_levels == v[, 3]) %in% c(1, theta_nlevels))) {
Reward_matrix[i, ] = -10.0
} else if((which(x_levels == v[, 1]) %in% c((1 + x_nlevels) / 2)) & (which(theta_levels == v[, 3]) %in% c((1 + theta_nlevels) / 2))) {
Reward_matrix[i, ] = 2.0
} else {
Reward_matrix[i, ] = 0.0
}
}
# Run MDPtoolbox with the transition matrices and reward matrix
library(MDPtoolbox)
## Load one precomputed transition matrix per force level.
## NOTE(review): neg/pos read the same F=+/-10 files as neg_high/pos_high,
## and the first `T = list(...)` assignment below is dead -- it is
## overwritten two lines later.  These look like experiment toggles left in.
neg = read.table("F=-10.0_transition_matrix_0111.csv", header=FALSE, sep=","); neg = as.matrix(neg)
pos = read.table("F=10.0_transition_matrix_0111.csv", header=FALSE, sep=","); pos = as.matrix(pos)
zero = read.table("F=0.0_transition_matrix_0111.csv", header=FALSE, sep=","); zero = as.matrix(zero)
neg_low = read.table("F=-5.0_transition_matrix_0111.csv", header=FALSE, sep=","); neg_low = as.matrix(neg_low)
pos_low = read.table("F=5.0_transition_matrix_0111.csv", header=FALSE, sep=","); pos_low = as.matrix(pos_low)
neg_high = read.table("F=-10.0_transition_matrix_0111.csv", header=FALSE, sep=","); neg_high = as.matrix(neg_high)
pos_high = read.table("F=10.0_transition_matrix_0111.csv", header=FALSE, sep=","); pos_high = as.matrix(pos_high)
## NOTE(review): naming a variable `T` shadows the TRUE shorthand.
T = list(zero=zero, negative=neg, positive=pos)
# T = list(positive=pos, negative=neg)
T = list(zero=zero, negative_low=neg_low, positive_low=pos_low, negative_high=neg_high, positive_high=pos_high)
mdp_check(T, Reward_matrix) # empty string => ok
# m <- mdp_policy_iteration(P=T, R=Reward_matrix, discount=0.8)
# m <- mdp_value_iteration(P=T, R=Reward_matrix, discount=0.9, max_iter=50)
## Solve for the optimal policy with modified policy iteration.
m <- mdp_policy_iteration_modified(P=T, R=Reward_matrix, discount=0.9)
print(m$iter)
# Call python gym environment
## Roll out the learned policy against a live CartPole-v0 environment via
## reticulate, three episodes of up to 200 steps each.
library(reticulate)
use_python("bin/python")
py_run_string("import gym")
py_run_file("utils.py")
py_run_string("env = gym.make('CartPole-v0')")
# while(TRUE)
bad_j = NULL
DEATH = NULL
for (i_try in 1:3) {
py_run_string("observation = env.reset()")
py_run_string("frames = []")
for(j in 1:200) {
# py_run_string("frames.append(env.render(mode='rgb_array'))")
## Discretise the observation into a state index; the pole angle is wrapped
## into (-pi, pi].  NOTE(review): the first assignment to `i` below is dead
## code -- it is immediately overwritten by the noise-perturbed version.
## get_discrete_state presumably comes from an earlier script -- confirm.
i = get_discrete_state(py$observation[1], py$observation[2], (py$observation[3] + pi) %% (2 * pi) - pi, py$observation[4])$index
# i = get_discrete_state(py$observation[1] + rnorm(1, 0, 0.25), py$observation[2] + rnorm(1, 0, 0.16), (py$observation[3] + pi) %% (2 * pi) - pi + + rnorm(1, 0, 0.06), py$observation[4] + rnorm(1, 0, 0.16))$index
i = get_discrete_state(py$observation[1] + rnorm(1, 0, 0.25), py$observation[2] + rnorm(1, 0, 0.04), (py$observation[3] + pi) %% (2 * pi) - pi + + rnorm(1, 0, 0.001), py$observation[4] + rnorm(1, 0, 0.04))$index
## Map the policy's chosen force level onto gym's binary actions; the
## low/zero levels are emulated by temporarily changing env.env.force_mag.
if(names(T)[m$policy[i]] == "positive_high") {
action = 10
py_run_string("observation, reward, done, info = env.step(1)")
} else if(names(T)[m$policy[i]] == "negative_high") {
action = -10
py_run_string("observation, reward, done, info = env.step(0)")
} else if(names(T)[m$policy[i]] == "positive_low") {
action = 5
py_run_string("env.env.force_mag = 5.0")
py_run_string("observation, reward, done, info = env.step(1)")
py_run_string("env.env.force_mag = 10.0")
} else if(names(T)[m$policy[i]] == "negative_low") {
action = -5
py_run_string("env.env.force_mag = 5.0")
py_run_string("observation, reward, done, info = env.step(0)")
py_run_string("env.env.force_mag = 10.0")
} else {
action = 0
py_run_string("env.env.force_mag = 0.0")
py_run_string("observation, reward, done, info = env.step(0)")
py_run_string("env.env.force_mag = 10.0")
}
# py_run_string("observation, reward, done, info = env.step(action)")
py_run_string("env.render()")
# print(c(py$observation, action))
Sys.sleep(0.02)
## Episode ends when gym reports done; record the step count it survived.
if(py$done) {
bad_j = c(bad_j, j)
print(sprintf("FINISHED AFTER %d STEPS", j))
Sys.sleep(1)
DEATH = c(DEATH, j)
break
}
}
# py_run_string("save_frames_as_gif(frames)")
py_run_string("env.close()")
# py_run_string("env.reset()")
}
|
e8142310408d34f0813bf7e35e28a5c786cbb17f
|
5b55d8d4a1e6275605e7e740cfb3cec5528b485b
|
/R/getXlist.R
|
8f03be0ef01d5f06aab564850b0aed532574c62d
|
[] |
no_license
|
cran/MasterBayes
|
2103a6dfddb562c02b37f32c79ca51bce477a6e6
|
a2bbdc296453f21114f7fd9e1a8d825ed6d86730
|
refs/heads/master
| 2022-07-23T18:11:50.598009
| 2022-06-22T12:00:10
| 2022-06-22T12:00:10
| 17,691,892
| 1
| 2
| null | 2017-09-27T20:22:15
| 2014-03-13T02:32:13
|
C++
|
UTF-8
|
R
| false
| false
| 35,065
|
r
|
getXlist.R
|
getXlist<-function(PdP, GdP=NULL, A=NULL, E1=0.005, E2=0.005, mm.tol=999){
if(is.null(GdP$id)==FALSE & is.null(PdP$id)==FALSE){
if(FALSE%in%(GdP$id%in%PdP$id)){
stop("genotype data exists for individuals not in PdataPed object")
}
if(FALSE%in%(PdP$id%in%GdP$id)){
stop("some individuals in PdataPed object have no genotype data: replace with NA")
}
}
if(is.null(PdP$id)){
X.list<-list(id=NULL)
unique_id<-as.character(unique(GdP$id))
X.list$id<-unique_id
}else{
null_mat<-t(as.matrix(as.numeric(NULL)))
X.list<-list(id=NULL,beta_map=NULL, merge=c(), mergeUS=c(), X=lapply(PdP$id[which(PdP$offspring==1)], function(x){x=list(dam.id=NULL, sire.id=NULL, mergeN=matrix(NA,2,0), XDus=null_mat, vtDus=NULL, XDs=null_mat, vtDs=NULL, XSus=null_mat, vtSus=NULL, XSs=null_mat, vtSs=NULL, XDSus=null_mat, vtDSus=NULL,XDSs=null_mat, vtDSs=NULL, G=NULL)}))
unique_id<-as.character(unique(PdP$id))
X.list$id<-unique_id
PdP$id<-match(PdP$id, unique_id) # convert phenotypic id's to numeric
if(length(PdP$USdam)!=1 | PdP$USdam[1]!=FALSE){
PdP$id<-c(PdP$id, length(unique_id)+1)
if(is.null(PdP$sex)==FALSE){
PdP$sex<-as.factor(c(as.character(PdP$sex), "Female"))
}
ud<-TRUE
}else{
ud<-FALSE
}
if(length(PdP$USsire)!=1 | PdP$USsire[1]!=FALSE){
PdP$id<-c(PdP$id, length(unique_id)+ud+1)
if(is.null(PdP$sex)==FALSE){
PdP$sex<-as.factor(c(as.character(PdP$sex), "Male"))
}
us<-TRUE
}else{
us<-FALSE
}
data_us<-matrix(NA, ud+us, length(PdP$data[1,]))
PdP$timevar<-c(PdP$timevar, rep(NA, ud+us))
colnames(data_us)<-colnames(PdP$data)
PdP$data<-rbind(PdP$data, data_us)
names(X.list$X)<-PdP$id[which(PdP$offspring==1)]
findrest<-function(x){ # function for finding restriction variables
if(length(grep("restrict *= *NULL" , as.character(x)))==0 & length(grep("restrict" , as.character(x)))!=0){
int<-1
}else{
int<-0
}
int
}
restrictions<-which(unlist(lapply(PdP$formula, findrest))==1)
main_effects<-which(unlist(lapply(PdP$formula, length))==1)
main_effects<-main_effects[main_effects%in%restrictions==FALSE]
interactions<-which(unlist(lapply(PdP$formula, length))==2)
interactions<-interactions[interactions%in%restrictions==FALSE]
tmain_effects<-length(main_effects)
if(length(interactions)>0){
for(i in 1:length(interactions)){
form.comb<-match(PdP$formula[[interactions[i]]], PdP$formula[main_effects[1:tmain_effects]])
if(any(is.na(form.comb))){
main_effects<-c(main_effects, length(PdP$formula)+1:sum(is.na(form.comb)))
PdP$formula[length(PdP$formula)+1:sum(is.na(form.comb))]<-PdP$formula[[interactions[i]]][which(is.na(form.comb))]
}
}
}
for(off in 1:sum(PdP$offspring==1)){
PdP$off_record<-which(PdP$offspring==1)[off]
PdP$keepDam<-unique(PdP$id)
PdP$keepSire<-unique(PdP$id)
PdP$restDam<-unique(PdP$id)
PdP$restSire<-unique(PdP$id)
predictors<-lapply(PdP$formula[restrictions], eval, envir=PdP)
if(length(predictors)!=0){
for(i in 1:length(predictors)){
PdP$keepDam<-PdP$keepDam[which(PdP$keepDam%in%predictors[[i]]$Dam$id==TRUE)]
PdP$keepSire<-PdP$keepSire[which(PdP$keepSire%in%predictors[[i]]$Sire$id==TRUE)]
PdP$restDam<-PdP$restDam[which(PdP$restDam%in%predictors[[i]]$Dam_restrict$id==TRUE)]
PdP$restSire<-PdP$restSire[which(PdP$restSire%in%predictors[[i]]$Sire_restrict$id==TRUE)]
}
}else{
if(length(PdP$sex)>0){
PdP$keepDam<-unique(PdP$keepDam[which(PdP$sex=="Female")])
PdP$keepSire<-unique(PdP$keepSire[which(PdP$sex=="Male")])
PdP$restDam<-unique(PdP$restDam[which(PdP$sex=="Female")])
PdP$restSire<-unique(PdP$restSire[which(PdP$sex=="Male")])
}
}
predictors<-lapply(PdP$formula[main_effects], eval, envir=PdP)
nvar<-rep(0, 6) # no parameters
if(length(predictors)!=0){
for(i in 1:tmain_effects){ # itterate through variables
if(length(predictors[[i]]$Dam$X)!=0){
nvar[1]<-nvar[1]+sum(is.na(colSums(predictors[[i]]$Dam$X))) # starting column no. for each dam factor
nvar[2]<-nvar[2]+sum(is.na(colSums(predictors[[i]]$Dam$X))==FALSE) # starting column no. for each dam factor
}
if(length(predictors[[i]]$Sire$X)!=0){
nvar[3]<-nvar[3]+sum(is.na(colSums(predictors[[i]]$Sire$X))) # starting column no. for each dam factor
nvar[4]<-nvar[4]+sum(is.na(colSums(predictors[[i]]$Sire$X))==FALSE) # starting column no. for each dam factor
}
if(length(predictors[[i]]$DamSire$X)!=0){
nvar[5]<-nvar[5]+sum(is.na(colSums(predictors[[i]]$DamSire$X)))
nvar[6]<-nvar[6]+sum(is.na(colSums(predictors[[i]]$DamSire$X))==FALSE)
}
}
}
nbeta<-sum(nvar)
X.list$X[[off]]$dam.id<-PdP$keepDam
X.list$X[[off]]$sire.id<-PdP$keepSire
X.list$X[[off]]$restdam.id<-PdP$restDam
X.list$X[[off]]$restsire.id<-PdP$restSire
ndam<-length(X.list$X[[off]]$dam.id)
nsire<-length(X.list$X[[off]]$sire.id)
if(nvar[1]>0){
X.list$X[[off]]$XDus<-matrix(NA, ndam, nvar[1])
colnames(X.list$X[[off]]$XDus)<-rep("G", nvar[1])
X.list$X[[off]]$vtDus<-rep(NA, nvar[1])
}
if(nvar[2]>0){
X.list$X[[off]]$XDs<-matrix(NA, ndam, nvar[2])
colnames(X.list$X[[off]]$XDs)<-rep("G", nvar[2])
X.list$X[[off]]$vtDs<-rep(NA, nvar[2])
}
if(nvar[3]>0){
X.list$X[[off]]$XSus<-matrix(NA, nsire, nvar[3])
colnames(X.list$X[[off]]$XSus)<-rep("G", nvar[3])
X.list$X[[off]]$vtSus<-rep(NA, nvar[3])
}
if(nvar[4]>0){
X.list$X[[off]]$XSs<-matrix(NA, nsire, nvar[4])
colnames(X.list$X[[off]]$XSs)<-rep("G", nvar[4])
X.list$X[[off]]$vtSs<-rep(NA, nvar[4])
}
if(nvar[5]>0){
X.list$X[[off]]$XDSus<-matrix(NA, ndam*nsire, nvar[5])
colnames(X.list$X[[off]]$XDSus)<-rep("G",nvar[5])
X.list$X[[off]]$vtDSus<-rep(NA, nvar[5])
}
if(nvar[6]>0){
X.list$X[[off]]$XDSs<-matrix(NA, ndam*nsire, nvar[6])
colnames(X.list$X[[off]]$XDSs)<-rep("G",nvar[6])
X.list$X[[off]]$vtDSs<-rep(NA, nvar[6])
}
# sets up empty design matrix ncolumns = npredictors+1 for genetic likelihoods
##########################################################################################################
###################################### main effects ######################################################
##########################################################################################################
if(tmain_effects!=0){
nvar_tmp<-rep(0,6)
for(i in 1:tmain_effects){ # iterates through the variables
# Dam variables
if(length(predictors[[i]]$Dam$X)!=0){
if(is.na(sum(predictors[[i]]$Dam$X))==TRUE){
for(c in 1:ncol(predictors[[i]]$Dam$X)){
nvar_tmp[1]<-nvar_tmp[1]+1
X.list$X[[off]]$vtDus[nvar_tmp[1]]<-predictors[[i]]$Dam$var_type
X.list$X[[off]]$XDus[,nvar_tmp[1]]<-predictors[[i]]$Dam$X[,c]
colnames(X.list$X[[off]]$XDus)[nvar_tmp[1]]<-predictors[[i]]$Dam$var_name[c]
if(any(is.na(X.list$X[[off]]$XDus[,nvar_tmp[1]][-ndam]))){stop("Missing covariate data")}
if(predictors[[i]]$Dam$merge==TRUE){
if(off==1){
X.list$merge<-c(X.list$merge, nvar_tmp[1])
X.list$mergeUS<-c(X.list$mergeUS, 0)
}
X.list$X[[off]]$mergeN<-cbind(X.list$X[[off]]$mergeN, c(sum(predictors[[i]]$Dam$X[,c]==1, na.rm=T), sum(predictors[[i]]$Dam$X[,c]==0, na.rm=T)))
}
}
}else{
for(c in 1:ncol(predictors[[i]]$Dam$X)){
nvar_tmp[2]<-nvar_tmp[2]+1
X.list$X[[off]]$vtDs[nvar_tmp[2]]<-predictors[[i]]$Dam$var_type
X.list$X[[off]]$XDs[,nvar_tmp[2]]<-predictors[[i]]$Dam$X[,c]
colnames(X.list$X[[off]]$XDs)[nvar_tmp[2]]<-predictors[[i]]$Dam$var_name[c]
if(any(is.na(X.list$X[[off]]$XDs[,nvar_tmp[2]]))){stop("Missing covariate data")}
if(predictors[[i]]$Dam$merge==TRUE){
if(off==1){
X.list$merge<-c(X.list$merge, nvar[1]+nvar_tmp[2])
X.list$mergeUS<-c(X.list$mergeUS, ud*((predictors[[i]]$Dam$X[,c][nrow(predictors[[i]]$Dam$X)]==0)+1))
}
X.list$X[[off]]$mergeN<-cbind(X.list$X[[off]]$mergeN, c(sum(predictors[[i]]$Dam$X[,c]==1), sum(predictors[[i]]$Dam$X[,c]==0)))
}
}
}
}
#Sire variables
if(length(predictors[[i]]$Sire$X)!=0){
if(is.na(sum(predictors[[i]]$Sire$X))==TRUE){
for(c in 1:ncol(predictors[[i]]$Sire$X)){
nvar_tmp[3]<-nvar_tmp[3]+1
X.list$X[[off]]$vtSus[nvar_tmp[3]]<-predictors[[i]]$Sire$var_type
X.list$X[[off]]$XSus[,nvar_tmp[3]]<-predictors[[i]]$Sire$X[,c]
colnames(X.list$X[[off]]$XSus)[nvar_tmp[3]]<-predictors[[i]]$Sire$var_name[c]
if(any(is.na(X.list$X[[off]]$XSus[,nvar_tmp[3]][-nsire]))){stop("Missing covariate data")}
if(predictors[[i]]$Sire$merge==TRUE){
if(off==1){
X.list$merge<-c(X.list$merge, sum(nvar[1:2])+nvar_tmp[3])
X.list$mergeUS<-c(X.list$mergeUS, 0)
}
X.list$X[[off]]$mergeN<-cbind(X.list$X[[off]]$mergeN, c(sum(predictors[[i]]$Sire$X[,c]==1, na.rm=T), sum(predictors[[i]]$Sire$X[,c]==0, na.rm=T)))
}
}
}else{
for(c in 1:ncol(predictors[[i]]$Sire$X)){
nvar_tmp[4]<-nvar_tmp[4]+1
X.list$X[[off]]$vtSs[nvar_tmp[4]]<-predictors[[i]]$Sire$var_type
X.list$X[[off]]$XSs[,nvar_tmp[4]]<-predictors[[i]]$Sire$X[,c]
colnames(X.list$X[[off]]$XSs)[nvar_tmp[4]]<-predictors[[i]]$Sire$var_name[c]
if(any(is.na(X.list$X[[off]]$XSs[,nvar_tmp[4]]))){stop("Missing covariate data")}
if(predictors[[i]]$Sire$merge==TRUE){
if(off==1){
X.list$merge<-c(X.list$merge, sum(nvar[1:3])+nvar_tmp[4])
X.list$mergeUS<-c(X.list$mergeUS, us*((predictors[[i]]$Sire$X[,c][nrow(predictors[[i]]$Sire$X)]==0)+1))
}
X.list$X[[off]]$mergeN<-cbind(X.list$X[[off]]$mergeN, c(sum(predictors[[i]]$Sire$X[,c]==1, na.rm=T), sum(predictors[[i]]$Sire$X[,c]==0, na.rm=T)))
}
}
}
}
#Dam/Sire variables
if(length(predictors[[i]]$DamSire$X)!=0){
if(is.na(sum(predictors[[i]]$DamSire$X))==TRUE){
for(c in 1:ncol(predictors[[i]]$DamSire$X)){
nvar_tmp[5]<-nvar_tmp[5]+1
X.list$X[[off]]$vtDSus[nvar_tmp[5]]<-predictors[[i]]$DamSire$var_type
X.list$X[[off]]$XDSus[,nvar_tmp[5]]<-predictors[[i]]$DamSire$X[,c]
colnames(X.list$X[[off]]$XDSus)[nvar_tmp[5]]<-predictors[[i]]$DamSire$var_name[c]
if(us==TRUE){rem.var<-seq(nsire,ndam*nsire, nsire)}
if(ud==TRUE){rem.var<-((((ndam-1)*nsire)+1):(ndam*nsire))}
if(us==TRUE & ud==TRUE){rem.var<-c(seq(nsire,ndam*nsire, nsire), (((ndam-1)*nsire)+1):c((ndam*nsire)-1))}
if(any(is.na(X.list$X[[off]]$XDSus[,nvar_tmp[5]][-rem.var]))){stop("Missing covariate data")}
}
}else{
for(c in 1:ncol(predictors[[i]]$DamSire$X)){
nvar_tmp[6]<-nvar_tmp[6]+1
X.list$X[[off]]$vtDSs[nvar_tmp[6]]<-predictors[[i]]$DamSire$var_type
X.list$X[[off]]$XDSs[,nvar_tmp[6]]<-predictors[[i]]$DamSire$X[,c]
colnames(X.list$X[[off]]$XDSs)[nvar_tmp[6]]<-predictors[[i]]$DamSire$var_name[c]
if(any(is.na(X.list$X[[off]]$XDSs[,nvar_tmp[6]]))){stop("Missing covariate data")}
}
}
}
}
}
###################################################################################################################
################################## interactions ##################################################################
###################################################################################################################
if(length(interactions)>0){
for(i in 1:length(interactions)){
form.comb<-match(PdP$formula[[interactions[i]]], PdP$formula[main_effects])
t1<-predictors[[form.comb[1]]]
t2<-predictors[[form.comb[2]]]
if(off==1){
if(i==1){
dam.dam=rep(FALSE, length(interactions))
sire.sire=rep(FALSE, length(interactions))
dam.sire=rep(FALSE, length(interactions))
sire.dam=rep(FALSE, length(interactions))
sire.damsire=rep(FALSE, length(interactions))
damsire.sire=rep(FALSE, length(interactions))
dam.damsire=rep(FALSE, length(interactions))
damsire.dam=rep(FALSE, length(interactions))
damsire.damsire=rep(FALSE, length(interactions))
dam_nus=rep(1, length(interactions))
sire_nus=rep(1, length(interactions))
}
if(is.null(t1$Dam$X)==FALSE & is.null(t1$Sire$X)==FALSE){
if(is.null(t2$Dam$X)==FALSE & is.null(t2$Sire$X)==FALSE){
dam.dam[i]=TRUE
sire.sire[i]=TRUE
}else{
stop("interactions between a genderless variable and a sex-specific variable not possible")
}
}
if(is.null(t1$Dam$X)==FALSE & is.null(t2$Dam$X)==FALSE){
dam.dam[i]=TRUE
if(TRUE%in%(is.na(t1$Dam$X)) | TRUE%in%(is.na(t2$Dam$X))){
dam_nus[i]<-0
}
}
if(is.null(t1$Sire$X)==FALSE & is.null(t2$Sire$X)==FALSE){
sire.sire[i]=TRUE
if(TRUE%in%(is.na(t1$Sire$X)) | TRUE%in%(is.na(t2$Sire$X))){
sire_nus[i]<-0
}
}
if(is.null(t1$Dam$X)==FALSE & is.null(t2$Sire$X)==FALSE){
if(is.null(t2$Dam$X) & is.null(t1$Sire$X)){
dam.sire[i]=TRUE
if(TRUE%in%(is.na(t1$Dam$X)) | TRUE%in%(is.na(t2$Sire$X))){
sire_nus[i]<-0
dam_nus[i]<-0
}
}
}
if(is.null(t2$Dam$X)==FALSE & is.null(t1$Sire$X)==FALSE){
if(is.null(t1$Dam$X) & is.null(t2$Sire$X)){
sire.dam[i]=TRUE
if(TRUE%in%(is.na(t1$Sire$X)) | TRUE%in%(is.na(t2$Dam$X))){
sire_nus[i]<-0
dam_nus[i]<-0
}
}
}
if(is.null(t1$DamSire$X)==FALSE & is.null(t2$DamSire$X)==FALSE){
damsire.damsire[i]=TRUE
if(TRUE%in%(is.na(t1$DamSire$X)) | TRUE%in%(is.na(t2$DamSire$X))){
sire_nus[i]<-0
dam_nus[i]<-0
}
}
if(is.null(t1$Dam$X)==FALSE & is.null(t2$DamSire$X)==FALSE){
dam.damsire[i]=TRUE
if(TRUE%in%(is.na(t1$Dam$X)) | TRUE%in%(is.na(t2$DamSire$X))){
sire_nus[i]<-0
dam_nus[i]<-0
}
}
if(is.null(t1$DamSire$X)==FALSE & is.null(t2$Dam$X)==FALSE){
damsire.dam[i]=TRUE
if(TRUE%in%(is.na(t1$DamSire$X)) | TRUE%in%(is.na(t2$Dam$X))){
sire_nus[i]<-0
dam_nus[i]<-0
}
}
if(is.null(t1$Sire$X)==FALSE & is.null(t2$DamSire$X)==FALSE){
sire.damsire[i]=TRUE
if(TRUE%in%(is.na(t1$Sire$X)) | TRUE%in%(is.na(t2$DamSire$X))){
sire_nus[i]<-0
dam_nus[i]<-0
}
}
if(is.null(t1$DamSire$X)==FALSE & is.null(t2$Sire$X)==FALSE){
damsire.sire[i]=TRUE
if(TRUE%in%(is.na(t1$DamSire$X)) | TRUE%in%(is.na(t2$Sire$X))){
sire_nus[i]<-0
dam_nus[i]<-0
}
}
}
col<-0
if(dam.dam[i]){
int.tmp<-matrix(NA,nrow(t1$Dam$X), ncol(t1$Dam$X)*ncol(t2$Dam$X))
colnames(int.tmp)<-rep("G", ncol(int.tmp))
for(v1 in 1:ncol(t1$Dam$X)){
for(v2 in 1:ncol(t2$Dam$X)){
col<-col+1
int.tmp[,col]<-t1$Dam$X[,v1]*t2$Dam$X[,v2]
colnames(int.tmp)[col]<-paste(t1$Dam$var_name[v1], t2$Dam$var_name[v2], sep=".")
}
}
if(dam_nus[i]==0){
for(c in 1:ncol(int.tmp)){
nvar[1]<-nvar[1]+1
if(ncol(X.list$X[[off]]$XDus)==0){
X.list$X[[off]]$XDus<-matrix(int.tmp[,c], nrow(int.tmp), 1)
}else{
X.list$X[[off]]$XDus<-as.matrix(cbind(X.list$X[[off]]$XDus, int.tmp[,c]))
}
if(t1$Dam$var_type == "factor" & t2$Dam$var_type == "factor"){
X.list$X[[off]]$vtDus<-c(X.list$X[[off]]$vtDus, "factor")
}else{
X.list$X[[off]]$vtDus<-c(X.list$X[[off]]$vtDus, "numeric")
}
colnames(X.list$X[[off]]$XDus)[nvar[1]]<-colnames(int.tmp)[c]
}
}else{
for(c in 1:ncol(int.tmp)){
nvar[2]<-nvar[2]+1
if(ncol(X.list$X[[off]]$XDs)==0){
X.list$X[[off]]$XDs<-matrix(int.tmp[,c], nrow(int.tmp), 1)
}else{
X.list$X[[off]]$XDs<-as.matrix(cbind(X.list$X[[off]]$XDs, int.tmp[,c]))
}
if(t1$Dam$var_type == "factor" & t2$Dam$var_type == "factor"){
X.list$X[[off]]$vtDs<-c(X.list$X[[off]]$vtDs, "factor")
}else{
X.list$X[[off]]$vtDs<-c(X.list$X[[off]]$vtDs, "numeric")
}
colnames(X.list$X[[off]]$XDs)[nvar[2]]<-colnames(int.tmp)[c]
}
}
}
col<-0
if(sire.sire[i]){
int.tmp<-matrix(NA,nrow(t1$Sire$X), ncol(t1$Sire$X)*ncol(t2$Sire$X))
colnames(int.tmp)<-rep("G", ncol(int.tmp))
for(v1 in 1:ncol(t1$Sire$X)){
for(v2 in 1:ncol(t2$Sire$X)){
col<-col+1
int.tmp[,col]<-t1$Sire$X[,v1]*t2$Sire$X[,v2]
colnames(int.tmp)[col]<-paste(t1$Sire$var_name[v1], t2$Sire$var_name[v2], sep=".")
}
}
if(sire_nus[i]==0){
for(c in 1:ncol(int.tmp)){
nvar[3]<-nvar[3]+1
if(ncol(X.list$X[[off]]$XSus)==0){
X.list$X[[off]]$XSus<-matrix(int.tmp[,c], nrow(int.tmp), 1)
}else{
X.list$X[[off]]$XSus<-as.matrix(cbind(X.list$X[[off]]$XSus, int.tmp[,c]))
}
if(t1$Sire$var_type == "factor" & t2$Sire$var_type == "factor"){
X.list$X[[off]]$vtSus<-c(X.list$X[[off]]$vtSus, "factor")
}else{
X.list$X[[off]]$vtSus<-c(X.list$X[[off]]$vtSus, "numeric")
}
colnames(X.list$X[[off]]$XSus)[nvar[3]]<-colnames(int.tmp)[c]
}
}else{
for(c in 1:ncol(int.tmp)){
nvar[4]<-nvar[4]+1
if(ncol(X.list$X[[off]]$XSs)==0){
X.list$X[[off]]$XSs<-matrix(int.tmp[,c], nrow(int.tmp), 1)
}else{
X.list$X[[off]]$XSs<-as.matrix(cbind(X.list$X[[off]]$XSs, int.tmp[,c]))
}
if(t1$Sire$var_type == "factor" & t2$Sire$var_type == "factor"){
X.list$X[[off]]$vtSs<-c(X.list$X[[off]]$vtSs, "factor")
}else{
X.list$X[[off]]$vtSs<-c(X.list$X[[off]]$vtSs, "numeric")
}
colnames(X.list$X[[off]]$XSs)[nvar[4]]<-colnames(int.tmp)[c]
}
}
}
col<-0
if(dam.sire[i] | sire.dam[i] | damsire.damsire[i] | dam.damsire[i] | damsire.dam[i] | sire.damsire[i] | damsire.sire[i]){
if(dam.sire[i]){
int.tmp<-matrix(NA,nrow(t1$Dam$X), ncol(t1$Dam$X)*ncol(t2$Sire$X))
colnames(int.tmp)<-rep("G", ncol(int.tmp))
for(v1 in 1:ncol(t1$Dam$X)){
for(v2 in 1:ncol(t2$Sire$X)){
col<-col+1
nsires<-length(X.list$X[[off]]$sire.id)
ndams<-length(X.list$X[[off]]$dam.id)
int.tmp[,col]<-rep(t1$Dam$X[,v1], each=nsires)*rep(t2$Sire$X[,v2], ndams)
colnames(int.tmp)[col]<-paste(t1$Dam$var_name[v1], t2$Sire$var_name[v2], sep=".")
}
}
}
if(sire.dam[i]){
int.tmp<-matrix(NA,nrow(t2$Dam$X), ncol(t2$Dam$X)*ncol(t1$Sire$X))
colnames(int.tmp)<-rep("G", ncol(int.tmp))
for(v1 in 1:ncol(t2$Dam$X)){
for(v2 in 1:ncol(t1$Sire$X)){
col<-col+1
nsires<-length(X.list$X[[off]]$sire.id)
ndams<-length(X.list$X[[off]]$dam.id)
int.tmp[,col]<-rep(t2$Dam$X[,v1], each=nsires)*rep(t1$Sire$X[,v2], ndams)
colnames(int.tmp)[col]<-paste(t2$Dam$var_name[v1], t1$Sire$var_name[v2], sep=".")
}
}
}
if(damsire.damsire[i]){
int.tmp<-matrix(NA,nrow(t1$DamSire$X), ncol(t1$DamSire$X)*ncol(t2$DamSire$X))
colnames(int.tmp)<-rep("G", ncol(int.tmp))
for(v1 in 1:ncol(t1$DamSire$X)){
for(v2 in 1:ncol(t2$DamSire$X)){
col<-col+1
int.tmp[,col]<-t1$DamSire$X[,v1]*t2$DamSire$X[,v2]
colnames(int.tmp)[col]<-paste(t1$DamSire$var_name[v1], t2$DamSire$var_name[v2], sep=".")
}
}
}
if(dam.damsire[i]){
int.tmp<-matrix(NA,nrow(t2$DamSire$X), ncol(t1$Dam$X)*ncol(t2$DamSire$X))
colnames(int.tmp)<-rep("G", ncol(int.tmp))
for(v1 in 1:ncol(t1$Dam$X)){
for(v2 in 1:ncol(t2$DamSire$X)){
col<-col+1
nsires<-length(X.list$X[[off]]$sire.id)
ndams<-length(X.list$X[[off]]$dam.id)
int.tmp[,col]<-rep(t1$Dam$X[,v1], each=nsires)*t2$DamSire$X[,v2]
colnames(int.tmp)[col]<-paste(t1$Dam$var_name[v1], t2$DamSire$var_name[v2], sep=".")
}
}
}
if(damsire.dam[i]){
int.tmp<-matrix(NA,nrow(t1$DamSire$X), ncol(t2$Dam$X)*ncol(t1$DamSire$X))
colnames(int.tmp)<-rep("G", ncol(int.tmp))
for(v1 in 1:ncol(t2$Dam$X)){
for(v2 in 1:ncol(t1$DamSire$X)){
col<-col+1
nsires<-length(X.list$X[[off]]$sire.id)
ndams<-length(X.list$X[[off]]$dam.id)
int.tmp[,col]<-rep(t2$Dam$X[,v1], each=nsires)*t1$DamSire$X[,v2]
colnames(int.tmp)[col]<-paste(t2$Dam$var_name[v1], t1$DamSire$var_name[v2], sep=".")
}
}
}
if(sire.damsire[i]){
int.tmp<-matrix(NA,nrow(t2$DamSire$X), ncol(t1$Sire$X)*ncol(t2$DamSire$X))
colnames(int.tmp)<-rep("G", ncol(int.tmp))
for(v1 in 1:ncol(t1$Sire$X)){
for(v2 in 1:ncol(t2$DamSire$X)){
col<-col+1
nsires<-length(X.list$X[[off]]$sire.id)
ndams<-length(X.list$X[[off]]$dam.id)
int.tmp[,col]<-rep(t1$Sire$X[,v1], ndams)*t2$DamSire$X[,v2]
colnames(int.tmp)[col]<-paste(t1$Sire$var_name[v1], t2$DamSire$var_name[v2], sep=".")
}
}
}
if(damsire.sire[i]){
int.tmp<-matrix(NA,nrow(t1$DamSire$X), ncol(t2$Sire$X)*ncol(t1$DamSire$X))
colnames(int.tmp)<-rep("G", ncol(int.tmp))
for(v1 in 1:ncol(t2$Sire$X)){
for(v2 in 1:ncol(t1$DamSire$X)){
col<-col+1
nsires<-length(X.list$X[[off]]$sire.id)
ndams<-length(X.list$X[[off]]$dam.id)
int.tmp[,col]<-rep(t2$Sire$X[,v1], ndams)*t1$DamSire$X[,v2]
colnames(int.tmp)[col]<-paste(t2$Sire$var_name[v1], t1$DamSire$var_name[v2], sep=".")
}
}
}
if(sire_nus[i]==0){
if(ncol(X.list$X[[off]]$XDSus)==0){
X.list$X[[off]]$XDSus<-matrix(0, length(X.list$X[[off]]$sire.id)*length(X.list$X[[off]]$dam.id), 0)
}
for(c in 1:ncol(int.tmp)){
nvar[5]<-nvar[5]+1
X.list$X[[off]]$XDSus<-as.matrix(cbind(X.list$X[[off]]$XDSus, int.tmp[,c]))
if(dam.sire[i]){
if(t1$Dam$var_type == "factor" & t2$Sire$var_type == "factor"){
X.list$X[[off]]$vtDSus<-c(X.list$X[[off]]$vtDSus, "factor")
}else{
X.list$X[[off]]$vtDSus<-c(X.list$X[[off]]$vtDSus, "numeric")
}
}
if(sire.dam[i]){
if(t2$Dam$var_type == "factor" & t1$Sire$var_type == "factor"){
X.list$X[[off]]$vtDSus<-c(X.list$X[[off]]$vtDSus, "factor")
}else{
X.list$X[[off]]$vtDSus<-c(X.list$X[[off]]$vtDSus, "numeric")
}
}
if(damsire.damsire[i]){
if(t1$DamSire$var_type == "factor" & t2$DamSire$var_type == "factor"){
X.list$X[[off]]$vtDSus<-c(X.list$X[[off]]$vtDSus, "factor")
}else{
X.list$X[[off]]$vtDSus<-c(X.list$X[[off]]$vtDSus, "numeric")
}
}
if(dam.damsire[i]){
if(t1$Dam$var_type == "factor" & t2$DamSire$var_type == "factor"){
X.list$X[[off]]$vtDSus<-c(X.list$X[[off]]$vtDSus, "factor")
}else{
X.list$X[[off]]$vtDSus<-c(X.list$X[[off]]$vtDSus, "numeric")
}
}
if(damsire.dam[i]){
if(t1$DamSire$var_type == "factor" & t2$Dam$var_type == "factor"){
X.list$X[[off]]$vtDSus<-c(X.list$X[[off]]$vtDSus, "factor")
}else{
X.list$X[[off]]$vtDSus<-c(X.list$X[[off]]$vtDSus, "numeric")
}
}
if(sire.damsire[i]){
if(t1$Sire$var_type == "factor" & t2$DamSire$var_type == "factor"){
X.list$X[[off]]$vtDSus<-c(X.list$X[[off]]$vtDSus, "factor")
}else{
X.list$X[[off]]$vtDSus<-c(X.list$X[[off]]$vtDSus, "numeric")
}
}
if(damsire.sire[i]){
if(t1$DamSire$var_type == "factor" & t2$Sire$var_type == "factor"){
X.list$X[[off]]$vtDSus<-c(X.list$X[[off]]$vtDSus, "factor")
}else{
X.list$X[[off]]$vtDSus<-c(X.list$X[[off]]$vtDSus, "numeric")
}
}
colnames(X.list$X[[off]]$XDSus)[nvar[5]]<-colnames(int.tmp)[c]
}
}else{
if(ncol(X.list$X[[off]]$XDSs)==0){
X.list$X[[off]]$XDSs<-matrix(0, length(X.list$X[[off]]$sire.id)*length(X.list$X[[off]]$dam.id), 0)
}
for(c in 1:ncol(int.tmp)){
nvar[6]<-nvar[6]+1
X.list$X[[off]]$XDSs<-as.matrix(cbind(X.list$X[[off]]$XDSs, int.tmp[,c]))
if(dam.sire[i]){
if(t1$Dam$var_type == "factor" & t2$Sire$var_type == "factor"){
X.list$X[[off]]$vtDSs<-c(X.list$X[[off]]$vtDSs, "factor")
}else{
X.list$X[[off]]$vtDSs<-c(X.list$X[[off]]$vtDSs, "numeric")
}
}
if(sire.dam[i]){
if(t2$Dam$var_type == "factor" & t1$Sire$var_type == "factor"){
X.list$X[[off]]$vtDSs<-c(X.list$X[[off]]$vtDSs, "factor")
}else{
X.list$X[[off]]$vtDSs<-c(X.list$X[[off]]$vtDSs, "numeric")
}
}
if(damsire.damsire[i]){
if(t1$DamSire$var_type == "factor" & t2$DamSire$var_type == "factor"){
X.list$X[[off]]$vtDSs<-c(X.list$X[[off]]$vtDSs, "factor")
}else{
X.list$X[[off]]$vtDSs<-c(X.list$X[[off]]$vtDSs, "numeric")
}
}
if(dam.damsire[i]){
if(t1$Dam$var_type == "factor" & t2$DamSire$var_type == "factor"){
X.list$X[[off]]$vtDSs<-c(X.list$X[[off]]$vtDSs, "factor")
}else{
X.list$X[[off]]$vtDSs<-c(X.list$X[[off]]$vtDSs, "numeric")
}
}
if(damsire.dam[i]){
if(t1$DamSire$var_type == "factor" & t2$Dam$var_type == "factor"){
X.list$X[[off]]$vtDSs<-c(X.list$X[[off]]$vtDSs, "factor")
}else{
X.list$X[[off]]$vtDSs<-c(X.list$X[[off]]$vtDSs, "numeric")
}
}
if(sire.damsire[i]){
if(t1$Sire$var_type == "factor" & t2$DamSire$var_type == "factor"){
X.list$X[[off]]$vtDSs<-c(X.list$X[[off]]$vtDSs, "factor")
}else{
X.list$X[[off]]$vtDSs<-c(X.list$X[[off]]$vtDSs, "numeric")
}
}
if(damsire.sire[i]){
if(t1$DamSire$var_type == "factor" & t2$Sire$var_type == "factor"){
X.list$X[[off]]$vtDSs<-c(X.list$X[[off]]$vtDSs, "factor")
}else{
X.list$X[[off]]$vtDSs<-c(X.list$X[[off]]$vtDSs, "numeric")
}
}
colnames(X.list$X[[off]]$XDSs)[nvar[6]]<-colnames(int.tmp)[c]
}
}
}
}
}
}
if(sum(nvar)>0){
beta_map<-1:sum(nvar)
if(sum(nvar[3:4])>0){
Dlinked<-c(grep("linked", colnames(X.list$X[[1]]$XDus)), grep("linked", colnames(X.list$X[[1]]$XDs))+nvar[1])
Dlinked_names<-c(colnames(X.list$X[[1]]$XDus), colnames(X.list$X[[1]]$XDs))[Dlinked]
Slinked<-match(c(colnames(X.list$X[[1]]$XSus), colnames(X.list$X[[1]]$XSs)), Dlinked_names)
Slinked[which(is.na(Slinked)==FALSE)]<-Dlinked
Slinked[which(is.na(Slinked)==TRUE)]<-sum(nvar[1:2])+c(1:sum(is.na(Slinked)))
beta_map[sum(nvar[1:2])+(1:sum(nvar[3:4]))]<-Slinked
}
if(sum(nvar[5:6])>0 & sum(nvar[1:4])>0){
beta_map[sum(nvar[1:4])+(1:sum(nvar[5:6]))]<-c(max(beta_map[1:sum(nvar[1:4])])+(1:sum(nvar[5:6])))
}
}else{
beta_map<--999
}
X.list$beta_map<-beta_map
# contrast with base parents
for(off in 1:sum(PdP$offspring==1)){
if(is.null(X.list$merge)==FALSE){
for(m in 1:length(X.list$merge)){
X.list$X[[off]]$mergeN[,m][X.list$mergeUS[m]]<-X.list$X[[off]]$mergeN[,m][X.list$mergeUS[m]]-1
# need to take 1 off the mergeN class as it is actually unsampled
n1<-X.list$X[[off]]$mergeN[,m][1]+(X.list$mergeUS[m]==1)
n2<-X.list$X[[off]]$mergeN[,m][2]+(X.list$mergeUS[m]==2)
if(n1==0 | n2==0){
X.list$X[[off]]$mergeN[,m]<-1
}
# if all individuals (sampled and unsampled are in 1 class numerical problems occur)
# however mergeN can be safley replaced with what ever since they don't contribute
# to the likelihood or pedigree estimation as all individuals are monomorphic!
}
}
if(nvar[1]>0){
nrowX=dim(X.list$X[[off]]$XDus)[1]
ncolX=dim(X.list$X[[off]]$XDus)[2]
base<-X.list$X[[off]]$XDus[1,]
X.list$X[[off]]$XDus<-X.list$X[[off]]$XDus-matrix(rep(base,each=nrowX), nrowX, ncolX)
col2scale<-which(X.list$X[[off]]$vtDus=="numeric")
if(length(col2scale)>0){
center.val<-colMeans(as.matrix(X.list$X[[off]]$XDus[,col2scale]), na.rm=T)
X.list$X[[off]]$XDus[,col2scale]<-scale(X.list$X[[off]]$XDus[,col2scale], center=center.val, scale=FALSE)
}
}
if(nvar[2]>0){
nrowX=dim(X.list$X[[off]]$XDs)[1]
ncolX=dim(X.list$X[[off]]$XDs)[2]
base<-X.list$X[[off]]$XDs[1,]
X.list$X[[off]]$XDs<-X.list$X[[off]]$XDs-matrix(rep(base,each=nrowX), nrowX, ncolX)
col2scale<-which(X.list$X[[off]]$vtDs=="numeric")
if(length(col2scale)>0){
center.val<-colMeans(as.matrix(X.list$X[[off]]$XDs[,col2scale]), na.rm=T)
X.list$X[[off]]$XDs[,col2scale]<-scale(X.list$X[[off]]$XDs[,col2scale], center=center.val, scale=FALSE)
}
}
if(nvar[3]>0){
nrowX=dim(X.list$X[[off]]$XSus)[1]
ncolX=dim(X.list$X[[off]]$XSus)[2]
base<-X.list$X[[off]]$XSus[1,]
X.list$X[[off]]$XSus<-X.list$X[[off]]$XSus-matrix(rep(base,each=nrowX), nrowX, ncolX)
col2scale<-which(X.list$X[[off]]$vtSus=="numeric")
if(length(col2scale)>0){
center.val<-colMeans(as.matrix(X.list$X[[off]]$XSus[,col2scale]), na.rm=T)
X.list$X[[off]]$XSus[,col2scale]<-scale(X.list$X[[off]]$XSus[,col2scale], center=center.val, scale=FALSE)
}
}
if(nvar[4]>0){
nrowX=dim(X.list$X[[off]]$XSs)[1]
ncolX=dim(X.list$X[[off]]$XSs)[2]
base<-X.list$X[[off]]$XSs[1,]
X.list$X[[off]]$XSs<-X.list$X[[off]]$XSs-matrix(rep(base,each=nrowX), nrowX, ncolX)
col2scale<-which(X.list$X[[off]]$vtSs=="numeric")
if(length(col2scale)>0){
center.val<-colMeans(as.matrix(X.list$X[[off]]$XSs[,col2scale]), na.rm=T)
X.list$X[[off]]$XSs[,col2scale]<-scale(X.list$X[[off]]$XSs[,col2scale], center=center.val, scale=FALSE)
}
}
if(nvar[5]>0){
nrowX=dim(X.list$X[[off]]$XDSus)[1]
ncolX=dim(X.list$X[[off]]$XDSus)[2]
base<-X.list$X[[off]]$XDSus[1,]
X.list$X[[off]]$XDSus<-X.list$X[[off]]$XDSus-matrix(rep(base,each=nrowX), nrowX, ncolX)
col2scale<-which(X.list$X[[off]]$vtDSus=="numeric")
if(length(col2scale)>0){
center.val<-colMeans(as.matrix(X.list$X[[off]]$XDSus[,col2scale]), na.rm=T)
X.list$X[[off]]$XDSus[,col2scale]<-scale(X.list$X[[off]]$XDSus[,col2scale], center=center.val, scale=FALSE)
}
}
if(nvar[6]>0){
nrowX=dim(X.list$X[[off]]$XDSs)[1]
ncolX=dim(X.list$X[[off]]$XDSs)[2]
base<-X.list$X[[off]]$XDSs[1,]
X.list$X[[off]]$XDSs<-X.list$X[[off]]$XDSs-matrix(rep(base,each=nrowX), nrowX, ncolX)
col2scale<-which(X.list$X[[off]]$vtDSs=="numeric")
if(length(col2scale)>0){
center.val<-colMeans(as.matrix(X.list$X[[off]]$XDSs[,col2scale]), na.rm=T)
X.list$X[[off]]$XDSs[,col2scale]<-scale(X.list$X[[off]]$XDSs[,col2scale], center=center.val, scale=FALSE)
}
}
}
if(is.null(GdP$G)==FALSE){
if(is.null(A)==TRUE){
A<-extractA(GdP$G)
}else{
for(i in 1:length(GdP$G)){
A[[i]]<-A[[i]][order(A[[i]], decreasing=T)]
GdP$G[[i]]<-genotype(GdP$G[[i]], alleles=names(A[[i]]), reorder="no")
}
}
Gid<-GdP$id[-duplicated(GdP$id)==FALSE]
G<-lapply(GdP$G, function(x){x[-duplicated(GdP$id)==FALSE]})
grouped_by_id<-order(match(Gid, unique_id))
G<-lapply(G, function(x){x[grouped_by_id]})
Gid<-grouped_by_id
X.list<-mismatches(X.list, G=G, mm.tol=mm.tol)
if(is.null(E1)==TRUE){
E1<-0.005
}
if(is.null(E2)==TRUE){
E2<-0.005
}
X.list<-fillX.G(X.list, A=A, G=G, E1=E1, E2=E2, marker.type=GdP$marker.type)
X.list<-reordXlist(X.list, marker.type=GdP$marker.type)
}
npdam<-unlist(lapply(X.list$X, function(x){length(x$restdam.id)}))
npsire<-unlist(lapply(X.list$X, function(x){length(x$restsire.id)}))
if(any(npdam==0)){ stop(paste("Indiviudals", paste(X.list$id[as.numeric(names(X.list$X)[which(npdam==0)])], collapse=" "), "have no possible dams"))}
if(any(npsire==0)){stop(paste("Individuals", paste(X.list$id[as.numeric(names(X.list$X)[which(npsire==0)])], collapse=" "), "have no possible sires"))}
}
X.list
}
|
a2c1255911e0b52f8f63b500599d4cf6311cf4ef
|
8a8236ff110fd8876c38bf151327b75690f02d94
|
/empirical_estimation.R
|
a7ee7f93ca04ca5155eaa4689bb2d0d11b743b3a
|
[
"MIT"
] |
permissive
|
sl-bergquist/cancer_classification
|
b3bd46ce051b755693267b24e3fbd03c13e9ad9f
|
22623bd8b86cc3efa3955859898639f9a1ecffde
|
refs/heads/main
| 2023-06-15T09:21:31.326232
| 2021-07-06T04:51:12
| 2021-07-06T04:51:12
| 381,496,706
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 31,019
|
r
|
empirical_estimation.R
|
#########################################################
# CancerCLAS Multiclass Prediction
# Empirical Data
# Naive No Variation, Naive Bootstrap, Weighted Bootstrap
#########################################################
options(scipen = 999)
library(MASS)
library(tidyverse)
library(glmnet)
library(nnet)
library(gbm)
library(mgcv)
library(xgboost)
library(polspline)
library(randomForest)
library(caret)
library(pROC)
library(survival)
library(doParallel)
library(ranger)
detectCores()
library(nnls)
library(SuperLearner)
library(survminer)
library(ggfortify)
library(rms)
library(pec)
library(riskRegression)
# source misc_funs.R: contains function for calculating class measures
source("~/misc_funs_empirical_v3.R")
# Reproducibility: fixed seed governs shuffling, fold assignment, and resampling
set.seed(33)
#### Data Set Up ####
# number of bootstrap iterations
n_bs <- 500
# alpha for setting label thresholds
alpha <- .10
### Development data: 2010-2011 ###
load("~/lung_20102011_072819.RData")
data_dev <- lung_20102011_072819; rm(lung_20102011_072819)
order_dev <- colnames(data_dev) # column order -- apply to validation data below
# shuffle data
data_dev <- data_dev[sample(nrow(data_dev)),]
# assign row ids for splitting
data_dev <- data_dev %>% mutate(id=row_number())
# use caret for stratified CV folds (not perfectly even b/c of stratification on y)
data_split <- createFolds(factor(data_dev$StageGroup_AJCC6), k=2, list=T)
# record fold membership as a column: fold 1 = I1 (training), fold 2 = I2 (thresholds)
data_dev <- data_dev %>% mutate(foldid = ifelse((id %in% data_split[[1]]),1,2))
# BUG FIX: the column is named 'foldid'; the original read 'data_dev$foldids',
# which silently returned NULL
foldid <- data_dev$foldid
# remove ref groups, survival, and others for fitting
data_dev_fit <- data_dev %>% dplyr::select(-Early_Stage, -StageGroup_Predicted,
                                           -Region_West, -Surgery_Segmental,
                                           -CauseOfDeath_LungCancer,
                                           -Days_First_Chemo_to_Death, -Censored)
rm(data_split)
# bookkeeping columns excluded from every model matrix below
id_drops <- c("id", "foldid")
## Naive fit on development data ##
# Multinomial logistic regression (nnet) of stage on all predictors;
# the id/foldid bookkeeping columns are excluded from the model frame.
fitMN <- function(data) {
  keep <- setdiff(names(data), id_drops)
  nnet::multinom(StageGroup_AJCC6 ~ ., data = data[, keep], maxit = 500)
}
# Penalized multinomial regression via glmnet.
#
# @param data data frame with StageGroup_AJCC6 plus predictors (and id/foldid)
# @param alpha elastic-net mixing parameter (1 = lasso, 0 = ridge, 0.5 = enet)
# @return a fitted multinomial glmnet object (full lambda path)
fitGLMNET <- function(data, alpha) {
  non_predictors <- c("id", "foldid", "StageGroup_AJCC6")
  design <- as.matrix(data[, setdiff(names(data), non_predictors)])
  response <- as.factor(data[, "StageGroup_AJCC6"])
  glmnet(x = design, y = response, family = "multinomial", alpha = alpha)
}
# Random forest classifier with sampling stratified on the outcome;
# id/foldid bookkeeping columns are excluded from the predictors.
fitRF <- function(data) {
  rf_df <- data[, setdiff(names(data), id_drops)]
  randomForest(as.factor(StageGroup_AJCC6) ~ ., data = rf_df,
               ntree = 500, nodesize = 250,
               strata = as.factor(rf_df$StageGroup_AJCC6))
}
# Multinomial GAM via mgcv. The outcome is recoded to numeric 0..2 as the
# mgcv multinom(K = 2) family requires. Formulas come from CreateGAMFormula
# (sourced from misc_funs), which builds cubic regression splines with k = 3
# knots and a fixed smoothing penalty for every predictor -- a practical
# uniform choice rather than CV-tuning knots per variable, given the large
# number of predictors. Returns both the fit and the recoded training data
# because downstream code re-predicts on the training rows.
fitGAM <- function(data) {
  gam_df <- data
  gam_df[, "StageGroup_AJCC6"] <- as.numeric(gam_df$StageGroup_AJCC6) - 1
  # two linked formulas, one per non-reference class (id/foldid dropped here)
  formulas <- CreateGAMFormula(data = gam_df[, !names(gam_df) %in% id_drops],
                               y = "StageGroup_AJCC6", type = "regspline")
  fitted_gam <- mgcv::gam(list(formulas[[1]], formulas[[2]]),
                          data = gam_df, family = multinom(K = 2))
  list(fit = fitted_gam, data = gam_df)
}
# Gradient-boosted trees (xgboost) with the multi-class softprob objective.
# The outcome is recoded to 0..2 as xgboost expects; id/foldid are dropped.
# Returns the booster together with the training matrix (label included),
# which downstream code re-predicts on. Assumes all remaining columns are
# numeric so as.matrix() stays numeric -- TODO confirm against the data.
fitXGB <- function(data) {
  xgb_df <- data[, !names(data) %in% id_drops]
  xgb_df[, "StageGroup_AJCC6"] <- as.numeric(xgb_df$StageGroup_AJCC6) - 1
  xgb_mat <- as.matrix(xgb_df)
  booster <- xgboost(data = subset(xgb_mat, select = -StageGroup_AJCC6),
                     label = xgb_mat[, "StageGroup_AJCC6"],
                     max.depth = 3, eta = 1, nthread = 3, nrounds = 2,
                     objective = "multi:softprob", num_class = 3,
                     eval_metric = "mlogloss")
  list(fit = booster, data = xgb_mat)
}
# Fit all seven candidate classifiers on the full development data (no
# train/threshold split): multinomial logit, lasso, ridge, elastic net,
# random forest, multinomial GAM, and XGBoost.
#
# @param data development data with StageGroup_AJCC6 plus predictors
#   (id/foldid are dropped inside the individual fit helpers)
# @return named list of fitted model objects, one per algorithm
devNaiveFitFun <- function(data){
  fit_mn <- fitMN(data=data)                  # multinomial logistic regression
  fit_lasso <- fitGLMNET(data=data,alpha=1)   # lasso (L1)
  fit_ridge <- fitGLMNET(data=data, alpha=0)  # ridge (L2)
  fit_enet <- fitGLMNET(data=data, alpha=.5)  # elastic net
  fit_rf <- fitRF(data)                       # random forest
  fit_gam <- fitGAM(data)                     # multinomial GAM
  fit_xgb <- fitXGB(data)                     # gradient boosting
  out <- list(fit_mn=fit_mn, fit_lasso=fit_lasso, fit_ridge=fit_ridge,
              fit_enet=fit_enet, fit_rf=fit_rf, fit_gam=fit_gam, fit_xgb=fit_xgb)
  # FIX: return the list explicitly -- the original ended on the assignment,
  # which only returned the value invisibly.
  return(out)
}
# naive (no-split) fits on the full 2010-2011 development data
dev_naive_results <- devNaiveFitFun(data=data_dev_fit)
### now use split for algorithm fitting
# Split-sample fitting for the weighted/conformal pipeline: train every
# algorithm on fold I1 (rows with foldid != thld_fold_num) and predict class
# probabilities on the held-out fold I2; those holdout predictions are later
# used by devLABELFun to estimate per-class label thresholds.
#
# @param data development data incl. 'id', 'foldid', 'StageGroup_AJCC6'
# @param thld_fold_num fold number held out for threshold estimation
# @return list of fitted models, holdout prediction matrices (cols g1..g3 =
#   class probabilities, y_pred = argmax class, id, foldid, y_obs), and
#   fitted values on the training fold for the glmnet/gam/xgb models
devWtdFitFun <- function(data,thld_fold_num){
  holdoutIndex <- which(data[,"foldid"]==thld_fold_num, arr.ind=T)
  holdoutFold <- data[holdoutIndex,]
  fitFolds <- data[-holdoutIndex,]
  # multinomial algorithm
  holdoutFold_mn <- as.matrix(holdoutFold[, !names(holdoutFold) %in% id_drops])
  fit_mn <- fitMN(data=fitFolds)
  pred_mn <- predict(fit_mn, holdoutFold_mn, type="prob")
  y_pred <- max.col(pred_mn)
  pred_mn <- cbind(pred_mn, y_pred, holdoutFold$id, holdoutFold$foldid, holdoutFold$StageGroup_AJCC6)
  colnames(pred_mn) <- c("g1", "g2", "g3","y_pred", "id", "foldid", "y_obs")
  # glmnet-based algorithms: drop non-predictor vars, convert to matrices
  x_drops <- c("id", "foldid", "StageGroup_AJCC6")
  fitFolds_glmnet <- as.matrix(fitFolds[, !names(fitFolds) %in% x_drops])
  holdoutFold_glmnet <- as.matrix(holdoutFold[, !names(holdoutFold) %in% x_drops])
  # lasso
  # BUG FIX: fit on fitFolds, not 'data' -- the original fit the three glmnet
  # models on the full development data, leaking the holdout fold into the
  # threshold-estimation predictions (mn/rf/gam/xgb all fit on fitFolds).
  fit_lasso <- fitGLMNET(data=fitFolds,alpha=1)
  # get the minimum lambda, get coefs for each category
  lambda_min <- min(fit_lasso$lambda)
  fit_lasso_coefs <- coef(fit_lasso, s = lambda_min)
  pred_lasso <- predict(fit_lasso, as.matrix(holdoutFold_glmnet),
                        type="response", s=lambda_min)
  fit_lasso_values <- predict(fit_lasso, as.matrix(fitFolds_glmnet),
                              type="response", s=lambda_min)
  y_pred <- max.col(pred_lasso[,1:3,])
  pred_lasso <- as.matrix(cbind(as.data.frame(pred_lasso),y_pred,
                                holdoutFold$id, holdoutFold$foldid,
                                holdoutFold$StageGroup_AJCC6))
  colnames(pred_lasso) <- c("g1", "g2", "g3","y_pred", "id", "foldid", "y_obs")
  print("lasso")
  # ridge (BUG FIX: fit on fitFolds, see note above)
  fit_ridge <- fitGLMNET(data=fitFolds,alpha=0)
  lambda_min_ridge <- min(fit_ridge$lambda)
  pred_ridge <- predict(fit_ridge, as.matrix(holdoutFold_glmnet),
                        type="response", s=lambda_min_ridge)
  fit_ridge_values <- predict(fit_ridge, as.matrix(fitFolds_glmnet),
                              type="response", s=lambda_min_ridge)
  y_pred <- max.col(pred_ridge[,1:3,])
  pred_ridge <- as.matrix(cbind(as.data.frame(pred_ridge),y_pred,
                                holdoutFold$id, holdoutFold$foldid,
                                holdoutFold$StageGroup_AJCC6))
  colnames(pred_ridge) <- c("g1", "g2", "g3","y_pred", "id", "foldid", "y_obs")
  print("ridge")
  # elastic net (BUG FIX: fit on fitFolds, see note above)
  fit_enet <- fitGLMNET(data=fitFolds, alpha=.5)
  lambda_min_enet <- min(fit_enet$lambda)
  pred_enet <- predict(fit_enet, as.matrix(holdoutFold_glmnet),
                       type="response", s=lambda_min_enet)
  fit_enet_values <- predict(fit_enet, as.matrix(fitFolds_glmnet),
                             type="response", s=lambda_min_enet)
  y_pred <- max.col(pred_enet[,1:3,])
  pred_enet <- as.matrix(cbind(as.data.frame(pred_enet), y_pred,
                               holdoutFold$id, holdoutFold$foldid,
                               holdoutFold$StageGroup_AJCC6))
  colnames(pred_enet) <- c("g1", "g2", "g3","y_pred", "id", "foldid", "y_obs")
  print("enet")
  # random forest
  holdoutFold_rf <- holdoutFold[,!(names(holdoutFold) %in% id_drops)]
  fit_rf <- fitRF(fitFolds)
  pred_rf <- predict(fit_rf, holdoutFold_rf, type="prob")
  y_pred <- max.col(pred_rf)
  pred_rf <- cbind(pred_rf,y_pred, holdoutFold$id, holdoutFold$foldid,
                   holdoutFold$StageGroup_AJCC6)
  colnames(pred_rf) <- c("g1", "g2", "g3","y_pred", "id", "foldid", "y_obs")
  print("rf")
  # GAM: outcome must be recoded to numeric 0..2 to match fitGAM's encoding
  fit_gam <- fitGAM(data=fitFolds)
  holdoutFold_gam <- holdoutFold
  holdoutFold_gam[,"StageGroup_AJCC6"] <- as.numeric(holdoutFold_gam[,"StageGroup_AJCC6"])-1
  pred_gam <- predict(fit_gam$fit, newdata=holdoutFold_gam, type="response")
  fit_gam_values <- predict(fit_gam$fit, newdata=fit_gam$data, type="response")
  y_pred <- max.col(pred_gam)
  pred_gam <- cbind(pred_gam,y_pred, holdoutFold$id, holdoutFold$foldid,
                    holdoutFold$StageGroup_AJCC6)
  colnames(pred_gam) <- c("g1", "g2", "g3","y_pred", "id", "foldid", "y_obs")
  print("gam")
  # Boosting: convert the holdout fold to an xgb.DMatrix without the label
  holdoutFold_xgb <- holdoutFold
  holdoutFold_xgb$StageGroup_AJCC6 <- as.numeric(holdoutFold_xgb$StageGroup_AJCC6)-1
  holdoutFold_xgb_noids <- holdoutFold_xgb[,!names(holdoutFold_xgb) %in% id_drops]
  holdoutFold_xgb_noids <- as.matrix(holdoutFold_xgb_noids)
  # NOTE(review): column 95 is assumed to be StageGroup_AJCC6 -- fragile
  # positional drop; confirm if the column layout ever changes
  holdoutFold_xgb_noids <- xgb.DMatrix(data=holdoutFold_xgb_noids[,-95])
  fit_xgb <- fitXGB(fitFolds)
  # predict outputs a length data*nclass vector; reshape to data x nclass
  pred_xgb <- matrix(predict(fit_xgb$fit, holdoutFold_xgb_noids),
                     nrow=nrow(holdoutFold_xgb_noids), byrow=T)
  # have to drop StageGroup_AJCC6 (column 95) from the training matrix too
  fit_xgb_values <- matrix(predict(fit_xgb$fit, fit_xgb$data[,-95]),
                           nrow=nrow(fit_xgb$data), byrow=T)
  y_pred <- max.col(pred_xgb)
  pred_xgb <- cbind(pred_xgb,y_pred, holdoutFold$id, holdoutFold$foldid,
                    holdoutFold$StageGroup_AJCC6)
  colnames(pred_xgb) <- c("g1", "g2", "g3","y_pred", "id", "foldid", "y_obs")
  out <- list(fit_mn=fit_mn, pred_mn=pred_mn,
              fit_lasso = fit_lasso, pred_lasso=pred_lasso,
              fit_lasso_coefs= fit_lasso_coefs,fit_lasso_values=fit_lasso_values,
              fit_ridge=fit_ridge, pred_ridge=pred_ridge,fit_ridge_values=fit_ridge_values,
              fit_enet=fit_enet, pred_enet=pred_enet,fit_enet_values=fit_enet_values,
              fit_rf=fit_rf, pred_rf=pred_rf,
              fit_gam=fit_gam, pred_gam=pred_gam,fit_gam_values=fit_gam_values,
              fit_xgb=fit_xgb, pred_xgb=pred_xgb, fit_xgb_values=fit_xgb_values)
  return(out)
}
# fit on I1 fold (foldid != 2), predict on I2 (foldid == 2)
dev_wtd_results <- devWtdFitFun(data=data_dev_fit, thld_fold_num=2)
# define thresholds and create label sets for I2
# Estimate per-class score thresholds on the held-out development fold I2.
# For each algorithm, observation i's score is the predicted probability of
# its OBSERVED class; the class-k threshold is an empirical lower quantile
# (at class_alphas[k]) of the scores among class-k observations, so that
# labeling every class with probability >= its threshold targets roughly
# 1 - alpha per-class coverage.
#
# NOTE(review): the Hstar_classcov_* label matrices computed below are never
# returned -- only the thresholds are; validation label sets are built later
# by LABELFun. Presumably dead code from an earlier version; confirm before
# removing.
devLABELFun <- function(data,dev_results,...){
  # threshold estimation: phat = holdout prediction matrix whose first three
  # columns are the class probabilities; Y2 = observed classes coded 1..3
  conf_thlds <- function(phat,Y2,class_alphas){
    m = nrow(phat)
    K = 3
    score = rep(0,m)
    for(i in 1:m){
      # probability the model assigned to the true class of observation i
      score[i] = phat[i,Y2[i]]
    }
    ## Class-specific coverage
    class_thlds <- rep(NA,K)
    for (k in 1:3){
      class_thlds[k] <- sort(score[Y2==k])[ceiling(class_alphas[k]*(sum(Y2==k)+1)-1)]
    }
    return(class_thlds=class_thlds)
  }
  # use fitted values and obs Ys to estimate thresholds, one set per algorithm
  thlds_est_mn <- conf_thlds(phat=dev_results$pred_mn,
                             Y2=data[,"StageGroup_AJCC6"],class_alphas=c(.1,.1,.1))
  thlds_est_lasso <- conf_thlds(phat=as.data.frame(dev_results$pred_lasso),
                                Y2=data[,"StageGroup_AJCC6"], class_alphas=c(.1,.1,.1))
  thlds_est_ridge <- conf_thlds(phat=as.data.frame(dev_results$pred_ridge),
                                Y2=data[,"StageGroup_AJCC6"], class_alphas=c(.1,.1,.1))
  thlds_est_enet <- conf_thlds(phat=as.data.frame(dev_results$pred_enet),
                               Y2=data[,"StageGroup_AJCC6"], class_alphas=c(.1,.1,.1))
  thlds_est_rf <- conf_thlds(phat=dev_results$pred_rf,
                             Y2=data[,"StageGroup_AJCC6"],class_alphas=c(.1,.1,.1))
  thlds_est_gam <- conf_thlds(phat=dev_results$pred_gam,
                              Y2=data[,"StageGroup_AJCC6"], class_alphas=c(.1,.1,.1))
  thlds_est_xgb <- conf_thlds(phat=dev_results$pred_xgb,
                              Y2=data[,"StageGroup_AJCC6"], class_alphas=c(.1,.1,.1))
  # 0/1 label-set matrix: class k is in the set iff its probability >= threshold
  hstarClassFun <- function(pred_name, thlds_name){
    tmp <- dev_results[[pred_name]]
    tmp <- tmp[,1:3]
    t(apply(tmp, 1,
            function(x){as.numeric(x >= thlds_name)}))
  }
  # NOTE(review): computed but unused -- not included in 'out' below
  Hstar_classcov_mn <- hstarClassFun("pred_mn", thlds_est_mn)
  Hstar_classcov_lasso <- hstarClassFun("pred_lasso", thlds_est_lasso)
  Hstar_classcov_ridge <- hstarClassFun("pred_ridge", thlds_est_ridge)
  Hstar_classcov_enet <- hstarClassFun("pred_enet", thlds_est_enet)
  Hstar_classcov_rf <- hstarClassFun("pred_rf", thlds_est_rf)
  Hstar_classcov_gam <- hstarClassFun("pred_gam", thlds_est_gam)
  Hstar_classcov_xgb <- hstarClassFun("pred_xgb", thlds_est_xgb)
  out <- list(thlds_est_mn=thlds_est_mn,
              thlds_est_lasso=thlds_est_lasso,
              thlds_est_ridge=thlds_est_ridge,
              thlds_est_enet=thlds_est_enet,
              thlds_est_rf=thlds_est_rf,
              thlds_est_gam=thlds_est_gam,
              thlds_est_xgb=thlds_est_xgb)
  return(out)
}
# held-out fold I2 of the development data: used to estimate label thresholds
data_dev_I2 <- data_dev %>% filter(foldid==2) %>% as.data.frame
data_dev_thlds <- devLABELFun(data=data_dev_I2, dev_results=dev_wtd_results)
### Validation data: 2012-2013 ###
load("~/lung_20122013_072819.RData")
data_val <- lung_20122013_072819; rm(lung_20122013_072819)
# make column names match 2010-2011 data / arrange in same order
data_val <- data_val %>% select(order_dev)
# drop if Days_First_Chemo_to_Death is <0 (keep NA = censored/alive)
data_val <- data_val %>% filter(Days_First_Chemo_to_Death>=0|is.na(Days_First_Chemo_to_Death))
# assign ids to keep track across resamples
data_val <- data_val %>% mutate(id=row_number())
# predictor view of the validation data (reference-group columns removed)
data_val_pred <- data_val %>% select(-StageGroup_Predicted, -Early_Stage, -Region_West,
                                     -Surgery_Segmental)
## WTD Validation Prediction ##
# use conditional probability estimators fit on I1_dev; thresholds based on I2_dev
# pull out fitted algorithms (fit on I1)
# Predict stage-class probabilities in the 2012-2013 validation data using
# the split-sample ("weighted") models fit on development fold I1
# (dev_wtd_results, a global). Produces one data.frame per algorithm with
# columns p1..p3 (class probabilities), y_pred (argmax class), y_obs
# (observed stage), and id.
#
# @param data full validation data (source of StageGroup_AJCC6 and id)
# @param data_for_pred predictor view of the validation data
# @return list of 7 prediction data.frames (mn, lasso, ridge, enet, rf, gam, xgb)
valWTDPredFun <- function(data, data_for_pred){
  # remove non-prediction vars from prediction for glmnet-based algs
  x_drops <- c("StageGroup_AJCC6", "CauseOfDeath_LungCancer", "Censored", "Days_First_Chemo_to_Death",
               "id", "foldid")
  # BUG FIX: build the glmnet design matrix from the 'data_for_pred' argument
  # instead of the global 'data_val_pred' (identical in the current call, but
  # the function previously ignored its own argument)
  data_glmnet <- as.matrix(data_for_pred[, !names(data_for_pred) %in% x_drops])
  # list to hold prediction output
  out_list <- vector("list", 7)
  ## predictions in 2012-2013 validation data ##
  # each element: predicted probabilities per class, the single predicted
  # class (highest probability), the observed class, and the row id
  # mn
  pred_mn <- predict(dev_wtd_results$fit_mn, data_for_pred, type="prob")
  y_pred <- max.col(pred_mn[,1:3])
  pred_mn <- cbind(pred_mn, y_pred, data$StageGroup_AJCC6, data$id)
  colnames(pred_mn) <- c("p1", "p2", "p3", "y_pred","y_obs", "id")
  out_list[[1]] <- as.data.frame(pred_mn)
  ## lasso -- predict at the smallest lambda on the fitted path
  lambda_min <- min(dev_wtd_results$fit_lasso$lambda)
  pred_lasso <- predict(dev_wtd_results$fit_lasso, as.matrix(data_glmnet),
                        type="response", s=lambda_min)
  y_pred <- max.col(pred_lasso[,1:3,])
  pred_lasso <- as.matrix(cbind(as.data.frame(pred_lasso),y_pred, data$StageGroup_AJCC6, data$id))
  colnames(pred_lasso) <- c("p1", "p2", "p3","y_pred", "y_obs", "id")
  out_list[[2]] <- as.data.frame(pred_lasso)
  ## ridge
  lambda_min_ridge <- min(dev_wtd_results$fit_ridge$lambda)
  pred_ridge <- predict(dev_wtd_results$fit_ridge, as.matrix(data_glmnet),
                        type="response", s=lambda_min_ridge)
  y_pred <- max.col(pred_ridge[,1:3,])
  pred_ridge <- as.matrix(cbind(as.data.frame(pred_ridge), y_pred,data$StageGroup_AJCC6, data$id))
  colnames(pred_ridge) <- c("p1", "p2", "p3","y_pred", "y_obs","id")
  out_list[[3]] <- as.data.frame(pred_ridge)
  ## enet
  lambda_min_enet <- min(dev_wtd_results$fit_enet$lambda)
  pred_enet <- predict(dev_wtd_results$fit_enet, as.matrix(data_glmnet),
                       type="response", s=lambda_min_enet)
  y_pred <- max.col(pred_enet[,1:3,])
  pred_enet <- as.matrix(cbind(as.data.frame(pred_enet), y_pred,data$StageGroup_AJCC6, data$id))
  colnames(pred_enet) <- c("p1", "p2", "p3","y_pred", "y_obs","id")
  out_list[[4]] <- as.data.frame(pred_enet)
  ## rf
  pred_rf <- predict(dev_wtd_results$fit_rf, data_for_pred, type="prob")
  y_pred <- max.col(pred_rf[,1:3])
  pred_rf <- cbind(pred_rf,y_pred, data$StageGroup_AJCC6, data$id)
  colnames(pred_rf) <- c("p1", "p2", "p3","y_pred","y_obs","id")
  out_list[[5]] <- as.data.frame(pred_rf)
  ## gam
  pred_gam <- predict(dev_wtd_results$fit_gam$fit, newdata=data_for_pred, type="response")
  y_pred <- max.col(pred_gam[,1:3])
  pred_gam <- cbind(pred_gam,y_pred, data$StageGroup_AJCC6, data$id)
  colnames(pred_gam) <- c("p1", "p2", "p3","y_pred", "y_obs","id")
  out_list[[6]] <- as.data.frame(pred_gam)
  ## xgb: matrix input without label/survival/id columns
  data_xgb <- as.matrix(data_for_pred)
  data_xgb_pred <- xgb.DMatrix(data=subset(data_xgb, select=c(-StageGroup_AJCC6,
                                                              -CauseOfDeath_LungCancer,
                                                              -Censored,
                                                              -Days_First_Chemo_to_Death,
                                                              -id)),
                               label=data_xgb[,"StageGroup_AJCC6"])
  # predict returns a data*nclass vector; reshape to data x nclass
  pred_xgb <- matrix(predict(dev_wtd_results$fit_xgb$fit, data_xgb_pred),
                     nrow=nrow(data_xgb_pred), byrow=T)
  y_pred <- max.col(pred_xgb[,1:3])
  pred_xgb <- cbind(pred_xgb,y_pred, data$StageGroup_AJCC6, data$id)
  colnames(pred_xgb) <- c("p1", "p2", "p3","y_pred", "y_obs","id")
  out_list[[7]] <- as.data.frame(pred_xgb)
  return(out_list)
}
# predictions from the split-sample (weighted) models in the validation data
val_wtd_results <- valWTDPredFun(data=data_val, data_for_pred=data_val_pred)
alg_names <- c("mn", "lasso", "ridge", "enet", "rf", "gam","xgb")
names(val_wtd_results) <- alg_names
# LABELFun (sourced from misc_funs) labels predicted classes in the 2012-2013
# data based on the thresholds set in the split 2010-2011 data; its result
# also carries the weighted in-sample coverage and ambiguity summaries
val_label_results <- LABELFun(val_wtd_results, thlds=data_dev_thlds, data_val=data_val)
## Wtd Validation ambiguity ##
# val_label_results already captures wtd sample coverage and ambiguity
## Naive Validation Prediction ##
# Predict stage-class probabilities in the 2012-2013 validation data using
# the naive models fit on the FULL development data (dev_naive_results, a
# global). Same output shape as valWTDPredFun: one data.frame per algorithm
# with p1..p3, y_pred, y_obs, id.
#
# @param data full validation data (source of StageGroup_AJCC6 and id)
# @param data_for_pred predictor view of the validation data
# @return list of 7 prediction data.frames (mn, lasso, ridge, enet, rf, gam, xgb)
valNaivePredFun <- function(data, data_for_pred){
  # remove non-prediction vars from prediction for glmnet-based algs
  x_drops <- c("StageGroup_AJCC6", "CauseOfDeath_LungCancer", "Censored", "Days_First_Chemo_to_Death",
               "id", "foldid")
  # BUG FIX: build the glmnet design matrix from the 'data_for_pred' argument
  # instead of the global 'data_val_pred' (identical in the current call, but
  # the function previously ignored its own argument)
  data_glmnet <- as.matrix(data_for_pred[, !names(data_for_pred) %in% x_drops])
  # list to hold prediction output
  out_list <- vector("list", 7)
  ## predictions in 2012-2013 validation data ##
  # each element: predicted probabilities per class, the single predicted
  # class (highest probability), the observed class, and the row id
  # mn
  pred_mn <- predict(dev_naive_results$fit_mn, data_for_pred, type="prob")
  y_pred <- max.col(pred_mn[,1:3])
  pred_mn <- cbind(pred_mn, y_pred, data$StageGroup_AJCC6,data$id)
  colnames(pred_mn) <- c("p1", "p2", "p3", "y_pred","y_obs","id")
  out_list[[1]] <- as.data.frame(pred_mn)
  ## lasso -- predict at the smallest lambda on the fitted path
  lambda_min <- min(dev_naive_results$fit_lasso$lambda)
  pred_lasso <- predict(dev_naive_results$fit_lasso, as.matrix(data_glmnet),
                        type="response", s=lambda_min)
  y_pred <- max.col(pred_lasso[,1:3,])
  pred_lasso <- as.matrix(cbind(as.data.frame(pred_lasso),y_pred, data$StageGroup_AJCC6,data$id))
  colnames(pred_lasso) <- c("p1", "p2", "p3","y_pred", "y_obs","id")
  out_list[[2]] <- as.data.frame(pred_lasso)
  ## ridge
  lambda_min_ridge <- min(dev_naive_results$fit_ridge$lambda)
  pred_ridge <- predict(dev_naive_results$fit_ridge, as.matrix(data_glmnet),
                        type="response", s=lambda_min_ridge)
  y_pred <- max.col(pred_ridge[,1:3,])
  pred_ridge <- as.matrix(cbind(as.data.frame(pred_ridge), y_pred,data$StageGroup_AJCC6,data$id))
  colnames(pred_ridge) <- c("p1", "p2", "p3","y_pred", "y_obs","id")
  out_list[[3]] <- as.data.frame(pred_ridge)
  ## enet
  lambda_min_enet <- min(dev_naive_results$fit_enet$lambda)
  pred_enet <- predict(dev_naive_results$fit_enet, as.matrix(data_glmnet),
                       type="response", s=lambda_min_enet)
  y_pred <- max.col(pred_enet[,1:3,])
  pred_enet <- as.matrix(cbind(as.data.frame(pred_enet), y_pred,data$StageGroup_AJCC6,data$id))
  colnames(pred_enet) <- c("p1", "p2", "p3","y_pred", "y_obs","id")
  out_list[[4]] <- as.data.frame(pred_enet)
  ## rf
  pred_rf <- predict(dev_naive_results$fit_rf, data_for_pred, type="prob")
  y_pred <- max.col(pred_rf[,1:3])
  pred_rf <- cbind(pred_rf,y_pred, data$StageGroup_AJCC6,data$id)
  colnames(pred_rf) <- c("p1", "p2", "p3","y_pred","y_obs","id")
  out_list[[5]] <- as.data.frame(pred_rf)
  ## gam
  pred_gam <- predict(dev_naive_results$fit_gam$fit, newdata=data_for_pred, type="response")
  y_pred <- max.col(pred_gam[,1:3])
  pred_gam <- cbind(pred_gam,y_pred, data$StageGroup_AJCC6,data$id)
  colnames(pred_gam) <- c("p1", "p2", "p3","y_pred", "y_obs","id")
  out_list[[6]] <- as.data.frame(pred_gam)
  ## xgb: matrix input without label/survival/id columns
  data_xgb <- as.matrix(data_for_pred)
  data_xgb_pred <- xgb.DMatrix(data=subset(data_xgb, select=c(-StageGroup_AJCC6,
                                                              -CauseOfDeath_LungCancer,
                                                              -Censored,
                                                              -Days_First_Chemo_to_Death,
                                                              -id)),
                               label=data_xgb[,"StageGroup_AJCC6"])
  # predict returns a data*nclass vector; reshape to data x nclass
  pred_xgb <- matrix(predict(dev_naive_results$fit_xgb$fit, data_xgb_pred),
                     nrow=nrow(data_xgb_pred), byrow=T)
  y_pred <- max.col(pred_xgb[,1:3])
  pred_xgb <- cbind(pred_xgb,y_pred, data$StageGroup_AJCC6,data$id)
  colnames(pred_xgb) <- c("p1", "p2", "p3","y_pred", "y_obs","id")
  out_list[[7]] <- as.data.frame(pred_xgb)
  return(out_list)
}
# predictions from the naive (full-data) models in the validation data
val_naive_results <- valNaivePredFun(data=data_val, data_for_pred=data_val_pred)
names(val_naive_results) <- alg_names
## Naive Validation Sample Coverage ##
# In-sample per-class coverage of the naive point prediction: for each true
# class k, the share of class-k observations whose single predicted class
# is k. Returns an unnamed numeric vector of length 3.
covNaiveSampleFun <- function(data){
  label_mat <- data.frame(g1 = as.numeric(data$y_pred == 1),
                          g2 = as.numeric(data$y_pred == 2),
                          g3 = as.numeric(data$y_pred == 3))
  vapply(1:3, function(k) mean(label_mat[k == data[,"y_obs"], k]), numeric(1))
}
# in-sample (non-bootstrap) naive per-class coverage, one vector per algorithm
val_naive_sample_coverage <- lapply(val_naive_results, covNaiveSampleFun)
## In sample naive no-boot classification performance (perfFun from misc_funs)
val_naive_pred_in_sample <- lapply(val_naive_results, perfFun)
## In sample km based on observed class -- same across all algs, just use data_val
# create censoring indicator (1=dead/not censored, 0=alive to play well with Surv function)
# Event indicator for Surv(): 1 = died (days-to-death observed), 0 = censored.
eventFun <- function(x){
  as.numeric(!is.na(x[,"Days_First_Chemo_to_Death"]))
}
# Follow-up time: observed days for deaths; censored patients are assigned
# the maximum observed days-to-death in the data.
timeFun <- function(x){
  days <- x[,"Days_First_Chemo_to_Death"]
  longest <- max(days, na.rm=TRUE)
  ifelse(x[,"event"]==0, longest, days)
}
# Surv() cannot handle time 0; bump zero times to 1 day.
time2Fun <- function(x){
  tt <- x[,"time"]
  ifelse(tt==0, 1, tt)
}
# survival variables for the validation data: event indicator, follow-up time
# (censored rows get the max observed time), and time2 (zero times -> 1 day)
data_val_tmp <- data_val %>% mutate(event=eventFun(.)) %>% mutate(time=timeFun(.)) %>%
  mutate(time2=time2Fun(.))
# in-sample KM by OBSERVED stage (plain/linear confidence intervals)
val_km_obs_in_sample <- survfit(Surv(time2,event)~StageGroup_AJCC6,type="kaplan-meier",
                                data=data_val_tmp,conf.type="plain")
# observed survival probabilities at certain time points
# Extract the survival probability and plain CI bounds from a survfit object
# at the requested time point(s); extend=T reports beyond the last event.
# FIX: call summary() once instead of three times with identical arguments.
survTimeFun <- function(sample,time){
  fit_summary <- summary(sample, times=time, extend=T)
  out <- list(surv=fit_summary$surv, lb=fit_summary$lower, ub=fit_summary$upper)
  return(out)}
# survival horizons (days) at which KM estimates are reported
times <- c(90,365)
sample_times_obs <- lapply(times, survTimeFun, sample=val_km_obs_in_sample)
names(sample_times_obs) <- paste0("d",times)
# median survival time -- ignore for stage I/II (median often not reached)
sample_50_obs <- quantile(val_km_obs_in_sample, probs=.5, conf.int=T)
sample_km_obs <- list(sample_times_obs=sample_times_obs, sample_median_obs=sample_50_obs)
rm(sample_times_obs, sample_50_obs)
## In sample km based on predicted class (naive no-boot)
# Kaplan-Meier by naive PREDICTED class for one algorithm's validation
# predictions: joins survival time/censoring info (from data_val_tmp, a
# global) onto the prediction frame by id, fits KM stratified by the
# predicted class, and returns survival at the fixed horizons in 'times'
# plus the median survival time per predicted class.
naiveKMPredFun <- function(data){
  surv_info <- data_val_tmp %>% select(id, time2, event)
  joined <- left_join(data, surv_info, by="id") %>%
    mutate(y_pred_fac=factor(y_pred, levels=c("1","2","3"), ordered=TRUE))
  km_fit <- survfit(Surv(time2,event) ~ y_pred_fac, type="kaplan-meier",
                    data=joined, conf.type="plain")
  surv_at_times <- lapply(times, survTimeFun, sample=km_fit)
  names(surv_at_times) <- paste0("d", times)
  median_surv <- quantile(km_fit, probs=.5, conf.int=TRUE, na.rm=TRUE)
  list(sample_times_pred=surv_at_times, sample_median_pred=median_surv)
}
# KM by naive predicted class, per algorithm (in sample, no bootstrap)
sample_km_pred_naive <- lapply(val_naive_results, naiveKMPredFun)
### Bootstrap: Naive and Weighted ###
# keep only the Hstar data.frames (0/1 label-set indicators from LABELFun)
Hstar_list <- val_label_results$Hstar
# join pred values, data, and labels together
# note that joining in the rest of the data is unnecessary if just doing KM
val_pred_labels_data <- map2(val_wtd_results, Hstar_list, ~cbind(.x, .y))
val_pred_labels_data <- map(val_pred_labels_data, ~left_join(.x,data_val,by=c("id")))
# One bootstrap iteration for a single algorithm's validation predictions.
# Computes, on the resampled rows: naive per-class coverage and KM curves
# (by observed and by naive predicted stage), plus the weighted versions:
# per-class coverage of the label sets (g1..g3), each set's ambiguity
# (number of labels), a single working label y_pred_wtd drawn from the set,
# and its KM curve.
bsFun <- function(data){
  # create variables for survival estimation
  # censoring indicator (1=dead/not censored, 0=alive -- plays well with Surv)
  data <- data %>% mutate(event=eventFun(.))
  data <- data %>% mutate(time=timeFun(.))
  data <- data %>% mutate(time2=time2Fun(.))
  # naive: one-hot matrix of the single predicted class
  Hstar_naive <- data %>% transmute(g1=ifelse(y_pred==1,1,0),
                                    g2=ifelse(y_pred==2,1,0),
                                    g3=ifelse(y_pred==3,1,0))
  # per-class coverage: share of class-k observations labeled k
  coverage_class_naive <- sapply(1:3,function(k)mean(Hstar_naive[k==data["y_obs"],k]))
  # km estimation
  # observed survival by observed stage for each resample
  km_obs <- survfit(Surv(time2,event)~StageGroup_AJCC6,type="kaplan-meier",
                    data=data,conf.type="plain")
  # observed survival by predicted stage for each resample
  km_pred_naive <- survfit(Surv(time2,event)~y_pred,type="kaplan-meier",
                           data=data,conf.type="plain")
  out_naive <- list(coverage=coverage_class_naive,km_pred=km_pred_naive,obs=km_obs)
  # wtd: label-set indicator columns produced earlier by LABELFun
  Hstar_wtd <- data[,c("g1", "g2", "g3")]
  # coverage
  coverage_class_wtd <- sapply(1:3,
                               function(k)mean(Hstar_wtd[k==data["y_obs"],k]))
  # identify ambiguous Hstars: amb = number of labels in the set (0-3)
  ambFun <- function(x){case_when(
    rowSums(x[,c("g1","g2","g3")])==0 ~"0",
    rowSums(x[,c("g1","g2","g3")])==1 ~"1",
    rowSums(x[,c("g1","g2","g3")])==2 ~"2",
    TRUE~"3")}
  label_data <- data %>% mutate(amb_tmp=ambFun(.),
                                amb_flag=factor(amb_tmp,levels=c("0","1","2","3"),ordered=T)) %>% select(-amb_tmp)
  #######** ASSIGNMENT OF LABEL **########
  # working predicted class: if a single label, select it (pr==1);
  # if 2 labels, randomly select one of the two (pr==.5 each);
  # if 0 or 3 labels, randomly select one of the three (pr==1/3 each)
  # (could instead assign the max-probability class for empty sets)
  labelFun <- function(x){case_when(
    x[,"amb_flag"]==0 ~ max.col(x[,c("g1", "g2", "g3")], ties.method="random"), # randomly select class 1-3 with equal probability
    x[,"amb_flag"]==3 ~ max.col(x[,c("g1", "g2", "g3")], ties.method="random"), # randomly select class 1-3 with equal probability
    x[,"amb_flag"]==2 ~ max.col(x[,c("g1", "g2", "g3")], ties.method="random"), # randomly select between the two assigned labels
    TRUE ~ max.col(x[,c("g1", "g2", "g3")]) # only one class assigned
  )}
  # max.col with ties.method="random" picks a random column among the maxima
  label_data <- label_data %>% mutate(y_pred_wtd=labelFun(.))
  keep_cols <- label_data %>% select(amb_flag, g1, g2, g3,y_pred,
                                     y_pred_wtd, y_obs, p1, p2, p3)
  km_pred_wtd <- survfit(Surv(time2,event)~y_pred_wtd,type="kaplan-meier",
                         data=label_data,conf.type="plain")
  out_wtd <- list(coverage=coverage_class_wtd,label_cols=keep_cols,
                  km_pred=km_pred_wtd)
  out <- list(naive=out_naive,wtd=out_wtd)
  return(out)
}
# Draw 500 bootstrap resamples for one algorithm: each resample takes
# nrow(data_val) rows, with replacement, from that algorithm's
# prediction/label table. Building all resamples takes <1 min each but the
# combined list is large (~28 Gb), so it is discarded as soon as possible.
# NOTE(review): relies on globals `val_pred_labels_data` and `data_val`.
resamplesFun <- function(alg) {
  source_tbl <- val_pred_labels_data[[alg]]
  n_draw <- nrow(data_val)
  lapply(seq_len(500), function(i) {
    source_tbl[sample(nrow(source_tbl), n_draw, replace = TRUE), ]
  })
}
# Materialise the 500 bootstrap resamples for every algorithm, keyed by name.
resamples_list <- lapply(alg_names, resamplesFun)
names(resamples_list) <- alg_names
# apply bsFun across all alg resamples; system.time() just reports how long
# this (expensive) pass takes
system.time(bs_out_list <- lapply(resamples_list, function(x){lapply(x, bsFun)}))
# free the very large resamples list as soon as it has been consumed
rm(resamples_list)
## Measures that vary across BS iterations ##
# Summarise one algorithm's bootstrap output into the measures that vary
# across bootstrap iterations.
#   data: list of per-resample results from bsFun; each element carries a
#         $naive and a $wtd component (see bsFun above)
#   alg:  algorithm name, used to index the in-sample reference objects
# Returns a named list of class coverage (naive and weighted), ambiguity,
# classification-performance summaries, and Kaplan-Meier survival summaries.
# NOTE(review): depends on helpers/globals defined elsewhere in this file
# (covFun, ambFun, perfBSFun_pct, perfWTDBSFun, medWTDBSFun,
# val_naive_sample_coverage, val_label_results) — not self-contained.
bsMeasFun <- function(data, alg){
  # coverage - to get bs-based CIs (rounded to 3 decimals)
  cov_naive <- round(covFun(lapply(lapply(data,`[[`,"naive"),`[[`,"coverage"),val_naive_sample_coverage[[alg]]),3)[1,]
  cov_wtd <- round(covFun(lapply(lapply(data,`[[`,"wtd"),`[[`,"coverage"),val_label_results$coverage[[alg]]),3)
  # ambiguity of the weighted (multi-label) assignments
  amb <- ambFun(lapply(data,`[[`,"wtd"),alg=alg)
  # classification performance, naive (percent-based) and weighted variants
  class_measures_naive_pct <- perfBSFun_pct(lapply(data,`[[`,"naive"),data)
  class_measures_wtd <- perfWTDBSFun(lapply(data,`[[`,"wtd"))
  # survival estimation: observed KM curves vs KM curves of predicted classes
  data_obs <- lapply(lapply(data,`[[`,"naive"),`[[`,"obs")
  km_naive_pct <- medWTDBSFun(lapply(lapply(data, `[[`,"naive"),`[[`,"km_pred"),data_obs)
  km_wtd <- medWTDBSFun(lapply(lapply(data,`[[`,"wtd"),`[[`,"km_pred"),data_obs)
  out <- list(class_coverage_naive=cov_naive, class_coverage_wtd=cov_wtd,
  ambiguity=amb, class_measures_naive_pct=class_measures_naive_pct,
  class_measures_wtd=class_measures_wtd, km_naive_pct=km_naive_pct,
  km_wtd=km_wtd)
  return(out)
}
# Reduce each algorithm's bootstrap output to its summary measures, then drop
# the raw bootstrap results to free memory.
bs_measures_list <- map2(bs_out_list, alg_names, ~bsMeasFun(.x,.y))
rm(bs_out_list)
# Persist all development/validation artefacts for downstream reporting.
savepath <- c("~/save_path/")
saveRDS(dev_naive_results, paste0(savepath, "dev_naive_fit_algs", ".RDS"))
saveRDS(dev_wtd_results, paste0(savepath, "dev_wtd_fit_algs", ".RDS"))
saveRDS(data_dev_thlds, paste0(savepath, "thlds", ".RDS"))
saveRDS(val_label_results, paste0(savepath, "val_wtd_cov_amb_in_sample", ".RDS"))
saveRDS(val_naive_sample_coverage, paste0(savepath, "naive_sample_coverage", ".RDS"))
saveRDS(val_naive_pred_in_sample, paste0(savepath, "naive_class_perf_in_sample", ".RDS"))
saveRDS(sample_km_obs, paste0(savepath, "km_obs_in_sample", ".RDS"))
saveRDS(sample_km_pred_naive, paste0(savepath, "km_naive_pred_in_sample", ".RDS"))
saveRDS(bs_measures_list, paste0(savepath, "bs_measures_list", ".RDS"))
|
88070c915494a9246f1271258451304d57dd525c
|
3c6be520713201909819dc62961af9c4aa2d92ab
|
/select.montage.R
|
078f338c9e8efaf890239813ce94368eee8ee3f8
|
[] |
no_license
|
tanyaberde/NS.plots.tidy
|
467672d40312845d56432d4d5ce476b0a4eb6f97
|
a4b3f4225838455b03a88e0e72bf95af69dbf7a7
|
refs/heads/master
| 2020-07-14T23:42:53.085024
| 2019-08-30T17:51:26
| 2019-08-30T17:51:26
| 205,429,131
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 469
|
r
|
select.montage.R
|
### What is your montage? Change switch
### COLLAPSE HEMISPHERE
## FINAL ROIs
# Each ROI pairs a vector of electrode numbers (two hemisphere clusters
# listed side by side, pooled/collapsed across hemispheres) with a label
# used by downstream plotting code.
# NOTE(review): electrode numbering presumably follows the EEG net layout
# used elsewhere in this project — confirm against the cap map.
roi1 <- c(25,21,22,18, 14,10,9,8) ; roi1.name <- "Frontal" ### mediofrontal (P2)
roi2 <- c(54,37,42,53, 79,87,93,86); roi2.name <- "Dorsal" ### centroparietal (P3, dorsal N2)
roi3 <- c(64,58,57,63, 96,100,95,99); roi3.name <- "Ventral" ### inferior occipito-temporal (ventral N2)
roi4 <- c(66,60,59,52, 84,85,91,92); roi4.name <- "Occipitotemporal" ### inferior occipito-temporal (P1,N1)
|
f4a5cb6f74a6a815528381304de72ba68e0dceb6
|
8f2d33ce811c0667ad82056f70a372ead18478f6
|
/R/klientulentele.R
|
69efb9cbcc1acbed991dcc0a2de74ba3bbf8d9d3
|
[] |
no_license
|
Tomas19840823/transportas_v2
|
6806fd39d1bc0eedf9f9a65bab355d08de574724
|
5fdbe20538cdde54750d21fba457749ad5349a9a
|
refs/heads/master
| 2021-01-19T03:49:08.649313
| 2017-04-24T15:25:39
| 2017-04-24T15:25:39
| 87,336,063
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 487
|
r
|
klientulentele.R
|
# Hard-coded client roster for the transport demo: the depot ("Baze") plus
# seven clients with their latitude/longitude and demand weight ("svoris").
# Returns an 8x4 *character* matrix — mixing names and numbers in one matrix
# coerces every column to character, exactly as the original did.
klientulentele <- function() {
  pavadinimas <- c("Baze", paste0("Klientas", 1:7))
  lat <- c(54.6872, 54.7017, 54.6500, 54.6842, 54.6814, 54.8061, 54.7587, 54.4390)
  lon <- c(25.2797, 25.2547, 25.2200, 25.2779, 25.2851, 25.2401, 25.3899, 25.3132)
  svoris <- c(0, 52, 32, 96, 45, 28, 10, 100)
  cbind("Kliento pav" = pavadinimas, lat = lat, lon = lon, svoris = svoris)
}
|
25df74826a4ae3184c1d591309061587e9e630ef
|
e54c3f3d3538c676eff3140f889b8b454ec30324
|
/memorymigration/man/runMissedRuns_res.Rd
|
bd0e5d15c8bc2d24fbb513398aed0ea7830d2dea
|
[] |
no_license
|
EliGurarie/memorymigration
|
acaf4094ba4f580db31bd2b9e25af7bbb8778ca3
|
192d44e030eb73729a7f7c3969cba520ca386177
|
refs/heads/master
| 2023-08-21T17:09:24.312679
| 2021-09-20T16:49:40
| 2021-09-20T16:49:40
| 327,377,347
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 367
|
rd
|
runMissedRuns_res.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RunningModel.R
\name{runMissedRuns_res}
\alias{runMissedRuns_res}
\title{Run Missed Runs Resource}
\usage{
runMissedRuns_res(
world_param,
parameters.df,
resource_param,
world,
resource,
filename = NULL,
results.dir = NULL,
...
)
}
\description{
Run Missed Runs Resource
}
|
a0d509c0c12e656c1f54a80a1ba491b49756c1c7
|
192728e70bb5c6a8fb0ad8f486d7634acb6ee5a1
|
/R/feedly-search-contents.R
|
04e58c193dc2a08593a2754df413167cd12d6a6b
|
[] |
no_license
|
hrbrmstr/seymour
|
1672c6100d6b212d07162c36a3444cecdae675f4
|
83a41922c94d019e91c0f39e325ca7796c02538d
|
refs/heads/master
| 2020-04-13T19:34:40.239426
| 2020-01-22T10:01:12
| 2020-01-22T10:01:12
| 163,406,888
| 18
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,411
|
r
|
feedly-search-contents.R
|
#' Search content of a stream
#'
#' @md
#' @param query a full or partial title string, URL, or `#topic`
#' @param stream_id the id of the stream; a feed id, category id, tag id or a
#'        system collection/category ids can be used as
#'        stream ids. If `NULL` (the default) the server will use the
#'        "`global.all`" (see [global_resource_ids]) collection/category.
#' @param fields if not "`all`" then a character vector of fields to use for
#'        matching. Supported fields are "`title`", "`author`", and "`keywords`".
#' @param embedded if not `NULL` then one of "`audio`", "`video`", "`doc`" or "`any`".
#'        Using this parameter will limit results to also include this media type.
#'        "`any`" means the article _must_ contain at least one embed.
#'        Default behavior (i.e. `NULL`) is to not filter by embedded.
#' @param engagement if not `NULL` then either "`medium`" or "`high`".
#'        Using this parameter will limit results to articles that have the
#'        specified engagement. Default behavior (i.e. `NULL`) is to not
#'        filter by engagement.
#' @param count number of items to return (max 20 for "pro" users); values that
#'        are missing, non-numeric, or out of the 1..20 range fall back to 20
#' @param locale if not `NULL` then a Feedly-recognized locale string (see
#'        `References`) to provide a hint to the search engine to return feeds
#'        in that locale.
#' @param feedly_token Your Feedly Developer Access Token (see [feedly_access_token()])
#' @references (<https://developer.feedly.com/v3/search/>) & [Search Tutorial](https://feedly.uservoice.com/knowledgebase/articles/441699-power-search-tutorial)
#' @seealso feedly_search_title
#' @return list with a data frame element of `results`
#' @export
#' @examples
#' feedly_search_contents("data science")
feedly_search_contents <- function(query,
                                   stream_id = NULL,
                                   fields = "all",
                                   embedded = NULL,
                                   engagement = NULL,
                                   count = 20L,
                                   locale = NULL,
                                   feedly_token = feedly_access_token()) {

  # Clamp `count` to the API's 1..20 window. Fix: a non-numeric `count`
  # (as.integer() -> NA) previously crashed the `ct < 1` comparison with
  # "missing value where TRUE/FALSE needed"; it now falls back to 20.
  ct <- suppressWarnings(as.integer(count[1]))
  if (is.na(ct) || ct < 1 || ct > 20) ct <- 20L

  # Only a single query string is supported.
  query <- query[1]

  # Validate the field selector: either the single sentinel "all", or one or
  # more concrete field names collapsed into a comma-separated string.
  if (length(fields) == 1) {
    fields <- match.arg(fields, c("all", "title", "author", "keywords"))
  } else {
    fields <- match.arg(fields, c("title", "author", "keywords"), several.ok = TRUE)
    fields <- paste0(fields, collapse=",")
  }

  # Optional filters are only validated when supplied.
  if (!is.null(embedded)) {
    embedded <- match.arg(embedded, c("audio", "video", "doc", "any"))
  }
  if (!is.null(engagement)) {
    engagement <- match.arg(engagement, c("medium", "high"))
  }

  # NOTE(review): this hits the /v3/search/feeds endpoint even though the
  # function name and docs describe a *content* search — confirm against the
  # Feedly search API whether this should be /v3/search/contents.
  httr::GET(
    url = "https://cloud.feedly.com/v3/search/feeds",
    .seymour_ua,
    if (!is.null(feedly_token)) {
      httr::add_headers(
        `Authorization` = sprintf("OAuth %s", feedly_token)
      )
    },
    query = list(
      stream_id = stream_id,
      query = query,
      fields = fields,
      embedded = embedded,
      engagement = engagement,
      count = ct,
      locale = locale
    )
  ) -> res

  httr::stop_for_status(res)

  # Parse the JSON payload; tag the results data frame with tibble classes
  # purely for nicer printing (no tibble dependency required here).
  out <- httr::content(res, as="text")
  out <- jsonlite::fromJSON(out)

  if (length(out$results) > 0) {
    if (nrow(out$results) > 0) {
      class(out$results) <- c("tbl_df", "tbl", "data.frame")
    }
  }

  out
}
|
d83e0ca792f484216aa595026c5877de521bc330
|
b6a4b68ec502322a8ba8a9151e67e818cd112cb8
|
/man/StudentRecord.Rd
|
bce5b53c9407ea88d1372376b8167944bb4c10b3
|
[] |
no_license
|
ralmond/EABN
|
ffd67e3ba2e112bf69e42ee5c60eb1e2ec1734c5
|
ff55aa44c756cb6157d907f66b7d54f33766c01c
|
refs/heads/master
| 2023-07-25T13:29:02.241959
| 2023-07-12T20:45:02
| 2023-07-12T20:45:02
| 240,610,408
| 1
| 1
| null | 2023-07-11T22:00:12
| 2020-02-14T22:36:52
|
R
|
UTF-8
|
R
| false
| false
| 3,589
|
rd
|
StudentRecord.Rd
|
\name{StudentRecord}
\alias{StudentRecord}
\title{Constructor for \code{StudentRecord} object}
\description{
This is the constructor for a \code{\linkS4class{StudentRecord}}
object. Basically, this is a wrapper around the student model for the
appropriate user, with meta-data about the evidence that has been
absorbed.
}
\usage{
StudentRecord(uid, context = "", timestamp = Sys.time(), smser = list(),
sm = NULL, stats = list(), hist = list(), evidence = character(),
app = "default", seqno = -1L, prev_id = NA_character_)
}
\arguments{
\item{uid}{A user identifier for the student/player.}
\item{context}{An identifier for the scoring context/window.}
\item{timestamp}{Timestamp of the last evidence set absorbed for this
user. }
\item{smser}{A serialized Bayesian network (see
\code{\link[Peanut]{WarehouseUnpack}}). }
\item{sm}{A \code{\link[Peanut]{Pnet}} containing the student model
(or \code{NULL} if it has not been initialized.}
\item{stats}{A list of statistics calculated for the model.}
\item{hist}{A list of node histories for the measured nodes.}
\item{evidence}{A character vector of ids for the absorbed evidence sets.}
\item{app}{A guid (string) identifying the application.}
\item{seqno}{A sequence number, basically a count of absorbed evidence
sets.}
\item{prev_id}{The database ID of the previous student model.}
}
\value{
An object of class \code{\linkS4class{StudentRecord}}.
}
\author{Russell Almond}
\seealso{
\code{\linkS4class{StudentRecord}}
}
\examples{
%PNetica%\dontrun{#Requires PNetica
library(PNetica)
##Start with manifest
sess <- RNetica::NeticaSession()
RNetica::startSession(sess)
## BNWarehouse is the PNetica Net Warehouse.
## This provides an example network manifest.
config.dir <- file.path(library(help="Peanut")$path, "auxdata")
netman1 <- read.csv(file.path(config.dir,"Mini-PP-Nets.csv"),
row.names=1, stringsAsFactors=FALSE)
net.dir <- file.path(library(help="PNetica")$path, "testnets")
Nethouse <- PNetica::BNWarehouse(manifest=netman1,session=sess,key="Name",
address=net.dir)
dsr <- StudentRecord("*DEFAULT*",app="ecd://epls.coe.fsu.edu/P4Test",
context="*Baseline*")
sm(dsr) <- WarehouseSupply(Nethouse,"miniPP_CM")
PnetCompile(sm(dsr))
## dsr <- updateStats(eng,dsr)
statmat <- read.csv(file.path(config.dir,"Mini-PP-Statistics.csv"),
stringsAsFactors=FALSE)
rownames(statmat) <- statmat$Name
statlist <- sapply(statmat$Name,function (st)
Statistic(statmat[st,"Fun"],statmat[st,"Node"],st))
names(statlist) <- statmat$Name
dsr@stats <- lapply(statlist,
function (stat) calcStat(stat,sm(dsr)))
names(dsr@stats) <- names(statlist)
stat(dsr,"Physics_EAP")
stat(dsr,"Physics_Margin")
## dsr <- baselineHist(eng,dsr)
dsr@hist <- lapply(c("Physics"),
function (nd)
EABN:::uphist(sm(dsr),nd,NULL,"*Baseline*"))
names(dsr@hist) <- "Physics"
history(dsr,"Physics")
## Serialization and unserialization
dsr.ser <- as.json(dsr)
dsr1 <- parseStudentRecord(jsonlite::fromJSON(dsr.ser))
dsr1 <- fetchSM(dsr1,Nethouse)
### dsr and dsr1 should be the same.
stopifnot(
app(dsr)==app(dsr1),
uid(dsr)==uid(dsr1),
context(dsr)==context(dsr1),
# problems with timezones
# all.equal(timestamp(dsr),timestamp(dsr1)),
all.equal(seqno(dsr),seqno(dsr1)),
all.equal(stats(dsr),stats(dsr1),tolerance=.0002),
all.equal(history(dsr,"Physics"),history(dsr1,"Physics")),
PnetName(sm(dsr)) == PnetName(sm(dsr))
)
%PNetica%}
}
\keyword{ graph }
|
6c43812c7fe429413f0b0895fe80e5ddead5e0cb
|
3ee04b4129e86c9218a34f402349649727baa646
|
/man/jtrace_install.Rd
|
efd58811ad1480d6593d21de7093e0041ca513a6
|
[
"MIT"
] |
permissive
|
gongcastro/jtracer
|
c34233cfcebba4dce8e7c5be72f09b626c3573ec
|
ed4126d5a6b92034182eb9e77d6c357453af34c5
|
refs/heads/master
| 2023-09-04T11:31:43.980588
| 2021-10-15T15:37:16
| 2021-10-15T15:37:16
| 365,167,721
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 644
|
rd
|
jtrace_install.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/install.R
\name{jtrace_install}
\alias{jtrace_install}
\title{Download and install jTRACE}
\usage{
jtrace_install(overwrite = FALSE, quiet = FALSE, check_java = FALSE)
}
\arguments{
\item{overwrite}{Logical value indicating whether to replace an existing jTRACE folder, in case one already exists}
\item{quiet}{Should downloading progress not be shown?}
\item{check_java}{Should it be checked that Java is installed?}
}
\description{
Download and install jTRACE
}
\author{
Gonzalo Garcia-Castro \href{mailto:gonzalo.garciadecastro@upf.edu}{gonzalo.garciadecastro@upf.edu}
}
|
d63efdbd4e7f6ae9d2b11ea1e583eec0a7322cf9
|
6b629e8bc4bb0b1c93bb217cb218af5ae5e587c8
|
/gender_differences/read_in_data_gsh.R
|
15082f1a86f5770c2d033a00a1c16806124b47d3
|
[] |
no_license
|
DashaZhernakova/umcg_scripts
|
91b9cbffea06b179c72683145236c39f5ab7f8c2
|
1846b5fc4ae613bec67b2a4dd914733094efdb23
|
refs/heads/master
| 2023-08-31T10:45:17.057703
| 2023-08-23T14:47:43
| 2023-08-23T14:47:43
| 237,212,133
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,982
|
r
|
read_in_data_gsh.R
|
library(rprojroot)
library(tidyverse)
config_path <- "/groups/umcg-lifelines/tmp01/projects/ov20_0051/umcg-dzhernakova/gender_difs/v5/config.yml"
script_folder <- "/groups/umcg-lifelines/tmp01/projects/ov20_0051/umcg-dzhernakova/scripts/umcg_scripts/gender_differences/"
cat("script folder:", script_folder, "\n")
source(paste0(script_folder, "/preprocessing_gam_fitting_functions.R"))
source(paste0(script_folder, "/get_breakpoints.R"))
source(paste0(script_folder, "/additional_functions.R"))
source(paste0(script_folder, "/plotting_functions.R"))
cat("Using config file: ", config_path, "\n")
config <- config::get(file = config_path)
# save the config in results folder
file.copy(config_path, paste0(config$basedir_path, "configs/", config$output_fname, "_cfg.yml"), overwrite = T)
#
# Read data
#
traits_path <- paste0(config$basedir_path, "/", config$traits_path)
pheno_path <- paste0(config$basedir_path, "/", config$pheno_path)
cat("Data paths:\nphenotype traits:", traits_path, "\r\ncovariates:", pheno_path, "\noutput base folder:", config$basedir_path, "\n\n")
# read phenotype traits of interest
traits0 <- read.delim(traits_path, header = T, row.names = 1, sep = "\t", as.is = T, check.names = F)
traits <- sapply(traits0, function(x) as.numeric(as.character(x)))
row.names(traits) <- row.names(traits0)
traits2use <- unlist(strsplit(config$traits2use, ",")) # choose phenotypes to run the analysis for
if (length(traits2use) > 0) {
traits <- as.data.frame(traits[,traits2use, drop = F])
cat("Running the analysis only for a subset of phenotypes: ", paste(traits2use, collapse = ", "), "\n")
}
# read age, gender and other covariate phenotypes
pheno0 <- read.table(pheno_path, header = T, row.names = 1, sep = "\t", as.is = T, check.names = F)
# Covariates: comma-separated names from the config, split into character
# vectors (empty when the config entry is empty).
covariateslinear <- unlist(strsplit(config$covariateslinear, ","))
covariatesnonlinear <- unlist(strsplit(config$covariatesnonlinear, ","))
if (length(covariateslinear) > 0) print(paste0("covariates to add as linear terms in the gam model:", paste(covariateslinear, collapse = ", ")))
# Fix: the non-linear message previously pasted `covariateslinear`; it now
# reports the non-linear covariates it claims to list.
if (length(covariatesnonlinear) > 0) print(paste0("covariates to add as spline non-linear terms in the gam model:", paste(covariatesnonlinear, collapse = ", ")))
phenos2use <- unlist(strsplit(config$phenos2use, ","))
if (length(phenos2use) > 0) {
pheno0 <- pheno0[,c("age", "gender_F1M2", phenos2use)] #choose covariate phenotypes to select from the file
} else {
pheno0 <- pheno0[,c("age", "gender_F1M2", covariateslinear, covariatesnonlinear)]
}
pheno <- na.omit(pheno0)
#order samples in the two tables
traits_m <- traits[match(row.names(pheno), row.names(traits), nomatch = 0 ), , drop = F]
pheno_m <- pheno[match(row.names(traits_m), row.names(pheno), nomatch = 0), ]
all(row.names(traits_m) == row.names(pheno_m))
num_traits <- ncol(traits_m)
#traits_m <- traits_m[order(pheno_m$age), , drop = F]
#pheno_m <- pheno_m[order(pheno_m$age),]
cat("Number of available phenotypes: ", num_traits, "\n")
cat("Number of shared samples: ", nrow(traits_m), "\n")
covariates_before <- unlist(strsplit(config$covariates_before, ","))
if (length(covariates_before) > 0){
print(paste0("Correcting for covariates using linear regression before gam fitting: ", paste(covariates_before, collapse = ", ")))
traits_m <- correct_for_covariates_before(traits_m, pheno_m, covariates_before)
}
pheno_table <- NULL
if ("phenotype_table" %in% names(config)){
pheno_table <- read.delim(config$phenotype_table, sep = "\t", as.is = T, check.names = F)
}
#
# Other parameters
#
nplotspp = config$n_plots_ppage
n_points = config$n_age_points
min_age = config$min_age
max_age = config$max_age
make_plots = config$make_plots
add_breakpoints = config$add_breakpoints
add_inter_p_to_plot = config$add_inter_p_to_plot
plot_title = config$plot_title
outlier_correction = config$outlier_correction_method
outlier_correction_method <- config$outlier_correction_method
log_transform = config$log_transform
scale_transform = config$scale_transform
gam_family = config$gam_family
split_by_covariate = config$split_by_covariate
highlight_positive_in_split = config$highlight_positive_in_split
ttest_cutoff <- config$breakpoints_ttest_cutoff
deriv_cutoff <- config$breakpoints_derivates_cutoff
interp_cutoff <- ifelse("interp_cutoff" %in% names(config), config$interp_cutoff, 0.05)
write_fitted <- ifelse("write_fitted" %in% names(config), config$write_fitted, F)
plot_points <- ifelse("plot_points" %in% names(config), config$plot_points, T)
runCV <- ifelse("run_cross_validation" %in% names(config), config$run_cross_validation, F)
ymax_hist <- ifelse("ymax_hist" %in% names(config), config$ymax_hist, 1)
if ("pheno_to_log" %in% names(config)){
pheno_to_log <- unlist(strsplit(config$pheno_to_log, ","))
} else {
pheno_to_log <- character(0)
}
cat("Phenotypes to log-transform: ", pheno_to_log, "\n")
|
581119c39f2871f9164ae9eac5f81a5abe722e1b
|
2448d4800d4336b53489bcce3c17a32e442a7716
|
/tests/testthat/infrastructure/tests/testthat.R
|
b95508e03b2c528a3f7d5112926ec36710dfa649
|
[] |
no_license
|
vsbuffalo/devtools
|
17d17fd1d2fb620fef8d9883dffed389f80e39fb
|
782e6b071d058eea53aae596a3c120d61df2f0b4
|
refs/heads/master
| 2020-12-24T10:41:24.637105
| 2016-02-18T14:03:05
| 2016-02-18T14:03:05
| 52,121,375
| 2
| 0
| null | 2016-02-19T22:42:43
| 2016-02-19T22:42:43
| null |
UTF-8
|
R
| false
| false
| 72
|
r
|
testthat.R
|
# Standard testthat entry point: attach testthat and the package under test,
# then run every test file under tests/testthat/.
library(testthat)
library(infrastructure)
test_check("infrastructure")
|
e91bb0f368f8c9517f5a4c1ead0da93b0f6ad9bb
|
b742c81dc1128901fbd352c7ecd9a1378c8357ac
|
/checks/mfa_num.R
|
eac930d793370b51bbb6f675f47710e223fc98e8
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
arosas5/prince
|
69e9b69e39dab53884721fec848a4a190136f7d0
|
b05034fffd177ea75d38ab785eacc80a813cbfb6
|
refs/heads/master
| 2023-08-23T15:20:38.998656
| 2021-11-03T23:01:38
| 2021-11-03T23:01:38
| 424,382,589
| 0
| 0
|
MIT
| 2021-11-03T22:57:48
| 2021-11-03T21:14:40
|
Python
|
UTF-8
|
R
| false
| false
| 453
|
r
|
mfa_num.R
|
# Numeric reference run of FactoMineR's MFA on the bundled `wine` dataset:
# prints the eigenvalues, SVD factors (U, V, singular values) and row
# coordinates of the underlying global PCA.
# NOTE(review): this lives under checks/ in a port project, so the printed
# output is presumably diffed against another implementation — keep the
# print statements and their order byte-stable.
library(FactoMineR)
data(wine)
# 29 variables (columns 3:31), analysed as 5 standardised ("s") groups of
# sizes 5/3/10/9/2 named olf/vis/olfag/gust/ens; keep 5 components.
X <- wine[,c(3:31)]
mfa <- MFA(X, group=c(5,3,10,9,2), type=rep("s",5), ncp=5, name.group=c("olf","vis","olfag","gust","ens"), graph=FALSE)
# First five eigenvalues of the global PCA.
print(mfa$global.pca$eig[1:5,])
print("---")
print("U")
print(mfa$global.pca$svd$U[1:5,])
print("---")
print("V")
print(mfa$global.pca$svd$V[1:5,])
print("---")
print("s")
# Full vector of singular values.
print(mfa$global.pca$svd$vs)
print("---")
print("Row coords")
print(mfa$ind$coord[1:5,])
print("---")
|
b3780196bb7d9a2a35e710edda4ae9c48836ba98
|
c555092c911699a657b961a007636208ddfa7b1b
|
/man/ggplotGrob.Rd
|
e96dde94fd9c3b4387322415a480e63c6ba62dae
|
[] |
no_license
|
cran/ggplot2
|
e724eda7c05dc8e0dc6bb1a8af7346a25908965c
|
e1b29e4025de863b86ae136594f51041b3b8ec0b
|
refs/heads/master
| 2023-08-30T12:24:48.220095
| 2023-08-14T11:20:02
| 2023-08-14T12:45:10
| 17,696,391
| 3
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 309
|
rd
|
ggplotGrob.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot-build.R
\name{ggplotGrob}
\alias{ggplotGrob}
\title{Generate a ggplot2 plot grob.}
\usage{
ggplotGrob(x)
}
\arguments{
\item{x}{ggplot2 object}
}
\description{
Generate a ggplot2 plot grob.
}
\keyword{internal}
|
4329cc973c0878a4df02cff34970f0757908f438
|
744080600e2df9d50b27fde5790bc2ddddad61a4
|
/server.R
|
018ba98338d06249cf1b96aa5dc06fd04dbba202
|
[] |
no_license
|
JC-chen0/IEEE-fraud-detection-kaggle
|
3b10b889e9f7f32d64a7ed6eda324d1095d70521
|
fe2167125b0562f7a3f585ec2ef073fdae50a8a9
|
refs/heads/master
| 2023-02-18T21:28:11.817179
| 2021-01-11T18:25:26
| 2021-01-11T18:25:26
| 327,829,522
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,099
|
r
|
server.R
|
library(shiny)
library(rsconnect)
# Tables are read once per R process at app start-up.
transction <- read.csv('train-2.csv', stringsAsFactors = FALSE)
identity <- read.csv('identity.csv', stringsAsFactors = FALSE)
# Shiny server for the IEEE fraud-detection demo: shows random 50-row samples
# of the transaction/identity tables, their str() dumps, static result
# images, and a hard-coded model-comparison table.
server <- function(input, output) {
  # Random 50-row preview of the transaction table.
  # NOTE(review): sample() is not seeded, so the preview differs per session.
  transction2 = transction[sample(nrow(transction), 50), ]
  output$mytable1 <- DT::renderDataTable({
    DT::datatable(transction2, options = list(lengthMenu = c(5, 30, 50), pageLength = 5))
  })
  # Random 50-row preview of the identity table.
  identity2 = identity[sample(nrow(identity), 50), ]
  output$mytable2 <- DT::renderDataTable({
    DT::datatable(identity2, options = list(lengthMenu = c(5, 30, 50), pageLength = 5))
  })
  # Structure dumps of the full tables.
  output$structure1 <- renderPrint({
    str(transction)
  })
  output$structure2 <- renderPrint({
    str(identity)
  })
  # Static images bundled with the app; deleteFile = FALSE keeps them on disk
  # after rendering.
  output$fraud <- renderImage({
    list(src = 'image1.jpeg')
  }, deleteFile = FALSE)
  output$model1 <- renderImage({
    list(src = 'model1.jpeg',
         width = 500,
         height = 500)
  }, deleteFile = FALSE)
  output$model2 <- renderImage({
    list(src = 'model2.jpeg',
         width = 500,
         height = 500)
  }, deleteFile = FALSE)
  output$model3 <- renderImage({
    list(src = 'model3.jpeg',
         width = 500,
         height = 500)
  }, deleteFile = FALSE)
  # Hard-coded performance summary of the four fitted models (values copied
  # from the offline analysis; kept as strings for display only).
  model <- c("KNN", "Naviebayes", "Null", "LGB")
  accuracy <- c("0.9613", "0.774", "0.9425", "0.982")
  precision <- c("0","0.982","0.0314","0.7716")
  sensitivity <- c("0","0.78","0.234","0.033")
  speficity <- c("0.9615","0.615","0.998","0.969")
  recall <- c("0","0.78","0.235","0.0337")
  F1 <- c("0","0.869","0.36","0.0325")
  kappa <- c("-0.00059","0.105","0.3536","0.0029")
  df <- data.frame(model=model,
                   accuracy=accuracy,
                   precision=precision,
                   sensitivity=sensitivity,
                   speficity=speficity,
                   recall=recall,
                   F1=F1,
                   kappa=kappa)
  output$df <- DT::renderDataTable({
    DT::datatable(df)
  })
}
|
096d134133351fd7a8f0d41286a23936c09cdc61
|
0e41cfa523fc0f183d49557027656ceae25d33fb
|
/plot2.R
|
52f01b76bbd738a1e48f1632fd08c709cb7645c8
|
[] |
no_license
|
MadApe/ExData_Plotting1
|
a597d643ed0ec7cb7fb34fbd1f5e0e8f2c35f80e
|
f49c21291d4afb240553e644d40c40c29db8ab3c
|
refs/heads/master
| 2021-01-24T00:43:51.575565
| 2018-02-25T05:22:12
| 2018-02-25T05:22:12
| 122,777,346
| 0
| 0
| null | 2018-02-24T20:32:11
| 2018-02-24T20:32:11
| null |
UTF-8
|
R
| false
| false
| 2,231
|
r
|
plot2.R
|
# plot2.R — Exploratory Data Analysis assignment, plot 2: download/unzip the
# UCI household power-consumption data if needed, subset to the two-day
# window 2007-02-01..2007-02-02, and save a line chart of Global Active
# Power over time to plot2.png (480x480 px).
# load libraries
library(data.table)
# initialize source and destination variables of the data files
wd <- getwd()
data_url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
data_dir <- file.path(wd, "data/")
data_zip <- file.path(data_dir, "household_power_consumption.zip")
data_txt <- file.path(data_dir, "household_power_consumption.txt")
plot_png <- file.path(wd, "plot2.png")
# initialize the observation start/end date variables (dd/mm/yyyy format)
observation_start <- strptime("01/02/2007", "%d/%m/%Y")
observation_end <- strptime("02/02/2007", "%d/%m/%Y")
# create a directory for the data if one doesn't exist
if (!file.exists(data_dir)) {
  dir.create(data_dir)
}
# download the data file if it isn't already there
# NOTE(review): `download_date` is recorded but never used afterwards.
if (!file.exists(data_zip)) {
  cat("Downloading data zip file ...\n")
  download_date <- Sys.time()
  download.file(data_url, data_zip)
}
# unzip the file if the download is there and hasn't been unzipped
if (file.exists(data_zip) & !file.exists(data_txt)) {
  cat("Unzipping the data zip file ...\n")
  unzip(zipfile = data_zip, exdir = data_dir)
}
# read the file into a data.table; "?" marks missing values in this dataset
cat("Reading the data...\n")
dt <- fread(data_txt, header = TRUE, na.strings = "?", stringsAsFactors = FALSE)
# subset the data.table to include only the date range in which we are interested
cat("Subsetting the data...\n")
power_dt <- subset(dt, strptime(Date, "%d/%m/%Y") >= observation_start & strptime(Date, "%d/%m/%Y") <= observation_end)
# create a vector of datetimes by pasting the Date and Time fields together and converting using strptime
datetimes <- with(power_dt, strptime(paste(Date, Time), format = "%d/%m/%Y %H:%M:%S"))
# bind the datetimes column to the power_dt data table
power_dt <- cbind(datetimes, power_dt)
# open the PNG Graphic Device and set the size
cat("Plotting the data...\n")
png(plot_png, units = "px", width = 480, height = 480)
# create a line chart of Global Active Power over time and set the y-axis label appropriately
with(power_dt, plot(datetimes, Global_active_power, type = "l", xlab = "", ylab = "Global Active Power (kilowatts)"))
# close the PNG graphic device
dev.off()
cat("Complete!\nPlot file located at: ", plot_png, "\n", sep = "")
|
cb6e2a1c0de2afee3c4f1eca2f5dff26b6d78ad1
|
fa32c05f7b8cdcefd719e23001c52ee6a3e59015
|
/initial_EDA.R
|
c0e8bee16be1c3ecb2e584a7b6e38558c22c5f5b
|
[] |
no_license
|
atthegates25/ML_Lab
|
a02ae652a6bed9656fc355af603ed5d65f2fa5b3
|
43ea36b5fe5132f57113aa8dc1720b0f5a4810bd
|
refs/heads/master
| 2020-03-26T12:22:24.740750
| 2018-09-28T05:02:44
| 2018-09-28T05:02:44
| 144,889,046
| 0
| 3
| null | 2018-08-16T00:36:41
| 2018-08-15T18:27:42
|
R
|
UTF-8
|
R
| false
| false
| 4,128
|
r
|
initial_EDA.R
|
# initial_EDA.R — exploratory analysis of superstore Orders/Returns data:
# load + clean the tables, plot inventory movement, and summarise returns.
# Idiom fixes relative to the original: `<-` instead of `=` for assignment,
# TRUE instead of the reassignable shorthand T, and colSums(is.na(.)) instead
# of apply() over a data frame; logic and outputs are unchanged.
library(data.table)

orders <- fread('../../data/Orders.csv', stringsAsFactors = TRUE)
returns <- fread('../../data/Returns.csv', stringsAsFactors = TRUE)

names(orders)  # check column names
names(returns) # check column names
names(returns)[names(returns) == 'Order ID'] <- 'Order.ID' # rename "Order ID" to Order.ID

# Strip "$" and "," before converting currency columns from factor to numeric.
orders$Sales <- as.numeric(gsub('[$,]', '', as.character(orders$Sales)))
orders$Profit <- as.numeric(gsub('[$,]', '', as.character(orders$Profit)))

# Parse dates, then derive month/year factors for grouping/plotting.
orders$Order.Date <- as.Date(as.character(orders$Order.Date), format = '%m/%d/%y')
orders$Ship.Date <- as.Date(as.character(orders$Ship.Date), format = '%m/%d/%y')
orders$Month <- as.factor(month(orders$Order.Date)) # add column for month of order date
orders$Year <- as.factor(year(orders$Order.Date))   # add column for year of order date

str(orders) # inspect structure
summary(orders)
levels(orders$Customer.Name)[grep('Kevin', levels(orders$Customer.Name))]
str(returns)
colSums(is.na(orders)) # see which columns have NAs
head(orders)

library(tidyverse)

# plot decrease in total inventory by month grouping by year
orders %>%
  group_by(., Month, Year) %>%
  summarise(., Decrease_Inventory = sum(Quantity)) %>%
  ggplot(., aes(x = Month, y = Decrease_Inventory)) +
  geom_bar(aes(fill = Year), stat = 'identity', position = 'dodge')

# plot average yearly decrease in inventory by month and category
orders %>%
  group_by(., Month, Year, Category) %>%
  summarise(., Decrease_Inventory = sum(Quantity)) %>%
  group_by(., Month, Category) %>%
  summarise(., Avg_Decrease_Inventory = mean(Decrease_Inventory)) %>%
  ggplot(., aes(x = Month, y = Avg_Decrease_Inventory)) +
  geom_bar(aes(fill = Category), stat = 'identity', position = 'dodge')

# merge order and return data frames by order id and region (keep all orders)
orders_returns <- merge(x = orders, y = returns, by = c('Order.ID', 'Region'), all.x = TRUE)

# plot total profit lost from returns by year
orders_returns %>%
  filter(., Returned == 'Yes') %>%
  group_by(., Year) %>%
  summarise(., Profit_Lost = sum(Profit)) %>%
  ggplot(., aes(x = Year, y = Profit_Lost)) +
  geom_bar(aes(fill = Year), stat = 'identity')

# number of customers who returned more than once
orders_returns %>%
  filter(., Returned == 'Yes') %>%
  group_by(., Customer.ID, Order.ID) %>%
  summarise(.) %>%
  group_by(., Customer.ID) %>%
  summarise(., num_returns = n()) %>%
  filter(., num_returns > 1) %>%
  nrow(.)

# num customers who returned more than 5 times
orders_returns %>%
  filter(., Returned == 'Yes') %>%
  group_by(., Customer.ID, Order.ID) %>%
  summarise(.) %>%
  group_by(., Customer.ID) %>%
  summarise(., num_returns = n()) %>%
  filter(., num_returns > 5) %>%
  nrow(.)

# regions most likely to return an order (by num returns)
orders_returns %>%
  filter(., Returned == 'Yes') %>%
  group_by(., Region, Order.ID) %>%
  summarise(.) %>%
  group_by(., Region) %>%
  summarise(., num_returns = n()) %>%
  arrange(., desc(num_returns))

# regions most likely to return an order (by return %)
orders_returns %>%
  mutate(., Return_Yes = ifelse(is.na(Returned), 0, 1)) %>%
  group_by(., Region, Order.ID) %>%
  summarise(., return = mean(Return_Yes)) %>%
  group_by(., Region) %>%
  summarise(., pct_return = sum(return) / n()) %>%
  arrange(., desc(pct_return))

# category/sub.category most likely to be returned (by % of orders returned)
orders_returns %>%
  mutate(., Return_Yes = ifelse(is.na(Returned), 0, 1)) %>%
  group_by(., Category, Sub.Category, Order.ID) %>%
  summarise(., return = mean(Return_Yes)) %>%
  group_by(., Category, Sub.Category) %>%
  summarise(., pct_return = sum(return) / n()) %>%
  arrange(., desc(pct_return))

# category/sub.category most likely to be returned (by % of quantity returned)
orders_returns %>%
  mutate(., Return_Yes = ifelse(is.na(Returned), 0, 1)) %>%
  group_by(., Category, Sub.Category) %>%
  summarise(., pct_returned = sum(Return_Yes) / n()) %>%
  arrange(., desc(pct_returned))
|
99b48a73d234aee7e0cf640de643d5a110362eb2
|
2cb5dbfc14e6e24eeed4e846a0aaec35506547e3
|
/man/loadcsv_multi.Rd
|
470a0d57ab2b4f4fdfe5436a68c610fedbf00443
|
[] |
no_license
|
cran/easycsv
|
1e0cbb4fed5da0855b63e8abb21df167699bc351
|
c1c711c67d397ba2f5bbf9b686d58e4811fade8a
|
refs/heads/master
| 2021-01-15T12:37:08.933963
| 2018-05-21T18:03:30
| 2018-05-21T18:03:30
| 99,650,455
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,464
|
rd
|
loadcsv_multi.Rd
|
\name{loadcsv_multi}
\alias{loadcsv_multi}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
%% ~~function to do ... ~~
read multiple csv files into named data frames
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
Reads multiple files in table format and creates a data frame from them, with cases corresponding to lines and variables to fields in the file.
}
\usage{
loadcsv_multi(directory = NULL,
extension = "CSV",
encoding = "Latin-1",
stringsAsFactors = FALSE,
header = TRUE,
quote = "\"",
fill = TRUE,
comment.char = "")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{directory}{
%% ~~Describe \code{x} here~~
a directory to load the files from, if NULL then a manual choice is provided on windows OS.
}
\item{extension}{
character. Which file extension(s) to load: "CSV" (the default), "TXT", or "BOTH" to read both .csv and .txt files (see the example).
}
\item{encoding}{
character. files encoding. default is Latin-1
}
\item{stringsAsFactors}{
logical: should character vectors be converted to factors? Note that this is overridden by as.is and colClasses, both of which allow finer control.
}
\item{header}{
a logical value indicating whether the files contain the names of the variables as its first line. If missing, the value is determined from the file format: header is set to TRUE if and only if the first row contains one fewer field than the number of columns.
}
\item{quote}{
the set of quoting characters. To disable quoting altogether, use quote = "". See scan for the behavior on quotes embedded in quotes. Quoting is only considered for columns read as character, which is all of them unless colClasses is specified.
}
\item{fill}{
logical. If TRUE then in case the rows have unequal length, blank fields are implicitly added.
}
\item{comment.char}{
character: a character vector of length one containing a single character or an empty string. Use "" to turn off the interpretation of comments altogether.
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
loadcsv_multi is used for uncompressed files in a single folder. It can be used either by entering the local directory the files are in, or by running it with no arguments for manual folder selection on Windows OS.
It receives some arguments from read.csv and they are listed in the arguments section.
loadcsvfromZIP is used for comma separated tables inside of a .zip file.
loadZIPcsvfromURL is used for comma separated tables inside of a .zip file on the internet, no download needed.
}
\value{
A \link[base]{data.frame} containing a representation of the data in the file.
}
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
\link[easycsv]{loadZIPcsvfromURL}
\link[easycsv]{loadcsvfromZIP}
}
\examples{
require(easycsv)
directory = getwd()
table1 <- data.frame(matrix(1:9, nrow = 3))
write.csv(table1, file = file.path(directory,"/table1.csv"))
write.csv(table1, file = file.path(directory,"/table2.txt"))
loadcsv_multi(directory, extension = "BOTH")
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~utilities }
\keyword{ ~misc }
|
6c38895c3e4facb51dfee4674be4dd4b00a92fd9
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/DGVM3D/examples/triClose.Rd.R
|
7dfb525e82431cba7b00d18fa5a35ea6f577df90
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,289
|
r
|
triClose.Rd.R
|
# Extracted example script for DGVM3D::triClose (fill a polygon with triangles).
library(DGVM3D)
### Name: triClose
### Title: fill a polygon (number of vertices) with triangles
### Aliases: triClose
### ** Examples

# Demo 1: one random-sized polygon per triangulation method.
par(mfrow=c(2,2))
for (m in c("plan", "fix", "center", "")) {
  faces <- sample(12:20, 1)
  # Vertices of a regular polygon on the unit circle.
  vertices <- sapply(seq(0, 2*pi*(faces-1)/faces, length.out=faces),
                     function(x){c(sin(x), cos(x))})
  tri = triClose(faces, method=m)
  if (m == "center") {
    # The "center" method references an extra centroid vertex; give the NA
    # slots index faces+1 and append the centroid as that vertex.
    tri[is.na(tri)] = faces + 1
    vertices = cbind(vertices, c(mean(vertices[1,]), mean(vertices[2, ])))
  }
  plot(vertices[1,1:faces], vertices[2,1:faces], type="b")
  text(x=1.05*vertices[1,], y=1.05*vertices[2,], labels=1:faces, adj=0.5)
  # Draw each triangle (consecutive triplets of vertex indices) in a random colour.
  for (i in seq(1, length(tri), 3))
    polygon(vertices[1,tri[i:(i+2)]], vertices[2,tri[i:(i+2)]],
            col=rgb(runif(1), runif(1), runif(1)))
}
# Demo 2: varying polygon size.
par(mfrow=c(2,2))
for (faces in c(6, 12, 13, 25)) {
  vertices <- sapply(seq(0, 2*pi*(faces-1)/faces, length.out=faces),
                     function(x){c(sin(x), cos(x))})
  # NOTE(review): `m` here is left over from the previous loop (its final value
  # is ""), i.e. this relies on loop-variable leakage — confirm the intended
  # method before reusing this snippet standalone.
  tri = triClose(faces, method=m)
  plot(vertices[1,], vertices[2,], type="b")
  text(x=1.05*vertices[1,], y=1.05*vertices[2,], labels=1:faces, adj=0.5)
  for (i in seq(1, length(tri), 3))
    polygon(vertices[1,tri[i:(i+2)]], vertices[2,tri[i:(i+2)]],
            col=rgb(runif(1), runif(1), runif(1)))
}
|
988bf261ec79c2426c248bd9e7791db0143c5911
|
b4cbfd634adf53ffc75a51eeec93e41c5ba4f5ac
|
/classifier/naive_bayes.R
|
eab39b5804012bd84744c0279d1f560182edd3a0
|
[] |
no_license
|
riskimidiw/tripadvisor-sentimentr
|
d4be9b8b7e7f008b77890f3e0e59764efb8c8e7f
|
6efe566da53d42265183e52f52d8fcd44cc088d1
|
refs/heads/master
| 2022-09-21T11:19:38.896724
| 2020-06-05T00:30:10
| 2020-06-05T00:30:10
| 268,198,441
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,910
|
r
|
naive_bayes.R
|
# Package imports for the naive Bayes sentiment classifier.
library(dplyr)
library(tidyverse)
library(tm)     # corpus handling and document-term matrices
library(e1071)  # naiveBayes()
library(caret)  # confusionMatrix()
# Paths where the selected features and the fitted model are persisted.
features_rds_path = "classifier/features.rds"
naive_bayes_rda_path = "classifier/naive_bayes.rda"
# Membersihkan data dan merubah data menjadi bentuk corpus
# Turn a character vector of documents into a cleaned tm corpus:
# lower-case, strip numbers, drop stopwords, strip punctuation and
# collapse repeated whitespace — in that order.
clean_data <- function(data) {
  doc_set <- VCorpus(VectorSource(data))
  doc_set <- tm_map(doc_set, content_transformer(tolower))
  doc_set <- tm_map(doc_set, removeNumbers)
  doc_set <- tm_map(doc_set, removeWords, stopwords())
  doc_set <- tm_map(doc_set, removePunctuation)
  doc_set <- tm_map(doc_set, stripWhitespace)
  doc_set
}
# Menerapkan features dan mengubah data menjadi document term matrix
# Build a document-term matrix restricted to the given feature dictionary,
# then recode each column's counts to "No"/"Yes" via convert_count.
apply_feature <- function(corpus, features) {
  restricted_dtm <- DocumentTermMatrix(
    corpus,
    control = list(dictionary = features)
  )
  apply(restricted_dtm, 2, convert_count)
}
# Mengubah jumlah kemunculan kata menjadi "Yes" dan "No"
# Recode raw term counts to presence/absence: any positive count becomes the
# factor level "Yes", zero becomes "No". NA inputs stay NA.
convert_count <- function(x) {
  factor(
    as.integer(x > 0),
    levels = c(0, 1),
    labels = c("No", "Yes")
  )
}
# Traning naive bayes model
train_model <- function() {
# Membaca training dataset
file_path <- "dataset/tripadvisor-restauran-traning-dataset.txt"
data.source <- read_delim(file_path, delim = "\t")
# Menambahkan kolom kelas pada data frame
data.source$sentiment <- ifelse(data.source$score > 0, "Positive", "Negative")
# Mengubah data menjadi factor
data.source$sentiment <- as.factor(data.source$sentiment)
# Mengacak data agar tidak berurutan
set.seed(1)
data.source <- data.source[sample(nrow(data.source)),]
# Pembersihan data
data.corpus <- clean_data(data.source$review)
# Mengubah data corpus menjadi document term matrix
data.dtm <- DocumentTermMatrix(data.corpus)
# Rasio perbandingan antara data training dengan data testing
training_ratio = 0.8
# Memecah data menjadi data training dan data testing
data.source.total <- nrow(data.source)
data.source.train <- data.source[1 : round(training_ratio * data.source.total),]
data.source.test <- data.source[(round(training_ratio * data.source.total) + 1) : data.source.total,]
data.corpus.total <- length(data.corpus)
data.corpus.train <- data.corpus[1 : round(training_ratio * data.corpus.total)]
data.corpus.test <- data.corpus[(round(training_ratio * data.corpus.total) + 1) : data.corpus.total]
data.dtm.total <- nrow(data.dtm)
data.dtm.train <- data.dtm[1 : round(training_ratio * data.dtm.total),]
data.dtm.test <- data.dtm[(round(training_ratio * data.dtm.total) + 1) : data.dtm.total,]
# Mengambil kata yang sering muncul, minimal 3 kali
freq_terms <- findFreqTerms(data.dtm.train, 3)
length(freq_terms)
# Save features yang sudah dibuat
saveRDS(freq_terms, file = features_rds_path)
# Mengaplikasikan fungsi convert_count untuk mendapatkan hasil training dan testing DTM
data.dtm.train <- apply_feature(data.corpus.train, freq_terms)
data.dtm.test <- apply_feature(data.corpus.test, freq_terms)
# Membuat model naive bayes
model <- naiveBayes(data.dtm.train, data.source.train$sentiment, laplace = 1)
# Save Model yang sudah dibuat agar bisa dipakai di Shiny
save(model, file = naive_bayes_rda_path)
# Membuat prediksi
prediction <- predict(model, newdata = data.dtm.test)
# Mengecek akurasi dari model yang telah dibuat
result <- confusionMatrix(table(Prediction = prediction, Actual = data.source.test$sentiment))
result
}
# Prediksi sentimen
predict_sentiment <- function(review) {
features <- readRDS(features_rds_path)
model <- get(load(naive_bayes_rda_path))
data.corpus <- clean_data(review)
data.test <- apply_feature(data.corpus, features = features)
prediction <- predict(model, newdata = data.test)
return(data.frame(review = review, sentiment = prediction))
}
# Hapus komentar untuk traning data
# train_model()
|
79e602a5cb9fed9f8251f09729f4ca1657be4771
|
4c72e92a6fd6a2830ac7513bb7de071bb6cd6eb5
|
/GoogleChartDemo_global.R
|
7b51c23258a6a0283d24d08c3f60f6e36d7441d9
|
[] |
no_license
|
ATLAS-CITLDataAnalyticsServices/ShinyDataVisualization
|
79bc1648d13bae66c9dbb0a3197fbcd702bdac29
|
94121c3382db8a3c3842aa729027c71bbc7b93b9
|
refs/heads/master
| 2021-01-09T05:27:13.834978
| 2017-02-02T21:46:43
| 2017-02-02T21:46:43
| 80,771,041
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,310
|
r
|
GoogleChartDemo_global.R
|
##############################################
# CITL Analytics Winter Project 2016-2017 #
# Liqun Zeng #
# #
# Data Visualization: #
# Shiny Google Charts #
# #
# Using Coursera Practice Click Stream Data #
##############################################
# Install:
# install.packages("stringr")
# install.packages('plyr')
library(stringr)
setwd("~/Dropbox/RA_CITL/WinterProject/clickStream01")
NewData <- read.csv("NewData.csv")
NewData2 <- NewData[!is.na(NewData$timecode),]
NewData2$video_name <- str_trim(NewData2$video_name, side = "both")
attach(NewData2)
KeyForNewData2 = cbind(aggregate(key=="download_subtitle", by=list(NewData2$illinois_user_id, NewData2$video_name), sum),
aggregate(key=="end", by=list(NewData2$illinois_user_id, NewData2$video_name), sum)[,3],
aggregate(key=="heartbeat", by=list(NewData2$illinois_user_id, NewData2$video_name), sum)[,3],
aggregate(key=="pause", by=list(NewData2$illinois_user_id, NewData2$video_name), sum)[,3],
aggregate(key=="play", by=list(NewData2$illinois_user_id, NewData2$video_name), sum)[,3],
aggregate(key=="playback_rate_change", by=list(NewData2$illinois_user_id, NewData2$video_name), sum)[,3],
aggregate(key=="seek", by=list(NewData2$illinois_user_id, NewData2$video_name), sum)[,3],
aggregate(key=="start", by=list(NewData2$illinois_user_id, NewData2$video_name), sum)[,3],
aggregate(key=="subtitle_change", by=list(NewData2$illinois_user_id, NewData2$video_name), sum)[,3],
aggregate(key=="volume_change", by=list(NewData2$illinois_user_id, NewData2$video_name), sum)[,3],
aggregate(key=="wait", by=list(NewData2$illinois_user_id, NewData2$video_name), sum)[,3])
names(KeyForNewData2) = c("UserID","Video", "Delete", "end", "heartbeat","pause","play","playback_rate_change","seek","start",
"subtitle_change","volume_change","wait")
detach(NewData2)
KeyForNewData2 = KeyForNewData2[,-3]
KeyForNewData2$Secs = KeyForNewData2$heartbeat * 5
###################################################
## select 12 videos for the example
## and transform the format of the data
# select 12 videos
video.list <- unique(KeyForNewData2$Video)[9:20]
# select observations from the 12 videos
KeyForNewData2.12 <- KeyForNewData2[sapply(KeyForNewData2$Video,function(x) any(video.list==x)),]
# transform the data into datasets for each second:
# NOTE(review): attach()/detach() puts the data frame's columns on the search
# path (so bare `Secs` below resolves to KeyForNewData2.12$Secs); this is
# error-prone — with() or explicit $-access would be safer.
attach(KeyForNewData2.12)
KeyForNewData2.12 <- KeyForNewData2.12[order(Secs),]
secs.list <- sort(unique(Secs))
# Reshape one time-slice of the click data into long form: 9 rows per user
# (one per event status code 1..9), padded so every video in the global
# video.list appears, with missing counts filled with 0.
transformData <- function(x) {
  # Flatten the 9 click-count columns row-wise into one vector.
  click <- as.vector(t(data.matrix(x[,c(3,5:12)])))
  data <- data.frame(user=rep(x$UserID,each=9),
                     #status=rep(c("end","pause","play","playback_rate_change","seek","start",
                     #             "subtitle_change","volume_change","wait"),length(x$Video)),
                     status=rep(1:9,length(x$Video)),
                     click,
                     video=rep(x$Video,each=9))
  # Outer-join against the full video list so absent videos get (NA) rows.
  data.big <- data.frame(video=video.list)
  data <- merge(data,data.big,by="video",all=TRUE)
  # merge() placed `video` first; copy it to the end and drop the original
  # so the column order becomes user, status, click, video.
  data[,5] <- data[,1]
  data <- data[,2:5]
  data[is.na(data)] <- 0
  names(data) <- c("user","status","click","video")
  return(data)
}
# Build one long-format data frame per distinct Secs value, rows ordered by video.
data.list=list()
for(i in 1:length(secs.list)) {
  data.list[[i]] <- KeyForNewData2.12[Secs==secs.list[i],]
  data.list[[i]] <- transformData(data.list[[i]])
  data.list[[i]] <- data.list[[i]][order(as.character(data.list[[i]]$video)),]
}
names(data.list) <- as.character(secs.list)
detach(KeyForNewData2.12)
### Overview of the datasets and variables used in this script:
#names(NewData2)
#[1] "X" "illinois_user_id" "key" "timecode" "video_name"
#unique(NewData2$key)
#[1] heartbeat seek play wait pause
#[6] start download_subtitle end volume_change playback_rate_change
#[11] subtitle_change download_video
|
e0175ce92e39b50a8d15447322da60e9df30c520
|
3d2dd369a1beb4ae1886ac0347eadcca6905020b
|
/tests/testthat.R
|
2d8c5da5e2e1fe26017a50b65547c6b08593a5c2
|
[
"MIT"
] |
permissive
|
sstoeckl/pensionfinanceLi
|
1f5c501644d26cb293f7e9ad7d18ea1f90d420e9
|
ba9be9cee4381b766ac41e719257ec603d584c5c
|
refs/heads/master
| 2021-07-05T08:26:44.942479
| 2020-11-30T14:39:57
| 2020-11-30T14:39:57
| 207,890,628
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 76
|
r
|
testthat.R
|
# Standard testthat bootstrap: discovers and runs all tests under
# tests/testthat/ for the pensionfinanceLi package (invoked by R CMD check).
library(testthat)
library(pensionfinanceLi)
test_check("pensionfinanceLi")
|
9f021203619fc776a14ad17b46a248b379ce1e69
|
8e503e16eba5103da436c67a684360b013e8f78d
|
/Final_Project_Files/sentiment_classification.R
|
e618fb8684d922b1a44567f91a82d8270ca9cec1
|
[] |
no_license
|
adamsjt13/Stock-Sentiment
|
10178629a2b6f91fef2eac6314657219d53a2761
|
29592c973831cea6415432e412e8bdf5a830a4cf
|
refs/heads/master
| 2020-04-14T18:32:24.414700
| 2019-01-03T21:23:11
| 2019-01-03T21:23:11
| 164,022,874
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,909
|
r
|
sentiment_classification.R
|
# ---- Load raw article and stock data ----------------------------------------
# NOTE(review): rm(list = ls()) and a hard-coded setwd() make this script
# destructive to the caller's workspace and non-portable; acceptable for a
# one-off analysis, but avoid in reusable code.
rm(list = ls())
setwd("~/Documents/BZAN_583_Text_Mining/FinalProject/stocks/Final_Project_Files")
intel_news <- read.csv("articles_for_intel.csv", stringsAsFactors = FALSE)
intel_stock_data <- read.csv("INTL_stock_data.csv")
apple_news <- read.csv("articles_for_apple.csv", stringsAsFactors = FALSE)
apple_stock_data <- read.csv("AAPL_stock_data.csv")
facebook_news <- read.csv("articles_for_facebook.csv", stringsAsFactors = FALSE)
facebook_stock_data <- read.csv("FB_stock_data.csv")
############ build classifier ############
# NOTE(review): require() returns FALSE rather than erroring when a package
# is missing; library() would fail fast here.
require(stringr)
require(rvest)
require(ngram)
require(tm)
require(SnowballC)
require(AUC)
require(e1071)
require(randomForest)
### combine all company articles
combined_articles <- rbind(intel_news,apple_news,facebook_news)
### remove date
combined_articles$Date <- NULL
### removed quotes
combined_articles$Article <- gsub('"', '', combined_articles$Article)
### create tags from sentiment score for classifier
# Score >= 0 is labelled 'pos', otherwise 'neg'.
combined_articles$Sentiment <- ifelse(combined_articles$Sentiment >= 0, 'pos','neg')
### convert to ASCII to remove weird symbols
combined_articles$Article <- iconv(combined_articles$Article, to = "ASCII//TRANSLIT")
### make corpus of symptom text for clustering
articles_corp <- Corpus(VectorSource(combined_articles$Article))
### clean corpus and make DTM
dtm <- DocumentTermMatrix(articles_corp, control=list(removePunctuation = TRUE,
                                                      removeNumbers = TRUE,
                                                      tolower = TRUE,
                                                      stemming = TRUE,
                                                      stopwords = stopwords("SMART"),
                                                      minDocFreq=1,
                                                      minWordLength = 1))
### Reduce sparse terms
dtm_dense <- removeSparseTerms(dtm, 0.99)
### Weighting Terms by TF-IDF
dtm_tfxidf <- suppressWarnings(weightTfIdf(dtm_dense))
# trainind <- allind[1:round(length(allind)/3)]
# valind <- allind[(round(length(allind)/3)+1):round(length(allind)*(2/3))]
# testind <- allind[round(length(allind)*(2/3)+1):length(allind)]
### basetable
# Dense document-term matrix used as the model design matrix.
basetable <- as.matrix(dtm_tfxidf)
### class
y <- factor(combined_articles$Sentiment)
# One row per evaluation scheme; filled in by the split/CV sections below.
results_df <- data.frame("SVM AUC" = rep(0,5),
                         "SVM ACC" = rep(0,5),
                         "SVM CORRECT" = rep(0,5),
                         "NB AUC" = rep(0,5),
                         "NB ACC" = rep(0,5),
                         "NB CORRECT" = rep(0,5),
                         "RF AUC" = rep(0,5),
                         "RF ACC" = rep(0,5),
                         "RF CORRECT" = rep(0,5))
rownames(results_df) <- c("80/20","70/30","5Fold","10Fold","15Fold")
################### 80/20 split ###################
bins <- cut(seq(1,nrow(basetable)),breaks=10,labels=FALSE)
allind <- sample(x=1:nrow(basetable),size=nrow(basetable))
trainind <- allind[which(bins %in% 1:4)]
valind <- allind[which(bins %in% 5:8)]
testind <- allind[which(bins %in% 9:10)]
basetabletrain <- basetable[trainind,]
basetableval <- basetable[valind,]
basetabletest <- basetable[testind,]
basetabletrainbig <- rbind(basetabletrain,basetableval)
ytrain <- y[trainind]
yval <- y[valind]
ytest <- y[testind]
ytrainbig <- factor(c(as.character(ytrain),as.character(yval)))
### SVM 80/20 split
SV.cost <- 2^(-5:-4)
SV.gamma <- 2^(-15:-14)
SV.degree <- c(1,2)
SV.kernel <- c('polynomial')
parameters <- expand.grid("Cost" = SV.cost,
"Gamma" = SV.gamma,
"Degree" = SV.degree,
"Kernel" = SV.kernel)
aucstore <- numeric(nrow(parameters))
for(i in 1:nrow(parameters)){
start <- Sys.time()
model <- svm(basetabletrain,
ytrain,
type = "C-classification",
probability = TRUE,
kernel = parameters$Kernel[i],
degree = parameters$Degree[i],
cost = parameters$Cost[i],
gamma = parameters$Gamma[i])
pred_prob <- predict(model, basetableval, decision.values = TRUE, probability = TRUE)
print(i)
aucstore[i] <- AUC::auc(roc(pred_prob,yval))
}
optimal <- parameters[which.max(aucstore),]
model <- svm(basetabletrainbig,
ytrainbig,
type = "C-classification",
probability = TRUE,
kernel = optimal$Kernel,
degree = optimal$Degree,
cost = optimal$Cost,
gamma = optimal$Gamma)
pred_prob <- predict(model, basetabletest, decision.values = TRUE, probability = TRUE)
x <- table(pred_prob,ytest)
(svm_auc <- AUC::auc(roc(pred_prob,ytest)))
results_df["80/20","SVM.AUC"] <- AUC::auc(roc(pred_prob,ytest))
results_df["80/20","SVM.ACC"] <- sum(diag(x))/sum(x)
results_df["80/20","SVM.CORRECT"] <- sum(diag(x))
### NB
allind <- sample(x=1:nrow(basetable),size=nrow(basetable))
trainind <- allind[which(bins %in% 1:8)]
testind <- allind[-trainind]
basetabletrain <- basetable[trainind,]
basetabletest <- basetable[testind,]
ytrain <- y[trainind]
ytest <- y[testind]
NB <- naiveBayes(x=basetabletrain, y=ytrain)
predNB <- predict(NB,basetabletest, type = "class", threshold = 0.001)
NB_table <- table(predNB,ytest)
nb_accuracy <- sum(diag(NB_table)) / sum(NB_table)
(nb_auc <- AUC::auc(roc(predNB,ytest)))
results_df["80/20","NB.AUC"] <- AUC::auc(roc(predNB,ytest))
results_df["80/20","NB.ACC"] <- sum(diag(NB_table))/sum(NB_table)
results_df["80/20","NB.CORRECT"] <- sum(diag(NB_table))
### RF
rf <- randomForest(x = basetabletrain,
y = ytrain,
ntree=500)
predrf <- predict(rf, basetabletest, type="class")
rf_table <- table(predrf, ytest)
rf_accuracy <- sum(diag(rf_table)) / sum(rf_table)
(rf_auc <- AUC::auc(roc(predrf, ytest)))
results_df["80/20","RF.AUC"] <- AUC::auc(roc(predrf,ytest))
results_df["80/20","RF.ACC"] <- sum(diag(rf_table))/sum(rf_table)
results_df["80/20","RF.CORRECT"] <- sum(diag(rf_table))
write.csv(results_df, "results.csv")
################### 70/20 split ###################
bins <- cut(seq(1,nrow(basetable)),breaks=10,labels=FALSE)
allind <- sample(x=1:nrow(basetable),size=nrow(basetable))
trainind <- allind[which(bins %in% 1:4)]
valind <- allind[which(bins %in% 5:7)]
testind <- allind[which(bins %in% 8:10)]
basetabletrain <- basetable[trainind,]
basetableval <- basetable[valind,]
basetabletest <- basetable[testind,]
basetabletrainbig <- rbind(basetabletrain,basetableval)
ytrain <- y[trainind]
yval <- y[valind]
ytest <- y[testind]
ytrainbig <- factor(c(as.character(ytrain),as.character(yval)))
### SVM 70/30 split
SV.cost <- 2^(-5:-4)
SV.gamma <- 2^(-15:-14)
SV.degree <- c(1,2)
SV.kernel <- c('polynomial')
parameters <- expand.grid("Cost" = SV.cost,
"Gamma" = SV.gamma,
"Degree" = SV.degree,
"Kernel" = SV.kernel)
aucstore <- numeric(nrow(parameters))
for(i in 1:nrow(parameters)){
start <- Sys.time()
model <- svm(basetabletrain,
ytrain,
type = "C-classification",
probability = TRUE,
kernel = parameters$Kernel[i],
degree = parameters$Degree[i],
cost = parameters$Cost[i],
gamma = parameters$Gamma[i])
pred_prob <- predict(model, basetableval, decision.values = TRUE, probability = TRUE)
print(i)
aucstore[i] <- AUC::auc(roc(pred_prob,yval))
}
optimal <- parameters[which.max(aucstore),]
model <- svm(basetabletrainbig,
ytrainbig,
type = "C-classification",
probability = TRUE,
kernel = optimal$Kernel,
degree = optimal$Degree,
cost = optimal$Cost,
gamma = optimal$Gamma)
pred_prob <- predict(model, basetabletest, decision.values = TRUE, probability = TRUE)
x <- table(pred_prob,ytest)
(svm_auc <- AUC::auc(roc(pred_prob,ytest)))
results_df["70/30","SVM.AUC"] <- AUC::auc(roc(pred_prob,ytest))
results_df["70/30","SVM.ACC"] <- sum(diag(x))/sum(x)
results_df["70/30","SVM.CORRECT"] <- sum(diag(x))
### NB
allind <- sample(x=1:nrow(basetable),size=nrow(basetable))
trainind <- allind[which(bins %in% 1:7)]
testind <- allind[-trainind]
basetabletrain <- basetable[trainind,]
basetabletest <- basetable[testind,]
ytrain <- y[trainind]
ytest <- y[testind]
NB <- naiveBayes(x=basetabletrain, y=ytrain)
predNB <- predict(NB,basetabletest, type = "class", threshold = 0.001)
NB_table <- table(predNB,ytest)
nb_accuracy <- sum(diag(NB_table)) / sum(NB_table)
(nb_auc <- AUC::auc(roc(predNB,ytest)))
results_df["70/30","NB.AUC"] <- AUC::auc(roc(predNB,ytest))
results_df["70/30","NB.ACC"] <- sum(diag(NB_table))/sum(NB_table)
results_df["70/30","NB.CORRECT"] <- sum(diag(NB_table))
### RF
rf <- randomForest(x = basetabletrain,
y = ytrain,
ntree=500)
predrf <- predict(rf, basetabletest, type="class")
rf_table <- table(predrf, ytest)
rf_accuracy <- sum(diag(rf_table)) / sum(rf_table)
(rf_auc <- AUC::auc(roc(predrf, ytest)))
results_df["70/30","RF.AUC"] <- AUC::auc(roc(predrf,ytest))
results_df["70/30","RF.ACC"] <- sum(diag(rf_table))/sum(rf_table)
results_df["70/30","RF.CORRECT"] <- sum(diag(rf_table))
write.csv(results_df, "results.csv")
### cross fold validation
numbreaks <- c(5,10,15)
for(j in numbreaks){
bins <- cut(seq(1,nrow(basetable)),breaks=j,labels=FALSE)
svm_temp_auc <- svm_temp_acc <- svm_temp_correct <-
nb_temp_auc <- nb_temp_acc <- nb_temp_correct <-
rf_temp_auc <- rf_temp_acc <- rf_temp_correct <- numeric(j)
print(paste0(j,"-fold validation"))
for(i in 1:j){
print(paste0("Fold: ",i))
allind <- sample(x=1:nrow(basetable),size=nrow(basetable))
alltrainind <- allind[which(bins != i)]
trainind <- alltrainind[1:round(length(alltrainind)/2)]
valind <- alltrainind[(round(length(alltrainind)/2)+1):length(alltrainind)]
testind <- allind[which(bins == i)]
basetabletrain <- basetable[trainind,]
basetableval <- basetable[valind,]
basetabletest <- basetable[testind,]
basetabletrainbig <- rbind(basetabletrain,basetableval)
ytrain <- y[trainind]
yval <- y[valind]
ytest <- y[testind]
ytrainbig <- factor(c(as.character(ytrain),as.character(yval)))
### SVM
SV.cost <- 2^(-5:-4)
SV.gamma <- 2^(-15:-14)
SV.degree <- c(1,2)
SV.kernel <- c('polynomial')
parameters <- expand.grid("Cost" = SV.cost,
"Gamma" = SV.gamma,
"Degree" = SV.degree,
"Kernel" = SV.kernel)
aucstore <- numeric(nrow(parameters))
for(k in 1:nrow(parameters)){
model <- svm(basetabletrain,
ytrain,
type = "C-classification",
probability = TRUE,
kernel = parameters$Kernel[k],
degree = parameters$Degree[k],
cost = parameters$Cost[k],
gamma = parameters$Gamma[k])
pred_prob <- predict(model, basetableval, decision.values = TRUE, probability = TRUE)
x <- table(pred_prob,yval)
aucstore[k] <- AUC::auc(roc(pred_prob,yval))
print(paste0("SVM Parameter: ",k))
}
optimal <- parameters[which.max(aucstore),]
model <- svm(basetabletrainbig,
ytrainbig,
type = "C-classification",
probability = TRUE,
kernel = optimal$Kernel,
degree = optimal$Degree,
cost = optimal$Cost,
gamma = optimal$Gamma)
pred_prob <- predict(model, basetabletest, decision.values = TRUE, probability = TRUE)
x <- table(pred_prob,ytest)
(svm_auc <- AUC::auc(roc(pred_prob,ytest)))
svm_temp_auc[i] <- AUC::auc(roc(pred_prob,ytest))
svm_temp_acc[i] <- sum(diag(x))/sum(x)
svm_temp_correct[i] <- sum(diag(x))
### new sample for just test/train
allind <- sample(x=1:nrow(basetable),size=nrow(basetable))
trainind <- allind[which(bins != i)]
testind <- allind[which(bins == i)]
basetabletrain <- basetable[trainind,]
basetabletest <- basetable[testind,]
ytrain <- y[trainind]
ytest <- y[testind]
### NB
print("Naive Bayes")
NB <- naiveBayes(x=basetabletrain, y=ytrain)
predNB <- predict(NB,basetabletest, type = "class", threshold = 0.001)
NB_table <- table(predNB,ytest)
nb_accuracy <- sum(diag(NB_table)) / sum(NB_table)
(nb_auc <- AUC::auc(roc(predNB,ytest)))
nb_temp_auc[i] <- AUC::auc(roc(predNB,ytest))
nb_temp_acc[i] <- sum(diag(NB_table))/sum(NB_table)
nb_temp_correct[i] <- sum(diag(NB_table))
### RF
print("Random Forest")
rf <- randomForest(x = basetabletrain,
y = ytrain,
ntree=500)
predrf <- predict(rf, basetabletest, type="class")
rf_table <- table(predrf, ytest)
rf_accuracy <- sum(diag(rf_table)) / sum(rf_table)
(rf_auc <- AUC::auc(roc(predrf, ytest)))
rf_temp_auc[i] <- AUC::auc(roc(predrf,ytest))
rf_temp_acc[i] <- sum(diag(rf_table))/sum(rf_table)
rf_temp_correct[i] <- sum(diag(rf_table))
}
if(j == 5){
# results_df["5Fold","SVM.AUC"] <- median(svm_temp_auc)
# results_df["5Fold","SVM.ACC"] <- median(svm_temp_acc)
# results_df["5Fold","SVM.CORRECT"] <- median(svm_temp_correct)
results_df["5Fold","NB.AUC"] <- median(nb_temp_auc)
results_df["5Fold","NB.ACC"] <- median(nb_temp_acc)
results_df["5Fold","NB.CORRECT"] <- median(nb_temp_correct)
results_df["5Fold","RF.AUC"] <- median(rf_temp_auc)
results_df["5Fold","RF.ACC"] <- median(rf_temp_acc)
results_df["5Fold","RF.CORRECT"] <- median(rf_temp_correct)
} else if(j == 10){
# results_df["10Fold","SVM.AUC"] <- median(svm_temp_auc)
# results_df["10Fold","SVM.ACC"] <- median(svm_temp_acc)
# results_df["10Fold","SVM.CORRECT"] <- median(svm_temp_correct)
results_df["10Fold","NB.AUC"] <- median(nb_temp_auc)
results_df["10Fold","NB.ACC"] <- median(nb_temp_acc)
results_df["10Fold","NB.CORRECT"] <- median(nb_temp_correct)
results_df["10Fold","RF.AUC"] <- median(rf_temp_auc)
results_df["10Fold","RF.ACC"] <- median(rf_temp_acc)
results_df["10Fold","RF.CORRECT"] <- median(rf_temp_correct)
} else {
# results_df["15Fold","SVM.AUC"] <- median(svm_temp_auc)
# results_df["15Fold","SVM.ACC"] <- median(svm_temp_acc)
# results_df["15Fold","SVM.CORRECT"] <- median(svm_temp_correct)
results_df["15Fold","NB.AUC"] <- median(nb_temp_auc)
results_df["15Fold","NB.ACC"] <- median(nb_temp_acc)
results_df["15Fold","NB.CORRECT"] <- median(nb_temp_correct)
results_df["15Fold","RF.AUC"] <- median(rf_temp_auc)
results_df["15Fold","RF.ACC"] <- median(rf_temp_acc)
results_df["15Fold","RF.CORRECT"] <- median(rf_temp_correct)
}
write.csv(results_df, "results.csv")
}
################### SAVE MODEL WITH HIGHEST PERFORMANCE (RF WITH 80/20 SPLIT) ###################
# Refit the best-performing configuration (random forest on an 80/20 split)
# on a fresh split and persist the fitted model for reuse.
### basetable
basetable <- as.matrix(dtm_tfxidf)
### class
y <- factor(combined_articles$Sentiment)
################### 80/20 split ###################
bins <- cut(seq(1,nrow(basetable)),breaks=10,labels=FALSE)
allind <- sample(x=1:nrow(basetable),size=nrow(basetable))
trainind <- allind[which(bins %in% 1:8)]
# BUG FIX: the original used `testind <- allind[-trainind]`, which removes the
# *positions* named by the values in trainind. Because allind is a random
# permutation, that result can overlap trainind (train/test leakage). Select
# the complementary bins instead, matching the earlier 80/20 section's style.
testind <- allind[which(bins %in% 9:10)]
basetabletrain <- basetable[trainind,]
basetabletest <- basetable[testind,]
ytrain <- y[trainind]
ytest <- y[testind]
### RF
# 500 trees, default mtry.
rf <- randomForest(x = basetabletrain,
                   y = ytrain,
                   ntree=500)
predrf <- predict(rf, basetabletest, type="class")
rf_table <- table(predrf, ytest)
# Parenthesised assignments print accuracy and AUC as a side effect.
(rf_accuracy <- sum(diag(rf_table)) / sum(rf_table))
(rf_auc <- AUC::auc(roc(predrf, ytest)))
save(rf,file = "rf.RData")
|
5aaf425a7c682e5bfc85c18691216bdf6436186f
|
d434ec91242aad694c4e2d78580b60a9da3ce29a
|
/R/display_selected_code_comments.R
|
1e2f90cb2e79faebef630ef4d0f475a21bcfe06b
|
[
"BSD-3-Clause",
"LGPL-3.0-only",
"GPL-1.0-or-later",
"GPL-3.0-only",
"GPL-2.0-only",
"LGPL-2.0-only",
"MIT"
] |
permissive
|
rmsharp/rmsutilityr
|
01abcdbc77cb82eb4f07f6f5d8a340809625a1c5
|
d5a95e44663e2e51e6d8b0b62a984c269629f76c
|
refs/heads/master
| 2021-11-20T08:45:23.483242
| 2021-09-07T17:28:22
| 2021-09-07T17:28:22
| 97,284,042
| 0
| 2
|
MIT
| 2021-09-07T17:28:22
| 2017-07-15T01:17:14
|
R
|
UTF-8
|
R
| false
| false
| 3,310
|
r
|
display_selected_code_comments.R
|
#' Displays selected comments
#'
#' @returns Dataframe of selected comments with the base file name, the
#' comment label, the comment start line, and the comment text, rendered
#' as a styled \pkg{kableExtra} table.
#'
#' Internally uses the \code{list.files} function with the \code{path} and
#' \code{pattern} arguments as defined in the call. Other arguments to
#' \code{list.files} are forced as follows:
#' \describe{
#'   \item{all.files}{TRUE}
#'   \item{full.names}{TRUE}
#'   \item{recursive}{TRUE}
#'   \item{ignore.case}{FALSE}
#'   \item{include.dirs}{FALSE}
#'   \item{no..}{FALSE}
#' }
#' The user is free to create the list of files anyway desired and provide them
#' to the \code{path} argument.
#' @examples
#' files = system.file("testdata", "find_html_comment_test_file_1.Rmd",
#'                     package = "rmsutilityr")
#' display_selected_code_comments(path = dirname(files),
#'                                pattern = "Rmd",
#'                                label = "RMS")
#'
#' @param path a character vector of full path names; the default corresponds to
#' the working directory, getwd(). Tilde expansion (see path.expand) is
#' performed. Missing values will be ignored.
#' Elements with a marked encoding will be converted to the native encoding
#' (and if that fails, considered non-existent). Defaults to ".".
#' @param pattern an optional regular expression. Only file names which match the
#' regular expression will be returned.
#' @param label Optional regex expression that can be used to limit the
#' comments found by adding each element of the character vector in turn
#' immediately after "<!--" in the regex expression. The resulting logical
#' vectors are OR'd together to combine their results.
#' @importFrom kableExtra kbl kable_styling column_spec
#' @importFrom stringi stri_c
#' @export
display_selected_code_comments <-
  function(path = ".",
           pattern = NULL,
           label = "") {
    # Collect candidate files; case-sensitive, recursive search.
    files <-
      list.files(
        path = path,
        pattern = pattern,
        all.files = TRUE,
        full.names = TRUE,
        recursive = TRUE,
        ignore.case = FALSE,
        include.dirs = FALSE,
        no.. = FALSE
      )
    # Extract HTML comments (file, label, start line, text) from each file.
    html_comment_lines_and_labels <-
      get_html_comment_text_lines_and_labels_from_files(files, label = label)
    # Underscores are escaped so the caption renders correctly in LaTeX.
    caption <-
      stri_c("Output of the ",
             "get\\_html\\_comment\\_text\\_lines\\_and\\_labels\\_from\\_files ",
             "function includes text of comments from selected ",
             "comment labels.")
    selected_code_comments <-
      html_comment_lines_and_labels[, c("file",
                                        "comment_label",
                                        "comment_start_line",
                                        "comment_text")]
    # Render LaTeX output when knitting to PDF, HTML otherwise.
    kbl(
      selected_code_comments,
      format = ifelse(knitr::is_latex_output(), "latex", "html"),
      booktabs = TRUE,
      caption = caption,
      row.names = FALSE,
      col.names = c("File", "Label", "Start", "Text"),
      longtable = TRUE
    ) %>%
      kable_styling(
        latex_options = c("repeat_header", "striped"),
        font_size = ifelse(knitr::is_latex_output(), 8, 12)
      ) %>%
      column_spec(1, width = "15em") %>%
      column_spec(2, width = "5em") %>%
      column_spec(3, width = "5em") %>%
      column_spec(4, width = "25em")
  }
|
1686b8ac344d8810b3f3fabee9f130b8b8905064
|
c4010945565fedf0c3da444545ce94b85df8790e
|
/man/E4.4.Rd
|
4e47568018b7b3eebe279e5f42f274d72b623845
|
[] |
no_license
|
cran/SenSrivastava
|
adc924ed2e4a3068a65b8347d7418f96008f9612
|
e834ccc473ed498c093df2a27c5a7633da46442e
|
refs/heads/master
| 2016-09-06T04:57:59.016453
| 2015-06-25T00:00:00
| 2015-06-25T00:00:00
| 17,693,664
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,347
|
rd
|
E4.4.Rd
|
\name{E4.4}
\alias{E4.4}
\title{ Measures of Quality for Agencies Delivering Transportation for
the Elderly and the Handicapped }
\concept{Measures of Quality for Agencies Delivering Transportation for the Elderly and the Handicapped }
\usage{data(E4.4)}
\description{
The \code{E4.4} data frame has 40 rows and 3 columns.
}
\format{
This data frame contains the following columns:
\describe{
\item{QUAL}{
a numeric vector, a quality measure made using psychometric methods from results
of questionnaires.
}
\item{X.1}{
a numeric vector, an indicator variable for private ownership.
}
\item{X.2}{
a numeric vector, an indicator variable for private for profit ownership.
}
}
}
\details{
The quality data, \code{QUAL}, is constructed from questionnaires given
to users of such services in the state of Illinois. Multiple services
in the state of Illinois were scored using this method. The indicator variables
were constructed to give first (\code{X.1}) a comparison between private
and public services, then (\code{X.2}) a comparison between private
not-for-profit and private for profit services.
}
\source{
Slightly modified version of data supplied by Ms. Claire McKnight of
the Department of Civil Engineering, City University of New York.
}
\examples{
data(E4.4)
summary(E4.4)
}
\keyword{datasets}
\concept{regression}
|
db65b0229f0c2b0682418ab693a7f6e64e56d6e4
|
bf6201100e252d2636b2668a1fc682e71adb74ea
|
/R/oilCard.R
|
31713d5cfc6a906f6322ca7a6876db01426c20af
|
[] |
no_license
|
takewiki/caaspkg
|
0ed9721cc0d39b587ffa6df478ea403db80d7e72
|
f3ffede1785e4e9f8b05553271e8073386c9a714
|
refs/heads/master
| 2023-01-15T10:34:08.561659
| 2020-11-24T05:05:47
| 2020-11-24T05:05:47
| 259,510,309
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,540
|
r
|
oilCard.R
|
#' Look up fuel-card (oil card) order records by keyword
#'
#' Queries the t_ic_oilCard table, returning rows whose Taobao ID (FTBId),
#' order id (FOrderId) or order phone (FOrderPhone) equals the keyword.
#' The Chinese aliases in the SQL become the result's column names.
#'
#' @param conn Database connection; defaults to \code{tsda::conn_rds('nsic')}.
#' @param FKeyWord Search keyword, matched exactly against FTBId, FOrderId
#'   and FOrderPhone.
#'
#' @return A data.frame of matching rows.
#' @export
#'
#' @examples
#' oildCard_selectDB()
oildCard_selectDB <- function(conn=tsda::conn_rds('nsic'),FKeyWord='ljiang1469') {
  # NOTE(review): FKeyWord is pasted directly into the SQL string, so this is
  # vulnerable to SQL injection; prefer a parameterized query if the driver
  # supports one.
  sql <- paste0("SELECT FOrderSouce 订单来源渠道
,FTBId 淘宝ID
,FOrderId 订单号
,FLiYu 礼遇
,FDealerName 经销商名称
,FOrderPhone 拍单手机号
,FCar 车型
,FTmallOrderTime 天猫下单时间
,FLMSStatus LMS下发状态
,FLMSOrderTime LMS下单时间
,FLMSNewStatus LMS最新状态
,FChannelSource 渠道来源
,FVIN 车架号
,FVerificationStatus 核销情况
,FTmallOrderTimeBeforeLMS 天猫下单时间早于LMS下单时间
,FJudgeFavorableComments 是否好评
,FJudgeRules_Cause 是否符合礼遇领取规则
,FExtendGiftTime 礼包预计发放时间
,FExtendGiftDelivery 礼包发放单号
,Faddress 收货地址
FROM t_ic_oilCard
where FTBId = '",FKeyWord,"' or FOrderId ='",FKeyWord,"' or FOrderPhone ='",FKeyWord,"'")
  #print()
  res <- tsda::sql_select(conn,sql)
  return(res)
}
#' Fetch every fuel-card order record
#'
#' Same SELECT as oildCard_selectDB() but without a WHERE clause: returns
#' the whole t_ic_oilCard table with Chinese display-name aliases.
#'
#' @param conn Open database connection (defaults to the 'nsic' RDS database).
#'
#' @return A data frame with one row per stored order.
#' @export
#'
#' @examples
#' oildCard_selectDB_all()
oildCard_selectDB_all <- function(conn=tsda::conn_rds('nsic')) {
sql <- paste0("SELECT FOrderSouce 订单来源渠道
,FTBId 淘宝ID
,FOrderId 订单号
,FLiYu 礼遇
,FDealerName 经销商名称
,FOrderPhone 拍单手机号
,FCar 车型
,FTmallOrderTime 天猫下单时间
,FLMSStatus LMS下发状态
,FLMSOrderTime LMS下单时间
,FLMSNewStatus LMS最新状态
,FChannelSource 渠道来源
,FVIN 车架号
,FVerificationStatus 核销情况
,FTmallOrderTimeBeforeLMS 天猫下单时间早于LMS下单时间
,FJudgeFavorableComments 是否好评
,FJudgeRules_Cause 是否符合礼遇领取规则
,FExtendGiftTime 礼包预计发放时间
,FExtendGiftDelivery 礼包发放单号
,Faddress 收货地址
FROM t_ic_oilCard")
#print()
res <- tsda::sql_select(conn,sql)
return(res)
}
#' Build a customer-service reply message for a fuel-card order
#'
#' Looks up the order by Taobao ID, order number or phone number and
#' formats a Chinese customer-service message: a detailed status summary
#' when the gift has not shipped yet, a shipping confirmation with the
#' tracking number when it has, and a "we are verifying" message when no
#' matching order is found.
#'
#' NOTE(review): FKeyWord is concatenated into the SQL text (injection risk
#' for untrusted input), and if the query ever returns more than one row the
#' scalar `if` below still only makes sense for the first row -- confirm the
#' three keys are unique in t_ic_oilCard.
#'
#' @param conn Open database connection (defaults to the 'nsic' RDS database).
#' @param FKeyWord Keyword matched against FTBId, FOrderId and FOrderPhone.
#'
#' @return A character scalar holding the reply message.
#' @export
#'
#' @examples
#' oildCard_selectDB2()
oildCard_selectDB2 <- function(conn=tsda::conn_rds('nsic'),FKeyWord='ljiang1469') {
sql <- paste0("SELECT
FTBId
,FOrderId
,FLiYu
,FOrderPhone
,FCar
,FTmallOrderTime
,FLMSStatus
,FLMSOrderTime
,FLMSNewStatus
,FVIN
,FVerificationStatus
,FTmallOrderTimeBeforeLMS
,FJudgeFavorableComments
,FJudgeRules_Cause
,FExtendGiftTime
,FExtendGiftDelivery
,Faddress
,FRemarks
FROM t_ic_oilCard
where FTBId = '",FKeyWord,"' or FOrderId ='",FKeyWord,"' or FOrderPhone ='",FKeyWord,"'")
#print()
data <- tsda::sql_select(conn,sql)
ncount <- nrow(data)
if(ncount >0){
# FIX: use short-circuit || (not elementwise |) in this scalar `if`:
# when FExtendGiftDelivery is NA the length check is skipped entirely,
# and R >= 4.3 errors on |-produced length>1 conditions.
if(is.na(data$FExtendGiftDelivery) || tsdo::len(data$FExtendGiftDelivery) ==0 ){
# No tracking number yet: return the full status summary.
msg <- paste0("TMALL ID: ",tsdo::na_replace(data$FTBId,""),"\n",
"订单号: ",tsdo::na_replace(data$FOrderId,""),"\n",
"礼遇: ",tsdo::na_replace(data$FLiYu,""),"\n",
"拍单手机号: ",tsdo::na_replace(data$FOrderPhone,""),"\n",
"车型: ",tsdo::na_replace(data$FCar,""),"\n",
"天猫下单时间: ",tsdo::na_replace(data$FTmallOrderTime,""),"\n",
"LMS下发状态: ",tsdo::na_replace(data$FLMSStatus,""),"\n",
"LMS下单时间: ",tsdo::na_replace(data$FLMSOrderTime,""),"\n",
"LMS最新状态: ",tsdo::na_replace(data$FLMSNewStatus,""),"\n",
"车架号: ",tsdo::na_replace(data$FVIN,""),"\n",
"核销情况: ",tsdo::na_replace(data$FVerificationStatus,""),"\n",
"天猫下单时间早于LMS下单时间: ",tsdo::na_replace(data$FTmallOrderTimeBeforeLMS,""),"\n",
"是否好评: ",tsdo::na_replace(data$FJudgeFavorableComments,""),"\n",
"是否符合礼遇领取规则或原因:",tsdo::na_replace(data$FJudgeRules_Cause,""),"\n",
"礼包预计发放时间: ",tsdo::na_replace(data$FExtendGiftTime,""),"\n",
"地址: ",tsdo::na_replace(data$Faddress,""),"\n",
"备注: ",tsdo::na_replace(data$FRemarks,""),"\n",
"很抱歉,这边已经将您的问题反馈至专员,目前还未收到专员回复,专员回复后我们这边会第一时间截图并留言给到您,给您带来不便,请见谅。"
)
}else{
# Gift already shipped: reply with the tracking number.
msg <- paste0("亲,经查询,您的订单号:",data$FOrderId,",目前礼包已发放,物流单号为:",data$FExtendGiftDelivery)
}
}else{
# No matching order found.
msg <- paste0("亲,您的订单我们正在核实,请稍等")
}
return(msg)
}
#' Normalise one raw delivery-date cell value
#'
#' Excel exports mix numeric date serials (days since 1899-12-30) with
#' free text in the delivery-date column. This helper converts a numeric
#' serial to an ISO "YYYY-MM-DD" string, passes plain text through
#' unchanged, and maps NA or empty input to "".
#'
#' @param x One raw value from the delivery-date column.
#'
#' @return A character scalar.
#'
#' @examples
#' oilCard_formatDeliverDate()
oilCard_formatDeliverDate <- function(x) {
if(is.na(x)){
# Missing cell -> empty string.
res <-""
}else{
# Non-NA: decide between Excel serial number and free text.
nlen <- tsdo::len(x)
if(nlen >0){
# FIX: as.numeric() on non-numeric text returns NA with an
# "NAs introduced by coercion" *warning*, not an error, so the
# previous try() wrapper never caught anything and the warning
# leaked to the caller; suppressWarnings() is the correct guard.
value <- suppressWarnings(as.numeric(x))
if(is.na(value)){
# Not a number: keep the original text as-is.
res <- x
}else{
# Excel serial date -> Date -> ISO text (Excel epoch 1899-12-30).
res <- as.character(as.Date(value,origin='1899-12-30'))
}
}else{
# Zero-length text -> empty string.
res <-""
}
}
return(res)
}
#' Normalise a whole vector of raw delivery-date values
#'
#' Vectorised wrapper around oilCard_formatDeliverDate(): applies the
#' scalar cleaner to every element and flattens the result back into a
#' character vector.
#'
#' @param data Vector (or list) of raw delivery-date values.
#'
#' @return A character vector, one cleaned value per input element.
#' @export
#'
#' @examples
#' oilCard_formatDeliverDates()
oilCard_formatDeliverDates <- function(data){
cleaned <- lapply(data, oilCard_formatDeliverDate)
return(unlist(cleaned))
}
#' Read the fuel-card Excel export into the internal column layout
#'
#' Reads the spreadsheet with explicit column types (26 columns; column 9
#' numeric, column 12 a date, the rest text), keeps the first 25 columns,
#' renames them to the internal F-prefixed schema used by t_ic_oilCard,
#' and normalises the delivery-date and phone/time columns to character.
#'
#' @param file Path to the Excel export.
#'
#' @return A data frame (tibble) ready for tsda::db_writeTable().
#' @export
#'
#' @examples
#' oilCard_readExcel()
oilCard_readExcel <- function(file="data-raw/oilCardData.xlsx") {
#library(readxl)
oilCardData <- readxl::read_excel(file, col_types = c("text", "text", "text",
"text", "text", "text", "text", "text",
"numeric", "text", "text", "date",
"text", "text", "text", "text", "text",
"text", "text", "text", "text", "text",
"text", "text", "text", "text"))
# Keep only the 25 expected columns (trailing columns are ignored).
data <-oilCardData[ ,1:25]
# Rename to the internal F-prefixed schema, in spreadsheet column order.
col_names <- c('FOrderSouce',
'FTBId',
'FOrderId',
'FLiYu',
'FDealerID',
'FDealerProvince',
'FDealerCity',
'FDealerName',
'FOrderPhone',
'FCar',
'FImportLocal',
'FTmallOrderTime',
'FLMSStatus',
'FLMSOrderTime',
'FLMSNewStatus',
'FChannelSource',
'FVIN',
'FVerificationStatus',
'FTmallOrderTimeBeforeLMS',
'FJudgeFavorableComments',
'FJudgeRules_Cause',
'FExtendGiftTime',
'FExtendGiftDelivery',
'Faddress',
'FRemarks')
names(data) <- col_names
# Delivery dates arrive as mixed Excel serials / free text; normalise.
data$FExtendGiftTime <- oilCard_formatDeliverDates(data$FExtendGiftTime)
# Phone numbers were read as numeric -- store as text to keep leading zeros.
data$FOrderPhone <- as.character(data$FOrderPhone)
data$FTmallOrderTime <- as.character(data$FTmallOrderTime)
return(data)
}
#' Archive and clear the live fuel-card table
#'
#' Copies every row of t_ic_oilCard into the archive table t_ic_oilCardDel,
#' then deletes all rows from the live table. Called before re-importing a
#' fresh Excel export (see oilCard_writeDB()).
#'
#' NOTE(review): the two statements are not wrapped in a transaction -- if
#' the DELETE fails after the INSERT, archived rows will be duplicated on
#' the next run. Confirm whether tsda exposes transactional execution.
#'
#' @param conn Open database connection (defaults to the 'nsic' RDS database).
#'
#' @return Called for its side effects on the database.
#' @export
#'
#' @examples
#' oilCard_backup_del()
oilCard_backup_del <- function(conn=tsda::conn_rds('nsic')) {
# Archive: copy the full live table into t_ic_oilCardDel.
sql_bak <- paste0("INSERT INTO [dbo].[t_ic_oilCardDel]
([FOrderSouce]
,[FTBId]
,[FOrderId]
,[FLiYu]
,[FDealerID]
,[FDealerProvince]
,[FDealerCity]
,[FDealerName]
,[FOrderPhone]
,[FCar]
,[FImportLocal]
,[FTmallOrderTime]
,[FLMSStatus]
,[FLMSOrderTime]
,[FLMSNewStatus]
,[FChannelSource]
,[FVIN]
,[FVerificationStatus]
,[FTmallOrderTimeBeforeLMS]
,[FJudgeFavorableComments]
,[FJudgeRules_Cause]
,[FExtendGiftTime]
,[FExtendGiftDelivery]
,[Faddress]
,[FRemarks]
)
select * from t_ic_oilCard ")
tsda::sql_update(conn,sql_bak)
# Then empty the live table.
sql_del <- paste0("delete from t_ic_oilCard")
tsda::sql_update(conn,sql_del)
}
#' Refresh the fuel-card table from an Excel export
#'
#' Archives and clears the existing t_ic_oilCard rows, parses the Excel
#' export into the internal column layout, then appends the fresh rows.
#' Called for its side effects on the database.
#'
#' @param file Path to the Excel export.
#' @param conn Open database connection (defaults to the 'nsic' RDS database).
#'
#' @export
#'
#' @examples
#' oilCard_writeDB()
oilCard_writeDB <- function(file="data-raw/oilCardData.xlsx",conn=tsda::conn_rds('nsic')){
# Back up the current rows into t_ic_oilCardDel, then empty the live table.
oilCard_backup_del(conn = conn)
# Parse the spreadsheet into the canonical F-prefixed schema.
fresh_rows <- oilCard_readExcel(file = file)
# Append the fresh rows to the (now empty) live table.
tsda::db_writeTable(conn = conn, table_name = 't_ic_oilCard', r_object = fresh_rows, append = TRUE)
}
|
19f52c79056294e92fdbeaefe5ef8a655769ae07
|
1a9ad356a301a467f99b3ae09bb958d28ae6d20b
|
/indicator_heterogeneity_I/exploratory/12_compile.R
|
2f0915c12afa21cbdf28476df59018f9aed4972b
|
[] |
no_license
|
kateharwood/covidcast-modeling
|
03d98f7cbc2aa5ce2ef851c2ca7905e9d0cb9826
|
5e44da23e1f39ca74647b67a842dc057d7589198
|
refs/heads/main
| 2023-07-17T20:51:52.416970
| 2021-08-26T19:38:14
| 2021-08-26T19:38:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 330
|
r
|
12_compile.R
|
#!/usr/bin/Rscript
# Render the heterogeneity notebook once per geography level, writing one
# HTML report per level (..._county.html and ..._state.html).
geo_levels <- c("county", "state")
for (geo in geo_levels) {
  report_name <- sprintf("12_heterogeneity_longer_time_window_%s.html", geo)
  rmarkdown::render("12_heterogeneity_longer_time_window.Rmd",
                    params = list(geo_value = geo),
                    output_file = report_name)
}
|
fe72ac114cb161992b046c5712c19184da05ab6e
|
9cf3b2ed512749a257001170e3adf509b748d75b
|
/ySequencing.r
|
ee9f7589432e537072cd1bdb42b0eee7a50b4c2c
|
[] |
no_license
|
yh86/R_Library
|
81eee992ba39771356d2224285234e311d775919
|
f28503bd813e97250fecea0be4a4f0b98262251c
|
refs/heads/master
| 2021-01-21T19:28:53.958332
| 2018-02-22T21:05:26
| 2018-02-22T21:05:26
| 26,069,045
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,105
|
r
|
ySequencing.r
|
getSamSigGene <- function(samr_siggene_table=NULL) {
#
# FUNCTION
# stack the up- and down-regulated gene tables produced by
# samr::samr.compute.siggenes.table into one data frame, tagging each
# row with its direction ("up"/"down") and normalising column names:
# trailing dots stripped, remaining dots turned into underscores, and
# every name prefixed with "SAM_".
#
# PARAMETER
# samr_siggene_table: sig gene table from samr::samr.compute.siggenes.table
#
tbl = samr_siggene_table
up = data.frame(group='up', tbl$genes.up, stringsAsFactors=FALSE)
down = data.frame(group='down', tbl$genes.lo, stringsAsFactors=FALSE)
combined = rbind(up, down)
# Clean the (often mangled) column names, then add the SAM_ prefix.
clean = gsub('\\.+$', '', colnames(combined))
clean = gsub('\\.', '_', clean)
colnames(combined) = paste('SAM', clean, sep='_')
return(combined)
}
parseExcelBGI <- function(dat=NULL) {
#
# FUNCTION
# parsing Excel data from BGI into an easy-to-use data structure:
# a named list of annotation vectors plus per-sample measurement
# matrices (unique reads, coverage, RPKM and log10(RPKM + 1)).
#
# PARAMETER
# dat: dataframe that was obtained from Excel
#
# USAGE
#
data = list()
# Gene-level annotation columns, copied straight from the spreadsheet.
data$rsq_geneid = dat$GeneID
data$rsq_symbol = dat$Symbol
data$rsq_description = dat$Description
data$rsq_go = dat[['GO.Process']]
data$rsq_blastnr = dat[["Blast.nr"]]
# sequencing mesurements: select each measurement block by column-name
# pattern, then strip the suffix so columns are named by sample only.
data$rsq_read = dat[,grep('Uniq\\_reads\\_num',colnames(dat),value=T)]; colnames(data$rsq_read) = gsub('\\_Uniq\\_reads\\_num\\.[0-9]+\\.','',colnames(data$rsq_read))
data$rsq_coverage = dat[,grep('Coverage',colnames(dat),value=T)]; colnames(data$rsq_coverage) = gsub('\\_Coverage','',colnames(data$rsq_coverage))
data$rsq_rpkm = dat[,grep('RPKM',colnames(dat),value=T)]; colnames(data$rsq_rpkm) = gsub('\\_RPKM','',colnames(data$rsq_rpkm))
# NOTE(review): str2num() is not defined in this file -- presumably a
# project helper that coerces the text matrix to numeric; confirm.
data$rsq_rpkm = str2num(data$rsq_rpkm)
data$rsq_rpkm = data$rsq_rpkm + 1 # adding 1 to safely log-transform 0-valued RPKM
data$rsq_logrpkm = log10(data$rsq_rpkm)
data$rsq_sampleID = colnames(data$rsq_read)
# check the data conformality
# NOTE(review): inside a function these comparison results are discarded
# (nothing is printed or asserted); only the NA check below reports.
dim(data$rsq_read) == dim(data$rsq_coverage) # expect to be TRUE
dim(data$rsq_read) == dim(data$rsq_rpkm) # expect to be TRUE
if( sum( is.na(data$rsq_logrpkm) )>0 )
cat ('\n', 'NaN value are present in logrpkm', '\n')
return(data)
}
|
b9100c0b2698f555b93cf3b957f10b643a3f3ac5
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/epiDisplay/examples/Planning.rd.R
|
383c26b3477831049fac940acddb5723b5693f94
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 566
|
r
|
Planning.rd.R
|
# Worked example from the epiDisplay package: practice dataset for data
# cleaning -- finding a duplicated id and replacing it with the missing one.
library(epiDisplay)
### Name: Data for cleaning
### Title: Dataset for practicing cleaning, labelling and recoding
### Aliases: Planning
### Keywords: datasets
### ** Examples
data(Planning)
des(Planning)
# Change var. name to lowercase
names(Planning) <- tolower(names(Planning))
.data <- Planning
des(.data)
# Check for duplication of 'id'
# NOTE(review): the assignment to `id` below modifies a copy in the global
# workspace, not the attached .data -- attach() only provides read access.
attach(.data)
any(duplicated(id))
duplicated(id)
id[duplicated(id)] #215
# Which one(s) are missing?
setdiff(min(id):max(id), id) # 216
# Correct the wrong one: replace the duplicated id with the missing value 216
id[duplicated(id)] <- 216
detach(.data)
# Clears the whole workspace (example-script convenience only; avoid in
# real analysis scripts).
rm(list=ls())
|
1f62ab8ffc09c74323e1761312647a250dcf30d6
|
f2532a5bad45afaef76d4ae4b36a699d7bd35f6d
|
/stock/get_plot_stock.R
|
a75076ed0fbe1e1ea2359d28abe23248314b101d
|
[] |
no_license
|
haradakunihiko/investigation_of_r
|
07a8df599a700c50306bdad8d33de63111ac16fc
|
73b2e065f6e0669332f2417f8866997b0d5bc489
|
refs/heads/master
| 2016-09-13T20:03:06.946929
| 2016-05-05T01:04:23
| 2016-05-05T01:04:23
| 58,093,999
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,479
|
r
|
get_plot_stock.R
|
# Exploratory notebook-style script: download daily stock prices with
# quantmod, experiment with xts subsetting, moving averages, daily returns,
# a Monte Carlo price simulation, and cross-ticker return correlations.
# Comments translated from Japanese; several lines are deliberately broken
# experiments and are flagged with NOTE(review).
library(quantmod)
# Preparation: pull one year of GOOG daily prices from Yahoo.
start = '2015-01-01'
end = '2015-12-31'
ticker = 'GOOG'
GOOG = getSymbols(ticker, src = 'yahoo', from = start, to = end, auto.assign=F)
summary(GOOG)
head(GOOG)
str(GOOG)
# xts date-window subsetting examples (2014 windows return nothing for 2015 data).
GOOG['2014-01/2014-12']
GOOG['2014-01::']
GOOG['2014-01']
GOOG['2014-01-30']
# Per-period maxima of column 6 (adjusted close).
apply.daily(GOOG[, 6], max)
apply.weekly(GOOG[, 6], max)
apply.monthly(GOOG[, 6], max)
apply.quarterly(GOOG[, 6], max)
apply.yearly(GOOG[, 6], max)
apply.monthly(GOOG, function(y)sapply(y, max))
tail(rollapply(GOOG, 120, mean))
# NOTE(review): sapply(x_, ) below is missing its FUN argument and errors when run.
tail(rollapply(GOOG, 1, function(x_)sapply(x_, )))
tail(aggregate(GOOG, as.yearmon, last))
# Required technique:
# plotting
plot(GOOG[,'GOOG.Close'], main='GOOG Closing Prices')
# Required technique:
# merging tables
# computing a moving average
x <- c(1, 2, 3, 1, 2, 9, 4, 2, 6, 1)
# NOTE(review): mutate() needs dplyr, which is never loaded here -- errors when run.
mutate(x)
filter(x, c(1,1,1,1), sides=1)
rep(1/10,10)
# Hand-rolled trailing moving averages via stats::filter (sides=1 = trailing).
GOOG.index = index(GOOG)
GOOG.MVA = xts(filter(GOOG[,'GOOG.Close'], rep(1/10, 10), sides=1), order.by = GOOG.index)
GOOG.MVA20 = xts(filter(GOOG[,'GOOG.Close'], rep(1/20, 20), sides=1), order.by = GOOG.index)
GOOG.MVA50 = xts(filter(GOOG[,'GOOG.Close'], rep(1/50, 50), sides=1), order.by = GOOG.index)
GOOG.MVA100 = xts(filter(GOOG[,'GOOG.Close'], rep(1/100, 100), sides=1), order.by = GOOG.index)
head(GOOG.MVA)
# Moving average cross-check: TTR's SMA should match the filter() version above.
head(SMA(GOOG[,'GOOG.Close'], 10), 30)
head(GOOG.MVA, 30)
GOOG = merge(GOOG, GOOG.MVA, all=TRUE)
GOOG = merge(GOOG, GOOG.MVA20, all=TRUE)
GOOG = merge(GOOG, GOOG.MVA50, all=TRUE)
GOOG = merge(GOOG, GOOG.MVA100, all=TRUE)
head(GOOG)
head(GOOG[,c('GOOG.Close', 'GOOG.MVA')])
# Overlay the close and two moving averages in a single panel.
plot.zoo(GOOG[,c('GOOG.Close', 'GOOG.MVA20', 'GOOG.MVA100')], plot.type = "single", col = c("red", "blue", "green"))
# (Open question: how to add a series to an already-drawn plot?)
# Day-over-day return experiments.
head(GOOG[,'GOOG.Close'])
# Differencing.
head(diff(GOOG[,'GOOG.Close']))
head(GOOG[,'GOOG.Close'])
head(lag.xts(GOOG[,'GOOG.Close'], 1))
head(lag(GOOG[,'GOOG.Close'], 1))
c(1,2,3,4)
lag(c(1,2,3,4),1)
diff(c(1,2,4,7))
head(GOOG[,'GOOG.Close'] / lag.xts(GOOG[,'GOOG.Close'], 1))
# This one differs! (ratio - 1 gives the simple daily return)
GOOG.PREV_DAY_RATE = xts(GOOG[,'GOOG.Close'] / lag.xts(GOOG[,'GOOG.Close'], 1) - 1)
GOOG.PREV_DAY_RATE <- na.omit(GOOG.PREV_DAY_RATE)
GOOG = merge(GOOG, GOOG.PREV_DAY_RATE, all=TRUE)
# (Open question: what does `all` do in merge.xts?)
head(GOOG.PREV_DAY_RATE)
plot(GOOG.PREV_DAY_RATE)
hist(GOOG.PREV_DAY_RATE, breaks = 100 )
#lines(density(GOOG[,'GOOG.Close.3']), col = "orange", lwd = 2)
tail(GOOG)
rbind(GOOG, c(1,1,1,1,1,1))
# Monte Carlo setup: daily drift/volatility from the return series.
days <- 365
dt <- 1/days
mean(GOOG[,'GOOG.Close'])
mu <- mean(GOOG.PREV_DAY_RATE)
mu
# (Is this right?)
sigma <- sd(GOOG.PREV_DAY_RATE)
sigma
# Normally distributed random numbers.
rnorm(10)
shock <- sigma * rnorm(10) * sqrt(dt)
rnorm(1)
res = rep(0, days)
for (i in 2:(days -1)) {
res[i] <- i
}
mu
mu * sigma
rnorm(1)
sqrt(dt)
sigma
sigma * rnorm(1) * sqrt(dt)
# Geometric-Brownian-motion style Monte Carlo price path.
# (The misspelled name "motecarlo" is kept as-is; callers below use it.)
motecarlo <- function (startPrice, days, mu, sigma) {
dt = 1/days
price = rep(1, days)
price[1] <- startPrice
drift <- mu * dt
for (i in 2:(days)) {
shock = sigma * rnorm(1) * sqrt(dt)
price[i] <- price[i - 1] + price[i - 1] *( drift + shock)
}
return (price);
}
# Ten sample paths starting from price 524.
fut <- c()
for(i in 1:10) {
fut <-cbind(fut, motecarlo(524, 365,mu,sigma))
}
# 10000-run distribution of the price at the end of one year.
days <- 365
num <- 10000
simu <- rep(0,num)
for(i in 1:num) {
simu[i] <-motecarlo(524, days,mu,sigma)[days -1]
}
simu
hist(simu, breaks = 100)
mean(simu)
quantile(simu, .01)
# zoo package plotting.
fut.zoo = as.zoo(fut)
# NOTE(review): tsRainbow is used here before it is defined two lines below.
plot(x = fut.zoo, ylab = "Cumulative Return", main = "Cumulative Returns",
col = tsRainbow, screens = 1)
tsRainbow <- rainbow(ncol(fut))
plot.zoo(fut, plot.type = "single", col = tsRainbow, xlab ="Days", ylab="Price")
legend(x = "topleft", legend = c("1", "2", "3", "4", "5"),
lty = 1,col = tsRainbow)
# Composite: overlay paths by replotting with par(new = TRUE).
plot(fut[,1], t = 'l', ylim = c(min(fut), max(fut)))
par(new=T)
plot(fut[,2], t = 'l', ylim = c(min(fut), max(fut)))
par(new=T)
plot(fut[,3], t = 'l', ylim = c(min(fut), max(fut)))
par(new=T)
plot(fut[,4], t = 'l', ylim = c(min(fut), max(fut)))
# Alternative: add lines() to one plot instead.
plot(fut[,1], t = 'l', ylim = c(min(fut), max(fut)))
lines(fut[,2])
lines(fut[,3])
lines(fut[,4])
# Correlation between tickers.
last <- index(last(GOOG[,'GOOG.Adjusted']))
last
xts(1, index(last(GOOG[,'GOOG.Adjusted']))+1:days)
start = '2015-01-02'
end = '2015-12-31'
GOOG = getSymbols('GOOG', src = 'yahoo', from = start, to = end, auto.assign=F)
AMZN = getSymbols('AMZN', src = 'yahoo', from = start, to = end, auto.assign=F)
MSFT = getSymbols('MSFT', src = 'yahoo', from = start, to = end, auto.assign=F)
AAPL = getSymbols('AAPL', src = 'yahoo', from = start, to = end, auto.assign=F)
head(GOOG)
GOOG.PREV_DAY_RATE = xts(GOOG[,'GOOG.Adjusted'] / lag.xts(GOOG[,'GOOG.Adjusted'], 1))
AMZN.PREV_DAY_RATE = xts(AMZN[,'AMZN.Adjusted'] / lag.xts(AMZN[,'AMZN.Adjusted'], 1))
MSFT.PREV_DAY_RATE = xts(MSFT[,'MSFT.Adjusted'] / lag.xts(MSFT[,'MSFT.Adjusted'], 1))
AAPL.PREV_DAY_RATE = xts(AAPL[,'AAPL.Adjusted'] / lag.xts(AAPL[,'AAPL.Adjusted'], 1))
hist(GOOG.PREV_DAY_RATE, breaks = 100)
hist(AMZN.PREV_DAY_RATE, breaks = 100)
ALL = merge(AAPL.PREV_DAY_RATE, AMZN.PREV_DAY_RATE, GOOG.PREV_DAY_RATE, MSFT.PREV_DAY_RATE, all= TRUE)
head(ALL)
tail(ALL)
ALL = na.omit(ALL)
cor(ALL)
library(PerformanceAnalytics)
chart.Correlation(ALL)
warnings()
# TODO ideas (translated):
# use a finer granularity?
# variance of daily returns = risk
# comparing daily returns (how to measure how well they match?)
# detecting golden crosses
# simulating from the daily-return distribution
# one-year-ahead price forecast
# plot of the one-year forecast over 10000 runs
|
fd8ebd5780b9760fe8bcde4ec70fe51645e5876a
|
745d526cb4a0a7537f13762ec84ab7c4f1ec1cca
|
/tag_validation/11_bap2_quantify.R
|
88aebb9381371494d64777e9b9e1290613966483
|
[] |
no_license
|
ning-liang/dscATAC_analysis_code
|
c4a1598e5cc5f84fe4d46b2159d69ea0c36643fe
|
b08f76c7add6464c06f7bb4aab95c0bd0b205404
|
refs/heads/master
| 2020-11-25T03:22:49.094691
| 2019-09-30T16:08:42
| 2019-09-30T16:08:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,163
|
r
|
11_bap2_quantify.R
|
# Quantify bap2 barcode-merging performance: for each sample, score the
# fragment-jaccard values against the tag-derived ground truth and report
# AUROC / AUPRC via the precrec package.
library(data.table)
library(precrec)
library(dplyr)
# Thresholds that I used in bap1 for the tag thresholds (per-sample cutoffs
# on jaccard_tags that define the positive class).
thresholds_tag <- c(0.01, 0.01, 0.005, 0.005, 0.005)
names(thresholds_tag) <- c("Sample1", "Sample2", "Sample3", "Sample4", "Sample6")
# ZB: update this path
path_to_csvgz_files <- "../../may24_2019_from_ZB/"
# Simple function that takes the raw file name + sample name and computes
# AUROC/AUPRC: rows whose jaccard_tags exceed the sample's threshold are the
# positives, scored by jaccard_frag.
compute_metrics <- function(raw_file, sample){
  dt <- fread(paste0(path_to_csvgz_files, "/", raw_file))
  pass <- dt[["jaccard_tags"]] > thresholds_tag[sample]
  mmpr <- mmdata(dt[["jaccard_frag"]], pass, modnames = c("bap2"))
  mscurves <- evalmod(mmpr)
  dfo <- auc(mscurves)
  dfo$Sample <- sample
  dfo
}
raw_files <- c("N701_Exp69_sample1.implicatedBarcodesWithTags.csv.gz", "N702_Exp69_sample2.implicatedBarcodesWithTags.csv.gz",
"N703_Exp69_sample3.implicatedBarcodesWithTags.csv.gz", "N704_Exp69_sample4.implicatedBarcodesWithTags.csv.gz",
"N706_Exp69_sample6.implicatedBarcodesWithTags.csv.gz")
# Loop over all samples. FIX: seq_along(raw_files) replaces the hard-coded
# 1:5 so the index range stays in sync if files are added or removed
# (thresholds_tag must stay parallel to raw_files).
lapply(seq_along(raw_files), function(i){
  compute_metrics(raw_files[i], names(thresholds_tag)[i])
}) %>% rbindlist()
|
2c1bee38d53225733b7905ec0314193c9f9e5781
|
6af19fc6836016681e9fbe6bae4d680f4589d33b
|
/R/predictInt.R
|
713337f1dd8d1056e7a7e0b35841b71d2bd1d862
|
[] |
no_license
|
cran/plaqr
|
0ee34d0bc3b1e1abcb1b161aac3ae7b998ab2b79
|
5a81423644b657143bc935ceae43ce297995384b
|
refs/heads/master
| 2020-04-21T00:42:04.328863
| 2017-08-08T17:35:59
| 2017-08-08T17:35:59
| 34,162,155
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,358
|
r
|
predictInt.R
|
# Prediction interval from a quantile-regression fit.
#
# Builds a (median, lower, upper) interval with nominal coverage `level` by
# re-evaluating the model's stored call at tau = 0.5, (1-level)/2 and
# 0.5 + level/2, reusing the supplied fit whenever its tau already matches.
#
# NOTE(review): eval.parent(x$call) re-runs the original model call in the
# caller's frame, so the data/variables referenced by that call must still
# be visible there. Do not refactor the repeated blocks below into a helper
# function without accounting for the changed evaluation frame.
#
# Args:
#   fit:     a fitted quantile-regression model carrying $call, $tau and
#            $fitted.values (presumably a plaqr/rq fit -- TODO confirm).
#   level:   coverage level of the interval (default 0.95).
#   newdata: optional data frame of new observations; when NULL the fitted
#            values at the training data are used.
#   ...:     unused; kept for interface compatibility.
#
# Returns: a matrix with columns median, lwr and upr, one row per observation.
predictInt <- function(fit, level=.95, newdata=NULL, ...)
{
x <- fit
# Tail quantiles that bracket the requested coverage level.
taulwr <- (1-level)/2
tauupr <- .5+level/2
# If newdata is NULL, use current values for prediction
if(is.null(newdata)){
# Median
if(fit$tau==.5){
median <- fit$fitted.values
} else {
# Refit at tau = 0.5 by re-evaluating the stored call in the caller's frame.
x$call$tau <- .5
median <- eval.parent(x$call)$fitted.values
}
# Lower quantile
if(fit$tau==taulwr){
lwr <- fit$fitted.values
} else {
x$call$tau <- taulwr
lwr <- eval.parent(x$call)$fitted.values
}
# Upper quantile
if(fit$tau==tauupr){
upr <- fit$fitted.values
} else {
x$call$tau <- tauupr
upr <- eval.parent(x$call)$fitted.values
}
} else {
# newdata supplied: same three fits, but predictions at the new observations.
# Median
if(fit$tau==.5){
median <- predict(fit, newdata)
} else {
x$call$tau <- .5
median <- predict(eval.parent(x$call), newdata)
}
# Lower quantile
if(fit$tau==taulwr){
lwr <- predict(fit, newdata)
} else {
x$call$tau <- taulwr
lwr <- predict(eval.parent(x$call), newdata)
}
# Upper quantile
if(fit$tau==tauupr){
upr <- predict(fit, newdata)
} else {
x$call$tau <- tauupr
upr <- predict(eval.parent(x$call), newdata)
}
}
# Assemble the interval matrix: point estimate plus lower/upper bounds.
mat <- cbind(median,lwr,upr)
return(mat)
}
|
d125098cc283725f3a5217a5c370b566c1abbd0a
|
9982377266ac28216180a7577be356ffc1015fac
|
/tmap.R
|
1014e39b49e4f4fd91e8d33a0380a2a29ca862e3
|
[] |
no_license
|
sharapov98/R
|
8608755536d449aaa59faaea1836f1a4445f5371
|
bd4f9a9447553b6c3671817509a265aa23afdf87
|
refs/heads/master
| 2020-05-19T21:54:05.250799
| 2019-05-06T23:44:38
| 2019-05-06T23:44:38
| 185,235,409
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,537
|
r
|
tmap.R
|
#TMAP by PM1
#Done by PM2
# Two-panel world map: Happy Planet Index alongside log10 GDP per capita,
# saved as a static PDF and then shown interactively.
library(tmap)
data(World)
# Blank out the GDP-per-capita outlier before log-scaling.
# FIX: previously the row index 7 was hard-coded from an interactive run of
# which(); computing it here keeps the script correct if the data change.
max_idx <- which(World$gdp_cap_est == max(World$gdp_cap_est, na.rm = TRUE))
World$gdp_cap_est[max_idx] <- NA
World$log10gdp_cap_est <- log10(World$gdp_cap_est)
# Faceted map: HPI and log10 GDP per capita with separate palettes.
map <- tm_shape(World) +
tm_polygons(c("HPI", "log10gdp_cap_est"),
title = c("Happy planet index", "Log 10 scaled \n GDP per capita"),
palette = list("YlGnBu", "YlOrRd")) +
tm_layout(main.title = "HPI and GDP per capita",
main.title.position = "left",
panel.labels = c("Happy planet index 2016", "GDP per capita in 2014"),
bg.color = "skyblue",
legend.bg.color = "grey",
legend.bg.alpha = 0.5,
legend.position = c("left", "bottom")) +
tm_grid(projection = "longlat", labels.size = 0)
tmap_save(tm = map, filename = "tmap.pdf", width = 6, height = 8)
#SECOND PART: interactive (leaflet) view centred on Europe.
tmap_mode("view")
map + tm_view(text.size.variable = TRUE,
set.view = c(15.2, 54.5, 3))
#THE MAPS SEPARATELY (kept for reference)
# tm_shape(World, bbox = ) +
#   tm_polygons("HPI", palette = "YlGnBu",
#               title = "Happy planet index",
#               style = "quantile") +
#   tm_layout(bg.color = "skyblue",
#             legend.bg.color = "grey",
#             legend.bg.alpha = 0.5,
#             main.title = "HPI and GDP per capita") +
#   tm_grid(projection = "longlat", labels.size = 0)
# tm_shape(World, bbox = ) +
#   tm_polygons("log10gdp_cap_est") +
#   tm_layout(bg.color = "skyblue") +
#   tm_grid(projection = "longlat", labels.size = 0)
|
41a45c9f9a5ae61047232bb0f06980ed6ae47315
|
4acde36c651d9ae6d19cc2fc94438ed115104b01
|
/ACC2.R
|
39ffbfd41a0dd3bfb879288ba2b7e3ca2dd6be82
|
[] |
no_license
|
LucianoAndrian/tesis
|
ae8aa39cd948f69ea5f58ffc763ad9f3052a68e8
|
b87b43074aec7f37bacb79783af451a0343e9cf7
|
refs/heads/master
| 2022-07-24T18:00:33.088886
| 2022-07-22T18:03:20
| 2022-07-22T18:03:20
| 221,673,981
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,291
|
r
|
ACC2.R
|
# ACC "espacial"?
#### Apertura base de datos ####
#-------------------------------------------------#
### Observaciones. O(j,m) j años, m estaciones. ###
#-------------------------------------------------#
# necesito "estaciones_p_a_t" de datos_obs.R (ahora se va a llamar prom_est)
# los años y latitudes se mantienen igual que en datos_obs.R
library(ncdf4)
source("funciones.R")
mask = as.matrix(read.table("mascara.txt"))
# O == prom_est-...
# O' == O - c_v_....
##------------------------ CPC ------------------------ ##
#sin mascara
# Temp
ruta = "/pikachu/datos/osman/nmme/monthly"
tref = nc_open(paste(ruta,"tref_monthly_nmme_ghcn_cams.nc", sep = "/"))
names(tref$var)
temp = ncvar_get(tref, "tref")
lat = ncvar_get(tref, "Y")
lon = ncvar_get(tref, "X")
nc_close(tref)
temp = temp[which(lon==275):which(lon==330), which(lat==-60):which(lat==15), 3:371]
lon2 = lon[which(lon==275):which(lon==330)] # se usan las mismas en PP
lat2 = lat[which(lat==-60):which(lat==15)] #
temp_estaciones = array(NA, dim = c(length(lon2), length(lat2), 30, 12))
for(j in 1:12){
for (i in 0:29){
temp_estaciones[,,1+i,j] = temp[ , , j+12*i]
}
}
# Estaciones
prom_est_cpc_t = array(NA, dim = c(length(lon2), length(lat2), 30, 4))
i=1
while(i<=4){
prom_est_cpc_t[,,,i] = apply(temp_estaciones[,,,(i + 2*i - 2):(i+2*i)], c(1,2,3), mean)
i = i + 1
}
# PP
## ------------------------ CMAP ------------------------ ## # sin mascara
# solo pp
library(fields)
aux = nc_open("/home/luciano.andrian/tesis/X190.191.242.210.56.5.48.49.nc")
#aux2 = ncvar_get(aux, "precip")[which(lon==275):which(lon==330), which(lat==-60):which(lat==15),]
lon = ncvar_get(aux, "lon")
lat = ncvar_get(aux, "lat")
aux2 = ncvar_get(aux, "precip")[,,27:386]
nc_close(aux)
lon2 = lon
lat2 = lat
pp3_int = array(NA, dim = c(58, 78, 360)) # esta quedo con mayor latitud y longitud ya que sino queda mas chico debido a la grilla 2.5x2.5
for(i in 1:360){ #interpolado
mod = list(x = lon2, y = lat2, z = aux2[,,i])
grid = list(x=seq(min(lon2), max(lon2), by = 1), y = seq(min(lat2), max(lat2), by = 1))
pp_aux = interp.surface.grid(obj = mod, grid.list = grid)
pp3_int[,,i] = pp_aux$z
}
pp3_estaciones = array(NA, dim = c(58, 78, 30, 12))
for(j in 1:12){
for (i in 0:29){
pp3_estaciones[,,1+i,j] = pp3_int[1:58 , 1:78, j+12*i]
}
}
prom_est_cmap_pp = array(NA, dim = c(58, 78, 30, 4))
i=1
while(i<=4){
prom_est_cmap_pp[,,,i] = apply(pp3_estaciones[,,,(i + 2*i - 2):(i+2*i)], c(1,2,3), mean)*30 # esta en mm/day
i = i + 1
}
# O
datos.obs = array(data = NA, dim = c(56, 76, 29, 4, 2)) # uso misma cantidad de años que los modelos
datos.obs[,,,,1] = prom_est_cpc_t[,,1:29,]
datos.obs[,,,,2] = prom_est_cmap_pp[2:57,2:77,1:29,] # este tenia + lats y lons por el grillado
########################## Cross Validation datos.obs ##########################
#
# para cada año tengo q tener promedio de todos los años menos ese año.
aux = diag(29)
aux[which(aux == 1)] = NA ; aux[which(aux == 0)] = 1
aux2 = array(data = 1, dim = c(56, 76, 29, 4, 29, 2))
aux2.obs = array(data = 1, dim = c(56, 76, 29, 4, 29, 2))
cv.obs = array(data = NA, dim = c(56, 76, 29, 4, 2)) # para las 4 base de datos, la 1era temp y las otras pp
for(i in 1:29){
aux2[,,i,,i,] = aux2[,,i,,i,]*aux[i,i] # como matriz identidad inversa con NA en la diagonal y 1 pero en 4 dimenciones.
aux2.obs[,,,,i,] = aux2[,,,,i,]*datos.obs
# promedio sacando cada año.
cv.obs[,,i,,] = apply(aux2.obs[,,,,i,], c(1,2,4,5), mean, na.rm = T)
}
### O'
Op = datos.obs - cv.obs
#### Apertura de los modelos ####
#-------------------------------------------------#
### Modelos. F(j,m) j años, m estaciones. ###
#-------------------------------------------------#
# necesito el array intermedio para crear sd que tiene la funcion mean_sd.
# modificada la funcion, devuelve lista que en las dim [[5]] = se encuetnra la temp y [[6]] la pp. h
# ESTAS LISTAS SON EL ENSAMBLE DE LOS MIEMBROS DE CADA MODELO --> OK
lon2 = read.table("lon2.txt")[,1]
lat2 = read.table("lat2.txt")[,1]
modelos = c("COLA-CCSM4", "GFDL-CM2p1", "GFDL-FLOR-A06", "GFDL-FLOR-B01", "NASA-GEOS5", "NCEP-CFSv2", "CMC-CanCM4i", "CMC-GEM-NEMO")
# uso misma denominacion que para las obserbaciones.
# esto es F
t.mods = array(data = NA, dim = c(56, 76, 29, 4, 8)) # recordar, los modelos 1982-2010 (29 años)
pp.mods = array(data = NA, dim = c(56, 76, 29, 4, 8))
for(i in 1:length(modelos)){
aux = mean_sd(modelos[i])
t.mods[,,,,i] = aux[[5]]
pp.mods[,,,,i] = aux[[6]]
}
########################## Cross Validation modelos ##########################
aux = diag(29)
aux[which(aux == 1)] = NA ; aux[which(aux == 0)] = 1
aux2 = array(data = 1, dim = c(56, 76, 29, 4, 8, 29))
aux3 = array(data = 1, dim = c(56, 76, 29, 4, 8, 29)) # T
aux4 = array(data = 1, dim = c(56, 76, 29, 4, 8, 29)) # PP
aux5 = array(data = NA, dim = c(56, 76, 29, 4, 8))
aux6 = array(data = NA, dim = c(56, 76, 29, 4, 8))
for(i in 1:29){
aux2[,,i,,,i] = aux2[,,i,,,i]*aux[i,i] # una especie de matriz identidad inversa con NA y 1 pero en 4 dim.
aux3[,,,,,i] = aux2[,,,,,i]*t.mods
aux4[,,,,,i] = aux2[,,,,,i]*pp.mods
# promedio sacando cada anio
#
aux5[,,i,,] = apply(aux3[,,,,,i], c(1, 2, 4, 5), mean, na.rm = T)
aux6[,,i,,] = apply(aux4[,,,,,i], c(1, 2, 4, 5), mean, na.rm = T)
}
t.Fp = t.mods - aux5
pp.Fp = pp.mods - aux6
#### AREAS ####
#----falta alguna? -----#
lats = list()
lats[[1]] = seq(which(lat2 == -13), which(lat2 == 2), by = 1); lats[[2]] = seq(which(lat2 == -16), which(lat2 == 4), by = 1)
lats[[3]] = seq(which(lat2 == -16), which(lat2 == 2), by = 1); lats[[4]] = seq(which(lat2 == -26), which(lat2 == -17), by = 1)
lats[[5]] = seq(which(lat2 == -39), which(lat2 == -24), by = 1)
lons = list()
lons[[1]] = seq(which(lon2 == 291), which(lon2 == 304), by = 1); lons[[2]] = seq(which(lon2 == 301), which(lon2 == 316), by = 1)
lons[[3]] = seq(which(lon2 == 313), which(lon2 == 326), by = 1); lons[[4]] = seq(which(lon2 == 308), which(lon2 == 321), by = 1)
lons[[5]] = seq(which(lon2 == 296), which(lon2 == 309), by = 1)
#### ACC ####
# haciendo igual q en desempmods...
t.Fp_ens = apply(t.Fp, c(1,2,3,4), mean, na.rm = T)
pp.Fp_ens = apply(pp.Fp, c(1,2,3,4), mean, na.rm = T)
acc_ens = array(data = NA, dim = c(29,4,5,2))
V = list()
V[[1]] = t.Fp_ens
V[[2]] = pp.Fp_ens
for(v in 1:2){
for(z in 1:5){
xp = Op[lons[[z]], lats[[z]],,,v]
xp_sp = apply(xp, c(3,4), mean, na.rm = T)
fp = V[[v]][lons[[z]], lats[[z]],,] # cada modelo
fp_sp = apply(fp, c(3,4), mean, na.rm = T)
aux.o = array(data = NA, dim = c(dim(xp),5,2))
aux.m = array(data = NA, dim = c(dim(fp),5,2))
for(a in 1:29){
aux.o[,,a,,z,v] = xp[,,a,] - xp_sp[a,]
aux.m[,,a,,z,v] = fp[,,a,] - fp_sp[a,]
n = length(xp[,1,1,1])*length(xp[1,,1,1])
num = apply(aux.o*aux.m, c(3,4), sum, na.rm = T)
den = n*sqrt((apply(aux.o**2, c(3,4), sum, na.rm = T)/n)*(apply(aux.m**2, c(3,4), sum, na.rm = T)/n))
acc_ens[,,z,v] = num/den
}
}
}
#prueba grafico
library(ggplot2)
rc = qt(p = 0.95,df = 29-1)/sqrt((29-1)+qt(p = 0.95,df = 29-1))
region = c("Amazonia", "South American Monsoon", "North-estern Brazil", "SACZ", "La Plata Basin")
region.fig = c("Am", "SAM", "NeB", "SACZ")
var.title = c("Temperatura", "Precipitación")
var = c("t", "pp")
for(v in 1:2){
for(z in 1:5){
aux = as.data.frame(acc_ens[,,z,v])
aux=cbind(aux, seq(1982, 2010))
colnames(aux) = c("MAM", "JJA", "SON", "DJF", "Años")
g = ggplot(aux, aes(x = Años))+theme_minimal()+
geom_line(aes(y = MAM, colour = "MAM"), size = 1) +
geom_line(aes(y = JJA, colour = "JJA"), size = 1) +
geom_line(aes(y = SON, colour = "SON"), size = 1) +
geom_line(aes(y = DJF, colour = "DJF"), size = 1) +
scale_colour_manual("",
breaks = c("MAM", "JJA", "SON", "DJF"),
values = c("yellow2", "royalblue", "green3", "orange2")) +
geom_hline(yintercept = rc, color = "grey", size = 1, alpha = 1) +
ggtitle(paste("ACC ", var.title[v], " - ", region[z], sep = "")) +
scale_y_continuous(limits = c(-1, 1), breaks = seq(-1, 1, by = 0.2)) +
scale_x_continuous(limits = c(1982, 2010), breaks = seq(1982, 2010, by = 2)) +
theme(axis.text.y = element_text(size = 14, color = "black"), axis.text.x = element_text(size = 14, color = "black"), axis.title.y = element_blank(),
panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"), axis.title.x = element_text(),
panel.border = element_rect(colour = "black", fill = NA, size = 1),
panel.ontop = F,
plot.title = element_text(hjust = 0.5, size = 18),
legend.position = "right", legend.key.width = unit(1, "cm"), legend.key.height = unit(2, "cm"), legend.text = element_text(size = 15))
ggsave(paste("/home/luciano.andrian/tesis/salidas/desemp_mods/ACC2/", var[v], ".ACC2_", region[z],".jpg",sep =""), plot = g, width = 30, height = 15 , units = "cm")
}
}
|
c107eeffe760aa843164251c400739eb42303656
|
79b935ef556d5b9748b69690275d929503a90cf6
|
/man/plot.leverage.ppm.Rd
|
64291bc56cf9264151504a40d3ed1df9bc3349d2
|
[] |
no_license
|
spatstat/spatstat.core
|
d0b94ed4f86a10fb0c9893b2d6d497183ece5708
|
6c80ceb9572d03f9046bc95c02d0ad53b6ff7f70
|
refs/heads/master
| 2022-06-26T21:58:46.194519
| 2022-05-24T05:37:16
| 2022-05-24T05:37:16
| 77,811,657
| 6
| 10
| null | 2022-03-09T02:53:21
| 2017-01-02T04:54:22
|
R
|
UTF-8
|
R
| false
| false
| 4,378
|
rd
|
plot.leverage.ppm.Rd
|
\name{plot.leverage.ppm}
\alias{plot.leverage.ppm}
\alias{contour.leverage.ppm}
\alias{persp.leverage.ppm}
\title{
Plot Leverage Function
}
\description{
Generate a pixel image plot, or a contour plot, or a perspective plot,
of a leverage function that has been computed by \code{\link{leverage.ppm}}.
}
\usage{
\method{plot}{leverage.ppm}(x, \dots,
what=c("smooth", "nearest", "exact"),
showcut=TRUE,
args.cut=list(drawlabels=FALSE),
multiplot=TRUE)
\method{contour}{leverage.ppm}(x, \dots,
what=c("smooth", "nearest"),
showcut=TRUE,
args.cut=list(col=3, lwd=3, drawlabels=FALSE),
multiplot=TRUE)
\method{persp}{leverage.ppm}(x, \dots,
what=c("smooth", "nearest"),
main, zlab="leverage")
}
\arguments{
\item{x}{
Leverage function (object of class \code{"leverage.ppm"}) computed by
\code{\link{leverage.ppm}}.
}
\item{\dots}{
Arguments passed to \code{\link{plot.im}} or \code{\link{contour.im}}
or \code{\link{persp.im}} controlling the plot.
}
\item{what}{
Character string (partially matched)
specifying the values to be plotted. See Details.
}
\item{showcut}{
Logical. If \code{TRUE}, a contour line is plotted at the
level equal to the theoretical mean of the leverage.
}
\item{args.cut}{
Optional list of arguments passed to
\code{\link[graphics]{contour.default}} to control the
plotting of the contour line for the mean leverage.
}
\item{multiplot}{
Logical value indicating whether it is permissible to display
several plot panels.
}
\item{main}{
Optional main title. A character string or character vector.
}
\item{zlab}{
Label for the \eqn{z} axis. A character string.
}
}
\details{
These functions are the \code{plot}, \code{contour} and \code{persp} methods
for objects of class \code{"leverage.ppm"}.
Such objects are computed by the command \code{\link{leverage.ppm}}.
The \code{plot} method displays the leverage function
as a colour pixel image using \code{\link{plot.im}},
and draws a single contour line at the mean leverage value
using \code{\link{contour.default}}.
Use the argument \code{clipwin} to restrict the plot to a subset
of the full data.
The \code{contour} method displays the leverage function as a contour
plot, and also draws a single contour line at the mean leverage value,
using \code{\link{contour.im}}.
The \code{persp} method displays the leverage function as a surface
in perspective view, using \code{\link{persp.im}}.
Since the exact values of leverage are computed only
at a finite set of quadrature locations, there are several options for
these plots:
\describe{
\item{\code{what="smooth"}:}{
(the default) an image plot showing a smooth function,
obtained by applying kernel smoothing to the exact leverage values;
}
\item{\code{what="nearest"}:}{
an image plot showing a piecewise-constant function,
obtained by taking the exact leverage value at the nearest
quadrature point;
}
\item{\code{what="exact"}:}{
a symbol plot showing the exact values of leverage as
circles, centred at the quadrature points,
with diameters proportional to leverage.
}
}
The pixel images are already contained in the object \code{x}
and were computed by \code{\link{leverage.ppm}};
the resolution of these images is controlled
by arguments to \code{\link{leverage.ppm}}.
}
\value{
Same as for \code{\link{plot.im}}, \code{\link{contour.im}}
and \code{\link{persp.im}} respectively.
}
\references{
Baddeley, A., Chang, Y.M. and Song, Y. (2013)
Leverage and influence diagnostics for spatial point process models.
\emph{Scandinavian Journal of Statistics} \bold{40}, 86--104.
}
\author{
\spatstatAuthors.
}
\seealso{
\code{\link{leverage.ppm}}.
}
\examples{
if(offline <- !interactive()) op <- spatstat.options(npixel=32, ndummy.min=16)
X <- rpoispp(function(x,y) { exp(3+3*x) })
fit <- ppm(X ~x+y)
lef <- leverage(fit)
plot(lef)
contour(lef)
persp(lef)
if(offline) spatstat.options(op)
}
\keyword{spatial}
\keyword{models}
|
534eb7ce514af9e016d7b6b1dc9be0334cd1c416
|
55719b6df5677aaa6b459bdea645c53899421a9b
|
/projectB/projectB.R
|
c05e55940103cf1a0d60f99f2f52eb7f8ceedb67
|
[] |
no_license
|
manav003/ComprehensiveProject
|
bb62ca3f39eaef7ac600591446fe11ea18f64100
|
42074b57a59de8b170bdc430de134910f382773b
|
refs/heads/master
| 2022-06-11T13:07:41.268869
| 2020-05-08T23:54:36
| 2020-05-08T23:54:36
| 261,581,722
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,384
|
r
|
projectB.R
|
# projectB: scrape Google Scholar for "covid-19" papers with source:psychology,
# build a table of (title, authors, journal, year, link), and plot publication
# counts per year among the ten most frequent journals.
#
# NOTE(review): depends on the live Google Scholar results markup and on the
# query returning 240 results (10 per page) -- confirm before re-running.

# R Studio API Code: run relative to this script's directory
library(rstudioapi)
setwd(dirname(getActiveDocumentContext()$path))
# Libraries
library(tidyverse)
library(rvest)
library(httr)

# Data Import and Cleaning
## READ ALL PAPERS IN, ONCE
# there are 240 results, 10 per page -> 24 pages
n_pages <- 24
allPapers <- vector("list", n_pages)  # preallocate instead of implicit growth
for (i in seq_len(n_pages)) {
  offset <- (i - 1) * 10  # Scholar's zero-based "start" parameter
  link <- paste0("https://scholar.google.com/scholar?start=", offset, "&q=%22covid-19%22+source:psychology&hl=en&as_sdt=0,48&as_vis=1")
  allPapers[[i]] <- read_html(link)
  Sys.sleep(5)  # throttle requests to avoid being rate-limited
  print(link)   # progress indicator
}

## READ ALL NECESSARY INFO IN
# Gather per-page results into lists and flatten once, instead of growing
# vectors with c() inside the loop (which copies the vector each iteration).
titles_by_page <- vector("list", length(allPapers))
links_by_page  <- vector("list", length(allPapers))
info_by_page   <- vector("list", length(allPapers))
for (i in seq_along(allPapers)) {
  titleNodes <- html_nodes(allPapers[[i]], ".gs_rt a")
  titles_by_page[[i]] <- html_text(titleNodes)
  links_by_page[[i]]  <- html_attr(titleNodes, "href")
  info_by_page[[i]]   <- html_text(html_nodes(allPapers[[i]], ".gs_a"))
}
allTitleText <- unlist(titles_by_page)
allLinksText <- unlist(links_by_page)
allInfoText  <- unlist(info_by_page)

# The instructions ask for 4 columns, but it's asking for 5 different pieces of
# data (article titles, author lists, journal title, year and link to each
# article), so I'm assuming you actually want 5 columns.
# The ".gs_a" byline has the shape "authors - journal, year - publisher";
# split into at most 3 pieces on "-".
info_pieces <- str_split(allInfoText, "-", 3)
allAuthorsText <- vapply(info_pieces, function(p) p[1], character(1))
journalYear    <- vapply(info_pieces, function(p) p[2], character(1))

# Pull journal name and year apart. Preallocating full-length NA vectors
# fixes a latent bug: growing by index left trailing entries unset (and a
# possible column-length mismatch in tibble()) when the last bylines lacked
# a journal or a year.
journalName <- rep(NA_character_, length(journalYear))
year        <- rep(NA_character_, length(journalYear))
for (i in seq_along(journalYear)) {
  if (str_detect(journalYear[i], pattern = ", [0-9]{4}")) {
    # both journal and year present
    tempSplit <- str_split(journalYear[i], ", [0-9]{4}")
    journalName[i] <- tempSplit[[1]][1]
    year[i] <- str_extract(journalYear[i], "([0-9]{4})")
  } else if (str_detect(journalYear[i], pattern = "[0-9]{4}")) {
    # only a year
    year[i] <- journalYear[i]
  } else {
    # only a journal name
    journalName[i] <- journalYear[i]
  }
}

df <- tibble("ArticleTitle" = allTitleText, "AuthorList" = allAuthorsText, "JournalTitle" = journalName, "Year" = year, "Link" = allLinksText)

# Visualization: papers per year among the ten most frequent journals.
topJournals <- df %>%
  group_by(JournalTitle) %>%
  count() %>%
  drop_na() %>%
  arrange(desc(n))
topJournals <- topJournals[1:10, ]  # assumes >= 10 distinct journals -- TODO confirm
plot <- df %>%
  right_join(topJournals, by = "JournalTitle") %>%
  mutate(Year = as.numeric(Year)) %>%
  select(Year) %>%
  count(Year) %>%
  ggplot(aes(x = Year, y = n)) + geom_point()
plot
|
4f57b0b2bb89272fb342da4b0d60ae50f49c6133
|
ce7998c8db9a3a3dc47aaffee3351b5f86f8b596
|
/man/find_filepath.Rd
|
df023ec7c501bcd1c5afd0ea72b0c8c0c5c8566d
|
[] |
no_license
|
WerthPADOH/sasconfigger
|
c2f0c1a5b62fa1dfebe39bcb430d41aa0a7ad573
|
f92cd183ffaeeeba38dc3c461bd655fb13d8cbac
|
refs/heads/master
| 2021-01-11T04:19:46.687203
| 2016-10-17T20:45:26
| 2016-10-17T20:45:26
| 71,179,838
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 475
|
rd
|
find_filepath.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/find_filepath.R
\name{find_filepath}
\alias{find_filepath}
\title{Find where file paths occur in text}
\usage{
find_filepath(x)
}
\arguments{
\item{x}{Character vector}
}
\value{
List of matrices
}
\description{
Find where file paths occur in text
Finds the start and end positions of all file paths occurring in a text.
}
|
dd9cdb9eea1fcd46614ebb3e713113407a296be2
|
0cc86ecac7e9cb23cb97512ba4d7f5b81d48687e
|
/RNAseq/normalize_epic_arrays.R
|
6b77e990fc1b3ba2d406c5d67b99fd525fb78a06
|
[] |
no_license
|
thangnx1012/RNAseq_Annalysis
|
d677862bd31a9187bd6ecd706a579d6577785e6c
|
085f44d1bc7bbca43d7e19a8dc37ca58a8ea9d0a
|
refs/heads/main
| 2023-08-27T20:33:45.453219
| 2021-11-14T16:46:28
| 2021-11-14T16:46:28
| 414,225,733
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,957
|
r
|
normalize_epic_arrays.R
|
#### Analysis of DNA EPIC methylation arrays.
#### NOTE!!! There is not an annotation available for hg38, so the genomic coordinates are hg19.
# NOTE(review): hard-coded network-drive path; must be edited to run elsewhere.
setwd("Z:/Wendy_Kellner/DNMT1/DNMT1_AML_Epic_methylation_NYU")
library(limma)
library(minfi)
library(IlluminaHumanMethylationEPICanno.ilm10b2.hg19)
library(IlluminaHumanMethylation450kmanifest)
library(RColorBrewer)
library(missMethyl)
library(matrixStats)
library(minfiData)
library(Gviz)
library(DMRcate)
library(stringr)
library(ggplot2)
######################## Reading in files ###########################
### Read in the sample sheet with the phenotypic data
targets <- read.metharray.sheet(base = "path_to_idat")
RGset <- read.metharray.exp(targets = targets, force = TRUE)
targets$ID <- paste(targets$CellLine,targets$Dose,targets$Treatment,targets$Time,targets$Details,sep=".")
sampleNames(RGset) <- targets$ID
################ Annotate data ########################################
annotation(RGset)
RGset@annotation=c(array='IlluminaHumanMethylationEPIC', annotation='ilm10b2.hg19')
######################### Remove poor quality samples #################################
# Drop samples whose mean detection p-value is not below 0.05.
detP <- detectionP(RGset)
keep <- colMeans(detP) < 0.05
RGset <- RGset[,keep]
########### Processing and normalization of data ###########################
mSetSq <- preprocessSWAN(RGset)
# NOTE(review): despite the name, preprocessFunnorm() performs functional
# normalization, not a raw preprocessing -- confirm the intended object name.
MSet.raw <- preprocessFunnorm(RGset)
########### make the violin plot for publication ###########################
beta_df <- data.frame(getBeta(mSetSq))  # renamed from `var`, which shadowed base::var
subs <- beta_df[(1:100000),]  # first 100k probes only, to keep the plot tractable
# Treatment labels; assumes 14 samples alternating DMSO/GSK762 -- TODO confirm
# against the sample sheet ordering.
subs2 <- (rep(c("DMSO","GSK762"),7))
subs <- rbind(subs2, subs)
row.names(subs[1,]) <- c("Treatment")
# NOTE(review): melt() normally expects id *variable names*; passing the label
# row as `id` looks suspect -- verify the intended reshape.
df.m <- reshape2::melt(subs, id=subs[1,])
# Fixed: the scale_fill_manual() call was on its own line beginning with "+",
# which R parses as a separate (erroring) expression, so it was never added to
# the plot. A continuation "+" must end the previous line. Also dropped the
# stray `fill=subs` argument to ggplot(), which is ignored (not an aesthetic).
p <- ggplot(df.m, aes(x = variable, y = value)) +
  geom_violin() +
  scale_fill_manual(values = c("blue", "red"))
# NOTE(review): fun.y is deprecated in current ggplot2 (use `fun`); kept here
# for compatibility with the ggplot2 version in use.
p + stat_summary(aes(group=1),fun.y=mean, geom="point", size=10,shape=95,col=c("blue","red","blue","red","blue","red","blue","red","blue","red","blue","red","blue","red"))
|
bd35396a2d6b9646684310d81d35f0566a8fef15
|
7fd1e5f78328c67f0644bf7dafe7c308613dcc29
|
/R/group_project_R.R
|
4efd07e3436c8a30ae81b53ada7c6a9631d66386
|
[] |
no_license
|
yuywang1227/Stat-506-Project
|
9be3243270bcd11a797644a655f7cc1b296b714f
|
4aea873c43d52017cc3438ca2c8a5ae998067ec6
|
refs/heads/master
| 2020-09-25T23:17:08.755093
| 2019-12-12T07:11:12
| 2019-12-12T07:11:12
| 226,110,436
| 0
| 1
| null | 2019-12-10T13:50:07
| 2019-12-05T13:37:52
|
HTML
|
UTF-8
|
R
| false
| false
| 2,661
|
r
|
group_project_R.R
|
## Group project by group 6
## Stats 506, Fall 2019
## Group Member: Yehao Zhang, Yuying Wang
##
## In this project, the team is going to apply statistical methods to answer the following question:
##
## Do people with higher carbohydrate intake feel more sleepy during the day?
##
## Data: NHANES sleep questionnaire (SLQ_I), day-1 total nutrient intake
## (DR1TOT_I) and demographics (DEMO_I), all in SAS transport (.XPT) format.
## Method: proportional-odds (ordered) logistic regression via MASS::polr.
##
## Author: Yehao Zhang
## Updated: December 11, 2019
# set local directory
# NOTE(review): hard-coded absolute path; must be edited to run elsewhere.
setwd("C:/Users/zhang/Desktop/fall 2019/git/506/project")
# load packages
library(tidyverse)
library(data.table)
library(foreign)  # read.xport() for SAS transport files
library(MASS)     # polr() for ordered logistic regression
## Data
# Import data
sleep_disorder <- setDT(read.xport("SLQ_I.XPT"))
tot_nutrition_d1 <-setDT(read.xport("DR1TOT_I.XPT"))
demo <- setDT(read.xport("DEMO_I.XPT"))
# Data cleaning
# SLQ120 codes 0-4 = how often the respondent feels overly sleepy during the
# day; values above 4 (refused/don't know) are dropped by the filter below.
sf <- c("never","rarely","sometimes","often","almost always")
sleep_disorder = sleep_disorder[SLQ120 <= 4, .(respondent_ID = SEQN, sleep_hr = SLD012, sleepy_freq = factor(SLQ120,levels=0:4,labels=sf))]
# p_CHO = share of calories from carbohydrate (4 kcal per gram of CHO),
# bucketed against the 45%-65% suggested range.
tot_nutrition = tot_nutrition_d1[, .(respondent_ID = SEQN, energy = DR1TKCAL, CHO = DR1TCARB)
][, .(p_CHO = CHO*4/energy), by = respondent_ID
][, CHO_level := "suggested range"
][p_CHO <= 0.45, CHO_level := "below range(<=0.45)"
][p_CHO >= 0.65, CHO_level := "above range(>=0.65)"
][, CHO_level := factor(CHO_level)
]
# NOTE(review): age filter >= 5 -- presumably the survey's eligibility cutoff
# for the sleep questionnaire; confirm against NHANES documentation.
demo = demo[RIDAGEYR >= 5, .(respondent_ID = SEQN, six_month = factor(RIDEXMON), gender = factor(RIAGENDR,levels=c(1,2),labels=c("male","female")), age = RIDAGEYR)]
# merge these three datasets (inner joins on respondent ID, then drop rows
# with any missing value)
sleep = merge(sleep_disorder, tot_nutrition, by = "respondent_ID") %>%
merge(. , demo, by = "respondent_ID") %>%
na.omit(.)
sleep # the cleaned data
## Ordered logistic regression
# fit ordered logit model
m <- polr(sleepy_freq ~ sleep_hr + CHO_level + six_month + gender + age, data = sleep, Hess = TRUE)
# brant test (checks the proportional-odds assumption)
#install.packages("brant")
library(brant)
brant(m)
# view a summary of the model
summary(m)
# store the coefficients
mcoef <- coef(summary(m))
# calculate p values (two-sided normal approximation from the t values)
p <- pnorm(abs(mcoef[, "t value"]), lower.tail = FALSE) *2
mcoef = cbind(mcoef, "pvalue" = p)
mcoef
# 95% CI (profile likelihood)
(ci <- confint(m))
ci
# Interpretation: e.g. for CHO_level, we would say that for a one unit increase in CHO_level (i.e., going from 0 to 1),
# we expect a 0.11 increase in the expected value of apply on the log odds scale, given all of the other variables in the model are held constant.
# odds ratios
exp(coef(m))
# odds ratios & CI
ci = exp(cbind(OR = coef(m), ci))
ci
|
02b8da72754f1ba18e708bbaa61709dc0918c232
|
72d9009d19e92b721d5cc0e8f8045e1145921130
|
/ICcalib/man/CalcNpmleRSP.Rd
|
ea05a3718760b1a4bb704e597466d85aec10942a
|
[] |
no_license
|
akhikolla/TestedPackages-NoIssues
|
be46c49c0836b3f0cf60e247087089868adf7a62
|
eb8d498cc132def615c090941bc172e17fdce267
|
refs/heads/master
| 2023-03-01T09:10:17.227119
| 2021-01-25T19:44:44
| 2021-01-25T19:44:44
| 332,027,727
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,998
|
rd
|
CalcNpmleRSP.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CalcNpmleRSP.R
\name{CalcNpmleRSP}
\alias{CalcNpmleRSP}
\title{Calculating the probabilities of positive binary exposure status at a given time point using a nonparametric risk-set calibration models}
\usage{
CalcNpmleRSP(w, w.res, point, obs.tm)
}
\arguments{
\item{w}{A matrix of time points when measurements on the binary covariate were obtained.}
\item{w.res}{A matrix of measurement results of the binary covariate. Each measurement corresponds to the time points in \code{w}}
\item{point}{The time point at which the probabilities are estimated}
\item{obs.tm}{Vector of observed main event time or censoring time}
}
\value{
A vector of estimated probabilities of positive exposure status at time \code{point}.
}
\description{
For a given time point, calculate the probability of positive exposure value for multiple observations (participants).
The function first fits the nonparametric risk-set calibration models at each main event time point and then calculates the probabilities
of positive binary exposure status.
}
\details{
This function calculates the NPMLE at each main event time point and then provides the estimated probabilities for positive
exposure status at time \code{point}.
}
\examples{
# Simulate data set
sim.data <- ICcalib:::SimCoxIntervalCensSingle(n.sample = 200, lambda = 0.1,
alpha = 0.25, beta0 = log(0.5),
mu = 0.2, n.points = 2,
weib.shape = 1, weib.scale = 2)
# Calculate the conditional probabilities of binary covariate=1 at time one
# Unlike CalcNpmle, CalcNpmleRSP includes the calibration model fitting
probs <- CalcNpmleRSP(w = sim.data$w, w.res = sim.data$w.res, point = 1,
obs.tm = sim.data$obs.tm)
summary(probs)
}
\seealso{
\code{\link[icenReg]{ic_np}}
}
|
da840f36178fcdd9def7cbf666a3187e3b86c530
|
315af6191046d18fa8856566add85b1586b052f4
|
/Code/Environmental Factors/environ_flux_data.R
|
3363bebeec26b2ce1702a266fbde2f62543bd716
|
[] |
no_license
|
twilli2/n2oflux
|
40e1fbf12919b33d366800eacec62049d0347f97
|
73f0e143bfab81f458f7e1c2d02d9df228ed4226
|
refs/heads/master
| 2021-01-03T14:43:57.948139
| 2020-02-12T21:01:23
| 2020-02-12T21:01:23
| 240,113,520
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,347
|
r
|
environ_flux_data.R
|
library(tidyr)
flux_data$date <- as.Date(flux_data$date)
flux_env <- left_join(flux_data, joined_env_data, by = c("date","field"))
summary(flux_env)
cp_n2o <- filter(flux_env, compound == 'n2o', plot == 'C'|plot == 'P') %>%
select_all() %>%
group_by(date, field, plot) %>%
summarize(mean_flux = mean(flux, na.rm = T), median_flux = median(flux,na.rm = T), mean_temp = mean(max_temp_5, na.rm = T),
mean_precip = mean(total_precip, na.rm = T), mean_moist = mean(avg_moist,na.rm = T)) %>%
group_by(field,plot) %>%
summarize(mean_flux = mean(mean_flux, na.rm = T), median_flux = median(median_flux, na.rm = T),temp = mean(mean_temp, na.rm = T), precip = mean(mean_precip,na.rm = T), moist = mean(mean_moist, na.rm = T))
cp_n2o
a <- flux_data %>%
filter(compound == 'co2') %>%
group_by(field, plot) %>%
summarize(median = median(flux, na.rm = T), mean = mean(flux, na.rm = T))
b <- flux_data %>%
filter(compound == 'n2o', plot == "C"|plot == "P")
c <- flux_data %>%
filter(compound == 'n2o', plot != "C" | plot != "P")
%>%
group_by(field, plot) %>%
mutate(sd = sd(flux, na.rm = T)) %>%
summarize(median = median(flux, na.rm = T), mean = mean(flux, na.rm = T), sd = sd(flux))
ggplot(b) +
geom_boxplot(aes(x = plot, y = flux), outlier.shape = NA, notch = T)+
coord_cartesian(ylim = c(-3, 6))+
facet_grid(~field)
ggplot(c) +
geom_boxplot(aes(x = plot, y = flux), outlier.shape = NA, notch = T)+
coord_cartesian(ylim = c(-3, 6))+
facet_grid(~field)
ggplot(joined_env_data) +
geom_line(aes(x = date, y = total_precip)) +
geom_smooth(aes(x= date, y = max_temp_5)) +
facet_wrap(~field)
flux_data$date <- as.Date(flux_data$date)
cp_n2o_sum$date <- as.Date(cp_n2o_sum$date)
flux_env<- left_join(cp_n2o_sum, joined_env_data, by = c("date", "field"))
ggplot(flux_env) +
geom_line(aes(x = date, y = total_precip)) +
geom_line(aes(x = date, y = mean_flux, color = plot)) +
geom_smooth(aes(x = date, y = max_temp_5)) +
facet_wrap(~field)
flux_env <- left_join(p, cp_n2o, by = c("date", "field"))
flux_env
ggplot(flux_env) +
geom_line(aes(x = date, y = total_precip)) +
geom_line(aes(x = date, y = mean_flux, color = plot), na.rm = T, size = 1) +
geom_smooth(aes(x = date, y = max_temp_5)) +
geom_point(aes(x = date, y = soiltemp), color = "red") +
facet_wrap(~field)
|
9f1d644915e3adfc0850d0df548c7d7a9744596c
|
dab05df8a6ddf8947638c2bc2c3b5946d13771e2
|
/R/production_possibility_frontier.R
|
5e1f7e41877f2462a612b06d6402e9e8a0fbe6f4
|
[
"MIT"
] |
permissive
|
tpemartin/econR
|
2011047b7ef100b27fffd99148a7698ce7f99930
|
5df4fd5bf61b417b9860b3efc7ff20339e694fe4
|
refs/heads/master
| 2023-09-05T03:34:20.354596
| 2021-11-23T12:22:42
| 2021-11-23T12:22:42
| 335,521,237
| 0
| 4
| null | 2021-03-17T07:18:16
| 2021-02-03T05:48:23
|
HTML
|
UTF-8
|
R
| false
| false
| 1,605
|
r
|
production_possibility_frontier.R
|
#' Construct PPF
#'
#' Builds a small "production possibility frontier" object: an environment
#' holding a labour endowment, two single-input production functions, the
#' implied x/y output trade-off, and helpers to plot and update it.
#'
#' @param endowment_L A number. Total labour available.
#' @param produce_x A production function of L input (labour -> units of x).
#' @param produce_y A production function of L input (labour -> units of y).
#'
#' @return An environment with above 3 input arguments, a plot_PPF function
#'   and an update_endowmentL function. After \code{update_endowmentL} is
#'   called, \code{plot_PPF} overlays the new frontier (red) on the previous
#'   one (black).
#' @export
#'
#' @examples
#' produce_x <- function(L) 3*L
#' produce_y <- function(L) 4*L
#'
#' PPF_A <- get_PPF(20, produce_x, produce_y)
#' PPF_A$plot_PPF()
#' PPF_A$update_endowmentL(30)
#' PPF_A$plot_PPF()
get_PPF <- function(endowment_L, produce_x, produce_y){
  # Fail fast if ggplot2 is missing. requireNamespace() + namespaced calls
  # replaces the former require() call, which silently returned FALSE on
  # failure and attached ggplot2 to the search path as a side effect.
  if (!requireNamespace("ggplot2", quietly = TRUE)) {
    stop("get_PPF() requires the 'ggplot2' package.", call. = FALSE)
  }
  PPFenv <- new.env()
  PPFenv$endowment_L <- endowment_L
  PPFenv$produce_x <- produce_x
  PPFenv$produce_y <- produce_y
  # Recompute the x/y trade-off curve from the current endowment and store it
  # in PPFenv$xy_tradeoff.
  PPFenv$`.yield_xyTradeoff` <- function(){
    # 100 interior labour allocations; the endpoints are dropped to avoid the
    # degenerate all-x / all-y corners.
    Lx <- seq(0, PPFenv$endowment_L, length.out = 102)
    Lx <- Lx[-c(1, length(Lx))]
    Ly <- PPFenv$endowment_L - Lx
    PPFenv$xy_tradeoff <-
      data.frame(
        x = PPFenv$produce_x(Lx),
        y = PPFenv$produce_y(Ly)
      )
  }
  PPFenv$`.yield_xyTradeoff`()
  # Plot the current frontier. `...` is accepted for call compatibility but
  # is currently unused.
  PPFenv$plot_PPF <- function(...){
    ggplot2::ggplot() +
      ggplot2::geom_line(
        data = PPFenv$xy_tradeoff,
        mapping = ggplot2::aes(x = x, y = y)
      )
  }
  # Replace the endowment, recompute the frontier, and redefine plot_PPF so it
  # shows the new frontier (red) against the previous one (black).
  PPFenv$update_endowmentL <- function(endowment_L){
    PPFenv$endowment_L <- endowment_L
    xyOld <- PPFenv$xy_tradeoff  # captured by the closure below
    PPFenv$`.yield_xyTradeoff`()
    PPFenv$plot_PPF <- function(...){
      ggplot2::ggplot() +
        ggplot2::geom_line(
          data = PPFenv$xy_tradeoff,
          mapping = ggplot2::aes(x = x, y = y), color = "red"
        ) +
        ggplot2::geom_line(
          data = xyOld,
          mapping = ggplot2::aes(x = x, y = y)
        )
    }
  }
  PPFenv
}
|
73814a3efd6989366d7782c9131a65c593c4c91e
|
543c541ff5cf3342f32480bd2958770dcaa3ad63
|
/US-EU-Soft-Commodity/R Code/First Differenced VAR.R
|
e438a78caf4fef6262e705174e65257f08fb5e6c
|
[] |
no_license
|
jzt5132/Time-Series-Stuff
|
c439deecddd8aea573d1f6d85ad7292956ce5935
|
d4f6f0a69900fcdd0c94a3907705aa08c7503e39
|
refs/heads/master
| 2016-09-08T01:50:16.127520
| 2015-09-16T08:43:00
| 2015-09-16T08:43:00
| 41,987,618
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 497
|
r
|
First Differenced VAR.R
|
###First Differenced VAR###
# This program determines the bivariate first-differenced VAR (lag order 2) of
# all time series and conducts a Granger causality test on each ordered pair.
# The p-values are returned in the 7x7 matrix `csl`, with the diagonal left NA.
#
# NOTE(review): depends on a 7-column time-series object `a` defined elsewhere,
# and on VAR()/causality() from the `vars` package being attached.
# NOTE(review): causality() is called without `cause=`; per the vars-package
# default this tests whether the *first* variable in the VAR Granger-causes
# the rest -- confirm. Under that reading both loops place series i first, so
# csl[i, j] holds the p-value for "series i Granger-causes series j"
# (first loop fills the upper triangle, second loop the lower).
csl <- matrix(data = NA,nrow = 7,ncol = 7)
for (i in 1:6)
{ for (j in (i+1):7)
{
v <- VAR(cbind(diff(a[,i]),diff(a[,j])),p = 2)
csl[i,j] <- causality(v)$Granger$p.value
}}
for (j in 1:6)
{ for (i in (j+1):7)
{
v <- VAR(cbind(diff(a[,i]),diff(a[,j])),p = 2)
csl[i,j] <- causality(v)$Granger$p.value
}}
csl
|
2ec4c593b753403ebcc8a53b79aa5faaf2018822
|
db4118bc4c3fa27bce4c2d5039facbb9072479c0
|
/coevo/h5_n1/h5_n1.R
|
6f668d0d9e5ab96f132872c6bf7df8288bbc101a
|
[] |
no_license
|
yaotli/Packaging_Type
|
166d4a4b6b8d20daab88612bc497e02d9e8fc038
|
4dba547aed7105c13f5bf4042c121f2289081ae1
|
refs/heads/master
| 2021-01-23T01:26:21.502910
| 2019-06-17T03:44:33
| 2019-06-17T03:44:33
| 85,908,055
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,377
|
r
|
h5_n1.R
|
source( "./function.coevo.R" )
#source( "./ha_classi.R" )
source( "./f.aa_recon.R")
require(ggtree)
require(ape)
require(tidyverse)
require(stringr)
H5_treefile = "./gsgd/processed/tree/raxml_c2344_2657/raxml_pH5_2657.tre"
N1_trefile = "./raw_data/processed/tree/fasttree_pN1_4696_.tre"
H5_seq = "./gsgd/processed/pH5_c2344_2657.fasta"
N1_seq = "./raw_data/processed/pN1_4696_trim2.fasta"
# pairing H5-N1 -----------------------------------------------------
# Match N1 tree tips to the previously classified H5 strains by stripping the
# leading accession token and "|" separators from the tip labels, then carry
# the H5 group assignment over onto the N1 MDR coordinates.
n1_tre <- read.nexus( N1_trefile )
n1_root <- length( n1_tre$tip.label ) + 1
n1_dismx <- dist.nodes( n1_tre )
n1_table <- fortify( n1_tre )
# Indices of N1 tips whose (cleaned) label matches any classified H5 id.
n1_i <- grep( paste0( gsub( "^[0-9A-Za-z]+|\\|", "", ha_mdr.1$id ), collapse = "|" ), gsub( "\\|", "", n1_table$label ) )
# length( ha_mdr.1$id) == length(n1_i)
n1_id <- gsub( "'", "", n1_table$label[ n1_i ] )
n1_mdr <- treeMDR( n1_i, n1_dismx )
# ha_na[k] = row of ha_mdr.1 matching the k-th paired N1 tip.
ha_na <- match( gsub( "^[0-9A-Za-z]+|\\|", "", n1_id ), gsub( "^[0-9A-Za-z]+|\\|", "", ha_mdr.1$id ) )
n1_mdr$ix = n1_i
n1_mdr$group = ha_mdr.1$group[ ha_na ]
n1_mdr$id = gsub( "^[0-9A-Za-z]+|\\|", "", n1_id )
n1_mdr$type = "N"
n1_mdr$sero = "H5N1"
ha_mdr.1$type = "H"
# Exploratory visualisations kept for reference (V1-V5):
# # V1
# ggplot( n1_mdr, aes( x = Dim_1, y = Dim_2, label = id ) ) + geom_point( aes(color = group), alpha = 0.5, size = 5)
#
# # V2
# ggplot( rbind( ha_mdr.1, n1_mdr ), aes( x = Dim_1, y = Dim_2, label = id ) ) +
# geom_point( aes(color = group, alpha = type ), size = 5) +
# geom_line( aes(group = id), size = 0.1) +
# geom_rect( aes( xmin = h5n1_g1[1], xmax = h5n1_g1[2], ymin = h5n1_g1[3], ymax = h5n1_g1[4] ), inherit.aes = FALSE, color = "red", fill = NA) +
# geom_rect( aes( xmin = h5n1_g2[1], xmax = h5n1_g2[2], ymin = h5n1_g2[3], ymax = h5n1_g2[4] ), inherit.aes = FALSE, color = "red", fill = NA)
# # coord_cartesian( xlim = c(0, 0.05), ylim = c(0, 0.02) ) +
# # geom_text( aes(alpha = type), size = 2, vjust = 1) +
# # scale_y_continuous( limits = c( -0.1, 0.2) )
#
# # V3
# N1_trein = treeio::read.nexus( N1_trefile )
# N1_tredf = fortify( N1_trein )
# N1_tredf$shape = NA
# N1_tredf$group = NA
#
# N1_tredf$shape[ n1_mdr$ix ] = 1
# ggtree( N1_trein, right = TRUE ) %<+% N1_tredf + geom_tippoint( aes( shape = I(shape) ), color = "red", size = 5, alpha = 0.5 )
#
# # V4
# N1_tredf$group[ n1_mdr$ix ] = n1_mdr$group
# ggtree( N1_trein, right = TRUE ) %<+% N1_tredf + geom_tippoint( aes( shape = I(shape), color = group ), size = 5, alpha = 0.5 )
#
# # V5
# N1_tredf$shape[ g1_out$ix ] = 19
# ggtree( N1_trein, right = TRUE ) %<+% N1_tredf + geom_tippoint( aes( shape = I(shape), color = group ), size = 5 )
# grouping
# Bounding boxes (xmin, xmax, ymin, ymax) in MDR ordination space used to
# select the two clusters (values chosen from the plots above).
h5n1_g1 <- c( 0, 0.01, 0.0075, 0.0175 )
h5n1_g2 <- c( -0.075, -0.025, -0.03, -0.01 )
# extract 1: group-1 tips inside box g1
g1_out =
n1_mdr %>%
filter( Dim_1 > h5n1_g1[1] & Dim_1 < h5n1_g1[2] ) %>%
filter( Dim_2 > h5n1_g1[3] & Dim_2 < h5n1_g1[4] ) %>%
filter( group == 1 ) %>%
select( ix )
# extract 2: group-2 tips inside box g2
g2_out =
n1_mdr %>%
filter( Dim_1 > h5n1_g2[1] & Dim_1 < h5n1_g2[2] ) %>%
filter( Dim_2 > h5n1_g2[3] & Dim_2 < h5n1_g2[4] ) %>%
filter( group == 2 ) %>%
select( ix )
# output
# Write the paired HA/NA sequences of each selected cluster to FASTA files.
g1_na <- gsub( "'", "", n1_table$label )[ g1_out$ix ]
g1_ha <- gsub( "'", "", ha_table$label[ ha_mdr.1$ix[ ha_na[ match( g1_out$ix, n1_mdr$ix ) ] ] ] )
leafEx( H5_seq, g1_ha, seq.out = "./h5_n1/pHA_h5n1_g1.fasta")
leafEx( N1_seq, g1_na, seq.out = "./h5_n1/pNA_h5n1_g1.fasta" )
#
g2_na <- gsub( "'", "", n1_table$label )[ g2_out$ix ]
g2_ha <- gsub( "'", "", ha_table$label[ ha_mdr.1$ix[ ha_na[ match( g2_out$ix, n1_mdr$ix ) ] ] ] )
leafEx( H5_seq, g2_ha, seq.out = "./h5_n1/pHA_h5n1_g2.fasta")
leafEx( N1_seq, g2_na, seq.out = "./h5_n1/pNA_h5n1_g2.fasta")
# aa reconstruction -----------------------------------------------------
# Ancestral amino-acid reconstruction for each group/segment directory.
# sam1
.aa_recon( folderdir = "./h5_n1/dS/h5n1_g1_h5/" )
.aa_recon( folderdir = "./h5_n1/dS/h5n1_g1_n1/" )
# sam2
.aa_recon( folderdir = "./h5_n1/dS/h5n1_g2_h5/" )
.aa_recon( folderdir = "./h5_n1/dS/h5n1_g2_n1/" )
#
# 2nd samples -----------------------------------------------------
# Re-root the group-2 alignments against the full trees before re-running
# the reconstruction on the "_r" directories.
.root_seq( seqfile = "./h5_n1/pHA_h5n1_g2.fasta", H5_treefile, H5_seq )
.root_seq( seqfile = "./h5_n1/pNA_h5n1_g2.fasta", N1_trefile, N1_seq )
# aa reconstruction -----------------------------------------------------
# sam2
# NOTE(review): "dS_r//" below contains a doubled slash -- harmless on most
# file systems but probably a typo; confirm the intended directory.
.aa_recon( folderdir = "./h5_n1/dS_r//h5n1_g2_h5/" )
.aa_recon( folderdir = "./h5_n1/dS_r/h5n1_g2_n1/" )
|
1caddf117202cff05e88e106fa28878e0935a68d
|
b39713726afbf52fd03c8b3470e37f9c52e2085f
|
/plot2.R
|
42e35f3c05e73985fb2f738e1234fb6947d16de7
|
[] |
no_license
|
tesszty/ExData_Plotting1
|
f76eb3a192fa358a3205985a57c108bba4cb720e
|
d57ccc3990f84eb3f11cf3a0a3593165ef7f11e5
|
refs/heads/master
| 2020-12-30T22:57:45.442517
| 2016-03-27T09:32:12
| 2016-03-27T09:32:12
| 54,650,810
| 0
| 0
| null | 2016-03-24T15:03:29
| 2016-03-24T15:03:29
| null |
UTF-8
|
R
| false
| false
| 179
|
r
|
plot2.R
|
# Plot 2: Global Active Power vs. time, copied to a 480x480 PNG.
# NOTE(review): `mydata` must already be loaded elsewhere with Time and
# Global_active_power columns -- confirm against the loading script, and
# confirm whether Time alone (without Date) yields the intended x axis.
with(mydata,plot(Time,Global_active_power,ylab="Global Active Power (kilowatts)",xlab="",type="o",pch=".")
)
# Copy the on-screen plot to a PNG device, then close that device.
dev.copy(png,'plot2.png', width = 480, height = 480)
dev.off()
|
bfa1d89bd678232ed98994e16cc1c41e1400abc8
|
c238ecf25d51558f4e57533422810b05f4c9bb6b
|
/plot4.R
|
94f7e1ab632187b784aa93f46d4d8b047a3532f8
|
[] |
no_license
|
franciscoalvaro/ExData_Plotting1
|
2c1052e6f93870e87e64cf1c8727fa1471f97872
|
0446558c7658c3bec99a64f6cc7daebabb0294f9
|
refs/heads/master
| 2021-01-15T22:33:53.546223
| 2015-02-08T21:06:24
| 2015-02-08T21:06:24
| 30,434,735
| 0
| 0
| null | 2015-02-06T21:55:27
| 2015-02-06T21:55:26
| null |
UTF-8
|
R
| false
| false
| 2,504
|
r
|
plot4.R
|
library(lubridate)

# Plot 4: 2x2 panel (global active power, voltage, energy sub-metering,
# global reactive power) over 1-2 Feb 2007 for the household power data.
#
# The raw file is ";"-separated and marks missing values with "?"; because
# read.table() is given no na.strings/colClasses, the measurement columns are
# assumed to arrive as factors (pre-R-4.0 stringsAsFactors default) and must
# be converted back through their levels -- TODO confirm under the R version
# in use.

# Convert a factor whose levels are numeric strings to numeric values.
# (as.numeric() applied directly to a factor yields the level codes instead.)
fac2num <- function(f) as.numeric(levels(f))[f]

mydata <- read.table("household_power_consumption.txt", header=TRUE,sep=";")
par(mfrow = c(2, 2))

# Panel 1 (top-left): Global active power vs. time
selection<-c("Global_active_power","Date","Time")
plot1<-mydata[selection]
plot2<-plot1[which((plot1$Date == "1/2/2007") | (plot1$Date == "2/2/2007")),]
plot2$DateTime <- strptime(paste(plot2$Date, plot2$Time), "%d/%m/%Y %H:%M:%S")
plot(plot2$DateTime, fac2num(plot2$Global_active_power), type = "l", lty = "solid",ylab="Global Active Power (kilowatts)",xlab="")

# Panel 2 (top-right): Voltage vs. time
selection1<-c("Voltage","Date","Time")
plotVoltage1<-mydata[selection1]
plotVoltage2<-plotVoltage1[which((plotVoltage1$Date == "1/2/2007") | (plotVoltage1$Date == "2/2/2007")),]
plotVoltage2$DateTime <- strptime(paste(plotVoltage2$Date, plotVoltage2$Time), "%d/%m/%Y %H:%M:%S")
plot(plotVoltage2$DateTime, fac2num(plotVoltage2$Voltage), type = "l", lty = "solid",ylab="Voltage",xlab="datetime")

# Panel 3 (bottom-left): the three energy sub-metering series overlaid.
# par(new=TRUE) replots onto the same panel; the shared ylim keeps axes aligned.
selection<-c("Sub_metering_1","Sub_metering_2","Sub_metering_3","Date","Time")
plotSubmetering<-mydata[selection]
plotSubmetering<-plotSubmetering[which((plotSubmetering$Date == "1/2/2007") | (plotSubmetering$Date == "2/2/2007")),]
plotSubmetering$DateTime <- strptime(paste(plotSubmetering$Date, plotSubmetering$Time), "%d/%m/%Y %H:%M:%S")
plot(plotSubmetering$DateTime, fac2num(plotSubmetering$Sub_metering_1), type = "l", ylim=c(0,40),lty = "solid",ylab="Energy Submetering",xlab="")
par(new=TRUE)
plot(plotSubmetering$DateTime, fac2num(plotSubmetering$Sub_metering_2), type = "l", ylim=c(0,40),lty = "solid",col = "red",ylab="Energy Submetering",xlab="")
par(new=TRUE)
# Fixed: Sub_metering_3 previously used plain as.numeric(), which returns the
# factor's level codes rather than the measured values when the column is a
# factor; now converted consistently with the other two series.
plot(plotSubmetering$DateTime, fac2num(plotSubmetering$Sub_metering_3), type = "l", ylim=c(0,40),lty = "solid",col = "blue",ylab="Energy Submetering",xlab="")
# pch = 45 draws a "-" glyph as a pseudo line marker in the legend.
legend("topright", pch = 45, col = c("black","red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2","Sub_metering_3"))

# Panel 4 (bottom-right): Global reactive power vs. time
selection<-c("Global_reactive_power","Date","Time")
plotReactive1<-mydata[selection]
plotReactive2<-plotReactive1[which((plotReactive1$Date == "1/2/2007") | (plotReactive1$Date == "2/2/2007")),]
plotReactive2$DateTime <- strptime(paste(plotReactive2$Date, plotReactive2$Time), "%d/%m/%Y %H:%M:%S")
plot(plotReactive2$DateTime, fac2num(plotReactive2$Global_reactive_power), type = "l", lty = "solid",ylab="Global_reactive_power",xlab="datetime")
|
31fbe2551024809d6bc29746d2c1322d0e77bfc3
|
6c800fc94df87bac4cd11bbe910bf483b85f6871
|
/helpers/VisualMarketsTheme.R
|
078cbd37bb86a8b5b48f5111d8cc7c79f19c5e16
|
[] |
no_license
|
visualmarkets/visualmarkets
|
f77141fb9aad92960e0898b228473f044e0d7f93
|
e586058813c6fa65f7aa53cc30737cf98004fc23
|
refs/heads/master
| 2020-04-02T03:42:17.618212
| 2019-01-25T00:21:14
| 2019-01-25T00:21:14
| 153,980,126
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,923
|
r
|
VisualMarketsTheme.R
|
# Highcharts theme for Visual Markets: an Economist-inspired palette on a
# white background. Any extra arguments are treated as hc_theme() options and
# merged on top of the base theme.
hc_theme_vm <-
  function (...) {
    palette <- c("#6794a7", "#014d64", "#76c0c1", "#01a2d9", "#7ad2f6",
                 "#00887d", "#adadad", "#7bd3f6", "#7c260b", "#ee8f71",
                 "#76c0c1", "#a18376")
    axis_line_col <- "#d5e4eb"  # shared colour for y-axis grid/tick/line
    vm_theme <- list(
      colors = palette,
      chart = list(
        backgroundColor = "#ffffff",
        style = list(fontFamily = "Droid Sans", color = "#3C3C3C")
      ),
      title = list(align = "left", style = list(fontWeight = "bold")),
      subtitle = list(align = "left"),
      yAxis = list(
        gridLineColor = axis_line_col,
        lineColor = axis_line_col,
        minorGridLineColor = axis_line_col,
        tickColor = axis_line_col,
        tickWidth = 1,
        title = list(style = list(color = "#A0A0A3"))
      ),
      tooltip = list(
        backgroundColor = "#FFFFFF",
        borderColor = "#76c0c1",
        style = list(color = "#000000")
      ),
      legend = list(
        itemStyle = list(color = "#3C3C3C"),
        itemHiddenStyle = list(color = "#606063")
      ),
      credits = list(style = list(color = "#666")),
      labels = list(style = list(color = "#D7D7D8")),
      drilldown = list(
        activeAxisLabelStyle = list(color = "#F0F0F3"),
        activeDataLabelStyle = list(color = "#F0F0F3")
      ),
      navigation = list(
        buttonOptions = list(
          symbolStroke = "#DDDDDD",
          theme = list(fill = "#505053")
        )
      ),
      legendBackgroundColor = "rgba(0, 0, 0, 0.5)",
      background2 = "#505053",
      dataLabelsColor = "#B0B0B3",
      textColor = "#C0C0C0",
      contrastTextColor = "#F0F0F3",
      maskColor = "rgba(255,255,255,0.3)"
    )
    vm_theme <- structure(vm_theme, class = "hc_theme")
    # Merge user overrides on top of the base theme, if any were supplied.
    if (length(list(...)) > 0) {
      vm_theme <- hc_theme_merge(vm_theme, hc_theme(...))
    }
    vm_theme
  }
|
3606bcdc3f2bcb3b1324a160915584563dfc7384
|
9c90c51d76a54580b67c6a6d8292facc693322b0
|
/results/TEplot.R
|
55001968ddd4388e84c9ae82ba482ce266133d3f
|
[] |
no_license
|
altingia/REpipe
|
7dd8fde51ab06989507c3c886cf56fc00490c7a6
|
a0dc876ece9f50a3585ef84658f4baad2a1837d1
|
refs/heads/master
| 2021-09-15T14:44:01.936941
| 2018-06-04T19:18:27
| 2018-06-04T19:18:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 431
|
r
|
TEplot.R
|
##PLOTTING RESULTS FROM MULTIPLE SPECIES (on JeanLuc)
# NOTE(review): hard-coded path; must be edited to run elsewhere.
setwd("~/Copy/TAXON/results/combine")

# Read the combined REpipe results.
# Fixed: the file was previously read into a variable named `table` (which
# also shadows base::table) while every subsequent transform and plot
# referenced an undefined `rawdata`; one consistent name is used throughout.
rawdata <- read.csv(file="REpipeResults.csv")

#create total read column
rawdata <- transform(rawdata, totalreads = mappedreads + unmappedreads)
#create nuclear reads column (remove organellar)
rawdata <- transform(rawdata, nucreads = totalreads - orgreads)

#plotting
# with() instead of attach() to avoid polluting the search path.
with(rawdata, {
  plot(totalreads, contigs)
  plot(totalreads, repeatreads)
})
|
c86d7db093b73a6906bcb869a7f028fb4a1858cd
|
20bfcff74f158557d50f1293c8f70404ece0d5a5
|
/glmPR/R/RcppExports.R
|
60276ae70fc1d2aeaf0cd283824eac5d948690b6
|
[] |
no_license
|
Xia-Zhang/Poisson-Regression
|
76d047ccae6300841906929f5cfc875b4ab9258b
|
82ed7237db8cbade82b1dcf3cc36a40cbec0e2a0
|
refs/heads/master
| 2021-01-18T07:26:29.615945
| 2017-05-11T15:37:48
| 2017-05-11T15:37:48
| 84,288,908
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 251
|
r
|
RcppExports.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# Thin auto-generated Rcpp wrapper: forwards all arguments to the compiled
# C++ routine 'glmPR_glmPR' (presumably a penalized Poisson regression fit;
# the actual semantics live in the package's C++ source).
#   X       - design matrix
#   y       - response vector
#   lambda  - penalty weight, default 0.5
#   threads - worker thread count passed to the backend, default 4L
# NOTE(review): file is generated by Rcpp::compileAttributes() -- do not
# hand-edit the call itself.
glmPR <- function(X, y, lambda = 0.5, threads = 4L) {
    .Call('glmPR_glmPR', PACKAGE = 'glmPR', X, y, lambda, threads)
}
|
7e101a34e1ab22bc1658537642d2b7800b50ee1c
|
e835f60e7ad4be41d40293d58e157b5689ae9525
|
/cachematrix.R
|
600edfa7341b4801ca229d9893ca1b661104e067
|
[] |
no_license
|
GutsIkari/ProgrammingAssignment2
|
01e076c9526b993f2b78cb6d53db9d82e4a09cfc
|
1a44769dc1ffe6a30398b57f4b91db697e8202fe
|
refs/heads/master
| 2021-01-18T11:37:08.751676
| 2015-06-16T13:08:06
| 2015-06-16T13:08:06
| 37,523,162
| 0
| 0
| null | 2015-06-16T10:20:25
| 2015-06-16T10:20:25
| null |
UTF-8
|
R
| false
| false
| 1,123
|
r
|
cachematrix.R
|
## The purpose of makeCacheMatrix() is to be able to
## produce a matrix which is able to cache it's own
## inverse and to define functions which will allow
## cacheSolve() to either reproduce the inverses, or to
## simply calculate them if the inverse is defined as NULL
## makeCacheMatrix() will create a matrix which will cache
## the inverse of it's values. It will define functions
## to set and get the matrix and the inverse which can
## be used by the cacheSolve() function
makeCacheMatrix <- function(x = matrix()) {
    # Wrap a matrix together with a cache slot for its inverse.
    # Returns accessors: set/get for the matrix, setinv/getinv for the
    # cached inverse.  Replacing the matrix via set() invalidates the cache.
    cached <- NULL
    set <- function(new_matrix) {
        x <<- new_matrix
        cached <<- NULL
    }
    get <- function() x
    setinv <- function(inverse_value) cached <<- inverse_value
    getinv <- function() cached
    list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## cacheSolve() will either retrieve the inverse cached by
## makeCacheMatrix utilising lexical scoping, or it will
## calculate the inverse and return it
cacheSolve <- function(x, ...) {
inv<- x$getinv()
if (!is.null(inv)){
message("getting cached data")
return(inv)
}
mat.data<- x$get()
inv<- solve(mat.data, ...)
x$setinv(inv)
return(inv)
}
|
a3a7bac78b73f95113fd32c37c3c8ae4fce91b5b
|
e9e0be3a532b12ed9a36e4f0d9254deaa209b38e
|
/inst/manuscript/MALAT1/Code/malat1_DataPreprocessing.R
|
89cda8c9f66cbe22220e6e4f8c4db45bd31150d4
|
[] |
no_license
|
Leonrunning/scTenifoldKnk
|
57da17d9e1a97d83bef406b0dce3dbb323342f27
|
09f4ebd2c5dffbd57a878c51f279334b9d83ff85
|
refs/heads/master
| 2023-06-30T14:02:26.167430
| 2021-07-30T18:32:22
| 2021-07-30T18:32:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,443
|
r
|
malat1_DataPreprocessing.R
|
library(Matrix)
library(Seurat)
library(scTenifoldKnk)
source('https://raw.githubusercontent.com/dosorio/utilities/master/singleCell/scQC.R')

# Run the shared QC -> normalize -> cluster pipeline on one 10x .h5 file and
# return the raw counts of cluster 0 (the dominant cluster at this coarse
# resolution), keeping only genes detected in >10% of its cells.
# Factoring this helper out removes the duplicated WT/KO pipeline of the
# original script; the individual calls and their order are unchanged.
processSample <- function(h5File) {
  counts <- Read10X_h5(h5File)
  counts <- scQC(counts, mtThreshold = 0.05)   # external QC helper sourced above
  sampleObj <- CreateSeuratObject(counts)
  sampleObj <- NormalizeData(sampleObj)
  sampleObj <- FindVariableFeatures(sampleObj)
  sampleObj <- ScaleData(sampleObj)
  sampleObj <- RunPCA(sampleObj, verbose = FALSE)
  sampleObj <- RunUMAP(sampleObj, dims = 1:20)
  # Neighbors/clusters are built on the 2-D UMAP embedding, as in the
  # original script.
  sampleObj <- FindNeighbors(sampleObj, reduction = 'umap', dims = 1:2)
  sampleObj <- FindClusters(sampleObj, resolution = 0.05)
  mainCluster <- subset(sampleObj, idents = 0)
  mainCluster <- mainCluster@assays$RNA@counts
  mainCluster[rowMeans(mainCluster != 0) > 0.1, ]
}

WT <- processSample('WT.h5')
KO <- processSample('KO.h5')

# Export count matrices plus gene / barcode name files for downstream use.
writeMM(WT, 'WT.mtx')
writeLines(rownames(WT), 'genesWT.txt')
writeLines(colnames(WT), 'barcodesWT.txt')
writeMM(KO, 'KO.mtx')
writeLines(rownames(KO), 'genesKO.txt')
writeLines(colnames(KO), 'barcodesKO.txt')
# MALAT1 <- scTenifoldKnk(WT, gKO = 'Malat1')
# save(MALAT1, file = 'betaMALATko.RData')
|
4c6e5b8c9affd13ce42660f1d618056ad5293ae4
|
cbdede81db4e81dc0372920d781b3b7e3b05e3e3
|
/Kiiru_1.R
|
bbec34650838a7cb1c31e558538603025c2fcc87
|
[] |
no_license
|
kiiru60/Regression-and-hypothesis-testing-
|
8718be2c014a9e3173019a3d6b6a404362d8641c
|
0b86f4eee7e7fbc467fa2bc54fc7b54628cf6d1d
|
refs/heads/master
| 2020-07-29T14:31:01.658629
| 2019-09-20T17:11:00
| 2019-09-20T17:11:00
| 209,842,552
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,265
|
r
|
Kiiru_1.R
|
#________________________________________________________________________________________________#
#************************************************************************************************#
#  --> The following setwd() command should be commented out until your code is ready to be
#      submitted. At that time, comment out the setwd() command in the 'Load Data' section.
# Set working directory to run code on Prof. Hamilton's machine
#setwd(paste0(loc.Teaching,'ECON270_2019Fall/Data'))
# List of sub-directories
#  /Data - folder with datasets for analysis (read-only)
#  /Submissions - folder for submitted .R files (write-only)
#________________________________________________________________________________________________#
#************************************************************************************************#
#------------------------------------------------------------------------------------------------#
# Load Packages ---------------------------------------------------------------------------------#
#install.packages("tidyverse")
library(tidyverse)
#------------------------------------------------------------------------------------------------#
# Load Data -------------------------------------------------------------------------------------#
# NOTE(review): hard-coded personal path -- runs only on the author's machine.
setwd('C:/Users/akkiiru/Desktop/myRdirectory')
getwd()
# Wooldridge wage data; the analysis below uses the `wage` and `IQ` columns.
regdata<-readRDS('Wooldridge_Wages.rds')
# Quick interactive look at the data -------------------------------------------------------------#
names(regdata)
head(regdata)
tail(regdata)
summary(regdata)
View(regdata)
# Begin analysis --------------------------------------------------------------------------------#
# Fit the simple regression of weekly wage on IQ.
fit <- lm(wage ~ IQ, data = regdata)
summary(fit)
# Scatter plot of the raw relationship.
plot(regdata$IQ, regdata$wage)
# Keep the summary object; its components feed the answers below.
sum.fit <- summary(fit)
names(sum.fit)
# Coefficient table (estimate, std. error, t, p).
sum.fit$coefficients
coefficients(sum.fit)
# a1: intercept and slope coefficients (the duplicate assignment in the
# original was removed).
a1 <- coefficients(fit)
coeff.data <- data.frame(sum.fit$coefficients)
str(coeff.data)
# F statistic of the fit.
sum.fit$fstatistic
# a5: R-squared; also echo the adjusted R-squared.
a5 <- sum.fit$r.squared
sum.fit$adj.r.squared
# a6: standard error of the slope coefficient (row "IQ", column "Std. Error").
# BUG FIX: the original used sum.fit$standarderror, which is not a component
# of a summary.lm object and silently returned NULL.
a6 <- sum.fit$coefficients["IQ", "Std. Error"]
# Residuals and fitted values.
fitted.residuals <- fit$residuals
yhat <- fit$fitted.values
# Residual plot against observation index.
plot(fitted.residuals, xlab='Observation', ylab='e', main='Residual Plot', col='red')
abline(0,0, col='blue')
# Residuals against observed wage.
plot(regdata$wage,fitted.residuals)
abline(0,0, col='blue')
# a2: predicted weekly wage at IQ = 120.
a2 <- predict(fit, data.frame(IQ = 120))
# AnswerC: expected wage difference between individuals with IQ 115 and 108.
answerA <- predict(fit, data.frame(IQ = 108))
answerB <- predict(fit, data.frame(IQ = 115))
AnswerC <- answerB - answerA
# a4: correlation coefficient between weekly wage and IQ.
a4 <- cor(regdata$wage, regdata$IQ)
# Regression of the first model's residuals on IQ; if the original model is
# well specified the slope here should be (numerically) zero.
newfit<- lm(fitted.residuals~IQ, data= regdata)
summary(newfit)
# Keep the summary object for its coefficient table.
sum.newfit<-summary(newfit)
names(sum.newfit)
# a7: intercept and slope of the residual regression.
sum.newfit$coefficients
coefficients(sum.newfit)
a7<-coefficients(newfit)
#------------------------------------------------------------------------------------------------#
# Print Results ---------------------------------------------------------------------------------#
#  For quantitative responses (myvar) use the following command
#    cat(paste0('a: ', '\n\n')); print(myvar); cat(paste0('', '\n\n'))
#
#  For qualitative responses use the following command
#    cat(paste0('a: answer to part a', '\n\n'))
# a: intercept and slope coefficients of wage ~ IQ.
cat(paste0('a: The intercept and slope coefficient', '\n\n')); print(a1); cat(paste0('', '\n\n'))
# b: predicted weekly wage at IQ = 120.
cat(paste0('b: The predicted weekly wages for someone with an IQ of 120.', '\n\n')); print(a2); cat(paste0('', '\n\n'))
# c: expected wage difference between IQ 108 and IQ 115.
cat(paste0('c: The expected difference in wages between two individuals who have IQs of 108 and 115 ', '\n\n')); print(AnswerC); cat(paste0('', '\n\n'))
# d: correlation between weekly wage and IQ.
cat(paste0('d: The correlation coefficient between weekly wages and IQ', '\n\n')); print(a4); cat(paste0('', '\n\n'))
# e: R-squared of the model.
cat(paste0('e: The R2 for this regression model.', '\n\n')); print(a5); cat(paste0('', '\n\n'))
# f: standard error of the slope coefficient.
cat(paste0('f: The standard error of the slope coefficient. ', '\n\n')); print(a6); cat(paste0('', '\n\n'))
# g: residual regression (residuals ~ IQ) intercept and slope.
cat(paste0('g: The intercept and slope coefficient as two variables in a single dataframe. ', '\n\n')); print(a7); cat(paste0('', '\n\n'))
# For qualitative responses use the following command
cat(paste0('a: I love Econometrics', '\n\n'))
|
546aeceee74598747a9286ca2126f1df08dbd393
|
0e290f17d1c7798abd4e3b4883827e0e83426956
|
/static_code.R
|
3e3db2bb717607a923e77d81b9cdf75770d43f87
|
[] |
no_license
|
siare1023/ST558-Project3
|
4dca4a531135a85757e0304983df895b377fdfa9
|
8c39a7ba63bf2bd2b4e81f60eb1edfb1a53e7383
|
refs/heads/main
| 2023-06-29T01:52:44.091783
| 2021-08-03T01:40:53
| 2021-08-03T01:40:53
| 389,371,813
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,524
|
r
|
static_code.R
|
# Scratch/static version of the Shiny app's modeling code.
# NOTE(review): read_csv/train/trainControl/postResample and %>% come from
# readr, caret and magrittr -- no library() calls appear in this fragment,
# so it presumably runs in a session where they are already attached.
raw_data_original <- read_csv("California_Houses.csv")
raw_data_original$Median_House_Value %>% summary()
# training and test data: 10% random training split, rest held out as test
training.percentage <- 0.1
set.seed(7)
training <- sample(1:nrow(raw_data_original), size = nrow(raw_data_original)*training.percentage)
test <- dplyr::setdiff(1:nrow(raw_data_original), training)
training.data <- raw_data_original[training, ]
test.data <- raw_data_original[test, ]
# pick aic predictors via stepwise selection
# only 1st order terms
fit.aic1 <- step(lm(Median_House_Value ~ ., data = training.data), direction = "both")
# 1st order + all pairwise interactions
fit.aic2 <- step(lm(Median_House_Value ~ .^2, data = training.data), direction = "both")
# MLR on the AIC-selected 1st-order terms, 10-fold CV, centered/scaled
set.seed(7)
fit.mlr1 <- train(fit.aic1$terms,
                  data = training.data,
                  method = "lm",
                  preProcess = c("center", "scale"),
                  trControl = trainControl(method = "cv", number = 10))
fit.mlr1$results
# held-out performance of model 1
predict.mlr1 <- postResample(predict(fit.mlr1, newdata = test.data), obs = test.data$Median_House_Value)
summary(fit.mlr1)
# MLR on the AIC-selected terms incl. interactions
set.seed(7)
fit.mlr2 <- train(fit.aic2$terms,
                  data = training.data,
                  method = "lm",
                  preProcess = c("center", "scale"),
                  trControl = trainControl(method = "cv", number = 10))
predict.mlr2 <- postResample(predict(fit.mlr2, newdata = test.data), obs = test.data$Median_House_Value)
summary(fit.mlr2)
# regression tree model (rpart) -- first with caret's default cp grid,
# then with an explicit cp grid
set.seed(7)
fit.regression.trial <- train(Median_House_Value ~ ., data = training.data,
                        method = "rpart",
                        preProcess = c("center", "scale"),
                        trControl = trainControl(method = "cv", number = 10))
set.seed(7)
fit.regression <- train(Median_House_Value ~ ., data = training.data,
                        method = "rpart",
                        preProcess = c("center", "scale"),
                        trControl = trainControl(method = "cv", number = 10),
                        tuneGrid = data.frame(cp = seq(0.01, 0.05, by = 0.001)))
predict.regression <- postResample(predict(fit.regression, newdata = test.data), test.data$Median_House_Value)
summary(fit.regression)
min.rmse <- fit.regression$results["RMSE"] %>% min()
predict.regression["RMSE"]
# plain tree::tree fits for plotting
treeFit <- tree::tree(Median_House_Value ~ Median_Income + Median_Age + Tot_Rooms, data = training.data)
plot(treeFit); text(treeFit)
model_param <- Median_House_Value ~ Median_Age + Tot_Rooms + Population
# NOTE(review): predictor_select is used here but only defined further down
# in this scratch file -- this line errors if the file is run top to bottom.
predictors <- paste(predictor_select, collapse = "+")
response <- paste("Median_House_Value")
formula <- as.formula(paste(response,"~",predictors))
treeFit2 <- tree(model_param,
                 data = training.data)
plot(treeFit2)
text(treeFit2)
# random forest model with an mtry grid over all predictors
set.seed(7)
fit.rf <- train(Median_House_Value ~ ., data = training.data,
                method = "rf",
                preProcess = c("center", "scale"),
                trControl = trainControl(method = "cv", number = 10),
                tuneGrid = data.frame(mtry = 1:(ncol(raw_data_original)-1)),
                importance = TRUE)
fit.rf$results["RMSE"] %>% min()
predict.rf <- postResample(predict(fit.rf, newdata = test.data), test.data$Median_House_Value)
# variable-importance plot from a direct randomForest fit (on test data --
# NOTE(review): fitting on test.data looks unintentional; confirm)
rf.fit <- randomForest::randomForest(model_param, data = test.data, mtry=1:3, importance = TRUE)
randomForest::varImpPlot(rf.fit)
# comparison of held-out RMSE across the four models
compare.rmse <- data.frame(predict.mlr1,
                           predict.mlr2,
                           predict.regression,
                           predict.rf)
colnames(compare.rmse) <- c("mlr aic1", "mlr aic2", "regression tree", "random forest")
compare.rmse
min.compare.rmse <- min(compare.rmse["RMSE", ])
min.test <- compare.rmse["RMSE", ] == min.compare.rmse
#------------------------------------------------------------------------
# Prototype of the Shiny server logic: build train/test lists and a
# modeling-parameters list the way the app's reactives would.
set.seed(7)
train <- sample(1:nrow(raw_data_original), size = nrow(raw_data_original)*(as.numeric(70)/100))
test <- dplyr::setdiff(1:nrow(raw_data_original), train)
training_data <- raw_data_original[train, ]
test_data <- raw_data_original[test, ]
train_test_data <- list("training_data"=training_data,"test_data"=test_data)
train_test_data[["test_data"]]$Median_House_Value
# stand-ins for the app's input$ widgets
var_interact <- 1
model_select_mlr <- 1
predictor_select <- list("Median_Age", "Tot_Rooms")
# "*" joins predictors with interactions; "+" without
if(var_interact == 1 & model_select_mlr == 1) {
  predictors <- paste(predictor_select, collapse = "*")
} else {
  predictors <- paste(predictor_select, collapse = "+")
}
response <- paste("Median_House_Value")
formula <- as.formula(paste(response,"~",predictors))
# cv settings
folds <- 5
trControl <- trainControl(method = "cv", number = folds)
# tuning grids: cp sequence for rpart, mtry range for random forest
cp_min <- 0.01
cp_max <- 0.03
cp_by <- 0.001
tree_grid <- data.frame(cp = seq(cp_min, cp_max, by = cp_by))
mtry <- 9
rf_grid <- data.frame(mtry = 1:(mtry-1))
modeling_parameters <- list("formula"=formula, "trControl"=trControl, "tree_grid"=tree_grid, "rf_grid"=rf_grid)
modeling_parameters[["rf_grid"]]
# MLR branch as it would appear inside a reactive.
# NOTE(review): the top-level return() below is only legal inside a
# function -- this block errors if the file is sourced directly.
if(model_select_mlr==1) {
  set.seed(7)
  fit_mlr_model <- train(as.formula(modeling_parameters[["formula"]]),
                         data = train_test_data[["training_data"]],
                         method = "lm",
                         preProcess = c("center", "scale"),
                         trControl = modeling_parameters[["trControl"]])
  predict_mlr <- postResample(predict(fit_mlr_model, newdata = train_test_data[["test_data"]]),
                              obs = train_test_data[["test_data"]]$Median_House_Value)
  return(predict_mlr["RMSE"])
} else {
  paste0("You must select Multiple Linear Regression to see result.")
}
as.formula(modeling_parameters[["formula"]])
# random-forest branch
model_select_rf <- 1
fit_rf <- if(model_select_rf==1) {
  set.seed(7)
  fit_rf_model <- train(modeling_parameters[["formula"]],
                        data = train_test_data[["training_data"]],
                        method = "rf",
                        preProcess = c("center", "scale"),
                        trControl = modeling_parameters[["trControl"]],
                        tuneGrid = modeling_parameters[["rf_grid"]])
  predict_rf <- postResample(predict(fit_rf_model, newdata = train_test_data[["test_data"]]),
                             obs = train_test_data[["test_data"]]$Median_House_Value)
}
fit_rf_model$xlevels
# Shiny output binding -- only works inside a running server function.
output$rmse_training_tree <- renderPrint({
  fit_rf <- fit_rf()
  fit_rf
})
#------------------------------------------------------------------------
# Exploration prototypes for the app's EDA tab (ggplot2 / corrplot / kable).
# NOTE(review): raw_data_added (with the *_Factor columns) is not built in
# this fragment; presumably created elsewhere in the app.
varXselect <- "Median_Income"
varYselect <- "Median_House_Value" #"Population"
varZselect <- "Tot_Bedrooms_Factor"
varWselect <- "Median_Income_Factor"
# scatter plot colored by the selected factor variable
ggplot(raw_data_added, aes_string(x=varXselect, y=varYselect)) +
  geom_point(aes_string(color=varWselect)) #+
#facet_wrap(~as.character(varWselect))
#geom_smooth(method = lm, col = "red") +
#geom_smooth()
binWidth <- 150
# histogram of the selected x variable
ggplot(raw_data_added, aes_string(x=varXselect)) +
  geom_histogram()
#stat_ecdf(geom="step")
# boxplots by factor with group means overlaid
ggplot(raw_data_added, aes_string(x=varZselect, y=varYselect)) +
  geom_boxplot() +
  stat_summary(fun.y = mean, geom = "line", lwd = 1, aes_string(group = varWselect, col = varWselect))
# covariance / correlation summaries of the numeric columns
cov.stat <- raw_data_added %>% select(Median_Income, Population, Median_Age, Median_House_Value) %>% cov(method = "pearson")
corrplot::corrplot(cov.stat)
cor(raw_data_added$Median_Income, raw_data_added$Median_Age)
raw_data_added %>% select(Median_Income, Population, Median_Age, Median_House_Value) %>% cor(method = "pearson")
# frequency tables of a binned variable, rendered with kable
select_variable <- "Median_House_Value"
cut(raw_data_added$Median_House_Value, breaks = 4, dig.lab = 10) %>% table() %>% kable(caption = "Frequency Table", col.names = c("Range of Median House Value", "Count"))
raw_data_added %>% select(select_variable) %>% pull() %>% cut(breaks = 4, dig.lab = 10) %>% table() %>% kable()
kable(table(raw_data_added$Median_Income_Factor))
raw_data_original %>% select(Median_Income) %>%
  pull() %>% cut(breaks = 4, dig.lab = 10) %>% table() %>%
  kable("html") %>% kable_styling("striped", full_width = FALSE)
# same frequency table as a plain data frame
df<- raw_data_original %>% select(Median_Income) %>%
  pull() %>% cut(breaks = 4, dig.lab = 10) %>%
  table() %>% as.data.frame()
colnames(df) <- c("variable","count")
# numeric summary (plus standard deviation) of the selected variable
explore_summary_variable <- raw_data_original %>% select(Median_Income) %>% pull()
explore_summary_output <- c(summary(explore_summary_variable),
                            "St.Dev."=sd(explore_summary_variable)) %>% t() %>% as.data.frame(row.names = "variable")
|
b2839474a2003ef3d9d68d9e92960a2bb76cf0c5
|
39c8af74e550cfd4d2d6c9432707e951800c1cd1
|
/cachematrix.R
|
3822c0ebad23eda6041b09c0c4b1f7f740a86527
|
[] |
no_license
|
Matanatr96/ProgrammingAssignment2
|
e084856a95c77e38e11f6909a41208c1b9e215ac
|
c5bd035d3e5d21c9e03436fee6de93264b0955ba
|
refs/heads/master
| 2021-06-06T06:45:33.756800
| 2016-11-03T23:08:54
| 2016-11-03T23:08:54
| 72,792,559
| 0
| 0
| null | 2016-11-03T22:29:01
| 2016-11-03T22:29:00
| null |
UTF-8
|
R
| false
| false
| 1,002
|
r
|
cachematrix.R
|
## This set of functions calculates the inverse of a matrix and stores the
## value in cache, avoiding recomputation.
## makeCacheMatrix creates a matrix wrapper exposing get/set for the matrix
## and getInverse/setInverse for the cached inverse (cache is invalidated
## when the matrix is replaced).
## BUG FIXES vs. the original:
##  * setInverse assigned the inverse to `x` (clobbering the stored matrix)
##    instead of the cache variable -- the parameter was also named
##    `inverse`, shadowing the cache, so it is renamed to `inv`;
##  * the returned list bound getInverse to setInverse, so the cached value
##    could never be read back.
makeCacheMatrix <- function(x = matrix()) {
    inverse <- NULL
    setx <- function(y) {
        inverse <<- NULL
        x <<- y
    }
    getx <- function() {
        x
    }
    setInverse <- function(inv) {
        inverse <<- inv
    }
    getInverse <- function() {
        inverse
    }
    list(set = setx, get = getx, setInverse = setInverse, getInverse = getInverse)
}
## Compute (or fetch from cache) the inverse of the special matrix `x`:
## a previously stored inverse is returned immediately; otherwise the
## inverse is computed with solve(), stored via setInverse, and returned.
cacheSolve <- function(x, ...) {
    cached <- x$getInverse()
    if (is.null(cached)) {
        cached <- solve(x$get(), ...)
        x$setInverse(cached)
    }
    cached
}
|
5a29fcd2c9c0d5b87bdd06d3c21996cbbb8a3292
|
b2fceb19567b364f6ba7b16f318f396075a0d874
|
/cachematrix.R
|
3ccd01be178cfbf38680b2dc09b4e61e8f4e98c1
|
[] |
no_license
|
juraseg/ProgrammingAssignment2
|
93dc0b5be95e50609cf868c2c677766f7004930d
|
7381b744b4bae8357284827d0324f639719cc041
|
refs/heads/master
| 2021-01-17T11:24:59.051294
| 2014-05-25T11:15:04
| 2014-05-25T11:15:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,058
|
r
|
cachematrix.R
|
## These functions perform caching of results of matrix inverse operation
## Build a special "matrix" object that caches its inverse.  The returned
## list exposes set/get for the matrix and setinverse/getinverse for the
## cached inverse; set() clears the cache so a stale inverse is never served.
makeCacheMatrix <- function(x = matrix()) {
    stored_inverse <- NULL
    set <- function(new_value) {
        x <<- new_value
        stored_inverse <<- NULL   # invalidate cache on update
    }
    get <- function() {
        x
    }
    setinverse <- function(inv) {
        stored_inverse <<- inv
    }
    getinverse <- function() {
        stored_inverse
    }
    list(set=set, get=get, setinverse=setinverse, getinverse=getinverse)
}
## The function returns the inverse of the given special "matrix":
## if the inverse was already calculated it is served from the cache,
## otherwise it is computed with solve(), cached, and returned.
## FIX: the original declared `...` but never forwarded it, so extra
## arguments to solve() (e.g. a right-hand side or tolerance) were
## silently ignored; they are now passed through.
cacheSolve <- function(x, ...) {
    inverse <- x$getinverse()
    if (!is.null(inverse)) {
        message("getting from cache")
        return(inverse)
    }
    data <- x$get()
    inverse <- solve(data, ...)
    x$setinverse(inverse)
    inverse
}
|
596303c344856a31fc54d490ad322dca29a6be28
|
7917fc0a7108a994bf39359385fb5728d189c182
|
/cran/paws.machine.learning/man/sagemaker_stop_notebook_instance.Rd
|
20cc4e7534ba4ab54e78d5db71dea4a21e56a629
|
[
"Apache-2.0"
] |
permissive
|
TWarczak/paws
|
b59300a5c41e374542a80aba223f84e1e2538bec
|
e70532e3e245286452e97e3286b5decce5c4eb90
|
refs/heads/main
| 2023-07-06T21:51:31.572720
| 2021-08-06T02:08:53
| 2021-08-06T02:08:53
| 396,131,582
| 1
| 0
|
NOASSERTION
| 2021-08-14T21:11:04
| 2021-08-14T21:11:04
| null |
UTF-8
|
R
| false
| true
| 1,273
|
rd
|
sagemaker_stop_notebook_instance.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sagemaker_operations.R
\name{sagemaker_stop_notebook_instance}
\alias{sagemaker_stop_notebook_instance}
\title{Terminates the ML compute instance}
\usage{
sagemaker_stop_notebook_instance(NotebookInstanceName)
}
\arguments{
\item{NotebookInstanceName}{[required] The name of the notebook instance to terminate.}
}
\value{
An empty list.
}
\description{
Terminates the ML compute instance. Before terminating the instance,
Amazon SageMaker disconnects the ML storage volume from it. Amazon
SageMaker preserves the ML storage volume. Amazon SageMaker stops
charging you for the ML compute instance when you call
\code{\link[=sagemaker_stop_notebook_instance]{stop_notebook_instance}}.
To access data on the ML storage volume for a notebook instance that has
been terminated, call the
\code{\link[=sagemaker_start_notebook_instance]{start_notebook_instance}} API.
\code{\link[=sagemaker_start_notebook_instance]{start_notebook_instance}} launches
another ML compute instance, configures it, and attaches the preserved
ML storage volume so you can continue your work.
}
\section{Request syntax}{
\preformatted{svc$stop_notebook_instance(
NotebookInstanceName = "string"
)
}
}
\keyword{internal}
|
b217d738749bfb488a936ec1eedff97c532b4636
|
bdb8c969fedf227b6bb4f2ea5f0aaf0c3b3a4fa0
|
/03_genome_genes/10_codeml_output_processing.r
|
c23bf69b5e2e708dcd7619dfe2ec7073bd48cc28
|
[
"MIT"
] |
permissive
|
schnappi-wkl/certhia_genomes1
|
9416ed445cfb5e811f03ca654533cf565fdc95c7
|
95cce3cf7375203fe8b9970e2b0b19f70bb18559
|
refs/heads/master
| 2023-03-18T22:53:12.301976
| 2021-03-08T15:42:57
| 2021-03-08T15:42:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,105
|
r
|
10_codeml_output_processing.r
|
# Collect per-gene likelihood-ratio-test p-values from codeml output files.
# For each gene number: require the trimmed alignment to exist and be at
# least 150 nt (50 amino acids), then compare the null lnL against the four
# branch-specific alternative models and write one tab-separated row.
# NOTE(review): Biostrings/seqinr/rphast are Bioconductor/CRAN packages;
# require() returns FALSE rather than erroring if one is missing.
output_name <- "codeml_results_pvals_uncorrected.txt"
output_name2 <- "codeml_results_pvals_corrected.txt"
write(c("gene_number", "certhia_un_p", "ficedula_un_p", "parus_un_p", "taeniopygia_un_p"), file=output_name, ncolumns=5, sep="\t")
x <- list.files(pattern="*fasta")
x2 <- list.files(pattern="*txt")
# gene number = numeric prefix of each fasta filename (before the first "_")
x_numbers <- as.numeric(sapply(strsplit(x, "_"), "[[", 1))
require(stats)
require(Biostrings)
require(seqinr)
require(rphast)
for(a in 1:max(x_numbers)) {
	x_match <- match(paste(a, "_aligned_trimmed.fasta", sep=""), x) # see if fasta file exists
	if(!is.na(x_match)) { # if yes read it
		a_rep <- readDNAStringSet(paste(a, "_aligned_trimmed.fasta", sep=""))
		if(a_rep@ranges@width[1] >= 150) { # only use those that are at least 50 AAs (150 nucleotides)
			x_match <- match(paste(a, "_total_output.txt", sep=""), x2)
			if(!is.na(x_match)) { # read in codeml output
				a_results <- read.table(paste(a, "_total_output.txt", sep=""), fill = T, stringsAsFactors=F)
				a_null_lnl <- a_results[1,5] # null model lnL (row 1, col 5)
				a_alt_lnl <- a_results[c(3,5,7,9), 5] # the four alternative models' lnL
				a_LRT <- 2 * (a_alt_lnl - a_null_lnl) # likelihood ratio statistic
				a_uncorrected_p <- pchisq(a_LRT, df=1, lower.tail=FALSE) # upper-tail chi-square p-values, df = 1
				a_output <- c(a, a_uncorrected_p)
				write(a_output, file=output_name, ncolumns=5, append=T, sep="\t")
			}
		}
	}
}
# read in the uncorrected p-values written by the loop above
output <- read.table(output_name, sep="\t", stringsAsFactors=F, header=T)
# number of tests = number of genes * four branch tests per gene
number_comparisons <- nrow(output) * 4
# multiple-testing correction of each test column using
# Benjamini & Hochberg (1995) false discovery rate
output[,2] <- p.adjust(output[,2], method="fdr", n=number_comparisons)
output[,3] <- p.adjust(output[,3], method="fdr", n=number_comparisons)
output[,4] <- p.adjust(output[,4], method="fdr", n=number_comparisons)
output[,5] <- p.adjust(output[,5], method="fdr", n=number_comparisons)
# per-gene minimum corrected p-value, appended as a new column
min_p <- apply(output[,2:5], 1, min)
output <- cbind(output, min_p)
plot(min_p, pch=19, cex=0.1)
write.table(output, file=output_name2, sep="\t", quote=F, col.names=T, row.names=F)
# directory for the four-fold degenerate (4d) site alignments
dir.create("_4d_output")
# keep only genes with complete rows and no significant test (min_p > 0.05)
filtered_output <- na.omit(output)
filtered_output <- filtered_output[filtered_output$min_p > 0.05, ]
# for each retained (putatively neutral) gene, extract the four-fold
# degenerate sites from its alignment for a later phylogeny
for(a in 1:nrow(filtered_output)) {
	a_rep <- read.msa(paste(filtered_output[a,1], "_aligned_trimmed.fasta", sep=""))
	a_feat <- feat(seqname="certhia", feature="CDS", start=1, end=ncol(a_rep))
	a_4d_rep <- get4d.msa(a_rep, a_feat)
	write.msa(a_4d_rep, file=paste("_4d_output/", filtered_output[a,1], "_4d.fasta", sep=""), format="FASTA")
}
# list all the per-gene 4d alignments written above
x_files <- list.files("_4d_output", full.names=T)
# read every alignment and collect each taxon's sequence
# NOTE(review): assumes every 4d fasta holds the four taxa in the fixed
# order certhia, ficedula, parus, taeniopygia -- confirm upstream.
certhia <- list()
ficedula <- list()
parus <- list()
taeniopygia <- list()
for(a in 1:length(x_files)) {
	a_rep <- readDNAStringSet(x_files[a])
	certhia[[a]] <- as.character(a_rep)[1]
	ficedula[[a]] <- as.character(a_rep)[2]
	parus[[a]] <- as.character(a_rep)[3]
	taeniopygia[[a]] <- as.character(a_rep)[4]
}
# concatenate each taxon's 4d sites into a single string
certhia <- paste(unlist(certhia), collapse="")
ficedula <- paste(unlist(ficedula), collapse="")
parus <- paste(unlist(parus), collapse="")
taeniopygia <- paste(unlist(taeniopygia), collapse="")
# write the concatenated supermatrix as a simple 4-record FASTA
output_name <- "_total_4d_sites.fasta"
write(">certhia", file=output_name, ncolumns=1)
write(certhia, file=output_name, ncolumns=1, append=T)
write(">ficedula", file=output_name, ncolumns=1, append=T)
write(ficedula, file=output_name, ncolumns=1, append=T)
write(">parus", file=output_name, ncolumns=1, append=T)
write(parus, file=output_name, ncolumns=1, append=T)
write(">taeniopygia", file=output_name, ncolumns=1, append=T)
write(taeniopygia, file=output_name, ncolumns=1, append=T)
|
7989ddb361a20e053af4751ddb4310ba3d060cf5
|
12e0ddae06438b748d12a7f9c26e67cf682a8c16
|
/models/loadData.R
|
68790ed13b7a1ea83afdb54837aa04f139991084
|
[
"MIT"
] |
permissive
|
christianadriano/ML_SelfHealingUtility
|
b05b2462c95a9aed9ac86af9e5eeb65bb07713d0
|
398ef99a7073c6383862fade85b8816e65a2fb1e
|
refs/heads/master
| 2021-10-07T20:45:51.281121
| 2018-12-05T09:16:05
| 2018-12-05T09:16:05
| 105,566,942
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,918
|
r
|
loadData.R
|
#---------------------------------------------------------------
#Load all data into a dataframe
# Reads `fileName` (CSV with header) from the hard-coded project directory,
# drops rows with missing values and rows with non-positive UTILITY_INCREASE,
# and collapses the provider-specific authentication component names into
# the single label "Authentication Service".
# NOTE(review): setwd() permanently changes the working directory as a side
# effect; later relative-path writes (resultsToFile) appear to rely on this,
# so it is deliberately not restored here.
loadData<- function(fileName){
	setwd("C://Users//Chris//Documents//GitHub//ML_SelfHealingUtility//");
	data_all <- read.csv(fileName,header = TRUE,sep=",");
	dataf <- data.frame(data_all);
	#Remove rows containing any NA
	dataf <- dataf[complete.cases(dataf),]
	#summary(dataf)
	dataf <- renameAuthenticationServices(dataf)
	#dataf <- dataf[dataf$AFFECTED_COMPONENT=="Authentication Service",];
	#Remove rows with non-positive utility increase
	dataf <- dataf[dataf$UTILITY_INCREASE>0,]
	return(dataf);
}
# Replace component names -------------------------------------------------
# The Twitter/Facebook/Google authentication services are the same component
# type under different names; collapse them all to "Authentication Service".
# (The three copy-pasted flag/replace blocks of the original are merged into
# a single %in% test with identical behavior.)
renameAuthenticationServices <- function (df){
	provider_variants <- c("Twitter Authentication Service",
	                       "Facebook Authentication Service",
	                       "Google Authentication Service")
	flag <- df$AFFECTED_COMPONENT %in% provider_variants
	df$AFFECTED_COMPONENT <- replace(df$AFFECTED_COMPONENT, flag, "Authentication Service")
	return(df);
}
# select_Linear <- function(dataf){
# # Select feature columns --------------------------------------------------
# features.df<- data.frame(dataf$CRITICALITY,dataf$CONNECTIVITY,
# dataf$RELIABILITY,
# dataf$UTILITY_INCREASE);
#
#
# colnames(features.df) <- c("CRITICALITY","CONNECTIVITY",
# "RELIABILITY",
# "UTILITY_INCREASE");
#
# return(features.df);
# }
# Project the predictors of the linear utility model plus the target column.
# Built with named-argument data.frame(), so row names are reset to 1..n,
# exactly as the original construct-then-rename version did.
select_Linear <- function(dataf){
  data.frame(CRITICALITY        = dataf$CRITICALITY,
             PROVIDED_INTERFACE = dataf$PROVIDED_INTERFACE,
             REQUIRED_INTERFACE = dataf$REQUIRED_INTERFACE,
             RELIABILITY        = dataf$RELIABILITY,
             UTILITY_INCREASE   = dataf$UTILITY_INCREASE)
}
# Project the predictors of the saturation utility model plus the target.
# Named-argument data.frame() keeps the original column order and resets
# row names to 1..n, matching the original behavior.
select_Saturation <- function(dataf){
  data.frame(CRITICALITY        = dataf$CRITICALITY,
             PROVIDED_INTERFACE = dataf$PROVIDED_INTERFACE,
             REQUIRED_INTERFACE = dataf$REQUIRED_INTERFACE,
             RELIABILITY        = dataf$RELIABILITY,
             PMax               = dataf$PMax,
             alpha              = dataf$alpha,
             REPLICA            = dataf$REPLICA,
             REQUEST            = dataf$REQUEST,
             UTILITY_INCREASE   = dataf$UTILITY_INCREASE)
}
# Project the predictors of the discontinuous utility model plus the target.
# Named-argument data.frame() preserves the original column order and the
# reset (1..n) row names of the original implementation.
select_Discontinuous <- function(dataf){
  data.frame(CRITICALITY        = dataf$CRITICALITY,
             RELIABILITY        = dataf$RELIABILITY,
             IMPORTANCE         = dataf$IMPORTANCE,
             PROVIDED_INTERFACE = dataf$PROVIDED_INTERFACE,
             REQUIRED_INTERFACE = dataf$REQUIRED_INTERFACE,
             ADT                = dataf$ADT,
             UTILITY_INCREASE   = dataf$UTILITY_INCREASE)
}
# Project the predictors of the combined utility model plus the target.
# Column order matches the original; `alpha` is intentionally excluded
# (it was commented out in the original selection as well).
select_Combined <- function(dataf){
  data.frame(CRITICALITY        = dataf$CRITICALITY,
             RELIABILITY        = dataf$RELIABILITY,
             IMPORTANCE         = dataf$IMPORTANCE,
             PROVIDED_INTERFACE = dataf$PROVIDED_INTERFACE,
             REQUIRED_INTERFACE = dataf$REQUIRED_INTERFACE,
             REPLICA            = dataf$REPLICA,
             REQUEST            = dataf$REQUEST,
             ADT                = dataf$ADT,
             PMax               = dataf$PMax,
             UTILITY_INCREASE   = dataf$UTILITY_INCREASE)
}
#-------------------------------------------------------------
#Scramble the dataset before extracting the training set.
# Deterministically shuffle the rows of a data frame (fixed RNG seed 8850)
# before a training subset is carved off.  The same input always yields the
# same row order.
scrambleData <- function(datadf){
  set.seed(8850);
  shuffle_order <- order(runif(nrow(datadf)))
  return(datadf[shuffle_order, ]);
}
#--------------------------------------------------------------
#Extract the unique items from a column and return them sorted
# Return the unique values of `column`, sorted in ascending order.
# `columnName` labels the intermediate single-column data frame.
# BUG FIX: the original sorted with with(uniqueItems, order(columnName)),
# which ordered the length-1 *name string* itself (always index 1) rather
# than the data column, so only the first unique item was ever returned.
# We now order by the actual column; the single-column row subset drops to
# a plain vector, which is what callers receive.
listUniqueItems<- function(column,columnName){
  #obtain a list of unique items
  uniqueItems <- data.frame(unique(column));
  colnames(uniqueItems) <- c(columnName);
  #Sort items in ascending order
  uniqueItems <- uniqueItems[order(uniqueItems[[columnName]]), ];
  return(uniqueItems);
}
# Centralize data ---------------------------------------------------------
#Centralize features (divide them by their mean)
centralize<- function(featureColumn){
# Divide the column by its own mean. The original divided by
# mean(featureData) -- an undefined global -- which errored (or silently
# used an unrelated object if one named `featureData` happened to exist).
featureColumn <- featureColumn/mean(featureColumn);
return(featureColumn);
}
# RMSE --------------------------------------------------------------------
# Root mean square error
# https://en.wikipedia.org/wiki/Root-mean-square_deviation
# Root mean square error of a vector of residuals.
# https://en.wikipedia.org/wiki/Root-mean-square_deviation
rmse <- function(error) {
  sqrt(mean(error * error))
}
# MAPD --------------------------------------------------------------------
# Mean Absolute Percent Deviation MADP
# https://en.wikipedia.org/wiki/Mean_absolute_percentage_error
# Mean absolute percent deviation (MAPE), in percent.
# https://en.wikipedia.org/wiki/Mean_absolute_percentage_error
# Undefined (Inf/NaN) when any actual value is zero.
madp <- function(prediction, actual){
abs.pct.error <- abs(actual - prediction) / abs(actual)
100 * sum(abs.pct.error) / length(actual)
}
# R_squared ---------------------------------------------------------------
# Coefficient of determination
# https://en.wikipedia.org/wiki/Coefficient_of_determination
# Coefficient of determination R^2 = 1 - SS_res / SS_tot.
# https://en.wikipedia.org/wiki/Coefficient_of_determination
# (The original mislabelled the residual sum of squares as
# "SS_ExplainedVariance"; the computation itself was standard.)
r_squared <- function(prediction, actual){
ss.residual <- sum((prediction - actual)^2)
ss.total <- sum((actual - mean(actual))^2)
1 - ss.residual / ss.total
}
# Average RMSE ------------------------------------------------------------
#sampleSize that was use to compute RMSE datapoint (assuming we used the same sampleSize for all RMSE datapoints)
#https://stats.stackexchange.com/questions/99263/average-of-root-mean-square-error
# Combine several RMSE values (each computed over `sampleSize` points) into
# one overall RMSE: sqrt of the sample-size-weighted mean of squared errors,
#   RMSE_total = sqrt( sum(RMSE_i^2 * n) / (k * n) )
# per https://stats.stackexchange.com/questions/99263/average-of-root-mean-square-error
# Bug fix: the original applied sqrt() per element *before* summing, which
# computes mean(RMSE)/sqrt(n) instead of the combined RMSE (e.g. for
# RMSEVector = c(1, 1), sampleSize = 4 it returned 0.5 instead of 1).
averageRMSE <- function(RMSEVector, sampleSize){
total.squared.error <- sum((RMSEVector^2) * sampleSize);
total.points <- length(RMSEVector) * sampleSize;
return (sqrt(total.squared.error / total.points));
}
# Save results to file ----------------------------------------------------
# Write `results` as a comma-separated file named
# "results_<methodName>_<modelName>_<extension>" in the working directory.
# Returns a status string containing the file name that was written.
resultsToFile <- function(results, modelName, methodName,extension){
out.path <- paste0("results_", methodName, "_", modelName, "_", extension)
write.table(results, out.path, sep = ",", col.names = TRUE, row.names = FALSE)
paste0("file written:", out.path)
}
# Generate the dataset names that will be trained -------------------------
# Build dataset names by appending each size to the model name.
# s_idx == 0: generate a name for every size in datasetSizeList
#             (length > 1 yields a 1-row character matrix, via cbind,
#             as in the original; length 1 yields a plain string).
# s_idx >= 1: generate a single name for datasetSizeList[s_idx].
generateDataSetNames <- function(modelName,datasetSizeList,s_idx){
###s_idx=0 generates for all sizes in the dataset.
###s_idx=1 generates only for the first element of datasetSizeList
if(s_idx==0 & length(datasetSizeList)>0){#Generate for all sizes
datasetName <- paste0(modelName,datasetSizeList[1]);
# seq_along(...)[-1] is empty for a length-1 list; the original's
# 2:length(...) counted backwards (2, 1) in that case, producing an
# "NA" name and a duplicate.
for(i in seq_along(datasetSizeList)[-1]){
datasetName <- cbind(datasetName,paste0(modelName,datasetSizeList[i]));
}
}
else{
datasetName <- paste0(modelName,datasetSizeList[s_idx]);
}
return(datasetName);
}
# Prepare features --------------------------------------------------------
# Build the training feature frame: pick the feature set matching
# `selectionType`, drop rows with zero utility, and shuffle the rows
# (scrambleData reseeds the RNG, so the order is reproducible).
# An unrecognised selectionType leaves the feature frame undefined and
# errors on first use, as in the original.
prepareFeatures <- function(dataf,selectionType){
if (selectionType == "Combined") {
features.df <- select_Combined(dataf)
} else if (selectionType == "Linear") {
features.df <- select_Linear(dataf)
} else if (selectionType == "Discontinuous") {
features.df <- select_Discontinuous(dataf)
} else if (selectionType == "Saturating") {
features.df <- select_Saturation(dataf)
}
# Rows whose target is exactly zero carry no utility signal.
features.df <- features.df[features.df$UTILITY_INCREASE != 0, ]
scrambleData(datadf = features.df)
}
# Generate PMML file ------------------------------------------------------
# Export a trained model to PMML via the r2pmml package.
#   trained.model: fitted model object (the active code path is for gbm;
#                  an xgboost variant is kept below, commented out).
#   training.df:   training frame; all columns except the last are
#                  predictors, the last column is the target.
#   pmmlFileName:  destination path for the PMML output.
#   numberOfTrees: only used by the commented-out xgboost path
#                  (ntreelimit); unused by the active gbm path.
# Side effects: writes "feature.map" into the working directory and the
# PMML file at pmmlFileName. NOTE(review): the bare r2pmml() call assumes
# the r2pmml package is attached by the caller -- confirm.
generatePMML <- function(trained.model, training.df, pmmlFileName, numberOfTrees){
#browser();
last.column.explanatory <- dim(training.df)[2] - 1; #last column is the target variable
# Generate feature map
feature.map = r2pmml::genFMap(training.df[1:last.column.explanatory])
r2pmml::writeFMap(feature.map, "feature.map")
# Save the model in XGBoost proprietary binary format
#xgb.save(model, "xgboost.model")
# Dump the model in text format
# xgb.dump(model, "xgboost.model.txt", fmap = "feature.map");
#for gbm
r2pmml(trained.model, pmmlFileName);#, fmap = feature.map, response_name = "UTILITY_INCREASE",
#missing = NULL, compact = TRUE)
#for xgboost
#r2pmml(trained.model, pmmlFileName, fmap = feature.map, response_name = "UTILITY_INCREASE",
# missing = NULL, ntreelimit = numberOfTrees, compact = TRUE)
}
# Convert time to Data Frame ----------------------------------------------
# Convert a proc.time()-style structure into a 1-row data frame with
# columns user.time, sys.time, elapsed.time. Assumes the first three
# elements of `time` are user, system and elapsed, in that order.
convertTimeToDataFrame <- function(time){
values <- unlist(lapply(time, '[[', 1))
data.frame(user.time    = values[[1]],
           sys.time     = values[[2]],
           elapsed.time = values[[3]])
}
|
ecf5d5bd52e9cca76667660e77ace41aded01f27
|
208aa0cbd5c25dc27f769627a53e81f980a5e817
|
/deep_learning/rstudio/install_packages.R
|
3abd335ce3d2e5fd37c0d0d946d2fa1f4785e4f2
|
[] |
no_license
|
GeertvanGeest/scs-docker
|
50d2380a8668be072df7d00942863db27722021b
|
f82bcad3e3c78bd33ebfe7811d06414330fb6036
|
refs/heads/master
| 2023-07-15T22:27:32.765202
| 2021-09-03T07:26:39
| 2021-09-03T07:26:39
| 399,025,052
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 407
|
r
|
install_packages.R
|
# CRAN packages (deep-learning front ends, matrix/DR utilities, plotting).
cran.packages <- c("tensorflow", "keras", "BiocManager", "Matrix", "Rtsne",
                   "rsvd", "RColorBrewer", "umap", "reshape2")
install.packages(cran.packages)
# Bioconductor packages:
bioc.packages <- c("SingleCellExperiment", "scater", "cowplot", "scran",
                   "batchelor", "ComplexHeatmap", "tximeta", "AnnotationDbi")
BiocManager::install(bioc.packages)
# GitHub-only package:
devtools::install_github('fmicompbio/swissknife')
|
8543b0d3e36e188a7122b8c26ad6ec71b3c83d6b
|
712c71892a6edd61227e2c0c58bbc1e9b43893e4
|
/R/git_info.R
|
9a8dd3b3b92d2df64dac40294e5ebc36f7ec0bc6
|
[] |
no_license
|
gelfondjal/adapr
|
130a6f665d85cdfae7730196ee57ba0a3aab9c22
|
b85114afea2ba5b70201eef955e33ca9ac2f9258
|
refs/heads/master
| 2021-01-24T10:20:14.982698
| 2020-01-28T22:56:18
| 2020-01-28T22:56:18
| 50,005,270
| 33
| 3
| null | 2018-10-18T16:09:57
| 2016-01-20T04:48:49
|
R
|
UTF-8
|
R
| false
| false
| 928
|
r
|
git_info.R
|
#' Retrieves the information from git about a file
#' @param gitdir string with git directory
#' @param filename string of file to query
#' @param branch git branch (currently unused)
#' @param git_args character vector of extra arguments appended to `git log`
#' @param git_binary location of git executable (NULL = auto-detect)
#' @return git log for filename, as a character vector of output lines
#' @export
#' @examples
#'\dontrun{
#' si <- pullSourceInfo("adaprHome")
#' file0 <- file.path(si$project.path,project.directory.tree$analysis,"read_data.R")
#' gitInfo(si$project.path,file0)
#'}
#'
#'
gitInfo <- function(gitdir,filename,branch = NULL, git_args = character(), git_binary = NULL){
  # extract the git information related to a filename in the git repository in gitdir
  git_binary_path <- git_path(git_binary)
  args <- c('log', shQuote(filename), git_args)
  # Run `git log` from inside the repository, restoring the caller's working
  # directory afterwards -- even if system2() throws. The original saved the
  # old directory in `temp` but never called setwd(temp), permanently
  # changing the working directory as a side effect.
  old_wd <- getwd()
  on.exit(setwd(old_wd), add = TRUE)
  setwd(gitdir)
  git.out <- system2(git_binary_path, args, stdout = TRUE, stderr = TRUE)
  return(git.out)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.