blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7bedc4026c1a1dba65f3a4eda095677c54294e2f | e2f83d83780a64591fef34b2b48208166c19e040 | /05.gwas_power_marker_number.R | f2d13bd01fa096090f9e11474c2cb399be8f1223 | [] | no_license | quanrd/tomatoWgsGwasGs | 77ed273edb4840ef81d153c9d880d42bfbb496c3 | e5301f71512eff697085b3b4dc43856a1c9086bb | refs/heads/master | 2020-12-04T09:41:39.535203 | 2019-06-28T08:45:22 | 2019-06-28T08:45:22 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,652 | r | 05.gwas_power_marker_number.R | ### number of markers and GWAS power : Figure 3A ###
## libraries ##
library(rrBLUP)
library(pROC)
library(ggplot2)
source("inhouse.functions.R")
## data ##
load("data/geno.train.rda")
load("data/K.train.rda")
genetic.map = read.csv("data/genetic.map.csv")
## execution ##
geno.train = geno.train[geno.train$CHROM != "SL3.0ch00",]
geno.train = loessMapPos(genetic.map, geno.train)
G = t(geno.train[,-(1:14)])
info = geno.train[,1:14]
cond.qtl = c(10, 25)
cond.h2 = c(0.3, 0.6)
n.rep = 25
Ncore = 10
n.p = c(5000, 50000, 100000, 500000)
set.seed(1)
for (n.qtl in cond.qtl) {
for (h2 in cond.h2) {
for (i in 1:n.rep) {
filename = paste("simQTLnmrks",n.qtl,h2*10,i,".rda",sep="_")
simqtl = sim.QTL(G, info, K.train, h2, n.qtl)
save(simqtl, file=filename)
}
}
}
Nmrks_list = vector(mode="list", length=4)
ct = 1
set.seed(1)
for (n.qtl in cond.qtl) {
for (h2 in cond.h2) {
AUC = c()
Nmrks = c()
Sim = c()
names(Nmrks_list)[[ct]] = paste("Nqtl.",n.qtl,"_h2.",h2,sep="")
for (i in 1:n.rep) {
filename = paste("simQTLnmrks",n.qtl,h2*10,i,".rda",sep="_")
load(filename)
phenoIN = data.frame(gid=rownames(simqtl$val), y=simqtl$val$y)
geno.base = geno.train[!is.element(geno.train$MARKER, simqtl$qtl$MARKER),]
for (p in n.p) {
gi = geno.base[sort(sample(1:nrow(geno.base), p)),]
Ki = A.mat(t(gi[,-(1:14)]), n.core=Ncore)
res = GWAS(phenoIN, gi[,-(4:14)], fixed=NULL, K=Ki, min.MAF=0.05, n.core=Ncore, P3D=TRUE, plot=FALSE)
ans = reconstruct.res(res, simqtl$qtl)
logP = res$y
Causative = res$MARKER %in% ans[,1]
not.detected = n.qtl - length(Causative[Causative])
Causative = c(Causative, rep(TRUE, not.detected))
logP = c(logP, rep(0, not.detected))
val = roc(Causative, logP, algorithm=2, quiet=FALSE, levels=c("FALSE", "TRUE"), direction="<")$auc
AUC = c(AUC, as.numeric(val))
Nmrks = c(Nmrks, p)
Sim = c(Sim, i)
}
}
M = data.frame(AUC=AUC, N.markers=Nmrks, Sim=Sim)
M$Sim = as.factor(M$Sim)
M$N.markers = as.numeric(as.factor(M$N.markers))
Nmrks_list[[ct]] = M
ct = ct + 1
}
}
M = c()
for (i in 1:length(Nmrks_list)) {
cond.names = names(Nmrks_list)[i]
Cond = rep(cond.names, 100)
M = rbind(M, cbind(Cond, Nmrks_list[[i]]))
}
M$N.markers = as.numeric(as.factor(M$N.markers))
M$Sim = as.character(as.numeric(M$Sim))
xx = c(5,50,100,500)
for (i in 1:4) M$N.markers[M$N.markers==i] = xx[i]
M$N.markers = as.factor(M$N.markers)
g = ggplot(M, aes(x=N.markers, y=AUC, color=N.markers)) +
geom_boxplot() +
facet_grid( ~ Cond) +
theme(legend.position='none')
ggsave(file = "outputs/Fig3A.png", plot = g, dpi = 400, width = 7, height = 2.5)
|
b42b96d52041b4a624fa9a52559473deb8775629 | a986267478f44c19688c5c8f4fe42c05b65844c1 | /Gradient matching/Results without measurement errors/PDE_GradientMatching_NoMeasurementError.r | a30ffb8d71b230296a72fb8c82abd494725ada4e | [
"CC-BY-4.0"
] | permissive | ycx12341/Data-Code-Figures-RSOS-rev | 66ad1868f4a5947f17ed252940f392c18cc440f8 | 6ecd8ee6c9c67fade3e64043685482a22e9b9e08 | refs/heads/main | 2023-04-13T22:36:01.896703 | 2022-08-09T14:21:45 | 2022-08-09T14:21:45 | 348,829,671 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,445 | r | PDE_GradientMatching_NoMeasurementError.r | ### Gradient matching scheme ########
### Author: Yunchen Xiao & Len Thomas ###########
### Single run on dataset with no measurement error ###
#Environment settings
library(readr)
#Source companion functions
source("PDE_GradientMatching_Functions.r")
### Setup ####
#Define simulation parameters
# Define model parameters
dn <- 0.01
gamma <- 0.05
eta <- 10
dm <- 0.01
alpha <- 0.1
rn <- 5
# This parameter not included in the optimization
beta <- 0
# Make a vector to store the true values
true.values <- c(dn, gamma, rn, eta, dm, alpha)
names(true.values) <- c("dn", "gamma", "rn", "eta", "dm", "alpha")
#No parameters fixed here, so set fixed par to 6 NAs
fixed.par <- rep(NA, 6)
is.estimated <- is.na(fixed.par)
n.estimated <- sum(is.estimated)
#For optimization, use start values from manuscript
start.values <- c(0.01, 0.133, 6.25, 12.5, 0.0166, 0.125)
#Trim to only those for which parameters are being estimated
start.values <- start.values[is.estimated]
# Define 1D dimensionless space points
n.x11 <- 80
max.x11 <- 1
x11 <- seq(0, max.x11, length = n.x11)
# Define time discretization and max time
dt <- 0.001
max.t <- 10
# Set initial conditions
eps <- 0.01
n0 <- rep(0, n.x11)
for (i in 1:n.x11) {
if (x11[i] <= 0.25) {
n0[i] <- exp(-x11[i] ^ 2 / eps)
} else {
n0[i] <- 0
}
}
f0 <- 1-0.5*n0
m0 <- 0.5*n0
#Generate reference dataset
ref.data.trun <- generate.reference.data(n.x11, max.x11, dt, max.t,
dn, gamma, eta, dm, alpha, rn, beta, n0, f0, m0, truncate = TRUE)
### Gradient matching estimation ####
#Obtain gradient approximations
dist <- "gamma"
grads <- approximate.gradients(ref.data.trun, x11, max.t, distribution = dist)
#write_rds(grads, "Reference gradients GAM.rds")
grads <- read_rds("Reference gradients GAM.rds")
#Write gradients predicted by GAM into a .txt file
#write.table(grads, "Reference gradients GAM.txt")
#Estimate parameter values
res <- optim(start.values, calculate.sse, grads = grads, fixed.par = fixed.par,
control = list(trace = 1, maxit = 20000, reltol = 1e-10))
par.ests <- res$par
print(par.ests)
# dn = 0.009957235 gamma = 0.045667239 rn = 4.595227960
# eta = 10.306459015 dm = 0.009527586 alpha = 0.099251049
#Calculate percent error
perc.err <- (par.ests - true.values) / true.values * 100
print(perc.err)
# dn gamma rn eta dm alpha
#-0.4276505 -8.6655220 -8.0954408 3.0645902 -4.7241351 -0.7489510 |
94c765cd6ab0a52831272d6a3d92a8d7c5608cdb | 557b62106c0c5393ffd3904c47e7c29f38e4e845 | /man/theme_myriad_semi.Rd | eea5432048d1585298117ba0cd9efc4e551b1020 | [] | no_license | kjhealy/myriad | fffe308f3806c9889871fc15a1a14b8c1b9f57bf | 4816b3d8e554bc1a0d10c4b019e3733eec2a13a2 | refs/heads/main | 2023-04-15T09:55:38.618791 | 2023-04-06T16:21:26 | 2023-04-06T16:21:26 | 81,757,191 | 14 | 5 | null | null | null | null | UTF-8 | R | false | true | 738 | rd | theme_myriad_semi.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/myriad.r
\name{theme_myriad_semi}
\alias{theme_myriad_semi}
\title{theme_myriad_semi}
\usage{
theme_myriad_semi(
base_size = 12,
base_family = "Myriad Pro SemiCondensed",
title_family = "Myriad Pro SemiCondensed",
base_line_size = base_size/24,
base_rect_size = base_size/24
)
}
\arguments{
\item{base_family, title_family, base_size, base_line_size, base_rect_size}{base font family and size}
}
\description{
A [ggplot2] theme using semibold variants of Adobe Myriad Pro
}
\details{
You should [import_myriad_semi]() first and also install the fonts on your
system before trying to use this theme.
}
\examples{
\dontrun{
}
}
\author{
Kieran Healy
}
|
0a79081cdafe0206968570f1d14adadd60cdf526 | bde168fac75e9c17cd62e05fa8751b71328704f1 | /man/read62_01_run_control_params.Rd | bd35e0ab3114939d38d85d0ff4b0ec44ce504c19 | [
"MIT"
] | permissive | yosukefk/PuffR | bcafed842cbb25d4c890d7cf0706e85b46294f66 | b5d2040eaff1de017152c5e405c2d4f2b17f9251 | refs/heads/master | 2020-07-23T14:34:56.494444 | 2019-09-10T17:22:05 | 2019-09-10T17:22:05 | 207,593,685 | 0 | 0 | MIT | 2019-09-10T15:19:01 | 2019-09-10T15:19:01 | null | UTF-8 | R | false | false | 3,715 | rd | read62_01_run_control_params.Rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/read62_01_run_control_params.R
\name{read62_01_run_control_params}
\alias{read62_01_run_control_params}
\title{Set the READ62 run control parameters}
\usage{
read62_01_run_control_params(read62_inp = "read62_template.txt",
read_data_from_surf_dat = TRUE, ibyr = NULL, ibmo = NULL, ibdy = NULL,
ibhr = NULL, ibsec = 0, ieyr = NULL, iemo = NULL, iedy = NULL,
iehr = NULL, iesec = 0, jdat = 2, isub = 2, ifmt = 2, pstop = 500,
lht = FALSE, ltemp = FALSE, lwd = FALSE, lws = FALSE, lxtop = TRUE,
pvtop = 850, lxsfc = TRUE, zvsfc = 200)
}
\arguments{
\item{read62_inp}{the absolute path and filename for the working CALMET input file.}
\item{read_data_from_surf_dat}{an option to read the time variable data from an extant SURF.DAT file in the working folder.}
\item{ibyr}{the starting year for the CALMET run.}
\item{ibmo}{the starting month for the CALMET run.}
\item{ibdy}{the starting day for the CALMET run.}
\item{ibhr}{the starting hour for the CALMET run.}
\item{ibsec}{the starting second for the CALMET run.}
\item{ieyr}{the ending year for the CALMET run.}
\item{iemo}{the ending month for the CALMET run.}
\item{iedy}{the ending day for the CALMET run.}
\item{iehr}{the ending hour for the CALMET run.}
\item{iesec}{the ending second for the CALMET run.}
\item{jdat}{the type of NCDC input sounding data file; where '1' is the TD-6201 format and '2' is the NCDC FSL format.}
\item{isub}{the format of substitute UP.DAT input sounding data file; where '0' indicates that no substitute will be used, '1' states that the delimiter between sounding levels is a forward slash (and WS and WD have integer representations), and '2' states that the delimiter between sounding levels is a comma (and WS and WD have floating point representations).}
\item{ifmt}{the format of the main UP.DAT input sounding data file; where '1' states that the delimiter between sounding levels is a forward slash (and WS and WD have integer representations), and '2' states that the delimiter between sounding levels is a comma (and WS and WD have floating point representations).}
\item{pstop}{the top pressure level (in mb units) for which data are extracted. The pressure level must correspond to a height that equals or exceeds the top of the CALMET modeling domain, or else CALMET will stop with an error message.}
\item{lht}{a missing data control option for height that is used determine when a sounding level is rejected. If the height is missing from a level, that level will be rejected.}
\item{ltemp}{a missing data control option for temperature that is used determine when a sounding level is rejected. If the temperature is missing from a level, that level will be rejected.}
\item{lwd}{a missing data control option for wind direction that is used determine when a sounding level is rejected. If the wind direction is missing from a level, that level will be rejected.}
\item{lws}{a missing data control option for wind speed that is used determine when a sounding level is rejected. If the wind speed is missing from a level, that level will be rejected.}
\item{lxtop}{choice of whether to extrapolate to extend missing profile data to PSTOP pressure level.}
\item{pvtop}{if 'lxtop' is TRUE, then pvtop is the pressure level corresponding to where valid data must exist.}
\item{lxsfc}{choice of whether to extrapolate to extend missing profile data to the surface.}
\item{zvsfc}{if 'lxsfc' is TRUE, then zvsfc is the height (in meters) corresponding to where valid data must exist.}
}
\description{
This function validates and writes READ62 run control parameters to the working READ62.INP file.
}
|
cc5ab0d4179b1a4bfe32ce0ccc2f69f3b8eddc6c | b826cbebcb87f76ef7fda08b568e7fabd0fa69d6 | /3-getting-cleaning-data/week-2-getting-data.R | a61324a7ebed183d8b4ba3b1291c5a294b96abd4 | [] | no_license | M0eB/data-science-coursera | f7dc7406474cb6ba5346ace51e0b6eb0b6a55e47 | 2a1bdb3cde5ff9e2efb70c012e73ff51952b1563 | refs/heads/master | 2016-09-11T02:56:35.705301 | 2014-09-29T02:51:02 | 2014-09-29T02:51:02 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,099 | r | week-2-getting-data.R | # =============================================================================
# By : Mohamed T. Ismail
# Course : Getting and Cleaning Data (Johns Hopkins University)
# Provider : Coursera.com
# Description : Week 2 notes and examples (from slides)
# =============================================================================
get_mysql_data <- function()
{
# Get a connection handle
ucscDb <- dbConnect( MySQL(),
user="genome",
host="genome-mysql.cse.ucsc.edu" )
## Run a mysql command on the db
## Gets the list of all available databases
result <- dbGetQuery( ucsc, "show databases;" );
## Always make sure you disconnect (returns TRUE)
dbDisconnect( ucscDb );
## Contains the reuslt of all
print( result )
## -------------------------------------------------------
## The server used above has many databases
## You can connect to a specific database :
## Access a specific database within the mysql server
hg19 <- dbConnect( MySQL(),
user="genome",
db="hg19",
host="genome-mysql.cse.ucsc.edu")
## See what tables are availables in that database
allTables <- dbListTables( hg19 )
## Large databse with over 10949 tables...
length( allTables )
allTables[1:5]
## Get fields (columns) of a specific table
dbListFields( hg19, "affyU133Plus2" )
## Get number of rows in the dataset using query
dbGetQuery( hg19, "select count(*) from affyU133PLus2" )
## REad from the table
affyData <- dbREadTable( hg19, "affyU133Plus2" )
head( affyData )
## The table may be too large to read into R
## In that case, read only a subset of the table :
## Query what you want - result not yet in your pc
query <- dbSendQuery( hg19, "select * from affyU133PLus2 where misMatches between 1 and 3" )
## Fetch the full query result
affyMis <- fetch( query )
quantile( affyMis$misMatches )
## Fetch only some of the query result
affyMisSmall <- fetch( query, n=10 )
dim( affyMisSmall )
## Clear the query from the server
dbClearResult( query )
## Remember to close connection (returns TRUE)
dbDisconnect( hg19 )
}
get_hdf5_data <- function()
{
source( "http://bioconductor.org/biocLite.R" )
biocLite( "rhdf5" )
library( rhdf5 )
created = h5createFile( "example.h5" )
created
## This will install packages from Bioconductor http:///bioconductor.org/
## primarily used for genomics but also has good "big data" packages
## Can be used to interface with hdf5 data sets
## This lecture is modeled very closely on the rhdf5 tutorial here :
## http://www.bioconductor.org/packages/release/bioc/vignettes/rhdf5/inst/doc/rhdf5.pdf
## Create groups
created = h5createGroup( "example.h5", "foo" )
created = h5createGroup( "example.h5", "baa" )
created = h5createGroup( "example.h5", "foo/foobaa" )
h5ls( "example.h5" )
## Write to groups
A = matrix( 1:10, nr=5, nc=2 )
h5write( A, "example.h5", "foo/A" )
B = array( seq( 0.1, 2.0, by=0.1 ), dim=c( 5, 2, 2) )
attr( B, "scale" ) <- "liter"
h5write( B, "example.h5", "foo/foobaa/B" )
h5ls( "example.h5" )
## Write a data set
df = data.frame( 1L:5L, seq( 0, 1, length.out=5 ),
c( "ab", "cde", "fghi", "a", "s"), stringsAsFactors=FALSE )
h5write( df, "example.h5", "df" )
h5ls( "example.h5" )
## Reading Data
readA = h5read( "example.h5", "foo/A" )
readB = h5read( "example.h5", "foo/foobaa/B" )
readdf= h5read( "example.h5", "df" )
readA
## Writing and Reading Chunks
h5write( c( 12, 13, 14), "example.h5", "foo/A", index=list( 1:3, 1) )
h5read( "example.h5", "foo/A" )
}
get_web_data <- function()
{
# httr allows GET, POST, PUT, DELETE requests if you are authorized
# You can authenticate with a user name or password
# Most modern APIs use something like oauth
# httr works well with Facebook, Google, Twitter, Github, etc.
## Getting Data off Webpages
con = url( "http://scholar.google.com/citations?user=HI-I6C0AAAAJ&hl=en" )
htmlCode = readLines( con )
close( con )
htmlCode
## Parsing with XML
library( XML )
url <- "http://scholar.google.com/citations?user=HI-I6C0AAAAJ&hl=en"
html <- htmlTreeParse( url, useInternalNodes=TRUE )
xpathSApply( html, "//title", xmlValue )
xpathSApply( html, "//td[@id='col-citedby']", xmlValue )
## GET from httr Package
library( httr )
html2 = GET( url )
parseHtml = htmlParse( content2, asText=TRUE )
xpathSApply( parseHtml, "//title", xmlValue )
## Accessing Websites with Passwords
pg1 = GET( "http://httpbin.org/basic-auth/user/passwd" )
pg1
pg2 = GET("http://httpbin.org/basic-auth/user/passwd",
authenticate("user","passwd"))
pg2
names(pg2)
## Using Handles
google = handle("http://google.com")
pg1 = GET( handle=google, path="/" )
pg2 = GET( handle=google, path="search" )
}
get_twitter_api_data <- function()
{
## Accessing Twitter from R
myapp = oauth_app( "twitter", key="yourConsumerKeyHere", secret="yourConsumerSecretHere" )
sig = sign_oauth1.0( myapp, token="yourTokenHere", token_secret="yourTokenSecretHere" )
homeTL = GET( "https://api.twitter.com/1.1/statuses/home_timeline.json", sig )
## Converting the JSON Object
json1 = content(homeTL)
json2 = jsonlite::fromJSON(toJSON(json1))
json2[1, 1:4]
}
|
2bdc4ff1eb0073dce0e6df620d704e06ce8703f3 | 57fe6ae279eb902aaced664faca979571d8ce059 | /course-3_data-cleaning/week-2/week-2-quiz.R | f6cb8c950743b952e17148ad2f6df8df9c85e719 | [] | no_license | DrShashiPonraja/coursera_data-science_Johns-Hopkins | 82dc6c4da12f06beff2ca9247e8597db264e594b | 933ec5a97ed1c072b30d8a85fd713291cece7e4f | refs/heads/master | 2020-03-20T17:19:32.346040 | 2018-06-17T00:51:27 | 2018-06-17T00:51:27 | 137,556,939 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,862 | r | week-2-quiz.R | ##Register an application with the Github API here https://github.com/settings/applications.
##Access the API to get information on your instructors repositories (hint: this is the url you want "https://api.github.com/users/jtleek/repos").
##Use this data to find the time that the datasharing repo was created. What time was it created?
##
##This tutorial may be useful (https://github.com/hadley/httr/blob/master/demo/oauth2-github.r).
##You may also need to run the code in the base R package and not R studio.
library(ROAuth)
install.packages('RCurl')
library(RCurl)
library(XML)
myapp <- oauth_app('github', key="7baa729db663163abc67",secret="64febcbb8ba999ce0148893bd2a53b0b8e6a8627")
personal_access_token <- '1336db7183b5b961753323b945b5c91b5055bb22'
sig <- sign_oauth1.0(myapp, token=personal_access_token)
LeekRepoInfo <- GET('https://api.github.com/repos/jtleek/datasharing', sig)
LeekRepoInfo
library(jsonlite)
json1 <- content(LeekRepoInfo)
json2 <- jsonlite::fromJSON(toJSON(json1))
## 1336db7183b5b961753323b945b5c91b5055bb22
json2$created_at
## I don't understand why I needed to make a github api for this - it was readily available online?
##Question 2 and 3
## had to install mysql
## A temporary password is generated for root@localhost: <;-i=+cb6vrI
install.packages('sqldf')
detach("package:RMySQL", unload=TRUE)
library(sqldf)
acs<-read.csv('https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06pid.csv')
z<-sqldf("select pwgtp1 from acs where AGEP < 50")
nrow(z)
nrow(acs)
##Question 3
sqldf("select distinct AGEP from acs")
## Question 4
## How many characters are in the 10th, 20th, 30th and 100th lines of HTML from this page:
## http://biostat.jhsph.edu/~jleek/contact.html
## (Hint: the nchar() function in R may be helpful)
con <-url('http://biostat.jhsph.edu/~jleek/contact.html')
htmlCode <- readLines(con)
close(con)
htmlCode
nchar(htmlCode[10])
nchar(htmlCode[20])
nchar(htmlCode[30])
nchar(htmlCode[100])
## question 5
## Read this data set into R and report the sum of the numbers in the fourth of the nine columns.
##
## https://d396qusza40orc.cloudfront.net/getdata%2Fwksst8110.for
##
## Original source of the data: http://www.cpc.ncep.noaa.gov/data/indices/wksst8110.for
con <-url('https://d396qusza40orc.cloudfront.net/getdata%2Fwksst8110.for')
htmlCode <- readLines(con)
close(con)
htmlCode
##z<-read.table('https://d396qusza40orc.cloudfront.net/getdata%2Fwksst8110.for', skip=6)
htmlCode[4]
install.packages('reshape')
library(reshape)
tess <- strsplit(htmlCode[5:7],' ')
tess
colsplit(tess)
length(tess[1])
class(tess)
as.data.frame(tess)
##correct solution, uses 'fixed width format'. Probably should've read the question first XD
z<-read.fwf('https://d396qusza40orc.cloudfront.net/getdata%2Fwksst8110.for', widths=c(10, 9, 4, 9, 4, 9, 4, 9, 4), skip=4)
sum(z[4])
|
3c54a1fcb1b277c4ed530b868e74c2fbbe4439cb | f0bb7b739b8109def549b8acdbcb15cc79cc8d11 | /cont-model-part-II.R | 6ef57da6071725a334d9ee6ae1702f2ea70a6544 | [] | no_license | vikasgupta1812/rsnippets | 2ba764b47334f33487768ca506eca5ab1835c792 | a4572b1ed5289de06c4cc8f7de5736e9e2b85043 | refs/heads/master | 2021-01-21T00:01:28.739303 | 2016-06-08T21:58:06 | 2016-06-08T21:58:06 | 60,504,464 | 0 | 0 | null | 2016-06-06T06:39:16 | 2016-06-06T06:39:14 | null | UTF-8 | R | false | false | 2,096 | r | cont-model-part-II.R | # Description : Cont Model Part II
# Website : http://rsnippets.blogspot.in/2013/10/cont-model-part-ii.html
cont.run <- function(burn.in, reps, n, d, l ,s) {
tr <- rep(0, n)
sig <- rnorm(reps, 0, d)
r <- rep(0, reps)
for (i in 1:reps) {
r[i] <- (sum(sig[i] > tr) - sum(sig[i] < (-tr))) / (l * n)
tr[runif(n) < s] <- abs(r[i])
}
return(r[burn.in:reps])
}
set.seed(1)
sim.points <- 100
d <- runif(sim.points, 0.002, 0.01)
l <- runif(sim.points, 5, 10)
s <- runif(sim.points, 0.01, 0.1)
m <- runif(sim.points, 1, 2) # comparison multiplier
seeds <- runif(sim.points) # common random numbers seeds
range(mapply(function(d, l, s, m, seed) {
set.seed(seed)
r1 <- cont.run(1000, 10000, 1000, d, l ,s)
set.seed(seed)
r2 <- cont.run(1000, 10000, 1000, d / m, l * m ,s)
range(r1 / m - r2)
}, d, l, s, m, seeds)) # -2.775558e-17 1.387779e-17
library(lattice)
data.set <- read.table("data/sim_output.txt", head = T,
colClasses = rep("numeric", 4))
data.set$dl <- data.set$d * data.set$l
data.set$cs <- cut(data.set$s, seq(0.01, 0.1, len = 10))
data.set$cdl <- cut(data.set$dl, seq(0, 0.2, len = 11))
sum.data <- aggregate(k ~ cdl + cs, data = data.set, mean)
trellis.par.set(regions=list(col=topo.colors(100)))
levelplot(k~cdl+cs, data=sum.data,scales=list(x=list(rot=90)),
xlab = "d * l", ylab = "s")
cont.run.vol <- function(burn.in, reps, n, d, l ,s) {
tr <- rep(0, n)
sig <- rnorm(reps, 0, d)
r <- rep(0, reps)
t <- rep(0, reps)
for (i in 1:reps) {
r[i] <- (sum(sig[i] > tr) - sum(sig[i] < (-tr))) / (l * n)
t[i] <- (sum(sig[i] > tr) + sum(sig[i] < (-tr))) / n
tr[runif(n) < s] <- abs(r[i])
}
c(kurtosis(r[burn.in:reps]), mean(t[burn.in:reps]))
}
library(e1071)
sim.points <- 100
d <- runif(sim.points,0.001,0.01)
l <- runif(sim.points,5,20)
s <- runif(sim.points,0.01,0.1)
data.set <- mapply(function(d, l, s) {
cont.run.vol(1000, 10000, 1000, d, l ,s)
}, d, l, s)
data.set <- t(data.set)
colnames(data.set) <- c("kurtosis", "volume")
data.set <- data.set[, 2:1]
par(mar=c(4, 4, 1, 1))
plot(data.set) |
43abf325f2c477ba5f73cb276f114087768b150a | 6951cfcfbcad0034696c6abe9a4ecf51aa0f3a4b | /man/vignette.Rd | c9dcea01ae8570968dc9fc640428866fe4aa5b2a | [] | no_license | renozao/pkgmaker | df3d4acac47ffbd4798e1d97a31e311bf35693c8 | 2934a52d383adba1d1c00553b9319b865f49d15b | refs/heads/master | 2023-05-10T16:40:30.977394 | 2023-05-03T07:02:51 | 2023-05-03T07:17:17 | 12,726,403 | 8 | 3 | null | 2023-02-14T10:26:07 | 2013-09-10T10:07:35 | R | UTF-8 | R | false | true | 4,518 | rd | vignette.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vignette.R
\name{rnw}
\alias{rnw}
\alias{isManualVignette}
\alias{as.rnw}
\alias{rnwCompiler}
\alias{rnwWrapper}
\alias{rnwDriver}
\alias{rnwIncludes}
\alias{rnwChildren}
\alias{vignetteMakefile}
\alias{compactVignettes}
\title{Utilities for Vignettes}
\usage{
rnw(x, file = NULL, ..., raw = FALSE)
isManualVignette()
as.rnw(x, ..., load = TRUE)
rnwCompiler(x, verbose = TRUE)
rnwWrapper(x, verbose = TRUE)
rnwDriver(x)
rnwIncludes(x)
rnwChildren(x)
vignetteMakefile(
package = NULL,
skip = NULL,
print = TRUE,
template = NULL,
temp = FALSE,
checkMode = isCHECK() || vignetteCheckMode(),
user = NULL,
tests = TRUE
)
compactVignettes(paths, ...)
}
\arguments{
\item{x}{vignette source file specification as a path or a \code{rnw} object.}
\item{file}{output file}
\item{...}{extra arguments passed to \code{as.rnw} that can be used to force
certain building parameters.}
\item{raw}{a logical that indicates if the raw result for the compilation
should be returned, instead of the result file path.}
\item{load}{logical to indicate if all the object's properties should loaded,
which is done by parsing the file and look up for specific tags.}
\item{verbose}{logical that toggles verbosity}
\item{package}{package name.
If \code{NULL}, a DESRIPTION file is looked for one directory up: this
meant to work when building a vignette directly from a package's
\code{'vignettes'} sub-directory.}
\item{skip}{Vignette files to skip (basename).}
\item{print}{logical that specifies if the path should be printed or
only returned.}
\item{template}{template Makefile to use.
The default is to use the file \dQuote{vignette.mk} shipped with the package
\pkg{pkgmaker} and can be found in its install root directory.}
\item{temp}{logical that indicates if the generated makefile should using a
temporary filename (\code{TRUE}), or simply named \dQuote{vignette.mk}}
\item{checkMode}{logical that indicates if the vignettes should be generated as in a
CRAN check (\code{TRUE}) or in development mode, in which case \code{pdflatex}, \code{bibtex},
and, optionally, \code{qpdf} are required.}
\item{user}{character vector containing usernames that enforce \code{checkMode=TRUE},
if the function is called from within their session.}
\item{tests}{logical that enables the compilation of a vignette that gathers all unit
test results.
Note that this means that all unit tests are run before generating the vignette.
However, unit tests are not (re)-run at this stage when the vignettes are built
when checking the package with \code{R CMD check}.}
\item{paths}{A character vector of paths to PDF files, or a length-one
character vector naming a directory, when all \file{.pdf} files in
that directory will be used.}
}
\value{
\code{rnw} returns the result of compiling the vignette with \link{runVignette}.
}
\description{
\code{rnw} provides a unified interface to run vignettes that detects
the type of vignette (Sweave or knitr), and which Sweave driver
to use (either automatically or from an embedded command \code{\\VignetteDriver}
command).
}
\section{Functions}{
\itemize{
\item \code{isManualVignette()}: tells if a vignette is being run through the function \code{runVignette}
of \pkg{pkgmker}, allowing disabling behaviours not allowed in package vignettes that are
checked vi \code{R CMD check}.
\item \code{as.rnw()}: creates a S3 \code{rnw} object that contains information
about a vignette, e.g., source filename, driver, fixed included files, etc..
\item \code{rnwCompiler()}: tries to detect the vignette compiler to use on a vignette
source file, e.g., \code{\link{Sweave}} or \link[knitr:knitr-package]{knitr}.
\item \code{rnwWrapper()}: tries to detect the type of vignette and if it is meant
to be wrapped into another main file.
\item \code{rnwDriver()}: tries to detect Sweave driver to use on a vignette source
file, e.g., \code{SweaveCache}, \code{highlight}, etc..
\item \code{rnwIncludes()}: detects fixed includes, e.g., image or pdf files, that are
required to build the final document.
\item \code{rnwChildren()}: detects included vignette documents and return them as a
list of vignette objects.
\item \code{vignetteMakefile()}: returns the path to a generic makefile used to make
vignettes.
\item \code{compactVignettes()}: compacts vignette PDFs using either \code{gs_quality='none'} or \code{'ebook'},
depending on which compacts best (as per CRAN check criteria).
}}
|
76e14a5dfaf704dd4fa2794fcc5e1ff53d0ac7e4 | 97102dcc2d443aa4aedff36535df5674ebd68788 | /SparqlQueries/getTrustsFromSwirrl.R | 6aaf611e3a3a296606225b3e671cc58c63fde460 | [] | no_license | sinclr4/DataHoles | e20d4ef5ee8294247f27adb0bc24ea67ef4e0ab9 | e1c8a288bdc917fa307c4beed3c0d9ad609baa4d | refs/heads/master | 2020-06-13T13:25:46.916204 | 2017-01-18T20:56:52 | 2017-01-18T20:56:52 | 75,374,337 | 0 | 0 | null | 2016-12-14T18:45:12 | 2016-12-02T08:02:06 | R | UTF-8 | R | false | false | 382 | r | getTrustsFromSwirrl.R | # List of Datasets
list_of_trusts_from_swirrl_for_pdw <- 'PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
SELECT DISTINCT ?refArea ?org_name
WHERE {
?observation <http://purl.org/linked-data/cube#dataSet> <http://nhs.publishmydata.com/data/place-pdw> .
?observation <http://nhs.publishmydata.com/def/dimension/refOrganisation> ?refArea .
?refArea rdfs:label ?org_name
}' |
0ce1467a7a18e3d98ee03d0a13a2b0525bd7bf2a | 049b6e37472c3d460bb30911cd7d470d563c612d | /containers/tscan/run.R | 5f7a100185765ea5fbb67d96b802f7432030a094 | [] | no_license | ManuSetty/dynmethods | 9919f4b1dc30c8c75db325b4ddcd4e9ada5e488b | 337d13b7a6f8cac63efdeb0d06d80cd2710d173d | refs/heads/master | 2020-03-21T11:34:35.406210 | 2018-06-24T20:25:50 | 2018-06-24T20:25:50 | 138,512,485 | 1 | 0 | null | 2018-06-24T20:16:12 | 2018-06-24T20:16:11 | null | UTF-8 | R | false | false | 2,595 | r | run.R | library(dynwrap)
library(jsonlite)
library(readr)
library(dplyr)
library(purrr)
library(TSCAN)
library(igraph)
# ____________________________________________________________________________
# Load data ####
data <- read_rds('/input/data.rds')
params <- jsonlite::read_json('/input/params.json')
# ____________________________________________________________________________
# Infer trajectory ####
run_fun <- function(
counts,
minexpr_percent = 0,
minexpr_value = 0,
cvcutoff = 0,
clusternum_lower = 2,
clusternum_upper = 9,
modelNames = "VVV"
) {
requireNamespace("TSCAN")
requireNamespace("igraph")
# process clusternum
clusternum <- seq(clusternum_lower, clusternum_upper, 1)
# TIMING: done with preproc
tl <- add_timing_checkpoint(NULL, "method_afterpreproc")
# preprocess counts
cds_prep <- TSCAN::preprocess(
t(as.matrix(counts)),
takelog = TRUE,
logbase = 2,
pseudocount = 1,
clusternum = NULL,
minexpr_value = minexpr_value,
minexpr_percent = minexpr_percent,
cvcutoff = cvcutoff
)
# cluster the data
cds_clus <- TSCAN::exprmclust(
cds_prep,
clusternum = clusternum,
modelNames = modelNames,
reduce = TRUE
)
# order the cells
cds_order <- TSCAN::TSCANorder(cds_clus)
# TIMING: done with method
tl <- tl %>% add_timing_checkpoint("method_aftermethod")
# process output
cluster_network <- cds_clus$MSTtree %>%
igraph::as_data_frame() %>%
rename(length = weight) %>%
mutate(directed = FALSE)
sample_space <- cds_clus$pcareduceres
cluster_space <- cds_clus$clucenter
rownames(cluster_space) <- as.character(seq_len(nrow(cluster_space)))
colnames(cluster_space) <- colnames(sample_space)
# return output
wrap_prediction_model(
cell_ids = rownames(counts)
) %>% add_dimred_projection(
milestone_ids = rownames(cluster_space),
milestone_network = cluster_network,
dimred_milestones = cluster_space,
dimred = sample_space,
milestone_assignment_cells = cds_clus$clusterid,
num_segments_per_edge = 100
) %>% add_timings(
timings = tl %>% add_timing_checkpoint("method_afterpostproc")
)
}
args <- params[intersect(names(params), names(formals(run_fun)))]
model <- do.call(run_fun, c(args, data))
# ____________________________________________________________________________
# Save output ####
write_rds(model, '/output/output.rds') |
39800170862d93f30db2ffa19d02dd17910281d4 | 9fcb7bf2a0016403dd3de4e6a357774ce617fe06 | /Viz/Tree_Viz.R | 585310a938fce548c6cb52d427e49f3cc1dfbcfa | [] | no_license | jdfreden/Spades | 98c1c1d2b31216cbdef8e320e181805936920061 | 8b924def0fde39fdde23b85b4d4ace7ca0ca20fe | refs/heads/master | 2023-05-10T00:16:56.241052 | 2021-05-26T00:56:56 | 2021-05-26T00:56:56 | 354,326,848 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,526 | r | Tree_Viz.R | # Title : Visualize ICMCTS
# Created by: jdfre
# Created on: 4/14/2021
library(visNetwork)
ROUND_DIGITS = 5
processFile = function(filepath) {
lines = NULL
con = file(filepath, "r")
while ( TRUE ) {
line = readLines(con, n = 1)
if ( length(line) == 0 ) {
break
}
print(line)
lines = c(lines, line)
}
close(con)
return(lines)
}
assign_colors = function(node) {
palette = colorRampPalette(colors=c("#FF0000", "#182848"))
visits = sort(unique(node$value), decreasing = T)
cols = palette(length(visits))
idx = sapply(node$value, function(x) which(x == visits))
color = cols[idx]
return(color)
}
# ---- Parse the MCTS tree dump and render it as a hierarchical network ----
# Each non-comment line of the dump describes one node: "[M:<move> ... A:
# <value>/ <visits>]" followed by a "<parent>:<parent value>" token.
lines = processFile("example_trees/test_tree_2.txt")
# Collapse runs of spaces to a single space.
# NOTE(review): the pattern " *" also matches the empty string at every
# position, which gsub() replaces too — " +" is probably what was meant;
# confirm against a sample dump.
lines = gsub(" *", " ", lines)
r = lines[1]
# Lines starting with "#" carry the selected best move.
best.move = lines[startsWith(lines, "#")]
lines = lines[lines != ""]
lines = lines[!startsWith(lines, "#")]
lines = lines[-1]
# Process root: pull the "<value>/<visits>" pair out of the header line.
r.stats = unlist(strsplit(r, ": "))[2]
r.stats = gsub("\\]", "", r.stats)
r.stats = gsub(" ", "", r.stats)
r.stats = unlist(strsplit(r.stats, "/"))
r.stats = as.numeric(r.stats)
# Accumulators for the edge list (from/to) and per-node visit counts
# (val). `track` maps each move label to the node values seen so far so
# that repeated move labels get distinct ":<idx>" suffixes.
from = NULL
to = NULL
val = r.stats[2]
track = list(root = 0)
for(i in seq_along(lines)) {
  cont = unlist(strsplit(lines[i], "\\| "))
  cont = cont[cont != ""]
  info = unlist(strsplit(cont, "\\] "))
  # t = move label, v = c(node value, visit count) for this node.
  t = gsub("\\[M:", "", unlist(strsplit(info[1], " "))[1])
  v = unlist(strsplit(info[1], "A: "))[2]
  v = as.numeric(unlist(strsplit(v, "/ ")))
  # Disambiguate nodes that share a move label: reuse the index when the
  # value was already recorded, otherwise append it.
  if(t %in% names(track)) {
    if(sum(v[1] %in% track[[t]]) == 1) {
      idx = which(v[1] %in% track[[t]])
    } else {
      track[[t]] = c(track[[t]], v[1])
      idx = length(track[[t]])
    }
  } else {
    track[[t]] = round(v[1], ROUND_DIGITS)
    idx = 1
  }
  t = paste(t, idx, sep = ":")
  v = v[2]
  # Parent reference: "<label>:<value>"; "None" marks the root's child.
  f = unlist(strsplit(info[2], " "))[1]
  f = unlist(strsplit(f, ":"))
  fv = round(as.numeric(f[2]), ROUND_DIGITS)
  f = f[1]
  if(f == "None") {
    f = "root"
  }
  # Resolve the parent's disambiguation index from its recorded value.
  fidx = which(track[[which(f == names(track))]] == fv)
  f = paste0(f, ":", fidx)
  names(v) = t
  from = c(from, f)
  to = c(to, t)
  val = c(val, v)
}
names(val)[1] = "root:1"
# Build the visNetwork node table: id, display label (move without the
# ":<idx>" suffix), visit count, and ramp colour.
val.names = names(val)
val = data.frame(val)
colnames(val)[1] = "value"
val$id = val.names
nodes = data.frame(id = unique(c(from, to)))
nodes$label = gsub("\\:.*", "", nodes$id)
nodes = merge(nodes, val, by = "id")
#nodes$value = val
edges = data.frame(from = from, to = to)
nodes$color = assign_colors(nodes)
# Render top-down with collapsible subtrees and neighbour highlighting.
visNetwork(nodes, edges) %>%
  visOptions(collapse = T, highlightNearest = T) %>%
  visHierarchicalLayout(sortMethod = "directed")
|
5824ae783ee962080fb0ae1aea1e383eae5e4204 | 12ade55af2eb10c335e765fb143e8aa0f8c82832 | /Analysis/step14_Gene_Signature_Study.R | c38f37246e4a27e4565bbc11f1611656984249d0 | [] | no_license | zexian/BBCAR_codes | 7340f6dbe13d9e9539d2308064dd014eb3605dad | e924677613e1441ae55e38bc29bb307e40a4a4ff | refs/heads/master | 2022-04-22T22:53:59.762389 | 2020-04-23T05:03:06 | 2020-04-23T05:03:06 | 167,890,339 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,098 | r | step14_Gene_Signature_Study.R | #source("https://bioconductor.org/biocLite.R")
#biocLite("VariantAnnotation")
#source("https://bioconductor.org/biocLite.R")
#biocLite("SomaticSignatures")
#source("https://bioconductor.org/biocLite.R")
#biocLite("BSgenome.Hsapiens.UCSC.hg38")
#install.packages('ggdendro')
#source("https://bioconductor.org/biocLite.R")
#biocLite("signeR")
library(readr)
library(VariantAnnotation)
library(SomaticSignatures)
library(BSgenome.Hsapiens.UCSC.hg19)
library(ggdendro)
library(ggplot2)
library(signeR)
library(rtracklayer)
library(readr) # NOTE(review): duplicate of library(readr) above
library(LaplacesDemon)
print('chicken0') # debug checkpoint
library(readr) # NOTE(review): third library(readr) call — redundant
# 30 COSMIC reference signatures: keep only the 30 per-signature
# probability columns (4:33), dropping the context label columns.
X30_signatures <- read_csv("/projects/p30007/Zexian/tools/DNAtools/30_signatures.csv")[,4:33]
# Output directory and the folder holding the per-patient annotated VCFs.
Analysis <- '/projects/p30007/Zexian/Alignment/BBCAR_NEW/administrative/Step14data/'
VCF<-'/projects/p30007/Zexian/Alignment/BBCAR_NEW/WES_Analysis/Mutect/VCF_Anno_Exonic_Somatic/'
print('chicken1') # debug checkpoint
# Trinucleotide opportunity counts for the exome (used by signeR).
file<-paste('/projects/p30007/Zexian/tools/DNAtools/exome_count.txt',sep="")
#opp_file<-read_csv(file,locale=locale(tz="Australia/Sydney"))
opp_file<-read.table(file,header = TRUE,sep='\t')
# Clinical table linking Study_ID to Case/Control status.
comparison <- read.table('/projects/p30007/Zexian/Alignment/BBCAR_NEW/administrative/Step10data/clinical_final.txt' ,header = TRUE,sep='\t')
print(dim(comparison))
# Cohorts to analyse: each observed CaseControl level plus 'all'.
classes<-comparison$CaseControl
u2 <- unique(classes)
u3<-as.factor(c(as.character(u2),'all'))
#u2<-1 #need to delete
print('chicken3') # debug checkpoint
# Per-cohort mutational-signature pipeline. For each cohort in u3
# ('Case', 'Control', 'all') this loop: pools the per-patient annotated
# VCFs, keeps single-nucleotide variants on canonical chromosomes,
# extracts trinucleotide mutation contexts, decomposes them into
# signatures with both signeR and NMF, compares the NMF signatures
# against the 30 reference signatures via KL divergence, and writes a
# series of tables and PDF plots under `Analysis`.
for (class_type in u3){
  print('start')
  print(class_type)
  # Select the study IDs belonging to this cohort ('all' = everyone).
  if ( class_type %in% c('Case','Control')){
    sub_group<-comparison[comparison$CaseControl==class_type,]$Study_ID
  }
  if ( class_type == 'all'){
    sub_group<-comparison$Study_ID
  }
  print(length(sub_group))
  # Pool every patient's variants into one GRanges, tagging each record
  # with its patient id.
  all_vcf<-GRanges()
  for (individual in sub_group) {
    if (!is.na(individual) ) {
      print(individual)
      filename <- paste(VCF,'/',individual,'.hg19_multianno.vcf',sep="")
      print(filename)
      in_vcf <- readVcf(filename, "hg19")
      gvcf<-rowRanges(in_vcf)
      gvcf$patient_id<-factor(replicate(length(gvcf), individual))
      all_vcf<-c(all_vcf,gvcf)}
  }
  all_vcf$REF<-as.factor(all_vcf$REF)
  all_vcf$ALT<-as.factor(unstrsplit(CharacterList(all_vcf$ALT), sep = ","))
  all_vcf$QUAL<-as.factor(all_vcf$QUAL)
  all_vcf$FILTER<-as.factor(all_vcf$FILTER)
  print('ok1')
  # Build a VRanges; 'study' encodes cohort + patient so downstream
  # matrices have one column per patient within the cohort.
  vvcf = VRanges(
    seqnames = seqnames(all_vcf),
    ranges = ranges(all_vcf),
    ref = all_vcf$REF,
    alt = all_vcf$ALT,
    study = paste(class_type,as.character(all_vcf$patient_id),sep = '')
  )
  # Keep SNVs only (single-base REF and ALT) ...
  idx_snv<- ref(vvcf) %in% DNA_BASES & alt(vvcf) %in% DNA_BASES
  vvcfR<-vvcf[idx_snv]
  # ... and only canonical chromosomes.
  chrome<-c('chr1','chr2','chr3', 'chr4','chr5', 'chr6','chr7', 'chr8','chr9', 'chr10','chr11', 'chr12','chr13', 'chr14','chr15', 'chr16','chr17', 'chr18','chr19', 'chr20','chr21', 'chr22','chrX', 'chrY','chrM' )
  idx_snv<- as.character(seqnames(vvcfR)) %in% chrome
  vvcfR2<-vvcfR[idx_snv]
  # Trinucleotide mutation contexts from the hg19 reference.
  sca_motifs = mutationContext(vvcfR2,k=3,BSgenome.Hsapiens.UCSC.hg19,strand=FALSE,unify=TRUE,check=TRUE)
  print('ok8')
  savefile<-paste(Analysis,'/sca_motifs_class_',class_type,'.rda',sep='')
  saveRDS(sca_motifs,savefile)
  # Motif matrix: normalized per study, then rescaled back to counts
  # using each study's total mutation number.
  count_person<- as.numeric(as.matrix(table(sca_motifs$study)))
  sca_mm = motifMatrix(sca_motifs, group = "study",normalize = TRUE)
  sca_mm_count<-t(t(sca_mm)*count_person)
  savefile<-paste(Analysis,'/matrix_class_',class_type,'_.txt',sep='')
  write.table(sca_mm_count,savefile,sep='\t')
  savefile<-paste(Analysis,'/NMF_MutationSpectrum_class_',class_type,'_.pdf',sep='')
  pdf(savefile,width = 20, height = 120)
  print(dim(sca_motifs))
  tempplot<-plotMutationSpectrum(sca_motifs, "study")
  tempplot<-tempplot+ scale_fill_manual(values = rep("darkred", ncol(sca_mm)))
  print(tempplot)
  dev.off()
  print('ok7')
  # signeR decomposition: opportunity matrix repeated once per sample;
  # cap the tested signature range when the cohort is large.
  opp <- opp_file[rep(seq_len(nrow(opp_file)), ncol(sca_mm_count)),]
  mut<-t(sca_mm_count)
  max_sig<-round(ncol(sca_mm),digits=0)-1
  if (max_sig>50){
    max_sig=20
  }
  print('max_sig')
  print(max_sig)
  signatures <- signeR(M=mut, Opport=opp, nlim=c(2,max_sig))
  # Pick the signature count with the best mean BIC.
  # NOTE(review): BIC_score is a list (lapply); which.max() requires a
  # numeric vector — sapply()/vapply() would be safer. Verify this runs.
  BIC_score<-lapply(signatures$Test_BICs,mean)
  n_signature<-signatures$tested_n[which.max(BIC_score)]
  savefile<-paste(Analysis,'/EMu_BICboxplot_class_',class_type,'_.pdf',sep='')
  pdf(savefile,width = 10, height = 20)
  tempplot<-BICboxplot(signatures)
  print(tempplot)
  dev.off()
  # NMF decomposition for the same range of signature counts.
  n_sigs = 2:max_sig
  gof_nmf = assessNumberSignatures(sca_mm, n_sigs, nReplicates = 5)
  savefile<-paste(Analysis,'/NMF_NumberSignatures_',class_type,'_.pdf',sep='')
  pdf(savefile,width = 10, height = 20)
  tempplot<-plotNumberSignatures(gof_nmf)
  print(tempplot)
  dev.off()
  sigs_nmf = identifySignatures(sca_mm, n_signature, nmfDecomposition)
  write.table(samples(sigs_nmf),'/projects/p30007/Zexian/Alignment/BBCAR_NEW/administrative/Step14data/wMatrix.csv',sep='\t')
  # Column-normalise the NMF signatures and score each against all 30
  # reference signatures with symmetric KL divergence.
  w_sig <- signatures(sigs_nmf)
  w_norm <- t(t(w_sig) / colSums(w_sig)) #check column
  results<-c()
  for (kg in 1:dim(w_norm)[2]){
    result<-c()
    for (line in 1:dim(X30_signatures)[2]){
      number<-KLD(unname(w_norm[,kg]),unname(unlist(X30_signatures[,line])))$mean.sum.KLD
      result<-c(result,number)
    }
    results<-rbind.data.frame(results,result)
  }
  colnames(results)<-colnames(X30_signatures)
  rownames(results)<-colnames(w_norm)
  savefile=paste(Analysis,'/NMF_sig_validate',class_type,'.csv',sep='')
  write.table(results, file = savefile, sep = ",", qmethod = "double")
  # Persist both decompositions, then emit the plot series (NMF and
  # signeR variants of each view).
  savefile=paste(Analysis,'/NMF_data_sig_',class_type,'.rda',sep='')
  saveRDS(sigs_nmf,savefile)
  savefile=paste(Analysis,'/EMu_data_sig_',class_type,'.rda',sep='')
  saveRDS(signatures,savefile)
  savefile<-paste(Analysis,'/NMF_SignatureMap_class_',class_type,'_.pdf',sep='')
  pdf(savefile,width = 10, height = 20)
  tempplot<-plotSignatureMap(sigs_nmf) + ggtitle(" Signatures: NMF - Heatmap")
  print(tempplot)
  dev.off()
  savefile<-paste(Analysis,'/EMu_SignatureMap_class_',class_type,'_.pdf',sep='')
  pdf(savefile,width = 10, height = 20)
  tempplot<-SignHeat(signatures$SignExposures)
  print(tempplot)
  dev.off()
  savefile<-paste(Analysis,'/NMF_Signatures_class_',class_type,'_.pdf',sep='')
  pdf(savefile,width = 10, height = 7)
  tempplot<-plotSignatures(sigs_nmf) + ggtitle(" Signatures: NMF - Barchart")+ylim(0,1)
  print(tempplot)
  dev.off()
  savefile<-paste(Analysis,'/EMu_Signatures_class_',class_type,'_.pdf',sep='')
  pdf(savefile,width = 10, height = 7)
  tempplot<-SignPlot(signatures$SignExposures)
  print(tempplot)
  dev.off()
  savefile<-paste(Analysis,'/NMF_ObservedSpectrum_class_',class_type,'.pdf',sep='')
  pdf(savefile,width = 20, height = 120)
  tempplot<-plotObservedSpectrum(sigs_nmf)
  tempplot<-tempplot+ scale_fill_manual(values = rep("darkred", ncol(sca_mm)))
  print(tempplot)
  dev.off()
  savefile<-paste(Analysis,'/NMF_FittedSpectrum_class_',class_type,'.pdf',sep='')
  pdf(savefile,width = 20, height = 120)
  tempplot<-plotFittedSpectrum(sigs_nmf)
  tempplot<-tempplot+ scale_fill_manual(values = rep("darkred", ncol(sca_mm)))
  print(tempplot)
  dev.off()
  savefile<-paste(Analysis,'/NMF_SampleMap_class_',class_type,'_.pdf',sep='')
  pdf(savefile,width = 20, height = 80)
  tempplot<-plotSampleMap(sigs_nmf)
  print(tempplot)
  dev.off()
  savefile<-paste(Analysis,'/EMu_SampleMap_class_',class_type,'_.pdf',sep='')
  pdf(savefile,width = 20, height = 80)
  tempplot<-ExposureHeat(signatures$SignExposures)
  print(tempplot)
  dev.off()
  savefile<-paste(Analysis,'/EMu_SampleExposure_class_',class_type,'_.pdf',sep='')
  pdf(savefile,width = 10, height = 20)
  tempplot<-ExposureBoxplot(signatures$SignExposures)
  print(tempplot)
  dev.off()
  print(class_type)
  savefile<-paste(Analysis,'/NMF_Samples_class_',class_type,'_.pdf',sep='')
  pdf(savefile,width = 30, height = 10)
  tempplot<-SomaticSignatures::plotSamples(sigs_nmf)
  print(tempplot)
  dev.off()
  savefile<-paste(Analysis,'/EMu_Samples_class_',class_type,'_.pdf',sep='')
  pdf(savefile,width = 30, height = 10)
  tempplot<-ExposureBarplot(signatures$SignExposures)
  # NOTE(review): tempplot is never print()ed before dev.off() here, so
  # this PDF is likely written empty — probably a missing print(tempplot).
  dev.off()
  # Cluster the mutation motifs and plot the dendrogram.
  clu_motif = clusterSpectrum(sca_mm, "motif")
  savefile<-paste(Analysis,'/NMF_ggdendrogram_class_',class_type,'.pdf',sep='')
  pdf(savefile,width = 10, height = 20)
  tempplot<-ggdendrogram(clu_motif, rotate = TRUE)
  print(tempplot)
  dev.off()
  print(class_type)
  print('end')
}
|
774fac1887e105eaa7d88ccbb814ecf807a72b45 | 5c5915807ea728324875a615a1b9c5b919f2962f | /loadlib.R | b69bbcb4a8d555a5137aaafdd8a42be249230ad6 | [] | no_license | demel/AccessMod_shiny | 3d969228ff6ca8a9076a30a75fbf94ed60a87d55 | 70ffe0ba8ea6c558466689fdb419e3061afb971e | refs/heads/master | 2021-01-17T06:44:53.433833 | 2015-11-02T18:27:09 | 2015-11-02T18:27:09 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,361 | r | loadlib.R | # load all packages at once, at the begining of the server function.
# This can be an expensive task!
# TODO: load packages inside function definitions where possible.
library(devtools)
library(R.utils) # used in amReadLogs to read last subset lines
library(devtools) # NOTE(review): duplicate of the library(devtools) call above
library(rgrass7) # R interface to GRASS GIS
library(htmltools) # HTML tools. NOTE: check for unused package
library(data.table) # fast tabular data manipulation. NOTE: used only in referral analysis; check if dplyr could do the job.
library(raster) # raster manipulation, import, get info without loading file.
library(rgdal) # stripped R version of GDAL. NOTE: redundant with gdalUtils?
library(gdalUtils) # complete access to system GDAL.
library(rgeos) # R interface to geometry engine geos. NOTE: check for unused package
library(maps) # map display. Used in project mondue
library(RSQLite) # R interface to DBI library for SQLite. Used to check the GRASS db without GRASS.
library(plyr) # ldply in handsontable (amHandson)
library(pingr) # ping utility to check if the repository is available in the update process.
library(leaflet) # ! fork of shiny leaflet: fxi/AccessMod_leaflet-shiny
library(shinydashboard) # admin LTE/bootstrap template
library(geojsonio) # geojson processing. Used in GIS preview
library(rio) # Swiss-army knife for data I/O
library(tools)
library(shinyTour)
library(stringr)
|
c77669628f490914ae6ad466a847b09be83e850b | 8cda1e9a8fa1d4e7862883487089e2a82262b074 | /postprocessing_functions.R | 36b30977279fe5d0b994f258566dabb77c28265b | [] | no_license | boukepieter/MCNA_Analysys_Iraq | 0b7cf7e7336ddc4f0b68172afef023b09304abf0 | 2e5083004fe59096dd85c7b5a0568376d77e577f | refs/heads/master | 2020-05-27T14:43:01.708831 | 2020-05-20T07:57:38 | 2020-05-20T07:57:38 | 188,666,116 | 1 | 9 | null | 2019-11-05T08:14:35 | 2019-05-26T09:56:51 | HTML | UTF-8 | R | false | false | 4,335 | r | postprocessing_functions.R | pretty.output <- function(summary, independent.var.value, analysisplan, cluster_lookup_table, lookup_table, severity = FALSE, camp=FALSE) {
subset <- summary[which(summary$independent.var.value == independent.var.value),]
independent.var <- subset$independent.var[1]
if(is.na(independent.var)) {
analplan_subset <- analysisplan
} else {
analplan_subset <- analysisplan[which(analysisplan$independent.variable == independent.var),]
}
vars <- unique(subset$dependent.var)
districts <- unique(subset$repeat.var.value)
start <- ifelse(camp, 1, 19)
df <- data.frame(governorate = lookup_table$filter[start:nrow(lookup_table)][match(districts, lookup_table$name[start:nrow(lookup_table)])],
district = districts, stringsAsFactors = F)
df <- df[with(df, order(governorate, district)),]
for(i in 1:length(vars)){
var_result <- subset[which(subset$dependent.var == vars[i]),]
df[,vars[i]] <- var_result[match(df$district, var_result$repeat.var.value), "numbers"]
df[,sprintf("%s_min", vars[i])] <- var_result[match(df$district, var_result$repeat.var.value), "min"]
df[,sprintf("%s_max", vars[i])] <- var_result[match(df$district, var_result$repeat.var.value), "max"]
}
extra_heading <- data.frame(t(vars), stringsAsFactors = F)
colnames(extra_heading) <- vars
extra_heading[1,] <- t(analplan_subset$Indicator.Group...Sector[match(vars, analplan_subset$dependent.variable)])
extra_heading[2,] <- t(analplan_subset$research.question[match(vars, analplan_subset$dependent.variable)])
extra_heading[3,] <- t(analplan_subset$sub.research.question[match(vars, analplan_subset$dependent.variable)])
extra_heading[4,] <- t(analplan_subset$dependent.variable.type[match(vars, analplan_subset$dependent.variable)])
if (severity){
extra_heading[5,] <- t(analplan_subset$consequence[match(vars, analplan_subset$dependent.variable)])
}
df <- rbind.fill(df, extra_heading)
df <- df[c((nrow(df)-(nrow(extra_heading) - 1)):nrow(df),1:(nrow(df)-nrow(extra_heading))),]
df$district <- lookup_table$english[match(df$district, lookup_table$name)]
if(!camp){df$governorate <- lookup_table$english[match(df$governorate, lookup_table$name)]}
df[1:nrow(extra_heading), which(is.na(df[1,]))] <- ""
df
}
# Repair rows where a dependent-variable value of 0 was summarised with
# proportion 1: record the value as 1 and zero out the estimate and its
# confidence bounds. All other rows are returned untouched.
correct.zeroes <- function(summary) {
  flip <- which(summary$dependent.var.value == 0 & summary$numbers == 1)
  for (col in c("numbers", "min", "max")) {
    summary[[col]][flip] <- 0
  }
  summary$dependent.var.value[flip] <- 1
  summary
}
# Collapse per-severity indicator columns into PiN (people in need)
# totals: for each indicator prefix, rows 6+ of the per-group CSV get a
# new column summing the severity-3/4/5 sub-columns, then a reduced
# table (district, governorate, indicators) with analysis-plan labels in
# the first three rows is returned.
#
# filename: path to a group-level CSV as written by pretty.output-style
#   exports (columns like "<indicator>_<severity>", plus _min/_max).
# analysisplan: the DAP, used to fill the label rows.
# NOTE(review): the hard-coded row offsets (data starts at row 6, rows
# 3-4 dropped, every 5th dap row kept) assume a fixed file layout —
# confirm against the actual export format.
severity_for_pin <- function(filename, analysisplan){
  group_data <- read.csv(filename, stringsAsFactors = F)
  # All indicator columns: drop the first two id columns and any
  # _min/_max bound columns.
  indicators <- names(group_data)[-c(1,2,which(endsWith(names(group_data), "min") | endsWith(names(group_data), "max")))]
  # Unique indicator prefixes (the part before the first underscore).
  ind_sep <- unique(unlist(strsplit(indicators, "_"))[seq(1,length(indicators)*2,2)])
  for (j in 1:length(ind_sep)){
    ind_cols <- which(startsWith(names(group_data), paste0(ind_sep[j],"_")) & (!endsWith(names(group_data), "min") &
                                                                                 !endsWith(names(group_data), "max")))
    names_ind_cols <- names(group_data)[ind_cols]
    # Severity classes 3, 4 and 5 count towards PiN.
    sum_cols <- names_ind_cols[which(endsWith(names_ind_cols, "3") | endsWith(names_ind_cols, "4") | endsWith(names_ind_cols, "5"))]
    new_df <- as.data.frame(group_data[6:nrow(group_data),sum_cols])
    new_df <- apply(new_df,2,FUN=as.numeric)
    group_data[6:nrow(group_data),ind_sep[j]] <- rowSums(new_df)
  }
  # Keep ids + aggregated indicator columns; drop two header rows.
  group_pin <- group_data[-c(3,4),c("district", "governorate", ind_sep)]
  # Label rows from the analysis plan (one DAP entry per 5 severity
  # variables of the same indicator).
  dap_selection <- unlist(strsplit(analysisplan$dependent.variable, "_"))[seq(1,nrow(analysisplan)*2,2)] %in% ind_sep
  group_pin[1,] <- c("","",analysisplan$Indicator.Group...Sector[dap_selection][seq(1,length(which(dap_selection)),5)])
  group_pin[2,] <- c("","",analysisplan$research.question[dap_selection][seq(1,length(which(dap_selection)),5)])
  group_pin[3,] <- c("","",analysisplan$consequence[dap_selection][seq(1,length(which(dap_selection)),5)])
  return(group_pin)
}
# Blank the repeat variable so every analysis in the plan runs once
# nationwide instead of once per repeat group (e.g. per district).
analysisplan_nationwide <- function(analysisplan) {
  analysisplan[["repeat.for.variable"]] <- ""
  analysisplan
}
# Blank the independent variable (and its type) so results are
# aggregated over population groups instead of disaggregated by group.
analysisplan_pop_group_aggregated <- function(analysisplan) {
  for (col in c("independent.variable", "independent.variable.type")) {
    analysisplan[[col]] <- ""
  }
  analysisplan
}
|
4d9b125296f63a165b7f0ffa49f365b290730f79 | 6ff24bc1f35410c47d2662d1b8e5a2f34e65b1b7 | /man/search_leakers.Rd | cb6528c051b4413360d3292863dde6bfbe936a51 | [] | no_license | ablanda/Esame | 5d3d7c1408e5ed0e9771ea015855db0788036d8e | b43749d3fc4214e878d93b4e2b7c073c64cb7610 | refs/heads/master | 2020-12-30T11:39:37.681842 | 2018-08-11T12:42:47 | 2018-08-11T12:42:47 | 91,511,654 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 448 | rd | search_leakers.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/search_leakers.R
\name{search_leakers}
\alias{search_leakers}
\title{Trovare le variabili leakers}
\usage{
search_leakers(data, soglia = 0.5)
}
\arguments{
\item{data}{data.frame}
}
\value{
una tabella con le variabili che hanno ottenuto r^2 piu' alto sulla risposta e una con le variabili che hanno un tempo eccessivo di lm
}
\description{
cerca le variabili leakers
}
|
87eb041e81b55346d242bc13d6314ad7875e21c3 | cf846020dbd10ee4526f713267ad75895c82c0c7 | /results/tunningPso/tunningPso-script.R | 6331c616b26e28061c2cd2da682fa00e788c130e | [
"MIT"
] | permissive | jnthouvenin/Swarm_Robot_Controller | 9a76d26aa58f4b380d7aec8ce68ca004bb9b6123 | 8ef0f187f502a35e5e354f8f5c4837b4911635b0 | refs/heads/main | 2023-08-15T07:33:53.067734 | 2021-09-23T17:27:54 | 2021-09-23T17:27:54 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,405 | r | tunningPso-script.R |
# Aggregate PSO tuning traces and compare configurations with a boxplot.
#
# For each swarm size (5, 10, 15, 20) and each configuration variant
# (0, 1, 2) there are two runs, stored as
# trace/<size>-<variant>-100-13-<run>.dat. From each run the 101 values
# starting at row <size> are kept (rows size:(size + 100), exactly as in
# the original copy-pasted version), and the two runs are concatenated
# into one column of 202 values. The resulting data frame has the same
# column names (data.<size>.<variant>) and order as before.
# Read one run's trace file and return the window of interest.
read_trace <- function(size, variant, run) {
  path <- sprintf("trace/%d-%d-100-13-%d.dat", size, variant, run)
  read.csv(path, header = FALSE)[, 1][size:(size + 100)]
}
cols <- list()
for (size in c(5, 10, 15, 20)) {
  for (variant in 0:2) {
    cols[[sprintf("data.%d.%d", size, variant)]] <- c(
      read_trace(size, variant, 1),
      read_trace(size, variant, 2)
    )
  }
}
data <- as.data.frame(cols)
boxplot(data)
a2fe05dfd007dac2bc54f54d3b473af8a7f4cc26 | 93439fef06f5e9e1344f8ee3d70d7be1b3a9d109 | /Scripts/01-rstudio.R | 8038672d652226aa0e8f80c5b831f1cf6a14aedd | [] | no_license | CapellariGui/MetodosHeuristicos | 79022a08c839d061df967d2163e1c23e12fc0bdc | 361c67304a86e230610a440ac14048ecb546bbe6 | refs/heads/master | 2023-07-07T20:16:37.927937 | 2021-08-28T14:12:19 | 2021-08-28T14:12:19 | 398,545,126 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 926 | r | 01-rstudio.R | ##Configurando deretorio
getwd()
## Contribuidores
contributors()
print("Bom Sábado")
## Criar um Gráfico
plot(1:25)
## Instalar Pacotes
installed.packages()
install.packages("randomForest")
## Carregando Pacotes
library(randomForest)
## Descarregando pacotes
detach(package:randomForest)
## Ajuda
help("detach")
?detach
??detach
install.packages("sos")
library(sos)
findFn("detach")
print("Salvando Primeiro Arquivo")
### AULA 2 ###
# Operadores Básicos, Relacionais e Lógicos em R
# Operadores Básicos
# Soma
5 + 5
# Subtração
7 - 3
# Multiplicação
5 * 3
# Divisão
5 / 3
# Potência
3 ^ 2
3 ** 2
# Módulo
16 %% 3
# Operadores relacionais
x <- 7
y <- 5
x + y
X <- 3
x
# Operadores
X > 7
x < 8
X <= 8
x >= 8
X == 8
x != 8
# Operadores Lógicos
# And
(x==8) & (x==6)
(x==7) & (x>=5)
(x==8) & (x==7)
# Or
(x==8) | (x>5)
(x==8) | (x>=5)
# Not
x > 8
print(!x > 8)
|
79103b5a8b96efb7778cc57bf4017f0f86b3e3f2 | 589ec53602da3824e55a93b6cffe6d1017172949 | /man/fixUnSampTrueBorder.Rd | e97bf1d3e1f1f9a2281649bdfc558b39e3756403 | [
"MIT"
] | permissive | mcglinnlab/vario | c6a2b35eba1c5610274d0778cc140db3f6d686e2 | d8c9bbd2dff1a52d1448ecb4f45a53ef4422394f | refs/heads/master | 2023-03-05T03:53:23.641515 | 2023-02-21T04:15:36 | 2023-02-21T04:15:36 | 3,104,931 | 1 | 3 | null | 2015-04-04T19:53:11 | 2012-01-04T20:10:21 | R | UTF-8 | R | false | true | 797 | rd | fixUnSampTrueBorder.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vario.R
\name{fixUnSampTrueBorder}
\alias{fixUnSampTrueBorder}
\title{Internal function that maintains the spatial locations of the unsampled
pixels in a random realization of a two-dimensional spatial array. -999 is the
identifier for unsampled cells; in this case oarray and rarray DO NOT have a
false border of -999}
\usage{
fixUnSampTrueBorder(oarray, rarray)
}
\arguments{
\item{oarray}{observed starting array}
\item{rarray}{randomized array}
}
\description{
Internal function that maintains the spatial locations of the unsampled
pixels in a random realization of a two-dimensional spatial array. -999 is the
identifier for unsampled cells; in this case oarray and rarray DO NOT have a
false border of -999
}
|
f398bf630c08e24318208572405c754a1034d670 | 0a906cf8b1b7da2aea87de958e3662870df49727 | /ggforce/inst/testfiles/enclose_points/libFuzzer_enclose_points/enclose_points_valgrind_files/1610030314-test.R | 8597f746fcb8ef5e86b753d60d82260ec444b245 | [] | no_license | akhikolla/updated-only-Issues | a85c887f0e1aae8a8dc358717d55b21678d04660 | 7d74489dfc7ddfec3955ae7891f15e920cad2e0c | refs/heads/master | 2023-04-13T08:22:15.699449 | 2021-04-21T16:25:35 | 2021-04-21T16:25:35 | 360,232,775 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 383 | r | 1610030314-test.R | testlist <- list(id = integer(0), x = c(NaN, NaN), y = c(-1.46791787790489e+115, NaN, NaN, NaN, 0, 5.41108926696144e-312, -4.28653205370688e+266, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(ggforce:::enclose_points,testlist)
str(result) |
b4b7be94c898d6e61824a9ef26d45be117afce96 | 8fb22662702c3e8c2c2ca55bd3ba55a74d1f57ca | /R/dataprep_lookup_in_pandora.R | b1c3fbc265bc4348bdf46a51123b2fd9b93ba7eb | [
"MIT"
] | permissive | sidora-tools/sidora.core | 52b9672fc4e836bcbe3f49929071fc88fb70bd2b | 90267d90185dc7c5dbfc05b9cf3acf022fddca79 | refs/heads/master | 2023-05-12T18:30:17.526725 | 2023-04-26T13:32:43 | 2023-04-26T13:32:43 | 237,629,599 | 2 | 4 | NOASSERTION | 2023-04-26T13:32:44 | 2020-02-01T14:50:49 | R | UTF-8 | R | false | false | 2,630 | r | dataprep_lookup_in_pandora.R | #' namecol_value_from_id
#'
#' Given a sidora column name and a 'Id' integer will get the
#' requested corresponding 'human readable' string version of the Id.
#'
#' For example, given the ID 38 and the information that this ID was found in
#' 'extract.Batch', would result in Ex06_KE_2015-11-19
#'
#' @param sidora_col_name character. A sidora table column name
#' @param query_id integer vector. ID(s) to be converted to the human readable 'string' version
#' @param con a pandora connection
#' @param cache_dir a cache directory
#'
#' @examples
#' \dontrun{
#' namecol_value_from_id(sidora_col_name = "extract.Batch", query_id = 38, con = con)
#' }
#'
#' @export
namecol_value_from_id <- function(sidora_col_name, query_id, con, cache_dir = tempdir()) {
if (!any(is.integer(query_id))) {
stop(paste("[sidora.core] error in function namecol_value_from_id()! query_id parameter must be an integer. Sidora column:", sidora_col_name))
}
# determine auxiliary table and auxiliary id and auxiliary namecol given the lookup column
aux_table <- hash::values(hash_sidora_col_name_auxiliary_table, sidora_col_name)
id_column <- hash::values(hash_entity_type_idcol, table_name_to_entity_type(aux_table))
name_column <- hash::values(hash_sidora_col_name_auxiliary_namecol, sidora_col_name)
# download the auxiliary table
lookup_table <- get_df(aux_table, con = con, cache_dir = cache_dir)
# do the lookup of the name column value given the id column value in the auxiliary table
res_vector <- lookup_table[[name_column]][match(query_id, lookup_table[[id_column]])]
# if lookup yields empty character then return input
res_vector[is.na(res_vector)] <- query_id[is.na(res_vector)]
return(res_vector)
}
#' convert_all_ids_to_values
#'
#' A convenience function which simply transforms a given Pandora-Dataframe using all
#' defined default lookups. Typically will convert a pandora back-end numeric ID to a 'human readable string' actually displayed on the pandora webpage.
#'
#' @param df data.frame. A Sidora/Pandora-Dataframe with sidora column names.
#' @param con a pandora connection
#'
#' @return The converted dataframe with lookup-columns replaced by the actual values.
#'
#'
#' @export
convert_all_ids_to_values <- function(df, con) {
  # Columns that have an auxiliary lookup table defined for them.
  lookup_cols <- names(df[sidora.core::sidora_col_name_has_aux(names(df))])
  # Replace each lookup column's ids with their human-readable values.
  for (current_col in lookup_cols) {
    df[[current_col]] <- namecol_value_from_id(
      con = con, sidora_col_name = current_col, query_id = df[[current_col]]
    )
  }
  df
}
|
309b1e2ad00e2f3e4f532fc3bad16fa0ff876f0c | afb28df33aabc4f4cd52df43f7b4e843afeb8bba | /processing/07_classification.R | 09b8808cab2f03d8c8f63fe8f2b336c4b7ae066d | [] | no_license | gstewart12/delmarva-bayes | cd556fe3b4b247c319eee95c7321f1db6023c77e | 939e0f98cdd659ab3e553ef832656f2e38a5a821 | refs/heads/master | 2023-07-19T18:22:26.703232 | 2021-08-31T21:00:31 | 2021-08-31T21:00:31 | 283,798,470 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 23,458 | r | 07_classification.R |
# NOTE(review): rm(list = ls()) in a script is discouraged — it clears
# the user's workspace but not loaded packages/options; prefer a fresh
# R session.
rm(list = ls())
# Run-provenance metadata recorded alongside this analysis.
settings <- list(
  name = "Graham Stewart", # user who ran the script
  email = "grahamstewart12@gmail.com", # user's email contact
  site = "JLR", # three letter site code
  date = lubridate::today(), # date script was run
  info = devtools::session_info() # R session info: R, OS, packages
)
# References
# Li, J., Tran, M., & Siwabessy, J. (2016). Selecting Optimal Random Forest
# Predictive Models: A Case Study on Predicting the Spatial Distribution of
# Seabed Hardness. PLOS ONE, 11(2), e0149089.
# https://doi.org/10.1371/journal.pone.0149089
#
# Ma, L., Fu, T., Blaschke, T., Li, M., Tiede, D., Zhou, Z., et al. (2017).
# Evaluation of Feature Selection Methods for Object-Based Land Cover Mapping of
# Unmanned Aerial Vehicle Imagery Using Random Forest and Support Vector Machine
# Classifiers. ISPRS International Journal of Geo-Information, 6(2), 51.
# https://doi.org/10.3390/ijgi6020051
#
# Mikola, J., Virtanen, T., Linkosalmi, M., Vähä, E., Nyman, J., Postanogova,
# O., et al. (2018). Spatial variation and linkages of soil and vegetation in
# the Siberian Arctic tundra – coupling field observations with remote sensing
# data. Biogeosciences, 15(9), 2781–2801.
# https://doi.org/10.5194/bg-15-2781-2018
#
# Millard, K., & Richardson, M. (2015). On the Importance of Training Data
# Sample Selection in Random Forest Image Classification: A Case Study in
# Peatland Ecosystem Mapping. Remote Sensing, 7(7), 8489–8515.
# https://doi.org/10.3390/rs70708489
#
# Räsänen, A., Kuitunen, M., Tomppo, E., & Lensu, A. (2014). Coupling
# high-resolution satellite imagery with ALS-based canopy height model and
# digital elevation model in object-based boreal forest habitat type
# classification. ISPRS Journal of Photogrammetry and Remote Sensing, 94,
# 169–182. https://doi.org/10.1016/j.isprsjprs.2014.05.003
# Tuning parameters for the classification workflow; citations refer to
# the reference list above.
control <- list(
  # Maximum iterations for determining variable importance (Mikola et al. 2018)
  max_boruta_runs = 1000,
  # Training split proportion (Millard & Richardson 2015)
  split_prop = 0.795,
  # Cutoff for removing highly correlated predictors (Li et al. 2016)
  corr_thr = 0.99,
  # Number of RF trees to grow (Millard & Richardson 2015)
  n_trees = 1000,
  # Presumably the number of models in the ensemble (no reference given)
  n_ens_models = 100
)
#library(SegOptim)
library(progress)
library(tidyverse)
# Rescale all cell values of a raster with scales::rescale(), returning
# a raster of the same geometry carrying the transformed values.
# `...` is forwarded to scales::rescale() (e.g. `to =` for the target
# range).
raster_rescale <- function(x, ...) {
  rescaled <- scales::rescale(raster::values(x), ...)
  raster::setValues(x, rescaled)
}
# Plot a stars object with ggplot2. With rgb = TRUE the first three
# attributes are rendered as an RGB composite (via geom_stars_rgb);
# otherwise a single band is drawn with a viridis ("A"/magma) scale,
# discrete for factor data and continuous for numeric data.
#
# data: a stars object.
# band: optional, unquoted attribute to plot (tidy-eval; ignored when
#   rgb = TRUE). If missing, the first attribute is used.
# ...: forwarded to geom_stars_rgb() in the rgb branch.
plot_stars <- function(data, band, ..., rgb = FALSE) {
  # Capture `band` unevaluated so a bare column name can be passed.
  band <- rlang::enquo(band)
  if (rgb) {
    ggplot2::ggplot() +
      geom_stars_rgb(data = data, ...) +
      ggplot2::scale_fill_identity() +
      ggplot2::coord_equal() +
      ggplot2::theme_void()
  } else {
    if (!rlang::quo_is_missing(band)) data <- dplyr::select(data, !!band)
    # Prototype of the plotted attribute decides the fill scale below.
    ptype <- data %>%
      dplyr::pull(1) %>%
      vctrs::vec_ptype()
    plot <- ggplot2::ggplot() +
      stars::geom_stars(data = data) +
      ggplot2::coord_equal() +
      ggplot2::theme_void()
    if (is.factor(ptype)) {
      plot + ggplot2::scale_fill_viridis_d(option = "A")
    } else {
      plot + ggplot2::scale_fill_viridis_c(option = "A")
    }
  }
}
# Build a geom_stars layer showing three bands of a stars object as an
# RGB composite. Bands r/g/b are selected by position (or name), scaled
# against max_value, and combined into hex colour strings mapped to
# fill; combine with scale_fill_identity() when plotting.
#
# data: a stars object; if it has a third dimension (e.g. band), it is
#   first flattened so each band becomes its own attribute.
# r, g, b: attribute positions (after flattening) for the three
#   channels; defaults 1, 2, 3.
# max_value: value corresponding to full channel intensity (255 for
#   8-bit imagery).
# ...: forwarded to stars::geom_stars().
geom_stars_rgb <- function(data, r = 1, g = 2, b = 3, max_value = 255, ...) {
  if (length(stars::st_dimensions(data)) > 2) {
    # Flatten the 3rd dimension to wide columns; the CRS is lost during
    # the tibble round-trip, so save and restore it.
    crs <- sf::st_crs(data)
    data <- data %>%
      tibble::as_tibble(center = FALSE) %>%
      tidyr::pivot_wider(names_from = 3, values_from = 4) %>%
      stars::st_as_stars() %>%
      sf::st_set_crs(crs)
  }
  data <- data %>%
    dplyr::select(
      r = dplyr::all_of(r), g = dplyr::all_of(g), b = dplyr::all_of(b)
    ) %>%
    dplyr::mutate(rgb = grDevices::rgb(r, g, b, maxColorValue = max_value))
  stars::geom_stars(data = data, ggplot2::aes(x = x, y = y, fill = rgb), ...)
}
# Build a console progress bar for `len` steps showing a spinner,
# completion count, percentage, elapsed time and ETA. The bar is kept
# on screen after completion (clear = FALSE).
progress_info <- function(len) {
  bar_format <- paste0(
    "[:spin] Completed: :current | :percent ",
    "Elapsed: :elapsed Remaining: :eta"
  )
  progress_bar$new(total = len, format = bar_format, clear = FALSE)
}
### Inputs
# Set working directory
wd <- file.path(
  "/Users/Graham/Desktop/DATA/Flux", settings$site, "analysis", "spatial"
)
# All file-system inputs/outputs used below, gathered in one list.
paths <- list(
  seeds = "/Users/Graham/Desktop/DATA/Flux/tools/reference/seeds.R",
  spatial_ref = "/Users/Graham/Desktop/DATA/Flux/tools/reference/spatial.R",
  # Path to site area
  delin = file.path(dirname(dirname(wd)), "site_info", "delineation"),
  point = "/Users/Graham/Desktop/DATA/Spatial/survey/flux_pts",
  # Path to image segments
  segm = file.path(wd, "04_segmentation", "segments_rgbn.tif"),
  # Path to segment features
  feat = file.path(wd, "05_segment_features", "segment_features.csv"),
  # Path to training points
  train = file.path(wd, "06_training_points", "training_points"),
  # Path to output files
  out = file.path(wd, "07_classification")
)
# Load reference files (define `seeds` and `spatial_ref` objects).
source(paths$seeds)
source(paths$spatial_ref)
# Keep only this site's OBIA seeds.
seeds <- purrr::pluck(seeds, settings$site, "obia")
### Load input data ============================================================
# Read site area polygon
delin <- sf::read_sf(paths$delin)
# Read points of interest
point <- sf::read_sf(paths$point)
# Geometry of this site's flux tower only.
tower <- point %>%
  sf::st_set_crs(spatial_ref$epsg) %>%
  dplyr::filter(site == settings$site, type == "tower") %>%
  sf::st_geometry()
# Load image segments
segm <- paths$segm %>%
  stars::read_stars() %>%
  sf::st_set_crs(spatial_ref$epsg) %>%
  dplyr::select(segment = 1)
# Get train point data
point_train <- sf::read_sf(paths$train)
# Prep training data: integer ids/classes, attach the segment id each
# point falls in, then drop helper columns.
point_train <- point_train %>%
  dplyr::select(-dplyr::any_of("segment"), -certainty, -subclass) %>%
  dplyr::mutate(
    ID = as.integer(ID),
    class = as.integer(factor(class))
  ) %>%
  sf::st_join(sf::st_as_sf(segm, as_points = FALSE)) %>%
  # Keep only one point per segment
  #dplyr::distinct(segment, .keep_all = TRUE) %>%
  dplyr::select(-segment, -ID)
# Import segment feature data
data_feat <- readr::read_csv(
  paths$feat, col_types = readr::cols(.default = readr::col_guess()),
  progress = FALSE
)
# Convert raster data to tidy format
# - this is a hacky way of doing it but c.stars simply doesn't work (why??)
# feat_st <- feat_rst %>%
#   stars::st_as_stars() %>%
#   tibble::as_tibble(center = FALSE) %>%
#   dplyr::rename(value = 4) %>%
#   tidyr::pivot_wider(names_from = band, values_from = value) %>%
#   dplyr::rename_with(~ stringr::str_replace(.x, "\\.", "_")) %>%
#   stars::st_as_stars() %>%
#   sf::st_set_crs(spatial_ref$epsg)
### Assign features to segments ================================================
# Convert segments to polygon format
segm_poly <- sf::st_as_sf(segm, as_points = FALSE, merge = TRUE)
# Assign training classes to segments
# NOTE(review): sf::st_join() has no `what` argument — for an inner join
# the documented argument is `left = FALSE`; confirm `what = "inner"` is
# not being silently swallowed by `...`.
segm_class <- segm_poly %>%
  sf::st_join(., point_train, what = "inner", as_points = FALSE) %>%
  dplyr::filter(!is.na(class)) %>%
  # Remove duplicated segments
  # - happens if multiple training points end up within one segment
  dplyr::distinct(segment, .keep_all = TRUE) %>%
  dplyr::mutate(class = factor(class))
# Check training segments (interactive visual check)
segm_class %>%
  ggplot2::ggplot() +
  ggplot2::geom_sf(ggplot2::aes(fill = class, color = class))
# Class balance (interactive check of training-sample counts per class)
table(segm_class$class)
# Add point classifications to feature data
data_feat <- dplyr::left_join(
  data_feat, sf::st_drop_geometry(segm_class), by = "segment"
)
# Subset classified training segments (class moved to the first column)
data_class <- data_feat %>%
  dplyr::relocate(class) %>%
  dplyr::filter(!is.na(class)) %>%
  dplyr::mutate(class = factor(class))
### Feature selection ==========================================================
# Necessity & implementation described in Ma et al. 2017
# - important especially for OBIA due to increased feature space
# 1. Remove features that are highly correlated to other features
# - done before variable importance (Li et al. 2016)
keep_corr <- data_class %>%
dplyr::select(-segment, -class) %>%
recipes::recipe(~ .) %>%
# Spearman rank method since variables generally not normally distributed
recipes::step_corr(
dplyr::everything(), threshold = control$corr_thr, method = "spearman"
) %>%
recipes::prep() %>%
recipes::juice() %>%
names()
keep_corr
# Select relevant features to create training set
data_class_corr <- dplyr::select(
data_class, class, segment, dplyr::all_of(keep_corr)
)
# 2. Remove features deemed unimportant for classification
# - tends to improve classification performance (Rasanen et al. 2014)
set.seed(seeds$boruta)
# - ~5 mins for ~200x600 df
var_imp <- Boruta::Boruta(
class ~ .,
data = dplyr::select(data_class_corr, -segment),
maxRuns = control$max_boruta_runs
)
(var_imp <- Boruta::TentativeRoughFix(var_imp))
(keep_imp <- Boruta::getSelectedAttributes(var_imp))
# Variable importance
keep_var_imp <- var_imp %>%
purrr::pluck("ImpHistory") %>%
tibble::as_tibble() %>%
dplyr::select(-dplyr::contains("Shadow")) %>%
dplyr::summarize(dplyr::across(.fns = mean)) %>%
tidyr::pivot_longer(dplyr::everything()) %>%
dplyr::filter(is.finite(value)) %>%
dplyr::arrange(dplyr::desc(value))
keep_var_imp
# Select relevant features to create training set
data_class_imp <- dplyr::select(
data_class_corr, class, segment, dplyr::all_of(keep_imp)
)
# Write to file
readr::write_csv(data_class_imp, file.path(paths$out, "selected_features.csv"))
# Read back in (so feature selection can be skipped)
data_class_imp <- readr::read_csv(
file.path(paths$out, "selected_features.csv"),
col_types = readr::cols(
class = readr::col_factor(levels = c(1, 2, 3)),
.default = readr::col_double()
)
)
# Which vars remain?
keep_imp %>%
tibble::enframe(name = NULL, value = "name") %>%
tidyr::separate(
name, c("var", "desc", "stat"), extra = "merge", fill = "left"
) %>%
tidyr::unite("var", 1:2, na.rm = TRUE) %>%
dplyr::count(var, sort = TRUE)
### Classification =============================================================
# Set preprocessing recipe
rf_rec <- data_class_imp %>%
recipes::recipe(class ~ .) %>%
recipes::update_role(segment, new_role = "ID") %>%
recipes::step_range(recipes::all_predictors(), min = 0, max = 1)
# Set model
rf_mod <- parsnip::rand_forest() %>%
parsnip::set_args(trees = control$n_trees) %>%
parsnip::set_mode("classification") %>%
parsnip::set_engine("randomForest")
# Set model fit workflow
rf_wflow <- workflows::workflow() %>%
workflows::add_model(rf_mod) %>%
workflows::add_recipe(rf_rec)
# First run: using all data for training
# Fit model
set.seed(seeds$fit)
fit_all <- parsnip::fit(rf_wflow, data = data_class_imp)
# Get predictions
pred_segm_all <- segm_poly %>%
dplyr::right_join(dplyr::select(data_feat, segment), ., by = "segment") %>%
dplyr::bind_cols(
predict(fit_all, new_data = data_feat),
predict(fit_all, new_data = data_feat, type = "prob")
) %>%
sf::st_as_sf()
pred_all <- pred_segm_all %>%
sf::st_drop_geometry() %>%
tibble::as_tibble() %>%
dplyr::right_join(tibble::as_tibble(segm, center = FALSE), by = "segment") %>%
dplyr::relocate(x, y) %>%
stars::st_as_stars() %>%
sf::st_set_crs(spatial_ref$epsg)
# Check predictions
pred_all %>%
sf::st_crop(sf::st_bbox(delin), as_points = FALSE) %>%
plot_stars(.pred_class) +
ggplot2::geom_sf(data = delin, fill = NA) +
ggplot2::theme(legend.position = "none")
# Write predicted classes as raster file
class_pred_all <- dplyr::select(pred_all, class = .pred_class)
stars::write_stars(class_pred_all, file.path(paths$out, "classes_all.tif"))
class_prob_all <- pred_all %>%
dplyr::select(prob_1 = .pred_1, prob_2 = .pred_2, prob_3 = .pred_3) %>%
stars::st_redimension(along = list(band = names(.)))
stars::write_stars(class_prob_all, file.path(paths$out, "classes_prob_all.tif"))
# Second run: single model
# Split into training (80%) and testing sets (20%)
# - for some reason need to set prop to 0.795 to get 160/40 split
set.seed(seeds$split)
data_split <- rsample::initial_split(data_class_imp, prop = control$split_prop)
data_train <- rsample::training(data_split)
table(data_train$class)
data_test <- rsample::testing(data_split)
table(data_test$class)
# Fit model
set.seed(seeds$fit)
fit_sgl <- parsnip::fit(rf_wflow, data = data_train)
# Model performance - independent evaluation
fit_sgl %>%
predict(data_test) %>%
dplyr::bind_cols(dplyr::select(data_test, class)) %>%
yardstick::accuracy(class, .pred_class)
# Model performance - overall
fit_sgl %>%
predict(data_class_imp) %>%
dplyr::bind_cols(dplyr::select(data_class_imp, class)) %>%
yardstick::accuracy(class, .pred_class)
# Variable importance
fit_sgl %>%
workflows::pull_workflow_fit() %>%
vip::vip(num_features = 20)
# Get predictions
pred_segm_sgl <- segm_poly %>%
dplyr::right_join(dplyr::select(data_feat, segment), ., by = "segment") %>%
dplyr::bind_cols(
predict(fit_sgl, new_data = data_feat),
predict(fit_sgl, new_data = data_feat, type = "prob")
) %>%
sf::st_as_sf()
pred_sgl <- pred_segm_sgl %>%
sf::st_drop_geometry() %>%
tibble::as_tibble() %>%
dplyr::right_join(tibble::as_tibble(segm, center = FALSE), by = "segment") %>%
dplyr::relocate(x, y) %>%
stars::st_as_stars() %>%
sf::st_set_crs(spatial_ref$epsg)
# Check predictions
pred_sgl %>%
sf::st_crop(sf::st_bbox(delin), as_points = FALSE) %>%
plot_stars(.pred_class) +
ggplot2::geom_sf(data = delin, fill = NA) +
ggplot2::theme(legend.position = "none")
# Write predicted classes as raster file
class_pred_sgl <- dplyr::select(pred_sgl, class = .pred_class)
stars::write_stars(class_pred_sgl, file.path(paths$out, "classes_sgl.tif"))
class_prob_sgl <- pred_sgl %>%
dplyr::select(prob_1 = .pred_1, prob_2 = .pred_2, prob_3 = .pred_3) %>%
stars::st_redimension(along = list(band = names(.)))
stars::write_stars(class_prob_sgl, file.path(paths$out, "classes_prob_sgl.tif"))
# Second run: model ensemble
# Resample to desired number of train/test splits
set.seed(seeds$split)
data_res <- seq(1, control$n_ens_models) %>%
purrr::map(
~ rsample::validation_split(data_class_imp, prop = control$split_prop)
) %>%
dplyr::bind_rows() %>%
dplyr::mutate(id = stringr::str_c(id, dplyr::row_number()))
# Fit the models
set.seed(seeds$fit)
fit_res <- data_res %>%
dplyr::mutate(
train = purrr::map(splits, rsample::analysis),
test = purrr::map(splits, rsample::assessment),
fit = purrr::map(train, ~ parsnip::fit(rf_wflow, data = .x)),
trees = rf_wflow %>%
workflows::pull_workflow_spec() %>%
purrr::pluck("args", "trees") %>%
rlang::eval_tidy(),
oob = fit %>%
purrr::map(tune::extract_model) %>%
purrr::map(purrr::pluck, "err.rate") %>%
purrr::map2_dbl(trees, ~ purrr::pluck(.x, .y)) %>%
magrittr::subtract(1, .),
pred = fit %>%
purrr::map2(test, ~ predict(.x, .y)) %>%
purrr::map2(test, ~ dplyr::bind_cols(.x, dplyr::select(.y, class))),
accuracy = pred %>%
purrr::map(yardstick::accuracy, class, .pred_class) %>%
purrr::map_dbl(dplyr::pull, .estimate)
)
# Model performance - independent evaluation
dplyr::summarize(fit_res, dplyr::across(c(oob, accuracy), mean))
# Variable importance
# TODO write to a file somewhere
vi_res <- fit_res %>%
dplyr::transmute(
model = stringr::str_replace_all(id, "validation", "v"),
vi = fit %>%
purrr::map(workflows::pull_workflow_fit) %>%
purrr::map(vip::vi) %>%
purrr::map(dplyr::rename, var = 1, imp = 2) %>%
purrr::map(tibble::rowid_to_column, var = "rank")
)
vi_res %>%
tidyr::unnest(vi) %>%
dplyr::group_by(var) %>%
dplyr::summarize(dplyr::across(c(rank, imp), mean), .groups = "drop") %>%
dplyr::arrange(rank)
# Get model predictions
pred_res <- fit_res %>%
#dplyr::arrange(dplyr::desc(accuracy), dplyr::desc(oob)) %>%
#dplyr::slice_head(n = 50) %>%
dplyr::transmute(
model = stringr::str_replace_all(id, "validation", "v"),
segment = data_feat %>%
dplyr::select(segment) %>%
rlang::list2(),
pred_c = purrr::map(fit, predict, new_data = data_feat),
pred_p = purrr::map(fit, predict, new_data = data_feat, type = "prob"),
pred = purrr::pmap(list(segment, pred_c, pred_p), dplyr::bind_cols)
) %>%
dplyr::select(model, pred)
# Get ensemble predictions
pred_ens_segm <- pred_res %>%
tidyr::unnest(pred) %>%
dplyr::group_by(segment) %>%
dplyr::summarize(
count = list(vctrs::vec_count(.pred_class)),
class = count %>%
purrr::map_int(purrr::pluck, "key", 1) %>%
forcats::as_factor(),
n = dplyr::n(),
dplyr::across(c(.pred_3, .pred_2, .pred_1), mean),
.groups = "drop"
) %>%
tidyr::unnest(count) %>%
tidyr::pivot_wider(
names_from = key, names_glue = "prob_{key}", values_from = count
) %>%
dplyr::mutate(
class = forcats::fct_inseq(class),
dplyr::across(prob_3:prob_1, ~ tidyr::replace_na(.x / n, 0))
) %>%
dplyr::select(-n)
pred_ens <- pred_ens_segm %>%
dplyr::right_join(tibble::as_tibble(segm, center = FALSE), by = "segment") %>%
dplyr::relocate(x, y) %>%
stars::st_as_stars() %>%
sf::st_set_crs(spatial_ref$epsg)
# Model performance - overall
pred_ens_segm %>%
dplyr::select(segment, pred_class = class) %>%
dplyr::right_join(
dplyr::select(data_class_imp, segment, class), by = "segment"
) %>%
yardstick::accuracy(class, pred_class)
# Check ensemble predictions
pred_ens %>%
#sf::st_crop(sf::st_bbox(delin), as_points = FALSE) %>%
#dplyr::mutate(class = factor(class)) %>%
plot_stars(class) +
ggplot2::geom_sf(data = delin, fill = NA) +
ggplot2::theme(legend.position = "none")
# Check ensemble stability
pred_ens %>%
#sf::st_crop(sf::st_bbox(delin), as_points = FALSE) %>%
dplyr::select(prob_3:prob_1) %>%
stars::st_redimension(along = list(prob = names(.))) %>%
plot_stars() +
ggplot2::facet_wrap(~ prob) +
ggplot2::geom_sf(data = delin, fill = NA) +
ggplot2::scale_fill_distiller(
palette = "GnBu", direction = 1, trans = "log1p"
) +
ggplot2::theme(legend.position = "none")
# Write predicted classes as raster file
class_pred_ens <- dplyr::select(pred_ens, class)
stars::write_stars(class_pred_ens, file.path(paths$out, "classes_ens.tif"))
class_prob_ens <- pred_ens %>%
dplyr::select(prob_1, prob_2, prob_3) %>%
stars::st_redimension(along = list(band = names(.)))
stars::write_stars(class_prob_ens, file.path(paths$out, "classes_prob_ens.tif"))
# Write predicted classes as shapefile
class_pred_segm <- segm_poly %>%
dplyr::left_join(pred_ens_segm, by = "segment") %>%
#dplyr::select(-dplyr::starts_with(".pred")) %>%
dplyr::mutate(class_orig = class, .after = 5)
if (!dir.exists(file.path(paths$out, "classes_segm"))) {
dir.create(file.path(paths$out, "classes_segm"))
}
sf::write_sf(
class_pred_segm, file.path(paths$out, "classes_segm", "classes_segm.shp")
)
# Make a copy for revision, if necessary
if (!dir.exists(file.path(paths$out, "classes_segm_rev"))) {
dir.create(file.path(paths$out, "classes_segm_rev"))
sf::write_sf(
class_pred_segm,
file.path(paths$out, "classes_segm_rev", "classes_segm_rev.shp")
)
}
# (Load the revised shapefile back here to convert to raster)
class_pred_segm_rev <- sf::read_sf(file.path(paths$out, "classes_segm_rev"))
class_pred_rev <- class_pred_segm_rev %>%
sf::st_drop_geometry() %>%
tibble::as_tibble() %>%
dplyr::right_join(tibble::as_tibble(segm, center = FALSE), by = "segment") %>%
dplyr::mutate(dplyr::across(dplyr::contains("class"), as.integer)) %>%
dplyr::select(x, y, class) %>%
stars::st_as_stars() %>%
sf::st_set_crs(spatial_ref$epsg)
stars::write_stars(class_pred_rev, file.path(paths$out, "classes_rev.tif"))
###
# Set new patches within classified image
spectral_distance <- function(x, y, p = 2, type = c("minkowski", "sa")) {
# x and y are vectors of same length
type <- rlang::arg_match(type)
if (type == "minkowski") {
# manhattan distance: p = 1; euclidean_distance: p = 2
dist <- sqrt(sum(abs(x - y)^p))
} else if (type == "sa") {
# spectral angle
dist <- acos(sum(x * y) / sqrt(sum(x^2) * sum(y^2)))
}
dist
}
min_size <- 50
class_pred_ens <- file.path(paths$out, "classes_ens.tif") %>%
stars::read_stars() %>%
sf::st_set_crs(spatial_ref$epsg) %>%
dplyr::select(class = 1)
class_pred_segm <- segm_poly %>%
# Using segmentation features - maybe better to use more meaningful features?
# - e.g. canopy height, DEM, ndvi
dplyr::left_join(dplyr::select(
data_feat, segment, hsv_1_mean, hsv_2_mean, hsv_3_mean, nir_mean
), by = "segment") %>%
sf::st_join(
sf::st_as_sf(class_pred_ens, as_points = FALSE, merge = TRUE),
join = sf::st_covered_by
)
# Rescale features for accurate Euclidean distances
patches <- dplyr::mutate(
class_pred_segm,
dplyr::across(c(-segment, -class, -geometry), scales::rescale)
)
p <- progress_info(nrow(patches)) # takes ~10 min to run
i <- 1 # iterator start
repeat ({
if (i > nrow(patches)) {
break
}
patch <- patches %>%
dplyr::slice(i) %>%
tibble::as_tibble() %>%
tidyr::nest(values = c(-segment, -geometry, -class)) %>%
as.list() %>%
purrr::map_at("values", ~ purrr::as_vector(purrr::flatten(.x)))
# Large segments don't need to be merged
# - but they are still available for joining to smaller ones
if (as.numeric(sf::st_area(patch$geometry)) >= min_size) {
i <- i + 1
p$tick()
next
}
adj <- patch$geometry %>%
# Shared corners & the patch itself don't count (Rook's case)
sf::st_relate(patches, pattern = "F***1****") %>%
purrr::pluck(1) %>%
dplyr::slice(patches, .)
# "Island" segments cannot be merged with anything
if (nrow(adj) == 0) {
i <- i + 1
p$tick()
next
}
adj <- adj %>%
dplyr::filter(class == patch$class) %>%
sf::st_drop_geometry() %>%
dplyr::select(-class)
# Find adjacent segment with lowest spectral distance
adj_segment <- adj %>%
dplyr::rowwise() %>%
dplyr::mutate(
dist = spectral_distance(patch$values, dplyr::c_across(-segment))
) %>%
dplyr::ungroup() %>%
dplyr::arrange(dist) %>%
purrr::pluck("segment", 1)
# Update geometry
# - this is more complicated than group_by/summarize w/ all data, but faster
# - still very slow; there must be a better way to do this
# - could calculate all areas beforehand, keep track of new areas by addition
new_patch <- patches %>%
purrr::assign_in(
list("segment", which(patches$segment == patch$segment)), adj_segment
) %>%
dplyr::filter(segment == adj_segment) %>%
dplyr::group_by(segment) %>%
dplyr::summarize(dplyr::across(-geometry, .fns = mean), .groups = "drop")
patches <- patches %>%
dplyr::filter(!segment %in% c(patch$segment, adj_segment)) %>%
dplyr::bind_rows(new_patch)
#dplyr::arrange(segment)
p$tick()
})
# Check patches
patches %>%
sf::st_crop(sf::st_buffer(tower, 50)) %>%
ggplot() +
geom_sf(aes(fill = class))
# Number of patches within 50-m tower radius
patches %>%
sf::st_crop(sf::st_buffer(tower, 50)) %>%
nrow()
# Avg. patch area
patches %>%
dplyr::mutate(area = sf::st_area(geometry)) %>%
dplyr::summarize(mean(area))
# Write patches to file
patches %>%
dplyr::select(segment) %>%
stars::st_rasterize(template = segm) %>%
sf::st_set_crs(spatial_ref$epsg) %>%
stars::write_stars(file.path(paths$out, "patches_50.tif"))
|
302dcd7ff513e5b895ecdde8115364dfee8c9f53 | 1d6f8de845bb216d3f9133dad3b72a245814bf5b | /Outliers/Codice_outliers.R | f8b25a74c5d6d136121de647dfd1567d717fd56e | [] | no_license | IlariaSartori/CST_atlas | e78776e6d996c44390d1df2c57163f6359289e39 | 4711fc45ba3297a8cdf9e5f28538e566c96cce03 | refs/heads/master | 2020-04-07T09:43:51.889220 | 2019-01-15T09:45:12 | 2019-01-15T09:45:12 | 158,263,121 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,486 | r | Codice_outliers.R | source ("../Create_features_dataset/utility_functions.R")
source("helper_outliers.R")
###############################################################################
####### GRAFICI #########################
###############################################################################
### 1) Read tract and remove points whith RD and AD out of range
source("../ReadCST/helpers_read_tract.R")
# Lettura tratto
setwd("/Users/ILARIASARTORI/Desktop/")
cst = read_csv("/06001", case = "001", scan = "001")
# Pulizia rispetto a RD e AD
cst$Streamlines = map(cst$Streamlines, remove_point_outliers)
# Divide left and right
cst = divide_cst(cst)
### 2) Plots
tract = cst$lhs
####################################################################################
##################################### DISTANCE #####################################
####################################################################################
outliers = get_outliers_distance(tract, separate_sp_bar=T)
outliers.sp_xy = outliers$outliers.sp_xy
outliers.sp_yz = outliers$outliers.sp_yz
outliers.bar_yz = outliers$outliers.bar_yz
outliers.bar_xy = outliers$outliers.bar_xy
library(rgl)
primo=1
for (i in 1:length(tract$Streamlines)) {
if (primo) {
if (is.element(i, unique(c(outliers.bar_yz, outliers.bar_xy)))) {
plot(tract$Streamlines[[i]], col='green')
}
else if (is.element(i, unique(c(outliers.sp_xy, outliers.sp_yz)))){
plot(tract$Streamlines[[i]], col='blue')
}
else
plot(tract$Streamlines[[i]], col='gray')
axes3d()
title3d(xlab='x', ylab='y', zlab='z')
primo=0
}
if (is.element(i, unique(c(outliers.bar_yz, outliers.bar_xy)))) {
plot(tract$Streamlines[[i]], col='green', new_window=FALSE)
}
else if (is.element(i, unique(c(outliers.sp_xy, outliers.sp_yz)))){
plot(tract$Streamlines[[i]], col='blue', new_window=FALSE)
}
else
plot(tract$Streamlines[[i]], col='gray', new_window=FALSE)
}
# Per vedere bene la differenza, fare anche un altro grafico in cui si evidenziano le
# streamline solo classificate outliers per il baricentro (basta invertire la classificazione
# per colore nell'if)
primo=1
for (i in 1:length(tract$Streamlines)) {
if (primo) {
if (is.element(i, unique(c(outliers.sp_xy, outliers.sp_yz)))){
plot(tract$Streamlines[[i]], col='blue')
}
else if (is.element(i, unique(c(outliers.bar_yz, outliers.bar_xy)))) {
plot(tract$Streamlines[[i]], col='green')
}
else
plot(tract$Streamlines[[i]], col='gray')
axes3d()
title3d(xlab='x', ylab='y', zlab='z')
primo=0
}
if (is.element(i, unique(c(outliers.sp_xy, outliers.sp_yz)))){
plot(tract$Streamlines[[i]], col='blue', new_window=FALSE)
}
else if (is.element(i, unique(c(outliers.bar_yz, outliers.bar_xy)))) {
plot(tract$Streamlines[[i]], col='green', new_window=FALSE)
}
else
plot(tract$Streamlines[[i]], col='gray', new_window=FALSE)
}
# Mi sembrerebbe che le streamline outliers dal punto di vista della spatial median siano
# quelle che hanno la parte centrale abbastanza allineata, ma piu' ci avviciniamo alla corteccia
# piu sono strane: il "ramo" e' troppo basso o troppo alto rispetto alle altre streamline,
# o magari e' molto piu' lungo
# Le streamline outliers solo per il baricentro invece forse sono quelle che hanno anche il tronco un po'
# spostato
####################################################################################
##################################### DEPTH #####################################
####################################################################################
library(fields)
outliers = get_outliers_depth(tract, separate_sp_bar=T)
outliers_depth_Barycenter = outliers$outliers_depth_Barycenter
outliers_depth_Median = outliers$outliers_depth_Median
primo=1
for (i in 1:length(tract$Streamlines)) {
if (primo) {
if (is.element(i, outliers_depth_Barycenter)){
plot(tract$Streamlines[[i]], col='green')
}
else if (is.element(i, outliers_depth_Median)) {
plot(tract$Streamlines[[i]], col='blue')
}
else
plot(tract$Streamlines[[i]], col='gray')
axes3d()
title3d(xlab='x', ylab='y', zlab='z')
primo=0
}
if (is.element(i, outliers_depth_Barycenter)){
plot(tract$Streamlines[[i]], col='green', new_window=FALSE)
}
else if (is.element(i, outliers_depth_Median)) {
plot(tract$Streamlines[[i]], col='blue', new_window=FALSE)
}
else
plot(tract$Streamlines[[i]], col='gray', new_window=FALSE)
}
# Per vedere bene la differenza, fare anche un altro grafico in cui si evidenziano le
# streamline solo classificate outliers per il baricentro (basta invertire la classificazione
# per colore nell'if)
primo=1
for (i in 1:length(tract$Streamlines)) {
if (primo) {
if (is.element(i, outliers_depth_Median)) {
plot(tract$Streamlines[[i]], col='blue')
}
else if (is.element(i, outliers_depth_Barycenter)){
plot(tract$Streamlines[[i]], col='green')
}
else
plot(tract$Streamlines[[i]], col='gray')
axes3d()
title3d(xlab='x', ylab='y', zlab='z')
primo=0
}
if (is.element(i, outliers_depth_Median)) {
plot(tract$Streamlines[[i]], col='blue', new_window=FALSE)
}
else if (is.element(i, outliers_depth_Barycenter)){
plot(tract$Streamlines[[i]], col='green', new_window=FALSE)
}
else
plot(tract$Streamlines[[i]], col='gray', new_window=FALSE)
}
|
f958915978a004ffc78a00e6df87320504566b7e | 9a8554d8e55fdae2f30fa9a1a3e57fb1a2530bbf | /Programming_Assignment_#2/cachematrix.R | cc0f642d54ea3266f8bcc77d352fda8c619b32e1 | [] | no_license | maprieto68/DatascienceCoursera | 4d235574beb7ebca31f6fcbe592f71ec66d7d432 | ee75eb8a970184f9a44f420eaa14967943feac47 | refs/heads/master | 2020-03-11T12:07:08.836956 | 2018-05-16T00:57:22 | 2018-05-16T00:57:22 | 129,988,312 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,531 | r | cachematrix.R | ## These functions are aplications of how to save computational power
#in cases where it is required to execute the same process more than once in a loop.
# Both of the functions look to cache the inverse of a matrix and can be introduced
#in any other processes.
## Assignment 1: This function creates a special "matrix" object that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
#set a value for the matrix, initially null
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
#get the matrix
get <- function() x
#calculta the inverse
setInverse <- function() inv <<- solve(x)
#get the inverse
getInverse <- function() inv
#Have all the set and get elements in a single list
list(set = set,
get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## Assignment 2: This function computes the inverse of the special "matrix" returned
#by makeCacheMatrix above. If the inverse has already been calculated
#(and the matrix has not changed), then the cachesolve should retrieve the
#inverse from the cache.
cacheSolve <- function(x, ...) {
#Check if the marix has been already calculated
inv <- x$getinverse()
#if it is the case, return the cached matrix
if(!is.null(inv)) {
message("Returning already cached matrix")
return(inv)
}
#if not calculated before, calculate it and set an object
data <- x$get()
inv <- solve(data, ...)
x$setinverse(inv)
#return the final result
inv
}
|
c4218450d80ec53102bc6b9ba8f00318570ad535 | f10a9f0cb7360aaaedd34a5be84be87464da0f07 | /Mastering Metrics Regression.R | ade4fd9a20a0bb3b4e679c365bb7e7fb9038b199 | [] | no_license | R-Avalos/Mastering-Metrics-Regression | 04c1005f8e17d9ab39dbc4dad94ba205cbdcf86d | 1ab4ff487d9854b43b50d1bbb1e49fefb443a5c0 | refs/heads/master | 2018-01-08T06:05:46.968219 | 2015-10-22T22:44:12 | 2015-10-22T22:44:12 | 44,717,932 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,310 | r | Mastering Metrics Regression.R | ### Mastering Metrics Regression #####
#####################################
## Set Workspace
## Load data ##
Table2.1 <- read.csv(file = "Table 2.1.csv") # load csv
Table2.1$Private <- c(1, 1, 0, 1, 0, 1, 1, 0, 0) #revisit to run apply "if=" function instead of manual entry
Table2.1$A <- c(1, 1, 1, 0, 0, 0, 0, 0, 0) #revisit to run apply function instead of manual entry
FiveStudents <- Table2.1[1:5,] #subset students to first five
## Estimate Equation 2.3 from first five students in Table 2.1
## Long model... Y_i = alpha + beta P_i + gamma A_i + e_i
## Income = alpha + Private College Dummy + A_i = dummy group A
## Why are we only using the first five students?
LongRegression <- lm(X1996.Earnings ~ Private + A, data = FiveStudents)
LongRegression
## Estimate short version of 2.3 (page 70)
## Short model... Y_i = alpha + beta P_i + e_i
ShortRegression <- lm(X1996.Earnings ~ Private, data = FiveStudents)
ShortRegression
## Regress ommitted variable A on private dummy school
OmmittedA <- lm(Private ~ A, data = FiveStudents)
OmmittedA
#### Excercise 2
# Estimated Test Scores = 689.47 - 3.41*STR - 1.62*AVGINC + 0.19*AVGINC*STR
# Summary stats:
# Mean SD
# AVGINC 15 7
# STR 20 2
# A. predicted when AVGINC=8 and STR =20
a <- 689.47 - (3.41*20) - (1.62*8) + (0.19*8*20)
a
# B. predicted when avginc=8 and str=22
b <- 689.47 - (3.41*22) - (1.62*8) + (0.19*8*22)
b
# C. predicted when avginc=15 and str=20
c <- 689.47 - (3.41*20) - (1.62*15) + (0.19*15*20)
c
# D. predicted when avginc=15 and str=22
d <- 689.47 - (3.41*22) - (1.62*15) + (0.19*15*22)
d
# E. predicted when avginc=22 and str=20
e <- 689.47 - (3.41*20) - (1.62*22) + (0.19*22*20)
e
# F. predicted when avginc=22 and str=22
f <- 689.47 - (3.41*22) - (1.62*22) + (0.19*22*22)
f
# G. subtract a from b
g <- b-a
g # holding income constant, higher str decrease testscores
# H. subtract c from d
h <- d-c
h # holding income constant, higher stress dreceases testscores but at a lower rate than a lower income bracket
# I. subtract e from f
i <- f-e
i # the interaction term effect overrides the negative effects of STR and income.
#J, Str...
## Derive B0+ B1*X + B2*X^2 with respect to x
# d'x = B1 + 2*B2*x
## Derive B0 + B1*X1 + B2*X2 + B3*X1*X2 with respect to X1
# d'x1 = B1 + B3*X2
|
65b9391cd1604f7065e626eb08aabe25d97e32e6 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/BDP2/examples/BDP2.Rd.R | 0774ee71daf28aa31a00972d5567465d36c98782 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 700 | r | BDP2.Rd.R | library(BDP2)
### Name: BDP2
### Title: Operating characteristics of a single-arm trial with a binary
### endpoint
### Aliases: BDP2
### Keywords: design
### ** Examples
# Operating characteristics with calling for efficacy
BDP2(n=20, interim.at = c(3,9,13,18), ptrue = 0.3,
eff.stop = "call",
pF=0.3, cF=0.01, pE=0.12, cE = 0.9,
type="PostProb",
shape1F=0.3, shape2F=0.7, shape1E=0.12, shape2E=0.88)
# Operating characteristics with stopping for efficacy
BDP2(n=20, interim.at = c(3,9,13,18), ptrue = 0.3,
eff.stop = "stop",
pF=0.3, cF=0.01, pE=0.12, cE = 0.9,
type="PostProb",
shape1F=0.3, shape2F=0.7, shape1E=0.12, shape2E=0.88)
|
f6f29604d47f589cc0f8ec7e4078a10b0e8db24e | a0ed6ffc3e9d7f1170b4f05935bc94f52cda17b6 | /ODEsAnalysisLib.r | 495e809b103c4dc63622fd94dc2a35e1c2ee125b | [] | no_license | JoseDDesoj/Dynamical-Systems-View-of-Cell-Biology | 608c8d6a1d964ae467f4ef78912c732035e17383 | bdf15e68d9eb96ab5a9f259cfae4378b792fc81b | refs/heads/master | 2020-04-02T04:54:14.556230 | 2019-01-11T14:46:22 | 2019-01-11T14:46:22 | 65,590,517 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 15,256 | r | ODEsAnalysisLib.r | ################################################################################################################################
library(deSolve)
library(rootSolve)
################################################################################################################################
tarrows <- function(out, ds = 0.1, ...)
{
  ## Decorate an already-plotted trajectory with direction arrows.
  ## `out` is a deSolve output data frame (time in col 1, X1/X2 in cols 2:3);
  ## arrows are placed where the squared distance to a reference point
  ## crosses the threshold `ds`. Extra arguments are passed on to arrows().
  crossing_idx <- function(ref) {
    # squared distance of every trajectory point to the reference point
    d  <- (out[, 2] - ref[1])^2 + (out[, 3] - ref[2])^2
    # distance at the next point (wrapping around to the first)
    dn <- c(d[-1], d[1])
    # indices where the squared distance crosses the threshold ds
    which(d < ds & dn > ds | d > ds & dn < ds)
  }
  # all crossings relative to the final point, plus the first crossing
  # relative to the initial point
  ii <- c(crossing_idx(unlist(out[nrow(out), 2:3])),
          crossing_idx(unlist(out[1, 2:3]))[1])
  for (i in ii) {
    arrows(out[i, 2], out[i, 3], out[i + 1, 2], out[i + 1, 3],
           length = 0.1, lwd = 1, ...)
  }
}
################################################################################################################################
trajectory <- function(Func, X, parameters, ds = 0.1, Col = 1,
                       times = seq(0, 100, by = 0.01)) {
  ## Integrate the 2-variable ODE system `Func` from initial state X and
  ## overlay the resulting (X1, X2) trajectory on the current phase-plane
  ## plot, with direction arrows added by tarrows().
  ##
  ## Func       : derivative function in deSolve::ode format.
  ## X          : named initial state c(X1 = ..., X2 = ...).
  ## parameters : parameter list/vector passed to Func.
  ## ds         : squared-distance threshold used to place arrows.
  ## Col        : plotting colour for line and arrows.
  ## times      : integration time grid (previously hard-coded; the default
  ##              preserves the original seq(0, 100, by = 0.01)).
  out <- as.data.frame(ode(X, times, Func, parms = parameters))
  matplot(out$X1, out$X2, type = "l", lwd = 2, add = TRUE, col = Col)
  # forward the colour so arrows match the curve (consistent with the
  # trajectoryAttractor variants; default Col = 1 is black, as before)
  tarrows(out, ds, col = Col)
}
################################################################################################################################
trajectoryAttractor <- function(Func, X, parameters, Attractors, ds = 0.1, Cols) {
  ## Integrate the system from initial state X, classify the final state by
  ## its nearest attractor (rows of `Attractors`), and draw the trajectory
  ## in that attractor's colour (matching entry of `Cols`).
  times <- seq(0, 50, by = 0.05)
  out   <- as.data.frame(ode(X, times, Func, parms = parameters))
  final <- out[nrow(out), 2:3]
  # Euclidean distance from the trajectory endpoint to each attractor
  Dists <- vapply(seq_len(nrow(Attractors)),
                  function(i) as.numeric(dist(rbind(final, Attractors[i, ]))),
                  numeric(1))
  # colour of the closest attractor (ties select multiple, as before)
  Col <- Cols[which(Dists == min(Dists))]
  matplot(out$X1, out$X2, type = "l", lwd = 1, add = TRUE, col = Col)
  tarrows(out, ds, col = Col)
}
################################################################################################################################
trajectoryAttractor2 <- function(Func, X, parameters, Attractors, ds = 0.1, Cols) {
  ## Integrate the system from initial state X and colour the trajectory by
  ## the symmetry of the final state:
  ##   |X1 - X2| <= 0.1 -> Cols[2]  (symmetric)
  ##   X1 - X2 < -0.1   -> Cols[1]
  ##   X1 - X2 >  0.1   -> Cols[3]
  ## (`Attractors` is accepted for interface compatibility but unused here.)
  out   <- as.data.frame(ode(X, seq(0, 50, by = 0.05), Func, parms = parameters))
  delta <- out[nrow(out), 2] - out[nrow(out), 3]
  if (abs(delta) <= 0.1) {
    Col <- Cols[2]            # symmetric endpoint
  } else if (delta < 0) {
    Col <- Cols[1]            # X2 dominates
  } else {
    Col <- Cols[3]            # X1 dominates
  }
  matplot(out$X1, out$X2, type = "l", lwd = 1, add = TRUE, col = Col)
  tarrows(out, ds, col = Col)
}
################################################################################################################################
# Draw `Nconditions` random initial conditions for `Nvariables` state
# variables, each coordinate sampled uniformly from Rango = c(min, max).
#
# Returns an Nconditions x Nvariables matrix with columns named
# "X1", "X2", ... so its rows can be fed directly to the solvers above.
RandomInitial <- function(Nvariables, Nconditions, Rango) {
  Nums <- matrix(runif(Nconditions*Nvariables, Rango[1], Rango[2]), Nconditions, Nvariables)
  # BUG FIX / generalization: column names were hard-coded to c("X1", "X2"),
  # which errored ("length of 'dimnames' not equal to array extent") for any
  # Nvariables != 2.  Build the names from Nvariables instead; the result is
  # unchanged for the 2-variable case.
  colnames(Nums) <- paste0("X", seq_len(Nvariables))
  return(Nums)
}
################################################################################################################################
# Draw `Nconditions` random initial conditions where each state variable has
# its own uniform sampling range: RangoV1 = c(min, max) for X1 and
# RangoV2 = c(min, max) for X2.  Returns an Nconditions x 2 matrix with
# columns "X1" and "X2".
RandomInitialRango <- function(Nconditions, RangoV1, RangoV2) {
  # Sampling order (X1 before X2) is kept stable so seeded runs reproduce.
  x1_draws <- runif(Nconditions, RangoV1[1], RangoV1[2])
  x2_draws <- runif(Nconditions, RangoV2[1], RangoV2[2])
  starts <- cbind(X1 = x1_draws, X2 = x2_draws)
  return(starts)
}
################################################################################################################################
# Plot equilibrium points found by a Newton-type root solver.
# For each row of `Inits` (initial guesses, columns = X1, X2), solve
# F(x) = 0 with rootSolve::stode() (pos = TRUE restricts the search to
# non-negative states) and mark the equilibrium as a red dot (col = 2) on
# the current plot.  Used for its plotting side effect only; the value of
# the t(sapply(...)) wrapper is not meaningful.
GraphEqPointsNewton <- function(Function, parameters, Inits) {
  t(sapply(1:nrow(Inits), function(i) {
    Equil <- stode(y = c(X1=Inits[i,1], X2=Inits[i,2]), fun = Function, parms = parameters, pos = TRUE)$y
    points(Equil[1], Equil[2], col=2, pch=20)
  }))
}
################################################################################################################################
# Plot attractors found by forward simulation.
# For each row of `Inits`, integrate the system to steady state with
# rootSolve::runsteady() (up to t = 1e5) and mark the end state as a red
# dot on the current plot.  Unlike GraphEqPointsNewton(), forward
# simulation can only reach stable equilibria.  Plotting side effect only.
GraphEqPointsSimulation <- function(Function, parameters, Inits) {
  t(sapply(1:nrow(Inits), function(i) {
    Equil <- runsteady(y = c(X1=Inits[i,1], X2=Inits[i,2]), fun = Function, parms = parameters, times = c(0, 1e5))$y
    points(Equil[1], Equil[2], col=2, pch=20)
  }))
}
################################################################################################################################
# Plot equilibria colour-coded by linear stability.
# For each initial guess (row of `Inits`): solve for an equilibrium with
# rootSolve::stode(), evaluate the Jacobian there (rootSolve::jacobian.full)
# and classify the point by the signs of the Jacobian's eigenvalues.
GraphEqPointsStability <- function(Function, parameters, Inits) {
  t(sapply(1:nrow(Inits), function(i) {
    Equil <- stode(y = Inits[i,], fun = Function, parms = parameters, pos = TRUE)$y
    Jacob <- jacobian.full(y=Equil,func=Function, parms=parameters)
    EigenVal <- eigen(Jacob)$values
    # white:unstable node, black:stable node, grey:saddle
    # NOTE(review): this assumes both eigenvalues are real -- for complex
    # eigenvalues (spiral points) sign() errors and none of the branches
    # assigns `col`; confirm the models this is applied to only produce
    # real spectra.
    if (sign(EigenVal[1])>0 & sign(EigenVal[2])>=0) col <- "white"
    if (sign(EigenVal[1])<0 & sign(EigenVal[2])<=0) col <- "black"
    if (sign(EigenVal[1])* sign(EigenVal[2]) <0 ) col <- "grey"
    points(Equil[1], Equil[2],pch=21,cex=2.0, bg=col,col="black")
  }))
}
################################################################################################################################
# Locate equilibria from many starting guesses and attach their eigenvalues.
# 1. Solve for an equilibrium from every row of `Inits` (rootSolve::stode).
# 2. Deduplicate solutions that agree to 2 decimal places.
# 3. Compute the Jacobian eigenvalues at each distinct equilibrium.
# Returns a matrix whose first columns are the equilibrium coordinates and
# whose remaining columns are the eigenvalues (the input format expected by
# PointStabilityType()).
EqPointsEigenv <- function(Function, parameters, Inits) {
  Equilib <- t(sapply(1:nrow(Inits), function(i) {
    Equil <- stode(y = Inits[i,], fun = Function, parms = parameters, pos = TRUE)$y
  }))
  #print(Equilib)
  # keep only the first occurrence of each equilibrium (rounded row comparison)
  EqPoints <- as.matrix(Equilib[which(duplicated(round(Equilib, 2),MARGIN=1)==FALSE), ])
  #print(EqPoints)
  #print(EqPoints)
  EigenVals <- t(sapply(1:nrow(EqPoints), function(i) {
    Jacob <- jacobian.full(y=c(EqPoints[i,]) ,func=Function, parms=parameters)
    EigenVal <- eigen(Jacob)$values
  }))
  return(cbind(EqPoints, EigenVals))
}
################################################################################################################################
# Locate the distinct equilibria reachable from the starting guesses in
# `Inits` (rootSolve::stode from each row), deduplicated to 2 decimal
# places.  The single-result branch re-transposes so the return value is
# always a matrix with one equilibrium per row, never a dropped vector.
EqPoints <- function(Function, parameters, Inits) {
  Equilib <- t(sapply(1:nrow(Inits), function(i) {
    Equil <- stode(y = Inits[i,], fun = Function, parms = parameters, pos = TRUE)$y
  }))
  #print(Equilib)
  if(length(which(duplicated(round(Equilib, 2),MARGIN=1)==FALSE))==1) EqPoints <- t(as.matrix(Equilib[which(duplicated(round(Equilib, 2),MARGIN=1)==FALSE),]))
  else EqPoints <- as.matrix(Equilib[which(duplicated(round(Equilib, 2),MARGIN=1)==FALSE), ])
  #print(EqPoints)
  #print(EqPoints)
  return(cbind(EqPoints))
}
################################################################################################################################
# Find the distinct attractors reached by forward simulation from every row
# of `Init` (rootSolve::runsteady up to t = 1e5), deduplicated to 2 decimal
# places.  Returns a matrix with one attractor per row; the single-attractor
# case is re-transposed so the result is always a matrix.
RunAttractors <- function(Function, parameters, Init) {
  Attractors <- t(sapply(1:nrow(Init), function(i) runsteady(y = Init[i,], fun = Function, parms = parameters, times = c(0, 1e5))$y))
  if(length(which(duplicated(round(Attractors, 2),MARGIN=1)==FALSE))==1) Attractors <- t(as.matrix(Attractors[which(duplicated(round(Attractors, 2),MARGIN=1)==FALSE),]))
  if(length(which(duplicated(round(Attractors, 2),MARGIN=1)==FALSE))>1) Attractors <- Attractors[which(duplicated(round(Attractors, 2),MARGIN=1)==FALSE),]
  return(Attractors)
}
################################################################################################################################
# Filter the output of EqPointsEigenv() by stability type.
#
# EquStabMatrix: matrix whose columns 1:2 hold the equilibrium coordinates
#   (X1, X2) and whose columns 3:4 hold the two Jacobian eigenvalues there.
# Type: "Saddle" (eigenvalues of opposite sign), "Stable" (both negative)
#   or "Unstable" (both non-negative).
#
# Returns the coordinate columns of the matching equilibria (a vector when
# exactly one matches, a matrix otherwise -- same as the original indexing).
# BUG FIX: the original body referenced a global object `EqPointsEig`
# instead of the `EquStabMatrix` argument, so the function silently ignored
# its own input.
PointStabilityType <- function(EquStabMatrix, Type) {
  # number of negative eigenvalues per equilibrium (row)
  n_negative <- vapply(seq_len(nrow(EquStabMatrix)),
                       function(i) sum(EquStabMatrix[i, 3:4] < 0),
                       numeric(1))
  if (Type == "Saddle")   return(EquStabMatrix[which(n_negative == 1), 1:2]) # signs -,+
  if (Type == "Stable")   return(EquStabMatrix[which(n_negative == 2), 1:2]) # signs -,-
  if (Type == "Unstable") return(EquStabMatrix[which(n_negative == 0), 1:2]) # signs +,+
}
################################################################################################################################
# Add one column of a bifurcation diagram: for the current parameter value
# `ParVal`, solve for equilibria from every row of `Inits` (rootSolve::stode),
# classify each by the sign pattern of its Jacobian eigenvalues, and plot the
# X2 coordinate of each equilibrium against ParVal.  Plotting side effect only.
GraphBifurcationAttractStab <- function(Function, parameters, Inits, ParVal) {
  t(sapply(1:nrow(Inits), function(i) {
    Equil <- stode(y = Inits[i,], fun = Function, parms = parameters, pos = TRUE)$y
    Jacob <- jacobian.full(y=Equil,func=Function, parms=parameters)
    EigenVal <- eigen(Jacob)$values
    # white:unstable node, black:stable node, grey:saddle
    # NOTE(review): assumes real eigenvalues -- sign() errors on complex input
    if (sign(EigenVal[1])>0 & sign(EigenVal[2])>=0) Col <- "white"
    if (sign(EigenVal[1])<0 & sign(EigenVal[2])<=0) Col <- "black"
    if (sign(EigenVal[1])* sign(EigenVal[2]) <0 ) Col <- "grey"
    points(ParVal, Equil[2],pch=20,col=Col)
  }))
}
################################################################################################################################
# Bifurcation-diagram column based on forward simulation: find the distinct
# attractors reached from the rows of `Init` (rootSolve::runsteady,
# deduplicated to 2 decimals), classify each by Jacobian eigenvalue signs,
# and plot each attractor's X2 coordinate against the parameter value
# `ParVal`.  Plotting side effect only.
GraphBifurcationAttractStab2 <- function(Function, parameters, Init, ParVal) {
  Attractors <- t(sapply(1:nrow(Init), function(i) runsteady(y = Init[i,], fun = Function, parms = parameters, times = c(0, 1e5))$y))
  # keep the matrix shape when only one distinct attractor remains
  if(length(which(duplicated(round(Attractors, 2),MARGIN=1)==FALSE))==1) Attractors <- t(as.matrix(Attractors[which(duplicated(round(Attractors, 2),MARGIN=1)==FALSE),]))
  else Attractors <- Attractors[which(duplicated(round(Attractors, 2),MARGIN=1)==FALSE),]
  #print(Attractors)
  for(i in 1:nrow(Attractors)) {
    Jacob <- jacobian.full(y=Attractors[i,],func=Function, parms=parameters)
    EigenVal <- eigen(Jacob)$values
    #print(EigenVal)
    #white:unstable node, black:stable node, grey:saddle
    # NOTE(review): assumes real eigenvalues -- sign() errors on complex input
    if (sign(EigenVal[1])>0 & sign(EigenVal[2])>=0) Col <- "white"
    if (sign(EigenVal[1])<0 & sign(EigenVal[2])<=0) Col <- "black"
    if (sign(EigenVal[1])* sign(EigenVal[2]) <0 ) Col <- "grey"
    points(ParVal, Attractors[i,2],pch=20, col=Col)
  }
}
################################################################################################################################
# Like GraphBifurcationAttractStab2(), but plots each distinct attractor at
# its own (X1, X2) position in the phase plane rather than against a
# parameter value; extra arguments in `...` are forwarded to points().
# The print() calls log each attractor to the console as a progress trace.
GraphBiAttractStab2 <- function(Function, parameters, Init,...) {
  Attractors <- t(sapply(1:nrow(Init), function(i) runsteady(y = Init[i,], fun = Function, parms = parameters, times = c(0, 1e5))$y))
  # keep the matrix shape when only one distinct attractor remains
  if(length(which(duplicated(round(Attractors, 2),MARGIN=1)==FALSE))==1) Attractors <- t(as.matrix(Attractors[which(duplicated(round(Attractors, 2),MARGIN=1)==FALSE),]))
  if(length(which(duplicated(round(Attractors, 2),MARGIN=1)==FALSE))>1) Attractors <- Attractors[which(duplicated(round(Attractors, 2),MARGIN=1)==FALSE),]
  #else Attractors <- Attractors[which(duplicated(round(Attractors, 2),MARGIN=1)==FALSE),]
  #print(Attractors)
  for(i in 1:nrow(Attractors)) {
    print("################################")
    print(Attractors[i,])
    print("################################")
    Jacob <- jacobian.full(y=Attractors[i,],func=Function, parms=parameters)
    EigenVal <- eigen(Jacob)$values
    #print(EigenVal)
    #white:unstable node, black:stable node, grey:saddle
    # NOTE(review): assumes real eigenvalues -- sign() errors on complex input
    if (sign(EigenVal[1])>0 & sign(EigenVal[2])>=0) Col <- "white"
    if (sign(EigenVal[1])<0 & sign(EigenVal[2])<=0) Col <- "black"
    if (sign(EigenVal[1])* sign(EigenVal[2]) <0 ) Col <- "grey"
    points(Attractors[i,1], Attractors[i,2], col=Col,...)
  }
}
################################################################################################################################
# Draw the direction (vector) field of a planar system on a resol x resol grid.
# `fun(x, y, parms)` must be vectorized and return the two velocity components
# concatenated in one vector: the first resol^2 entries are the x-components,
# the remaining resol^2 entries the y-components.
# add = FALSE opens a fresh empty plot first; jitter = TRUE perturbs the grid
# slightly so the arrows do not line up in rigid rows.  Arrow lengths are
# scaled by the local speed, normalised so the field stays readable.
phasearrows <- function(fun,xlims,ylims,resol=25, col='black', add=F,parms=NULL,jitter=FALSE,...) {
  if (add==F) {
    plot(1,xlim=xlims, ylim=ylims, type='n',...)#xlab="x",ylab="y");
  }
  # grid of arrow base points: x varies along rows, y along columns
  x <- matrix(seq(xlims[1],xlims[2], length=resol), byrow=T, resol,resol);
  y <- matrix(seq(ylims[1],ylims[2], length=resol),byrow=F, resol, resol);
  npts <- resol*resol;
  if(jitter) {
    # random offsets of at most 1/10 of a grid cell in each direction
    xspace <- abs(diff(xlims))/(resol*10);
    yspace <- abs(diff(ylims))/(resol*10);
    x <- x + matrix(runif(npts, -xspace, xspace),resol,resol);
    y <- y + matrix(runif(npts, -yspace, yspace),resol,resol);
  }
  z <- fun(x,y,parms);
  # split the concatenated return value into the two velocity components
  z1 <- matrix(z[1:npts], resol, resol);
  z2 <- matrix(z[(npts+1):(2*npts)], resol, resol);
  maxx <- max(abs(z1));
  maxy <- max(abs(z2));
  # time scale chosen so the fastest arrow spans roughly one grid cell
  dt <- min( abs(diff(xlims))/maxx, abs(diff(ylims))/maxy)/resol;
  lens <- sqrt(z1^2 + z2^2);
  lens2 <- lens/max(lens);
  # the +.1 in the denominator keeps arrows visible where the speed is ~0
  arrows(c(x), c(y), c(x+dt*z1/((lens2)+.1)), c(y+dt*z2/((lens2)+.1)),length=.04, col=col);
}
################################################################################################################################
# Draw the nullclines of a planar system as zero-level contours.
# `fun(x, y, parms)` follows the same convention as phasearrows(): a
# vectorized function returning c(x-components, y-components) over the grid.
# The x-nullcline (dx/dt = 0) is drawn in blue, the y-nullcline (dy/dt = 0)
# in green; add = TRUE overlays the first contour on an existing plot.
nullclines <- function(fun,xlims, ylims, resol=250, add=F,parms=NULL) {
  x <- matrix(seq(xlims[1],xlims[2], length=resol), byrow=F, resol,resol);
  y <- matrix(seq(ylims[1],ylims[2], length=resol),byrow=T, resol, resol);
  npts = resol*resol;
  z <- fun(x,y,parms);
  # split the concatenated return value into the two components
  z1 <- matrix(z[1:npts], resol, resol);
  z2 <- matrix(z[(npts+1):(2*npts)], resol, resol);
  contour(x[,1],y[1,],z1,levels=c(0), drawlabels=F,add=add, lwd=2, col="blue");
  contour(x[,1],y[1,],z2,levels=c(0), drawlabels=F,add=T, lwd=2, col="forestgreen");
  title(main="Blue=x nullcline, Green=y nullcline",cex=0.35);
}
################################################################################################################################
# Newton's method for a fixed point of the vector field `func`.
#
# func:   function(t, x, parms) returning a list whose first element is the
#         vector field at state x (the deSolve convention).
# x0:     starting guess; if NULL the user clicks the start point on the
#         current plot via locator().
# parms:  parameters forwarded to `func`.
# tol:    convergence threshold on sum(F(x)^2).
# niter:  maximum number of Newton iterations.
# inc:    half-width of the central difference used for the Jacobian.
# plotit: if TRUE, mark the fixed point on the current plot with a symbol
#         encoding stability (filled = all Re(eigenvalues) < 0) and whether
#         the leading eigenvalue is complex (triangle vs circle).
#
# On convergence prints a summary and returns list(x = fixed point,
# df = Jacobian); otherwise prints "Convergence failed" and returns NULL
# (the value of the final cat()).
newton=function(func,x0=NULL,parms=NULL,tol=1e-16,niter=40,inc=1e-6,plotit=TRUE) {
  x=x0; #initial x
  if (is.null(x0)){x = locator(n=1); x=c(x$x,x$y)};
  nx = length(x); # length of state vector
  ######### Newton iteration loop: start
  for(i in 1:niter){
    y = func(0,x,parms)[[1]]
    df = matrix(0,nx,nx); # Jacobian by central finite differences
    for(j in 1:nx) {
      #Increment vector for estimating derivative wrt jth coordinate
      v=rep(0,nx);
      v[j] = inc;
      # BUG FIX: the original passed `t` (the base transpose function) as the
      # time argument here; evaluate at time 0, consistent with the residual
      # evaluation of `func` above.
      df[,j]= (func(0,x+v,parms)[[1]] - func(0,x-v,parms)[[1]])/(2*inc)
    }
    if (sum(y^2) < tol){ #check for convergence
      if(plotit){
        ev=eigen(df)$values; pch1=1+as.numeric(Im(ev[1])!=0); pch2=1+as.numeric(max(Re(ev))<0);
        pchs=matrix( c(2,17,1,16),2,2,byrow=T);
        points(x[1],x[2],type="p",pch=pchs[pch1,pch2],cex=1.5)
      }
      cat("Fixed point (x,y) = ",x,"\n");
      cat("Jacobian Df=","\n"); print(df);cat("Eigenvalues","\n"); print(eigen(df)$values); cat("Eigenvectors","\n"); print(eigen(df)$vectors);
      return(list(x=x,df=df))
    }
    x = x - solve(df,y) # one more step if needed
    cat(i, x, "\n") #print out the next iterate
  }
  ######### Newton iteration loop: end
  cat("Convergence failed");
}
################################################################################################################################
# Draw the stable and unstable manifolds of a saddle fixed point.
# First locates the fixed point with the sibling newton() (plotit = FALSE),
# then, if the two Jacobian eigenvalues have opposite signs, integrates
# (deSolve::lsoda) from tiny perturbations +/- eps along each eigenvector:
# forward in time along the unstable direction (red) and backward in time
# (negative time grid) along the stable direction (black).
DrawManifolds=function(fun.lsoda,parms,x0=NULL,maxtime=100) {
  xbar=newton(fun.lsoda,x0=x0,parms=parms,plotit=FALSE);
  x=xbar$x; df=xbar$df; V=eigen(df)$vectors; ev=eigen(df)$values;
  if (ev[1]*ev[2] > 0) {
    # same-sign eigenvalues: node, not a saddle -- nothing to draw
    cat("Fixed point is not a saddle \n");
  }else{
    i1=which(ev>0); i2=which(ev<0);
    v1=V[,i1]; v2=V[,i2]; eps=1e-3;
    out1=lsoda(times=seq(0,maxtime,.1),y=x+eps*v1,func=fun.lsoda,parms=parms); points(out1[,2],out1[,3],type="l",lwd=2,col="red");
    out2=lsoda(times=seq(0,maxtime,.1),y=x-eps*v1,func=fun.lsoda,parms=parms); points(out2[,2],out2[,3],type="l",lwd=2,col="red");
    out3=lsoda(times=-seq(0,maxtime,.1),y=x+eps*v2,func=fun.lsoda,parms=parms); points(out3[,2],out3[,3],type="l",lwd=2,col="black");
    out4=lsoda(times=-seq(0,maxtime,.1),y=x-eps*v2,func=fun.lsoda,parms=parms); points(out4[,2],out4[,3],type="l",lwd=2,col="black");
    title(sub="Black=stable manifold, Red=unstable manifold");
  }
}
################################################################################################################################
|
6307281e7bd10fdd88ce958f2a195f14a9b8cb75 | 99d526affc0cfe7adc0b71fa4dfe4390d668ad77 | /R/c.R | 3d9efd24100718f661f659c38f94c0807bd2c409 | [
"MIT"
] | permissive | jasongraf1/JGmisc | 8f84a9d2ada15713a6e4c81a2dca18cdbf94c783 | 7474e0e6fdebada8e3a38c094a6d8a7ccddcff97 | refs/heads/master | 2022-03-05T23:22:08.901452 | 2022-03-05T09:27:14 | 2022-03-05T09:27:14 | 89,917,654 | 0 | 4 | null | null | null | null | UTF-8 | R | false | false | 125 | r | c.R | c. <- function(x, center = NULL) {
y <- as.numeric(x)
if(is.null(center)) return (y - mean(y))
else return (y - center)
}
|
9ada694362b14a4c8abd0a17cbd0cbd369a59dde | 95ea145654590380e932d060a534a0d94ebbcbb0 | /R/fmt.R | 6b0ebf225025e8b062012fc60044c44520c382ed | [] | no_license | simonthelwall/diyar | cee56912feac90de468d6cb17f62ef0ecbb6cfff | a7bc9d2b77f1fa1aec0e8a76db5d5daff08bd1ba | refs/heads/master | 2020-06-14T17:09:54.914214 | 2019-07-04T09:46:00 | 2019-07-04T09:46:00 | 195,067,959 | 0 | 0 | null | 2019-07-03T14:14:35 | 2019-07-03T14:14:34 | null | UTF-8 | R | false | false | 206 | r | fmt.R | #' @title fmt - helper function
#'
#' @description Render a number as an integer string with commas grouping
#'   the thousands, e.g. \code{fmt(1234567)} gives \code{"1,234,567"}.
#'   Intended for pretty-printing counts in messages and summaries.
#'
#' @param g Numeric. The number to format; it is rendered in integer format
#'   (\code{format = "d"}), so any fractional part is dropped by
#'   \code{formatC}.
#'
#' @return A character string with comma-separated thousands.
#'
#' @examples
#' fmt(1234567)
fmt <- function(g) formatC(g, format="d", big.mark=",")
|
18401433196ac5af9149d1d0cf0e7164e1714948 | aaaa3f3ed09647ba9e0c5b42b76a8111178a1e45 | /Anand_06_02_17/server.R | d07cd51d04da44db5b3e3e1580e4500ad9ac6183 | [] | no_license | mpjimenez/Stat331-Final_Project | ddf6a917fd82b8850861e38d828621beb1a8138f | d90af57c92ce9d1d858efaeafa26c21027271ca7 | refs/heads/master | 2021-01-24T06:47:12.152028 | 2017-06-14T01:52:22 | 2017-06-14T01:52:22 | 93,320,688 | 0 | 2 | null | 2017-06-12T22:03:08 | 2017-06-04T14:57:21 | HTML | UTF-8 | R | false | false | 834 | r | server.R | library(shiny)
library(ggplot2)
library(leaflet)
library(shinydashboard)
#install.packages("readxl")
library(readxl)
function(input, output) {
#data <- read.table('../crime_data/table_1_crime_in_the_united_states_by_volume_and_rate_per_100000_inhabitants_1996-2015.xls')
data <- read_xls('../../Crime/Excel/01_crime_in_the_united_states_by_volume_and_rate_per_100000_inhabitants_1996-2015.xls',
range = "A4:V24")
data$Year <- strtrim(data$Year,4) # Some entries in the year had a superscript appended to the end. So this line makes it so that the year only has 4 characters.
crimerate <- reactive({
as.numeric(input$choice)
})
output$lineChart <- renderPlot({
ggplot(data, aes(x=data$Year, y=data[,crimerate()], group=1),xlim=input$Year) +
geom_line() + geom_point()
} )
} |
efd8c9d5cc27553dbd7b0418f2493ba5c2ccfeed | 2ede3a798b6f535fc131ae294f9fd01a7210f175 | /Day_1.R | c54bdd92e1016b398267e3e7af2346bcb5fa6d80 | [] | no_license | jessecolephillips/BioStats_2021 | e41251ed92ea486ecd03dfab880bdfe03ff2a0a1 | 96a29b58a0ac65d751bfe02983516c936c566ada | refs/heads/master | 2023-04-13T08:42:13.981867 | 2021-04-22T10:59:14 | 2021-04-22T10:59:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,080 | r | Day_1.R | #R BioStats 2021
#Day 1 - Types of Data
#19/04/2021
#Jesse Phillips
library(tidyverse)
library(e1071) #package containing 'kurtosis()'
#ChickWeight: built-in data set of repeated chick weight measurements under 4 diets
chicks <- as_tibble(ChickWeight)
#Lets scrutinize the data
head(chicks)
tail(chicks, 2)
colnames(chicks)
summary(chicks)
str(chicks)
class(chicks$weight)
#What is our total sample size?
nrow(chicks)
unique(chicks$Chick)
#Note distinction between 'nrow()' and 'true' sample size:
#nrow() counts repeated measurements, not independent subjects
#Sample size = 50 (the number of distinct chicks listed by unique() above)
#Calculate mean/sd/median/kurtosis of weight of chicks at day 20 grouped by Diets
chicks %>%
  group_by(Diet) %>%
  filter(Time == 20) %>%
  summarise(mean_weight = mean(weight),
            sd_weight = sd(weight),
            med_weight = median(weight),
            kt_weight = kurtosis(weight),
            min_wt = min(weight),
            qrt1_wt = quantile(weight, p = 0.25), #'p' partially matches quantile()'s 'probs' argument
            qrt2_wt = quantile(weight, p = 0.75),
            max_wt = max(weight))
#creating fictitious data to illustrate Missing Values
dat1 <- c(NA, 12, 76, 34, 23)
mean(dat1) #NA: missing values propagate through arithmetic by default
mean(dat1, na.rm = TRUE) #tells R to remove the NA from mean calculation
|
9e88038462e5ac5ff3d67925177ea907a628f726 | cf6ac3bcf41832aba2be21c66b0b64bda520a53b | /Zadanie7.R | 6b9428fa774cc5c35f8f6d090f86a14d37e33f37 | [] | no_license | Luibov/MatMod | eac0fc1b4b9c96df796e530a1b86caf86fe46be4 | 9dcbe35d231382ad0999656e1462599b23d62a97 | refs/heads/master | 2021-01-23T06:34:50.141674 | 2017-03-30T18:31:01 | 2017-03-30T18:31:01 | 86,377,171 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 92 | r | Zadanie7.R | ggplot (iris, aes(x= Petal.Length, y= Sepal.Length, col=Species)) + geom_point(alpha = 0.4)
|
eb73d829bc41a61774be6c288c1bf489b8e2e4d9 | 77311e6f219c5c03517aecfd28f1726818d11a16 | /solution.R | 9ee3dddebd6ff71ebf9f879bb798abf7af9681e0 | [] | no_license | anand-anish/Club-Mahindra-DataOlympics | 480f68eaadf974f95b4002360d93301a0f6238ee | e335307c9ef9f1e53fbc0a3fcff579f9aee3787f | refs/heads/master | 2022-01-09T09:52:51.136523 | 2019-05-07T08:25:50 | 2019-05-07T08:25:50 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,704 | r | solution.R | # Loading the packages
library(Hmisc)
library(dplyr)
library(lubridate)
library(stringr)
library(nlme)
library(randomForest)
library(xgboost)
library(ggplot2)
library(dplyr)
library(caret)
library(moments)
library(glmnet)
library(elasticnet)
library(knitr)
# Import the datasets
train <- read_csv("../input/train.csv")
test <- read_csv("../input/test.csv")
submission <- read_csv("../input/sample_submission.csv")
# combine the two datasets
train$data_flag <-"train"
test$amount_spent_per_room_night_scaled <-NA
test$data_flag <-"test"
nrow(train)
nrow(test)
summary(train$amount_spent_per_room_night_scaled)
hist((train$amount_spent_per_room_night_scaled))
# rescaled
hist(exp(train$amount_spent_per_room_night_scaled-1))
# combine the train and test
combi <- bind_rows(train,test)
# Missing Values
miss_cols=sapply(combi, function(x){sum(is.na(x))/length(x)}*100)
miss_cols
# Data Pre processing
# Missing Values imputation
combi$season_holidayed_code[is.na(combi$season_holidayed_code)] <- -1
combi$state_code_residence[is.na(combi$state_code_residence)] <- -1
# Date prcessing
combi$booking_date <- as.Date(combi$booking_date,format="%d/%m/%y")
combi$checkin_date <- as.Date(combi$checkin_date,format="%d/%m/%y")
combi$checkout_date <- as.Date(combi$checkout_date,format="%d/%m/%y")
#endcoding the unique ids
vars <- c("member_age_buckets","memberid","cluster_code","reservationstatusid_code","resort_id")
combi[,vars] <- lapply(combi[,vars],function(x){as.numeric(as.factor(x))})
# Feature Engineering
# Generate new features
# 1. Dates
# In some records, the booking date is more than checkin date.
# Inferece : Might be because of the fact that the booking date was missing and the rows were generated based on the current system date.
# Solution : We can replace such values with the checkin day, assuming those people directly approached and booked the hotel
combi$booking_date_greater_than_checkin_flag <- ifelse(combi$booking_date>combi$checkin_date,1,0)
combi$booking_date[combi$booking_date>combi$checkin_date] <- combi$checkin_date[combi$booking_date>combi$checkin_date]
combi$booking_mnth <- month(combi$booking_date)
combi$checkin_mnth <- month(combi$checkin_date)
combi$pre_booking <- as.numeric(combi$checkin_date-combi$booking_date)
# pre booking months
combi$pre_booking <- ifelse(combi$pre_booking>=0 & combi$pre_booking<=30,1,
ifelse(combi$pre_booking>30 & combi$pre_booking<=60,2,
ifelse(combi$pre_booking>60 & combi$pre_booking<=90,3,4)))
combi$booking_day <- as.numeric(as.factor(weekdays(combi$booking_date)))
combi$checkin_day <- as.numeric(as.factor(weekdays(combi$checkin_date)))
combi$stay_days <- as.numeric(combi$checkout_date - combi$checkin_date)
combi <- combi[combi$roomnights!=-45,]
# in some cases, we see that the roomnights is not same as the calculated stay_days. Might be extended stays or early checkouts
combi$early_checkout <- ifelse(combi$roomnights>combi$stay_days,1,0)
combi$extended_stays <- ifelse(combi$roomnights<combi$stay_days,1,0)
# 2. Members
# In some entries, total person travelling is not matching the sum of adults+children
# Inference : might be because of newborns, and they were not registered while booking
# Newly weds are more likely to go on trips
combi$newborns <- ifelse(combi$total_pax!=(combi$numberofadults+combi$numberofchildren),1,0)
# 3. check if resort and residence are in the same state
combi$same_area <- ifelse(combi$state_code_residence==combi$state_code_resort,1,0)
# remove the insignificant variables
combi$memberid <- NULL
combi$extended_stays <- NULL
# splitting the data back to train and test
train <- combi[combi$data_flag=="train",]
test <- combi[combi$data_flag=="test",]
target_var <- train$amount_spent_per_room_night_scaled
test_var <- test$amount_spent_per_room_night_scaled
train$data_flag<-NULL
test$data_flag<-NULL
#test$amount_spent_per_room_night_scaled <-NULL
nrow(train)
nrow(test)
set.seed(1234)
# Root-mean-squared error between predictions `m` and observed values `o`.
# Both arguments are numeric vectors of equal length; returns one number.
RMSE <- function(m, o) {
  squared_errors <- (m - o)^2
  sqrt(mean(squared_errors))
}
# xgb
# xgboost training parameters
# BUG FIX: `booster` was previously assigned to a free-standing variable
# instead of an entry of `params`, so xgb.train() never saw it and silently
# fell back to the default "gbtree" booster; the linear booster was the
# intent of the original line.
params <- list()
params$booster = "gblinear"
params$objective = "reg:linear"
params$eval_metric <- "rmse"
# Converting the data frame to matrix
xgtrain1 <-xgb.DMatrix(data=as.matrix(train[,!(colnames(train) %in% c('reservation_id','booking_date','checkin_date','checkout_date','reservationstatusid_code',"amount_spent_per_room_night_scaled"))]),label=as.matrix(target_var),missing = NA)
xgtest1 <- xgb.DMatrix(data= as.matrix(test[,!(colnames(test) %in% c('reservation_id','booking_date','checkin_date','checkout_date','reservationstatusid_code',"amount_spent_per_room_night_scaled"))]), missing = NA)
# cross-validation
#model_xgb_cv <- xgb.cv(params = params, xgtrain1, nfold=10, nrounds=1000,eta=0.01,max_depth=10,subsample=0.8,min_child_weight=12)
model_xgb_1 <- xgb.train(params = params, xgtrain1,nrounds=1000,eta=0.01,subsample=0.8,min_child_weight=4)
model_xgb_2 <- xgb.train(params = params, xgtrain1,nrounds=1500,eta=0.01,subsample=0.6)
# variable importance
#eat_imp<-data.frame(xgb.importance(feature_names=colnames(train[,!(colnames(train) %in% c("ID","Premium"))]), model=model_xgb_1))
# scoring
xgb_pred_1 <- predict(model_xgb_1, xgtest1)
xgb_pred_2 <- predict(model_xgb_2, xgtest1)
#weighted average of both predictions
xgb_pred <- 0.6*xgb_pred_1 + 0.4*xgb_pred_2
#RMSE(test_var,xgb_pred)
# xgb model
submission_xgb <- data.frame("reservation_id"=test$reservation_id,"amount_spent_per_room_night_scaled"=xgb_pred)
write_csv(submission_xgb,"submission_xgb_tuned.csv")
|
57fa2ea21a7520fc7abb32a2eafb76cf6f36fa45 | 1a9ed5f4ec0fda1436d202bf26b0262dce7d2ada | /man/runEnrichment.Rd | 3f0f546c7dbd11ddcda7da7ec359ae7e7ae80059 | [] | no_license | nstroustrup/HelpingHand | e204ce313ee45e67af79804a1ecd3f49c84795bd | fdb4fb91a6caec14ea3652f62d3b62f57419a69e | refs/heads/master | 2022-12-28T21:33:07.415053 | 2020-10-09T17:25:10 | 2020-10-09T17:25:10 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,119 | rd | runEnrichment.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/enrichmentAnalysis.R
\name{runEnrichment}
\alias{runEnrichment}
\title{Run enrichment analysis of a gene set for C. elegans}
\usage{
runEnrichment(
gene,
universe = NULL,
keytype = "WORMBASE",
pvalueCutoff = 1,
use_internal_data = TRUE
)
}
\arguments{
\item{gene}{a vector of entrez gene id.}
\item{universe}{background genes. If missing, the all genes listed in the database (eg TERM2GENE table) will be used as background.}
\item{keytype}{the keytype that matches the keys used. For the
\code{select} methods, this is used to indicate the kind of ID being used
with the keys argument. For the \code{keys} method this is used to
indicate which kind of keys are desired from \code{keys}
}
\item{pvalueCutoff}{pvalue cutoff on enrichment tests to report}
\item{use_internal_data}{logical, use KEGG.db or latest online KEGG data}
}
\value{
A list of \code{enrichResult} for KEGG and GO annotations.
}
\description{
Wrapper function around \link[clusterProfiler]{enrichKEGG} and \link[clusterProfiler]{enrichGO}.
}
|
3012fa80f96af13796e16e478b746c99281ae709 | d86014d2282f91f6d3e1a15097d8f469b91dd7de | /R/FactorGraph.R | cf31355e0d83548a151fe4c044ffb44799c314f6 | [] | no_license | jmbh/mgm | 2a4094e6a696bd652ab897a467cc3992820ca968 | 95a21052bc84ad4014821309e00f1d609ef5cf77 | refs/heads/master | 2023-02-24T10:10:53.607201 | 2023-02-10T08:19:10 | 2023-02-10T08:19:10 | 41,727,916 | 29 | 10 | null | 2022-08-19T02:31:29 | 2015-09-01T08:56:08 | R | UTF-8 | R | false | false | 5,116 | r | FactorGraph.R | # jonashaslbeck@gmail.com; March 2016
# Compute (and by default plot) the factor-graph representation of a fitted
# mgm/tvmgm model: variables become nodes and interactions become factor
# nodes connected to the variables they involve.
#
# Arguments:
#   object         fit object; a tvmgm object (class "tvmgm") triggers the
#                  time-varying branch and requires `estpoint`
#   labels         variable labels; defaults to 1:p
#   PairwiseAsEdge collapse pairwise factors into plain edges
#   Nodewise       nodewise display; forces PairwiseAsEdge = FALSE
#   DoNotPlot      if TRUE, return the FactorGraph object without plotting
#   FactorLabels   label factor nodes with the order of their interaction
#   colors, shapes node colours/shapes indexed by interaction order + 1
#   shapeSizes     node sizes: c(<variable nodes>, <factor nodes>)
#   estpoint       estimation point index (time-varying objects only)
#   negDashed      draw negative (red) edges dashed, for greyscale figures
#   ...            forwarded to qgraph(); `cut`, `layout`, `edge.color` and
#                  `edge.labels` are intercepted and handled below
#
# Returns the FactorGraph object (invisibly when a plot was drawn): a list
# holding the weighted graph, node types/orders, edge signs/colours and the
# qgraph object.
FactorGraph <- function(object,
                        labels,
                        PairwiseAsEdge = FALSE,
                        Nodewise = FALSE,
                        DoNotPlot = FALSE,
                        FactorLabels = TRUE,
                        colors,
                        shapes,
                        shapeSizes = c(8, 4),
                        estpoint = NULL,
                        negDashed = FALSE,
                        ...)
{
  # --------- Compute Aux Variables ---------
  if(Nodewise) PairwiseAsEdge <- FALSE
  p <- length(object$call$level)
  n_estpoints <- length(object$call$estpoints)
  # --------- Input Checks ---------
  if(!missing(labels)) if(length(labels) != p) stop("Number of provided labels has to match the number of variables.")
  # Checks for time-varying FactorGraph
  if("tvmgm" %in% class(object)) {
    if(missing(estpoint)) stop("Specify the estimation point for which the factor graph should be visualized.")
    if(estpoint > n_estpoints) stop(paste0("The provided fit object has only ", n_estpoints, " estimation points."))
  }
  if(object$call$k > 4) stop("Please specify additional colors/shapes for interactions with order > 4.")
  # --------- Create FactorGraph object ---------
  call <- list("object" = object)
  FG_object <- list("call" = call,
                    "graph" = NULL,
                    "nodetype" = NULL,
                    "order" = NULL,
                    "signs" = NULL,
                    "edgecolor" = NULL,
                    "nonzero" = NULL,
                    "qgraph" = NULL)
  # --------- Fill in defaults ---------
  if(missing(labels)) labels <- 1:p
  if(missing(colors)) colors <- c("white", "tomato", "lightblue", "orange")
  if(missing(shapes)) shapes <- c("circle", "square", "triangle", "diamond")
  layout <- "circle"
  cut <- 0
  # --------- Compute Factor Graph ----------
  # Call different DrawFG() version for stationary/time-varying
  if("tvmgm" %in% class(object)) {
    # Time-varying
    FG <- DrawFGtv(object = object,
                   PairwiseAsEdge = PairwiseAsEdge,
                   Nodewise = Nodewise,
                   estpoint = estpoint)
  } else {
    # Stationary
    FG <- DrawFG(object = object,
                 PairwiseAsEdge = PairwiseAsEdge,
                 Nodewise = Nodewise)
  }
  # Save into FG_object
  FG_object$graph <- FG$weightedgraph
  FG_object$nodetype <- FG$nodetype
  FG_object$order <- FG$order
  FG_object$signs <- FG$signs
  FG_object$edgecolor <- edge.color <- FG$signcolor
  FG_object$nonzero <- FG$nonzero
  # Allow overwriting of the intercepted qgraph arguments via ...
  args <- list(...)
  if(!is.null(args$cut)) cut <- args$cut
  if(!is.null(args$layout)) layout <- args$layout
  if(!is.null(args$edge.color)) edge.color <- args$edge.color
  # browser()
  # Adapt edge labels for zero edges in Nodewise=TRUE
  if(!is.null(args$edge.labels)) { # if specified, otherwise set to FALSE
    if(is.logical(args$edge.labels)) { # if specified and logical, then adapt for nonzero or FALSE
      if(args$edge.labels) {
        # label edges with their rounded weights, but blank out entries
        # flagged as zero (nonzero == 2)
        edge.labels <- FG_object$graph
        edge.labels[FG_object$nonzero == 2] <- 0
        edge.labels <- round(edge.labels, 2)
      } else {
        edge.labels = FALSE
      }
    } else {
      # if not logical, take the input
      edge.labels <- args$edge.labels
    }
  } else {
    edge.labels = FALSE
  }
  # Edge lty: allow negative edges to be dashed for greyscale images
  edge_lty <- FG_object$nonzero
  if(negDashed) edge_lty[edge.color == "red"] <- 2
  # --------- Plot & Return ---------
  if(!DoNotPlot){
    # ----- Compute stuff necessary for plotting -----
    # Create labels for factors (label = order of factor/interaction)
    # ek: label offset -- when pairwise interactions are drawn as edges,
    # the first factor nodes already represent 3-way interactions
    ifelse(PairwiseAsEdge, ek <- 1, ek <- 0)
    if(FactorLabels) {
      tb <- table(FG_object$order)[-1]
      if(length(tb)==0) { # For the case PairwiseAsEdge=FALSE and no 3-way interactions
        FL <- NULL
      } else {
        l_lf <- list()
        for(k in 1:length(tb)) l_lf[[k]] <- rep(k+1+ek, tb[k])
        FL <- unlist(l_lf)
      }
      labels_ex <- c(labels, FL)
    } else {
      labels_ex <- c(labels, rep('', sum(FG_object$nodetype)))
    }
    # ----- Call qgraph -----
    qgraph_object <- qgraph(FG_object$graph,
                            color = colors[FG_object$order + 1],
                            edge.color = edge.color,
                            lty = edge_lty,
                            layout = layout,
                            labels = labels_ex,
                            shape = shapes[FG_object$order + 1],
                            vsize = shapeSizes[FG_object$nodetype + 1],
                            edge.labels = edge.labels,
                            cut = cut,
                            ...)
    FG_object$qgraph <- qgraph_object
    invisible(FG_object) # return output object invisible
  } else {
    return(FG_object)
  }
} # eoF
b6c50f6d4317ca4106a7a82f64120e3ba330cbb5 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/DataVisualizations/examples/BoxplotData.Rd.R | af8b02e8667a6d4049ddde807d18d1863d8c97eb | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 231 | r | BoxplotData.Rd.R | library(DataVisualizations)
### Name: BoxplotData
### Title: Boxplots for multiple variables
### Aliases: BoxplotData
### Keywords: plot
### ** Examples
x <- cbind(A = rnorm(200, 1, 3), B = rnorm(100, -2, 5))
BoxplotData(x)
|
3457d1c4e1a7bc606be01feb94e835472a007b97 | ce71a1b5374b417a263004d8618b1e7d534de597 | /PI_CI.R | 5a9a44287d887d70c7c084ed36bea853d3ad9152 | [] | no_license | yuanqingye/Statistics | 689c602345385453a738bc581666e979d0c91707 | a51275311fea1152b0c78350e41506ffb6d4c4e2 | refs/heads/master | 2021-08-08T00:35:07.290450 | 2020-04-01T04:15:37 | 2020-04-01T04:15:37 | 136,457,383 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,148 | r | PI_CI.R | set.seed(123)
hemoglobin<-rnorm(400, mean = 139, sd = 14.75)
df<-data.frame(hemoglobin)
CI<-predict(lm(df$hemoglobin~ 1), interval="confidence")
CI[1,]
PI<-predict(lm(df$hemoglobin~ 1), interval="predict")
PI[1,]
library(ggplot2)
limits_CI <- aes(x=1.3 , ymin=CI[1,2], ymax =CI[1,3])
limits_PI <- aes(x=0.7 , ymin=PI[1,2], ymax =PI[1,3])
PI_CI<-ggplot(df, aes(x=1, y=hemoglobin)) +
geom_jitter(width=0.1, pch=21, fill="grey", alpha=0.5) +
geom_errorbar (limits_PI, width=0.1, col="#1A425C") +
geom_point (aes(x=0.7, y=PI[1,1]), col="#1A425C", size=2) +
geom_errorbar (limits_CI, width=0.1, col="#8AB63F") +
geom_point (aes(x=1.3, y=CI[1,1]), col="#8AB63F", size=2) +
scale_x_continuous(limits=c(0,2))+
scale_y_continuous(limits=c(95,190))+
theme_bw()+ylab("Hemoglobin concentration (g/L)") +
xlab(NULL)+
geom_text(aes(x=0.6, y=160, label="Prediction\ninterval",
hjust="right", cex=2), col="#1A425C")+
geom_text(aes(x=1.4, y=140, label="Confidence\ninterval",
hjust="left", cex=2), col="#8AB63F")+
theme(legend.position="none",
axis.text.x = element_blank(),
axis.ticks.x = element_blank())
PI_CI
Hb<- read.table("http://rforbiostatistics.colmanstatistics.be/wp-content/uploads/2018/06/Hb.txt",
header = TRUE)
library(knitr)
kable(head(Hb))
plot(Hb$New, Hb$Reference,
xlab="Hemoglobin concentration (g/L) - new method",
ylab="Hemoglobin concentration (g/L) - reference method")
fit.lm <- lm(Hb$Reference ~ Hb$New)
plot(Hb$New, Hb$Reference,
xlab="Hemoglobin concentration (g/L) - new method",
ylab="Hemoglobin concentration (g/L) - reference method")
cat ("Adding the regression line:")
abline (a=fit.lm$coefficients[1], b=fit.lm$coefficients[2] )
cat ("Adding the identity line:")
abline (a=0, b=1, lty=2)
CI_ex <- predict(fit.lm, interval="confidence")
colnames(CI_ex)<- c("fit_CI", "lwr_CI", "upr_CI")
PI_ex <- predict(fit.lm, interval="prediction")
## Warning in predict.lm(fit.lm, interval = "prediction"): predictions on current data refer to _future_ responses
colnames(PI_ex)<- c("fit_PI", "lwr_PI", "upr_PI")
Hb_results<-cbind(Hb, CI_ex, PI_ex)
kable(head(round(Hb_results),1))
plot(Hb$New, Hb$Reference,
xlab="Hemoglobin concentration (g/L) - new method",
ylab="Hemoglobin concentration (g/L) - reference method")
Hb_results_s <- Hb_results[order(Hb_results$New),]
lines (x=Hb_results_s$New, y=Hb_results_s$fit_CI)
lines (x=Hb_results_s$New, y=Hb_results_s$lwr_CI,
col="#8AB63F", lwd=1.2)
lines (x=Hb_results_s$New, y=Hb_results_s$upr_CI,
col="#8AB63F", lwd=1.2)
lines (x=Hb_results_s$New, y=Hb_results_s$lwr_PI,
col="#1A425C", lwd=1.2)
lines (x=Hb_results_s$New, y=Hb_results_s$upr_PI,
col="#1A425C", lwd=1.2)
abline (a=0, b=1, lty=2)
library (BivRegBLS)
Hb.BLS = BLS (data = Hb, xcol = c("New"),
ycol = c("Reference"), var.y=10, var.x=8, conf.level=0.95)
XY.plot (Hb.BLS,
yname = "Hemoglobin concentration (g/L) - reference method",
xname = "Hemoglobin concentration (g/L) - new method",
graph.int = c("CI","PI")) |
50635349a90d340163f55fbbd9eed98aa3b338bb | e53d28d4649334be5ddb8c10a1ed41a7dd6b2a76 | /src/pop_density.R | 1d155a143c005252f5181c7a44b985f5359e04c4 | [] | no_license | hamishgibbs/msoa_case_data | 0c9edf194f2df85c1f39a11ee600b30be42e8b78 | 4d609e5f10d7f955b65bf64cfa92707e432157be | refs/heads/master | 2023-01-31T01:09:33.200699 | 2020-12-16T15:07:19 | 2020-12-16T15:07:19 | 320,336,839 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,271 | r | pop_density.R | # -- Template by bubble with <3. --
# Script to compute population density in MSOAs
# (Middle Layer Super Output Areas): joins mid-2019 population estimates
# onto MSOA areas and writes persons per square kilometre to csv.
# Load libraries (require() wrapped to silence startup chatter)
suppressPackageStartupMessages({
  require(tidyverse)
})
# Define args interactively or accept commandArgs
# .args[1] = population xlsx, .args[2] = area csv, last element = output csv
if(interactive()){
  .args <- c("/Users/hamishgibbs/Documents/Covid-19/msoa_data/data/raw/population/SAPE22DT4-mid-2019-msoa-syoa-estimates-unformatted.xlsx",
             "/Users/hamishgibbs/Documents/Covid-19/msoa_data/data/raw/geodata/area.csv",
             "/Users/hamishgibbs/Documents/Covid-19/msoa_data/data/processed/msoa_pop_density.csv")
} else {
  .args <- commandArgs(trailingOnly = T)
}
# Population per MSOA: sheet 4 of the workbook, first 3 header rows skipped
pop <- readxl::read_excel(.args[1], sheet = 4, skip = 3) %>%
  rename(msoa_code = `MSOA Code`,
         pop = `All Ages`) %>%
  select(msoa_code, pop)
# MSOA areas supplied in hectares; 100 ha = 1 km^2
area <- read_csv(.args[2], col_types = cols(LAD11NMW = col_character())) %>%
  rename(msoa_code = MSOA11CD,
         area_hect = AREAEHECT) %>%
  select(msoa_code, area_hect) %>%
  mutate(area_km = area_hect / 100)
res <- area %>%
  left_join(pop, by = c('msoa_code')) %>%
  mutate(pop_density = pop / area_km) %>%
  select(msoa_code, area_km, pop, pop_density)
# Runtime guard: every MSOA in the area table must have matched a population row
testthat::expect_equal(res %>% filter(is.na(pop)) %>% pull(1) %>% length(), 0)
# Save csv result
write_csv(res, tail(.args, 1))
|
0615725a83080f4e25f3554cae908b8fbf378306 | 4ace9f7146284a7dea3817683849b515c1e5713f | /plot3.R | ddc0788dcd93ca448948ed0d184c3bdda931ffb1 | [] | no_license | Jskywalkergh/ExData_Plotting1 | a893ceee9d6a86027e1d5801a0ed06e1bf2024d9 | dfddfb50af1d6167d4a551690600fd71d966300c | refs/heads/master | 2020-12-11T07:40:39.956689 | 2015-12-13T20:26:00 | 2015-12-13T20:26:00 | 47,934,923 | 0 | 0 | null | 2015-12-13T20:22:50 | 2015-12-13T20:22:49 | null | UTF-8 | R | false | false | 809 | r | plot3.R | #Author: Jian Shi, Univ. of Michigan.
setwd("/Data/Coursera/proj") # assumes the raw data file lives in this directory
df=read.table("household_power_consumption.txt", sep=";",header=TRUE,stringsAsFactors=FALSE)
#Only use the data of this time per the assignment
data <- df[df$Date %in% c("1/2/2007","2/2/2007") ,]
# Combine Date and Time into POSIX timestamps for the x axis
dt <- strptime(paste(data$Date, data$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
# as.numeric() coerces any non-numeric entries in these character columns to
# NA (the raw file presumably marks missing values with "?" -- TODO confirm).
# NOTE(review): GAP is not used by this plot; looks like it is kept for
# parity with the other plotN.R scripts of the assignment.
GAP <- as.numeric(data$Global_active_power)
sm1 <- as.numeric(data$Sub_metering_1)
sm2 <- as.numeric(data$Sub_metering_2)
sm3 <- as.numeric(data$Sub_metering_3)
# Render the three sub-metering series to a 480x480 PNG
png("plot3.png",width=480,height=480)
plot(dt, sm1, type="l",xlab="", ylab="Energy Submetering")
lines(dt, sm2, type="l", col="red")
lines(dt, sm3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
dev.off()
|
d10a8f7911691b6a03b2f83ffdab1e1e97cbea8d | d243932b54cfc6fb0d8b03c79f7ec09d6ca7dbe0 | /shiny/tests/testthat/test-select_filter.R | 1d49cd134dcebccf285d9b9a4dc88eea013682fd | [] | no_license | alexverse/shiny_vessels | 8e94cea9833fb7108e5f1bcf342e27af366f3588 | e2217d28e4100b6f98d401e090da10f75a8b94d9 | refs/heads/main | 2023-04-23T23:28:12.753449 | 2021-05-10T06:06:16 | 2021-05-10T06:06:16 | 364,634,328 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 463 | r | test-select_filter.R | test_that("check dropdown filters utils", {
#return all if no selected filter
  # %inT% is a project helper; per these assertions it matches everything
  # when the filter is NULL or the empty string.
  expect_true(all(1:3 %inT% NULL))
  expect_true(all(1:3 %inT% ""))
  # These are the vectors from the selected filters; in the app the input
  # reactives supply them in the same shape.
  args <- list(
    vessel_type = c(8, 7),
    vessel_name = c(1960, 2006)
  )
  # NOTE(review): network-dependent fixture -- this test fails offline.
  dat <- data.table::fread("https://aledat.eu/shiny/vessels/results/vessels.csv")
  # vars_dt is assumed to come from the package/test setup -- confirm.
  x <- filter_data(args, vars_dt, dat)
  expect_equal(nrow(x), 2)
})
|
b9e0ebe096068caa08a7844d86ed42b862ded81e | 7950d582ff90f0b616bc84cf14d3c52cf3132a4c | /Lab and Lecture tasks/Lab 2/Lab 2 assignment.R | ac963c2b5bf405fd705b10751fce1c982f77b566 | [] | no_license | bilalhoda1/Statistics | ae62d765c30174ac8f14a1ee56cd3450899aea10 | 6a98494e497d72b26635895beef80f386ebbfb6a | refs/heads/main | 2023-01-04T18:45:42.798762 | 2020-11-01T19:27:37 | 2020-11-01T19:27:37 | 309,116,380 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,805 | r | Lab 2 assignment.R | #setwd() to set working directory
#setwd("D:/bilal's books 8/Lie Detector/Lab assignments/Lab 2 assignment")
#installed mlbench package using install.packages('mlbench')
#used library command to load the package
library(mlbench)
#used data to load the PimaIndiansDiabetes dataset
data(PimaIndiansDiabetes)
pima <- PimaIndiansDiabetes
pima
#Question 1
#using str function to get a summary of the dataset
#the summary would contain number of observations, variables, class of each variable
#There are 9 variables, 768 observations in the dataset
#The class of diabetes is factor and the class of remaining variables is num
str(pima)
#Question 2
#We use the subset function which returns
#subsets of vectors, matrices or data frames which meet conditions.
#DataFrame1
subset(pima,pregnant >5 & glucose>150)
#nrow(subset(pima,pregnant >5 & glucose>150))
#DataFrame2
#assuming to remove means selecting all those rows which donot have mass between 30 and 40
subset(pima,mass<30 | mass>40)
#nrow(subset(pima,mass<30 | mass>40))
#DataFrame3
subset(pima,age<50 & insulin<400)
#nrow(subset(pima,age<50 & insulin<400))
#DataFrame4
subset(pima,pedigree>1 & diabetes=='neg' & pressure>80)
#nrow(subset(pima,pedigree>1 & diabetes=='neg' & pressure>80))
#Question 3
#Assuming that removing the values means replacing the values with NA otherwise it doesn't make sense
#So there was a column with strings so when the matrix was formed it converted all the columns to numbers
#positive changed into 2 and negative changed into 1 in the diabetes column
#using data.matrix to convert data frame into matrix
#we selected the first 100 observations and the specified columns using [1:100,c(1,2,3,5,6,8,9)]
mat <- data.matrix(pima[1:100,c(1,2,3,5,6,8,9)])
mat
mat[mat==10 | mat==25 | mat==45 | mat==60] <- NA
mat
#Question 4
#The mean glucose levels of women between age 20 and 30 is 114.1751
# Vectorised form: build a logical mask for the 20-30 age band, then take
# the subset sum and count instead of accumulating row by row in a loop.
inRange <- pima$age >= 20 & pima$age <= 30
meanGluAge <- sum(pima$glucose[inRange])
count <- sum(inRange)
# Guard against an empty age band, where the division would be 0/0
meanBetween <- if (count == 0) 0 else meanGluAge / count
meanBetween
#The mean glucose level of all women is 120.8945
meanGlu <- mean(pima$glucose)
meanGlu
#normal blood sugar level is between 4.0 to 5.4 mmol/L (72 to 99 mg/dL) when fasting
#Up to 7.8 mmol/L (140 mg/dL) 2 hours after eating (Oral glucose tolerance test)
#The mean glucose level is 120.8945 and the mean for women between 20-30 is 114.1751
#Both of which lie in the normal range
#References: https://www.diabetes.co.uk/diabetes_care/blood-sugar-level-ranges.html
#https://www.mayoclinic.org/diseases-conditions/diabetes/diagnosis-treatment/drc-20371451
#Question 5
# Recode pregnancy history as a two-level flag without an explicit loop:
# 'N' when the woman has never been pregnant (pregnant == 0), 'Y' otherwise.
pregnancy <- ifelse(pima$pregnant > 0, 'Y', 'N')
pregnancy
# Attach the flag to the data frame as a new column named pregnancy
pima$pregnancy <- pregnancy
pima
# Women who have been pregnant: filter on the flag and count rows.
# 657 women have been pregnant
nrow(pima[pima$pregnancy=='Y',])
# Same approach for women who were never pregnant.
# 111 women were not pregnant
nrow(pima[pima$pregnancy=='N',])
#Question 6
# Classify diastolic blood pressure with nested vectorised ifelse():
# > 80 -> 'high'; 40..80 -> 'average'; < 40 -> 'low'
pressureType <- ifelse(pima$pressure > 80, 'high',
                       ifelse(pima$pressure >= 40, 'average', 'low'))
pressureType
pima$pressuretype <- pressureType
#165 women have high blood pressure
nrow(pima[pima$pressuretype=='high',])
#564 women have average blood pressure
nrow(pima[pima$pressuretype=='average',])
#39 women have low blood pressure
nrow(pima[pima$pressuretype=='low',])
#77 women having high blood pressure are diabetic
nrow(pima[pima$pressuretype=='high' & pima$diabetes=='pos',])
#Question 7
# Bin ages into 5-year groups; for each bin print the average number of
# pregnancies and the single age with the most pregnancies.
minAge <- min(pima$age)
minAge
maxAge <- max(pima$age)
maxAge
#to make the bins of size 5, adjusting the max value of the loop
# FIX: take the adjustment modulo 5 as well. The original `5 - len %% 5`
# evaluates to 5 (not 0) when the age range is already a multiple of 5,
# which appended a whole extra, always-empty bin.
loop <- maxAge+((5-length(minAge:maxAge)%%5)%%5)
#initializing variables
count <-1
pregnancies <- 0
meanPregnancy <- 0
maximum <- 0
ageYear <- 0
rows <- 0
#running the loop from min age to max age
#maintaining a count so that when 5 years are done i.e. count%%5==0 print the average number of pregnancies and age with maximum pregnancy in the 5 year
#after that resetting all the values
for(i in minAge:loop){
  #finding age wise sum of pregnancies
  current <- sum(pima[pima$age==i,]$pregnant)
  pregnancies <- pregnancies + current
  #maintaining a record of number of entries for that particular age
  rows <- rows + nrow(pima[pima$age==i,])
  #if current sum of pregnancies is greater than the maximum update maximum
  # (ties keep the earliest age because the comparison is strict)
  if(current > maximum){
    maximum <- current
    ageYear <- i
  }
  #if count becomes 5 then outputting results for 5 years
  if(count%%5 ==0){
    #if no pregnancy in 5 years then the number of rows would be 0 so putting a check to detect 0/0 division
    if (rows==0){
      meanPregnancy <- 0
    }else {
      meanPregnancy <- pregnancies/rows
    }
    #outputting results
    print(paste("For age",i-4,'-',i,'the maximum number of pregnancies is',maximum, 'for age',ageYear))
    print(paste("For age",i-4,'-',i,'the average number of pregnancies is',meanPregnancy))
    count <- 0
    pregnancies <- 0
    rows <- 0
    maximum <- 0
    ageYear <- 0
  }
  count <- count + 1
}
#Question 8
# Same binning analysis as Q7, but restricted to non-diabetic women and
# using 10-year bins.
#keep only the non-diabetic women (comment fixed: the code retains 'neg')
nonDiab <- pima[pima$diabetes=='neg',]
nonDiab
minAge <- min(nonDiab$age)
minAge
maxAge <- max(nonDiab$age)
maxAge
#to make bins of size 10, adjusting the max value of the loop
# FIX: modulo-10 the adjustment; the original added a whole empty extra
# bin whenever the age range was already a multiple of 10.
loop <- maxAge+((10-length(minAge:maxAge)%%10)%%10)
#initializing variables
count <-1
pregnancies <- 0
meanPregnancy <- 0
maximum <- 0
ageYear <- 0
rows <- 0
#running the loop from min age to max age
#maintaining a count so that when 10 years are done i.e. count%%10==0 print the average number of pregnancies and age with maximum pregnancy in the 10 years
#after that resetting all the values
for(i in minAge:loop){
  #finding age wise sum of pregnancies
  current <- sum(nonDiab[nonDiab$age==i,]$pregnant)
  pregnancies <- pregnancies + current
  #maintaining a record of number of entries for that particular age
  rows <- rows + nrow(nonDiab[nonDiab$age==i,])
  #if current sum of pregnancies is greater than the maximum update maximum
  if(current > maximum){
    maximum <- current
    ageYear <- i
  }
  #if count becomes 10 then outputting results for 10 years
  if(count%%10 ==0){
    #if no pregnancy in 10 years then the number of rows would be 0 so putting a check to detect 0/0 division
    if (rows==0){
      meanPregnancy <- 0
    }else {
      meanPregnancy <- pregnancies/rows
    }
    #outputting results
    print(paste("For age",i-9,'-',i,'the maximum number of pregnancies is',maximum, 'for age',ageYear))
    print(paste("For age",i-9,'-',i,'the average number of pregnancies is',meanPregnancy))
    count <- 0
    pregnancies <- 0
    rows <- 0
    maximum <- 0
    ageYear <- 0
  }
  count <- count + 1
}
#Question 9
#installed plyr using install.packages('plyr')
#loaded the package using library
library(plyr)
#Plyr has functions for operating on lists, data.frames and arrays (matrices, or n-dimensional vectors).
#Each function performs:
#A splitting operation
#Apply a function on each split in turn.
#Recombine output data as a single data object.
# Each ddply() call below is at top level, so its grouped result data frame
# auto-prints; the expected values are recorded in the comments.
#Part i
#The first argument we gave was the data.frame we wanted to operate on: in this case the pima data.
#The second argument indicated our split criteria: in this case the “pressuretype” column.
#The third argument is the function we want to apply to each grouping of the data.
#Mean
#average 117.8387
#high 132.5697
#low 115.6923
ddply(
  .data = pima,
  .variables = "pressuretype",
  .fun = function(x) mean(x$glucose)
)
#Median
#average 112
#high 131
#low 115
ddply(
  .data = pima,
  .variables = "pressuretype",
  .fun = function(x) median(x$glucose)
)
#sd
#average 32.25350
#high 29.42917
#low 26.91718
ddply(
  .data = pima,
  .variables = "pressuretype",
  .fun = function(x) sd(x$glucose)
)
#minimum
#average 0
#high 61
#low 73
ddply(
  .data = pima,
  .variables = "pressuretype",
  .fun = function(x) min(x$glucose)
)
#maximum
#average 199
#high 196
#low 183
ddply(
  .data = pima,
  .variables = "pressuretype",
  .fun = function(x) max(x$glucose)
)
#Part ii
#considering pressure type and pregnancy together
#Not putting down values in comments otherwise it would take a lot of space
#and would be tedious
#Mean
ddply(
  .data = pima,
  .variables = c("pressuretype",'pregnancy'),
  .fun = function(x) mean(x$mass)
)
#Median
ddply(
  .data = pima,
  .variables = c("pressuretype",'pregnancy'),
  .fun = function(x) median(x$mass)
)
#sd
ddply(
  .data = pima,
  .variables = c("pressuretype",'pregnancy'),
  .fun = function(x) sd(x$mass)
)
#minimum
ddply(
  .data = pima,
  .variables = c("pressuretype",'pregnancy'),
  .fun = function(x) min(x$mass)
)
#maximum
ddply(
  .data = pima,
  .variables = c("pressuretype",'pregnancy'),
  .fun = function(x) max(x$mass)
)
#Part iii
#considering blood pressure types, pregnancy categories, and diabetes categories together
#Insulin level
#Mean
ddply(
  .data = pima,
  .variables = c("pressuretype",'pregnancy','diabetes'),
  .fun = function(x) mean(x$insulin)
)
#Glucose level
#Mean
ddply(
  .data = pima,
  .variables = c("pressuretype",'pregnancy','diabetes'),
  .fun = function(x) mean(x$glucose)
)
#Insulin level
#Median
ddply(
  .data = pima,
  .variables = c("pressuretype",'pregnancy','diabetes'),
  .fun = function(x) median(x$insulin)
)
#Glucose level
#Median
ddply(
  .data = pima,
  .variables = c("pressuretype",'pregnancy','diabetes'),
  .fun = function(x) median(x$glucose)
)
#Insulin level
#sd
ddply(
  .data = pima,
  .variables = c("pressuretype",'pregnancy','diabetes'),
  .fun = function(x) sd(x$insulin)
)
#Glucose level
#sd
ddply(
  .data = pima,
  .variables = c("pressuretype",'pregnancy','diabetes'),
  .fun = function(x) sd(x$glucose)
)
#Insulin level
#minimum
ddply(
  .data = pima,
  .variables = c("pressuretype",'pregnancy','diabetes'),
  .fun = function(x) min(x$insulin)
)
#Glucose level
#minimum
ddply(
  .data = pima,
  .variables = c("pressuretype",'pregnancy','diabetes'),
  .fun = function(x) min(x$glucose)
)
#Insulin level
#maximum
ddply(
  .data = pima,
  .variables = c("pressuretype",'pregnancy','diabetes'),
  .fun = function(x) max(x$insulin)
)
#Glucose level
#maximum
ddply(
  .data = pima,
  .variables = c("pressuretype",'pregnancy','diabetes'),
  .fun = function(x) max(x$glucose)
)
#Question 10
#the plot function makes a plot
#the legend function is used to make a legend for the plot
# Colouring trick: c("red","blue")[pima$diabetes] indexes by factor level,
# so level 1 (neg) plots red and level 2 (pos) plots blue.
#part i
plot(glucose~insulin, data = pima, type='p',pch=19,xlab="Insulin level (mu U/ml)", ylab="Glucose level (mg/dL)", col=c("red", "blue")[pima$diabetes], main="Insulin vs Glucose level of diabetic and non-diabetic women")
legend("topright", inset=c(0,0),legend = c(levels(pima$diabetes)), col = c("red", "blue"), pch = c(19,19))
#part ii
plot(insulin~pedigree, data = pima, type='p',pch=19,xlab="Pedigree factor", ylab="Insulin level (mu U/ml)", col=c("red", "blue")[pima$diabetes], main="Pedgiree factor vs Insulin level of diabetic and non-diabetic women")
legend("topright", inset=c(0,0),legend = c(levels(pima$diabetes)), col = c("red", "blue"), pch = c(19,19))
#part iii
#encoded the pregnancy labels Y and N to 17 and 16 which corresponds to triangle and circle
#triangle indicates pregnant
#circle indicates not pregnant
#red indicates non-diabetic
#blue indicates diabetic
shapes = c(16, 17)
# Map each row's pregnancy flag ('N'/'Y', factor levels 1/2) to its symbol
shapes <- shapes[as.numeric(as.factor(pima$pregnancy))]
plot(pressure~mass, data = pima, type='p',pch=c(shapes),xlab="Body Mass Index (weight in kg/(height in m)^2)", ylab="Blood Pressure (mm Hg)", col=c("red", "blue")[pima$diabetes], main="BMI vs Blood Pressure")
legend("topright", inset=c(0,0),legend = c(unique(pima$pregnancy),levels(pima$diabetes)), col = c("black",'black',"red", "blue"), pch = c(unique(shapes),15,15))
|
86278da2c5f5d35e28df017eeada5dffc5354a81 | a040bdcfb00ebedfba5e35a463d16d43c9569387 | /EnglishProficiency/EnglishProficiency.R | e7a37fb494d9769d780ae7e324314dedc54be493 | [] | no_license | mpudil/projects | 6a9ab02668be9ad6f5e0c4e9690026c9e41baa8f | b9795489011068a262e3e24b76fa0cc482eb7210 | refs/heads/master | 2022-07-13T09:45:24.974556 | 2021-01-29T18:52:41 | 2021-01-29T18:52:41 | 158,999,291 | 1 | 1 | null | 2022-06-22T03:06:53 | 2018-11-25T04:58:22 | Jupyter Notebook | UTF-8 | R | false | false | 19,996 | r | EnglishProficiency.R | require(xml2)
# English-proficiency (TOEFL) essay analysis: keyness, POS features,
# hedging, EDA, and predictive modeling. This section loads packages and
# sourced helpers, then builds the low/high scored essay corpora.
library(tidyverse)
library(quanteda)
library(readtext)
library(reshape2)
library(ggplot2)
library(ggpubr)
library(DescTools)
library(magrittr)
library(randomForest)
library(cluster)
library(corpus)
library(ggpubr)
source("C:/Users/Mitchell Pudil/Downloads/G_Test_DescTools.R")
# Let's load our functions.
# (keyness_pairs() and heatmapCreator() come from these sourced scripts)
setwd("C:/Users/Mitchell Pudil/Documents/textstat_tools/")
source("functions/helper_functions.R")
source("functions/keyness_functions.R")
source("C:/Users/Mitchell Pudil/Desktop/CMU1/Linear Models/heatmapcreator.R")
setwd("C:/Users/Mitchell Pudil/Documents/textstat_tools/data/meta_data")
files_meta <- read.csv("midterm_meta.csv", header=TRUE)
setwd("C:/Users/Mitchell Pudil/Documents/textstat_tools/")
files_list <- files_meta$file_path
# We now need to separate the files by grade
# - low
# - medium (commented out, but may be used for future work if desired)
# - high
low <- files_list[which(files_meta$test_score=="low")] %>% as.character
# And we'll do the same for the medium scores
# medium <- files_list[which(files_meta$test_score=="medium")] %>% as.character
# And high
high <- files_list[which(files_meta$test_score=="high")] %>% as.character
# Now we'll use the readtext function to extract the text.
df_low <- readtext(low)
# df_medium <- readtext(medium)
df_high <- readtext(high)
# Convert these into three corpora...
low_corpus <- corpus(df_low)
# medium_corpus <- corpus(df_medium)
high_corpus <- corpus(df_high)
# Quickly tokenize our corpora...
low_tokens <- tokens(low_corpus, what = "word", remove_punct = T)
# medium_tokens <- tokens(medium_corpus, what = "word", remove_punct = T)
high_tokens <- tokens(high_corpus, what = "word", remove_punct = T)
# Create our dfms...
low_dfm <- dfm(low_tokens)
# medium_dfm <- dfm(medium_tokens)
high_dfm <- dfm(high_tokens)
# Check our token frequencies...
textstat_frequency(low_dfm, n = 25)
# Word-level keyness of low vs. high essays (sourced helper)
# key_shakes_all <- keyness_pairs(low_dfm, medium_dfm, high_dfm)
key_shakes_hl <- keyness_pairs(low_dfm, high_dfm)
# Most significant words: i, you, he, eveverything, student, go,
# understanding, number, often, being, of would
# POS Keyness -------------------------------------------------------------
# Parse each essay with spaCy. NOTE(review): spacy_parse() belongs to the
# spacyr package, which is not in the library() list above -- presumably
# loaded/initialised by the sourced helper scripts; confirm.
sub_prsd_low <- spacy_parse(low_corpus, pos = TRUE, tag = TRUE)
# sub_prsd_medium <- spacy_parse(medium_corpus, pos = TRUE, tag = TRUE)
sub_prsd_high <- spacy_parse(high_corpus, pos = TRUE, tag = TRUE)
# sub_prsd_all <- rbind(sub_prsd_low, sub_prsd_medium, sub_prsd_high)
sub_prsd_all <- rbind(sub_prsd_low, sub_prsd_high)
# Build token_POS strings, then whittle them down to bare POS tags
sub_tokens <- as.tokens(sub_prsd_all, include_pos = "pos", concatenator = "_")
sub_tokens <- tokens_select(sub_tokens, "_[A-Z]", selection = "keep",
                            valuetype = "regex", case_insensitive = T)
sub_tokens <- tokens_select(sub_tokens, "\\W_", selection = "remove",
                            valuetype = "regex")
sub_tokens <- tokens_select(sub_tokens, "\\d_", selection = "remove",
                            valuetype = "regex")
# Strip everything up to the last "_", leaving only the POS tag per token
sub_tokens <- lapply(sub_tokens, function(x) gsub(pattern = ".*_", "", x)) %>% as.tokens()
sub_dfm <- dfm(sub_tokens)
# Separate low/high
# NOTE(review): hard-codes exactly 100 low docs followed by 100 high docs
# -- confirm against the corpus sizes before reuse.
docvars(sub_dfm, "score") <- c(rep("low", 100), rep("high", 100))
low_index <- docvars(sub_dfm, "score") == "low"
# medium_index <- docvars(sub_dfm, "score") == "medium"
high_index <- docvars(sub_dfm, "score") == "high"
# High index is target
report_keywords <- textstat_keyness(sub_dfm, high_index, measure = "lr")
report_keywords
high_keywords <- textstat_keyness(sub_dfm, high_index, measure = "lr") %>% data.frame
high_keywords$feature <- toupper(high_keywords$feature)
high_keywords
# Find the 5 POS that differentiates low and high the most.
arrange(high_keywords, desc(abs(G2)))$feature[1:5]
# "ADJ" "PRON" "PUNCT" "ADP" "ADV"
# Create columns for POS, average word length, number of words, etc -------------
# Builds one feature row per essay: POS counts, POS-bigram counts, counts
# of the 12 most discriminating words, plus length/diversity statistics.
# Determine top 10 words that differentiate low vs. high
# (head + tail of the keyness table: 6 high-marked + 6 low-marked words)
top10words <- c(key_shakes_hl %>% head %>% rownames, key_shakes_hl %>% tail %>% rownames)
pos_cols <- data.frame(file_path = unique(sub_prsd_all$doc_id), words=NA, av_word_len = NA,
                       sentences = NA, uniquewords = NA)
# One NA column per discriminating word; filled per document below
for(i in 1:length(top10words)){
  pos_cols <- cbind(pos_cols, NA)
  colnames(pos_cols)[ncol(pos_cols)] <- top10words[i]
}
for(j in 1:nrow(pos_cols)){
  # All parsed tokens belonging to document j
  df <- sub_prsd_all[which(sub_prsd_all$doc_id==pos_cols$file_path[j]),]
  # Create bigrams of parts of speech
  for(k in 1:nrow(df)){
    df$nextpos[k] <- ifelse(k==nrow(df), NA, df$pos[k+1])
  }
  df$bigram <- paste(df$pos, df$nextpos, sep = "-")
  # Add all parts of speech to pos_cols df
  # (new POS columns are appended lazily the first time a tag appears)
  t <- table(df$pos)
  for(i in 1:nrow(t)){
    if(names(t[i]) %in% colnames(pos_cols)){
      w <- which(names(t[i]) == colnames(pos_cols))
      pos_cols[j,w] <- t[i]
    } else {
      pos_cols <- cbind(pos_cols, 0)
      colnames(pos_cols)[ncol(pos_cols)] <- names(t[i])
      pos_cols[j,ncol(pos_cols)] <- t[i]
    }
  }
  # Add bigram pos to pos_cols df
  b <- table(df$bigram)
  for(i in 1:nrow(b)){
    if(names(b[i]) %in% colnames(pos_cols)){
      w <- which(names(b[i]) == colnames(pos_cols))
      pos_cols[j,w] <- b[i]
    } else {
      pos_cols <- cbind(pos_cols, 0)
      colnames(pos_cols)[ncol(pos_cols)] <- names(b[i])
      pos_cols[j,ncol(pos_cols)] <- b[i]
    }
  }
  # Top 10 numbers
  # NOTE(review): columns 6:17 are assumed to be the 12 word columns added
  # above (after the 5 fixed columns) -- hard-coded positions; confirm if
  # the leading columns ever change.
  for(i in 6:17){
    pos_cols[j,i] <- sum(df$token %>% tolower == colnames(pos_cols)[i])
  }
  # Number of unique words
  pos_cols$uniquewords[j] <- filter(df, pos!="PUNCT" & pos!="SPACE")$token %>% tolower %>% unique %>% length
  # Average word length
  pos_cols$av_word_len[j] <- mean(nchar(df$lemma[df$pos!="PUNCT" & df$pos != "SPACE"]))
  # Number words
  pos_cols$words[j] <- nrow(subset(df, pos!="PUNCT" & pos != "SPACE"))
  # Number sentences
  pos_cols$sentences[j] <- nrow(subset(df, tag=="."))
}
# Join the engineered features onto the test metadata by file name
pos_cols$file_path <- as.character(pos_cols$file_path)
files_meta$file_path <- as.character(files_meta$file_path) %>% basename
posmeta <- merge(pos_cols, files_meta, by = "file_path")
# Hedging -----------------------------------------------------------------
# High vs. hedged confidence dictionary
hb_dict <- dictionary(file = "dictionaries/hedges_boosters.yml")
# Use actual tokens instead of POS
# Count hedge/booster dictionary hits per document at dictionary level 1
hb_tokens <- tokens_lookup(c(low_tokens, high_tokens), dictionary = hb_dict, levels = 1)
hb_dfm <- dfm(hb_tokens)
hb_dataframe <- convert(hb_dfm, to = "data.frame")
# First column of the converted dfm is the document id; rename for the merge
colnames(hb_dataframe)[1] <- "file_path"
meta <- merge(posmeta, hb_dataframe, by = "file_path")
# EDA ---------------------------------------------------------------------
# Keep only the low/high essays and add a 0/1 outcome plus hedge/booster
# rates per 100 words.
meta_final <- filter(meta, test_score=="low" | test_score=="high") %>%
  mutate(highscore=as.numeric(test_score=="high"),
         hedges_norm = (confidencehedged/words)*100,
         boosters_norm = (confidencehigh/words)*100)
# Plot Confidence vs. Scores
# NOTE(review): columns picked by position (248:249) -- assumes
# hedges_norm/boosters_norm land exactly there; fragile if the feature
# matrix changes width. Confirm before re-running.
hb_df_low <- meta_final[which(meta_final$test_score=="low"),c(248:249)] %>%
  gather(confidence, freq_norm)
hb_df_low$score <- "low"
hb_df_high <- meta_final[which(meta_final$test_score=="high"),c(248:249)] %>%
  gather(confidence, freq_norm)
hb_df_high$score <- "high"
hb_df_all <- rbind(hb_df_low, hb_df_high)
ggplot(hb_df_all,aes(x = confidence, y= freq_norm, color=score, fill = score)) +
  geom_boxplot(alpha=0.5) +
  theme_minimal() +
  scale_x_discrete(labels=c("confidencehigh" = "Boosters (High Conf)",
                            "confidencehedged" = "Hedges (Low Conf)")) +
  labs(x="Confidence", y="Normalized Frequency") +
  theme(legend.position = "none",
        plot.background = element_rect("#e8e8e8e8"),
        panel.grid = element_line(colour = "white",size=0.75))
# Plot pronoun usages, normalized
pronplot <- ggplot(meta_final, aes(test_score, PRON,
                                   color=test_score, fill=test_score)) +
  geom_boxplot(alpha=0.5) +
  labs(x="Test Score", y="Pronouns") +
  theme_minimal() +
  theme(legend.position = "none",
        plot.background = element_rect("#e8e8e8e8"),
        panel.grid = element_line(colour = "white",size=0.75))
# Plot interjections, normalized
intjplot <- ggplot(meta_final, aes(test_score, INTJ,
                                   color=test_score, fill=test_score)) +
  geom_boxplot(alpha=0.5) +
  theme_classic() +
  labs(x="Test Score", y="Interjections") +
  theme_minimal() +
  theme(legend.position = "none",
        plot.background = element_rect("#e8e8e8e8"),
        panel.grid = element_line(colour = "white",size=0.75))
# Average Word length
wordlengthplot <- ggplot(meta_final, aes(test_score, av_word_len,
                                         color=test_score, fill=test_score)) +
  geom_boxplot(alpha=0.5) +
  theme_classic() +
  labs(x="Test Score", y="Average Word Length") +
  theme_minimal() +
  theme(legend.position = "none",
        plot.background = element_rect("#e8e8e8e8"),
        panel.grid = element_line(colour = "white",size=0.75))
# Number of unique words, normalized = # (different words / total words) * 100
uniquewordsplot <- ggplot(meta_final, aes(test_score, uniquewords,
                                          color=test_score, fill=test_score)) +
  geom_boxplot(alpha=0.5) +
  theme_classic() +
  labs(x="Test Score", y="Unique Words") +
  theme_minimal() +
  theme(legend.position = "none",
        plot.background = element_rect("#e8e8e8e8"),
        panel.grid = element_line(colour = "white",size=0.75))
# 2x2 panel combining the four boxplots above
ggarrange(pronplot, intjplot, wordlengthplot, uniquewordsplot, nrow=2, ncol=2)
# Plot most frequent words
low_tokens_count <- tokens(low_corpus) %>% unlist %>% tolower %>% table %>% data.frame
colnames(low_tokens_count) <- c("Token", "Frequency_low")
high_tokens_count <- tokens(high_corpus) %>% unlist %>% tolower %>% table %>% data.frame
colnames(high_tokens_count) <- c("Token", "Frequency_high")
# Full outer join so words unique to one group keep a 0 in the other
all_tokens_count <- merge(low_tokens_count, high_tokens_count,
                          by="Token", all=TRUE)
all_tokens_count[is.na(all_tokens_count)] <- 0
all_tokens_count$NFlow <- (all_tokens_count$Frequency_low / sum(all_tokens_count$Frequency_low))*100
all_tokens_count$NFhigh <- (all_tokens_count$Frequency_high / sum(all_tokens_count$Frequency_high))*100
all_tokens_count <- filter(all_tokens_count, Token!="." & Token!=",")
max_prop <- max(c(all_tokens_count$NFlow, all_tokens_count$NFhigh))
# Scatter of normalised frequencies; only frequent words (>1.15%) labelled
ggplot(data=all_tokens_count, mapping=aes(x=NFlow, y=NFhigh), label=Token) +
  geom_point() +
  coord_cartesian(xlim=c(0,max_prop), ylim=c(0,max_prop)) +
  geom_abline(slope=1, color="red", linetype="dashed") +
  labs(x="Failing Scores (Avg.)", y="Passing Scores (Avg.)",
       caption = "Normalized Frequencies of Words in Passing vs. Failing TOEFL Exams") +
  geom_text(aes(label=ifelse((all_tokens_count$NFlow > 1.15 | all_tokens_count$NFhigh > 1.15),
                             as.character(Token), ''), hjust=-0.2, vjust=0)) +
  theme_minimal() +
  theme(legend.position = "none",
        plot.background = element_rect("#e8e8e8e8"),
        panel.grid = element_line(colour = "white",size=0.75))
# Correlation Matrix
# heatmapCreator() comes from the sourced heatmapcreator.R script
heatmapCreator(meta_final[,-c(1,11:14,18:19)])
# Modeling ---------------------------------------------
# Full model
# FIX: the POS count columns carry spacyr's uppercase UPOS tags (PRON,
# ADJ, ...), as used in the EDA above (meta_final$PRON, INTJ, ...); the
# original lowercase terms (pron, adj, ...) do not exist in meta_final and
# made glm() fail with "object not found".
glm.1 <- glm(highscore ~ PRON + ADJ + ADP + INTJ + PUNCT + words + av_word_len + sentences +
               confidencehedged + confidencehigh + uniquewords, data=meta_final, family="binomial")
summary(glm.1)
car::vif(glm.1) # Note the high collinearity between words and average word length
PseudoR2(glm.1, which = "Nagelkerke")
Cstat(glm.1)
# Without word length (since collinear with words)
glm.2 <- glm(highscore ~ PRON + ADJ + ADP + INTJ + PUNCT + av_word_len + sentences +
               confidencehedged + confidencehigh + uniquewords, data=meta_final, family="binomial")
car::vif(glm.2)
summary(glm.2)
PseudoR2(glm.2, which = "Nagelkerke")
Cstat(glm.2)
# PCA
# Principal components on the numeric feature columns, standardised
meta_pca <- meta[,which(sapply(meta, class)=="numeric")]
meta.pr <- prcomp(meta_pca, center = TRUE, scale = TRUE)
# (title corrected to match npcs = 15 drawn components)
screeplot(meta.pr, type = "l", npcs = 15, main = "Screeplot of the first 15 PCs")
abline(h = 1, col="red", lty=5)
legend("topright", legend=c("Eigenvalue = 1"),
       col=c("red"), lty=5, cex=0.6)
# Cumulative proportion of variance explained by successive PCs
cumpro <- cumsum(meta.pr$sdev^2 / sum(meta.pr$sdev^2))
plot(cumpro[0:15], xlab = "PC #", ylab = "Amount of explained variance", main = "Cumulative variance plot")
abline(v = 6, col="blue", lty=5)
abline(h = 0.88759, col="blue", lty=5)
legend("topleft", legend=c("Cut-off @ PC6"),
       col=c("blue"), lty=5, cex=0.6)
library("factoextra")
# Individuals plot on the first two PCs, coloured by test score
fviz_pca_ind(meta.pr, geom.ind = "point", pointshape = 21,
             pointsize = 2,
             fill.ind = meta$test_score,
             col.ind = "black",
             palette = "jco",
             addEllipses = TRUE,
             label = "var",
             col.var = "black",
             repel = TRUE,
             legend.title = "Score") +
  ggtitle("2D PCA-plot from 200+ features ") +
  theme(plot.title = element_text(hjust = 0.5),
        plot.background = element_rect(fill="#e8e8e8"))
# Random Forest
# Recode the outcome to 0/1 and keep only numeric/integer features for the
# CSV consumed by the random-forest section and text2pred() below.
meta$test_score <- as.numeric(meta$test_score == "high")
meta_final <- meta[,which(sapply(meta, class) %in% c("numeric", "integer"))]
write.csv(meta_final, "english_cleaned5.csv", row.names = FALSE)
# Random forest: hold-out evaluation, full-data fit, decision trees, and a
# variable-importance plot.
set.seed(42)
english <- read.csv("english_cleaned5.csv")
# 70/30 train/test split.
# FIX: the original computed `round(nrow(english)) * 0.7`, which rounds the
# (already integer) row count and then hands a possibly non-integer size to
# sample(); round the product so the intended 70% sample size is explicit.
train.rows <- sample(1:nrow(english), size = round(nrow(english) * 0.7), replace=FALSE)
train <- english[train.rows,]
test <- english[-train.rows,]
# Regression forest on the 0/1 score; round predictions to a hard class
rf <- randomForest(formula = test_score ~ ., data=train)
preds <- round(predict(rf, test)) %>% as.numeric
# Confusion-matrix proportions on the held-out set
tp <- mean(preds==1 & test$test_score==1)
fp <- mean(preds==1 & test$test_score==0)
tn <- mean(preds==0 & test$test_score == 0)
fn <- mean(preds==0 & test$test_score == 1)
list(tp=tp, fp=fp, tn=tn, fn=fn)
# Full model refit on all rows (used for the importance plot below)
rf_full <- randomForest(formula = test_score ~ ., data=english)
# rpart tree
library(rpart)
library(rpart.plot)
tree <- rpart(formula = test_score ~ ., data=english)
# FIX: rpart.plot() draws with base graphics and does not return a ggplot
# object, so the original `rpart.plot(...) + theme(...)` errored at
# runtime; the theme() call is dropped.
rpart.plot(tree, box.palette = "RdBu", shadow.col = "gray", nn=TRUE)
english$test_result <- ifelse(english$test_score==1, "Pass", "Fail")
binary.model <- rpart(formula = test_result ~ ., data=english[,-which(colnames(english)=="test_score")], cp = .02)
rpart.plot(binary.model)
# Importance: top-20 predictors by node purity, with readable labels
imp <- rf_full$importance %>%
  data.frame() %>%
  rownames_to_column("feature") %>%
  dplyr::arrange(desc(IncNodePurity)) %>%
  dplyr::top_n(20)
imp2 <- imp
# Human-readable names; order must match the arranged `imp` rows
imp2$feature <- c("Unique Words", "Total Words", "Adj-Part", "Average Word Length",
                  "Prepositions", "Sentences", "Adp-Noun", "Conditional Phrases",
                  "Adjectives", "Pron-Verb", "Adv-Verb", "Adverb", "Particle-Verb",
                  "Pronoun", "Adp-Verb", "Noun-Adp", "Adj-Noun", "Being", "Adp",
                  "Noun-Punct")
imp2 %>%
  ggplot(aes(x = reorder(feature, IncNodePurity), y = IncNodePurity)) +
  geom_col(fill="cadetblue2") +
  coord_flip() +
  labs(x = "", y = "Node Purity") +
  ggtitle("Top 20 Important Variables") +
  theme_minimal() +
  theme(panel.border = element_blank(), panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(), axis.text.x = element_blank(),
        plot.title = element_text(size=16, hjust = 0.5), axis.title.x = element_text(size=14),
        plot.background = element_rect(fill="#e8e8e8")) +
  geom_text(label=round(imp$IncNodePurity, 2), hjust = 1.5)
# Predict -----------------------------------------------------------------
# Function for text to pred
# text2pred(text): scores a free-text essay against the trained model.
#   - parses `text` with spaCy and rebuilds the same one-row feature
#     vector used for training (POS counts, POS bigrams, discriminating
#     word counts, length stats, hedges/boosters),
#   - refits a random forest on english_cleaned5.csv (read from the
#     working directory) and prints the predicted probability of passing,
#   - draws a two-panel bar chart comparing the essay against the average
#     passing essay.
# Side effects: prints to the console; returns the ggarrange plot object.
text2pred <- function(text){
  text <- as.character(text)
  english <- read.csv("english_cleaned5.csv")
  df <- spacy_parse(text) %>% data.frame
  # One empty row with exactly the training feature columns
  pos_cols <- data.frame(matrix(nrow=1, ncol=ncol(english)))
  colnames(pos_cols) <- colnames(english)
  for(j in 1:nrow(df)){
    # Create bigrams of parts of speech
    df$nextpos[j] <- ifelse(j==nrow(df), NA, df$pos[j+1])
  }
  # Separator "." (not "-" as in training) because read.csv() ran the
  # training column names through make.names(), turning e.g. "ADJ-PART"
  # into "ADJ.PART".
  df$bigram <- paste(df$pos, df$nextpos, sep = ".")
  # Add all parts of speech to pos_cols df
  t <- table(df$pos)
  for(i in 1:nrow(t)){
    w <- which(names(t[i]) == colnames(pos_cols))
    pos_cols[1,w] <- t[i]
  }
  # Add bigram pos to pos_cols df
  b <- table(df$bigram)
  for(i in 1:nrow(b)){
    w <- which(names(b[i]) == colnames(pos_cols))
    pos_cols[1,w] <- b[i]
  }
  # Top 10 words
  # Columns 5:16 here (6:17 in training) because the non-numeric
  # file_path column was dropped before english_cleaned5.csv was written.
  for(i in 5:16){
    pos_cols[1,i] <- sum(df$token %>% tolower == colnames(pos_cols)[i])
  }
  # Number of unique words
  pos_cols$uniquewords <- filter(df, pos!="PUNCT" & pos!="SPACE")$token %>% tolower %>% unique %>% length
  # Average word length
  pos_cols$av_word_len <- mean(nchar(df$lemma[df$pos!="PUNCT" & df$pos != "SPACE"]))
  # Number words
  pos_cols$words <- nrow(subset(df, pos!="PUNCT" & pos != "SPACE"))
  # Number sentences
  pos_cols$sentences <- nrow(subset(df, token=="." | token=="!" | token=="?"))
  # Confidence
  # High vs. hedged confidence dictionary
  hb_dict <- dictionary(file = "C:/Users/Mitchell Pudil/Documents/textstat_tools/dictionaries/hedges_boosters.yml")
  # Use actual tokens instead of POS
  hb_tokens <- tokens_lookup(c(tokens(text)), dictionary = hb_dict, levels = 1)
  pos_cols$confidencehedged <- sum(hb_tokens=="ConfidenceHedged")
  pos_cols$confidencehigh <- sum(hb_tokens=="ConfidenceHigh")
  # Fill NA with 0
  # (features never observed in this essay count as zero)
  pos_cols[is.na(pos_cols)] <- 0
  # Random Forest model
  # NOTE(review): the forest is refit on every call -- cache it outside
  # the function if this will be called repeatedly.
  rf <- randomForest(formula = test_score ~ ., data=english)
  # Make prediction for the essay; clamp the regression output into [0, 1]
  pred <- predict(rf, pos_cols)
  if(pred < 0) {
    pred <- 0
  }
  if(pred > 1) {
    pred <- 1
  }
  pred <- round(pred*100, 2)
  print(paste0("Your probability of passing the English exam is ", pred, "%."))
  # Compare the essay's structure stats against the passing-essay averages
  struct <- c("unique words", "total words", "average word length", "sentences")
  pass_avg <- sapply(english[english$test_score==1,], mean)[c("uniquewords", "words", "av_word_len",
                                                              "sentences")]
  names(pass_avg) <- NULL
  you <- pos_cols[c("uniquewords", "words", "av_word_len","sentences")] %>% as.numeric
  all_results <- data.frame(struct, pass_avg, you)
  nyxlong <- reshape2::melt(all_results, id=c("struct"))
  # Melted rows 3,4,7,8 are the small-scale stats (word length, sentences);
  # gg1 excludes them, gg2 shows them, so each panel's y-axis stays readable
  gg1 <- ggplot(nyxlong[-c(3, 4, 7, 8),]) +
    geom_bar(aes(x = struct, y = value, fill = variable),
             stat="identity", position = "dodge", width = 0.7) +
    scale_fill_manual("", values = c("red","blue"),
                      labels = c("Average Passing", "You")) +
    labs(x="",y="") +
    theme_bw(base_size = 14) +
    ylim(0,450) +
    geom_text(aes(x = struct, y = value, label=value),
              hjust=ifelse(nyxlong[-c(3, 4, 7, 8),]$variable=="pass_avg", 1.5, -2),
              vjust=-1)
  gg2 <- ggplot(nyxlong[c(3, 4, 7, 8),]) +
    geom_bar(aes(x = struct, y = value, fill = variable),
             stat="identity", position = "dodge", width = 0.7) +
    scale_fill_manual("", values = c("red","blue"),
                      labels = c("Average Passing", "You")) +
    labs(x=paste0("\nYour probability of passing the English exam is ", pred, "%."),y="") +
    theme_bw(base_size = 14) +
    ylim(0,25) +
    geom_text(aes(x = struct, y = value, label=round(value, 2)),
              hjust=ifelse(nyxlong[c(3, 4, 7, 8),]$variable=="pass_avg", 1.5, -3),
              vjust=-1)
  ggarrange(gg1, gg2, ncol=1)
}
|
ac110d99e1caefb4031a458e769b1e2b63b11837 | 96dd0f70cfcb97754853ae9279b858133891682c | /man/halflife.Rd | abf4bc114266d06575fd28de29036fd9990bd1e5 | [] | no_license | JClavel/mvMORPH | 27e18d6172eefb28e527fde88671275f80afca07 | e75c68a0fece428e5e98d8f9ae7281569b7159c8 | refs/heads/master | 2023-07-10T21:12:01.839493 | 2023-06-30T14:37:11 | 2023-06-30T14:37:11 | 36,449,296 | 17 | 8 | null | 2022-06-22T14:40:37 | 2015-05-28T15:50:01 | R | UTF-8 | R | false | false | 2,794 | rd | halflife.Rd | \name{halflife}
\alias{halflife}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
The phylogenetic half-life for an Ornstein-Uhlenbeck process
%% ~~function to do ... ~~
}
\description{
This function returns the phylogenetic half-life for an Ornstein-Uhlenbeck process (object of class "ou").
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
halflife(object)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{object}{
Object fitted with the "mvOU" function.
%% ~~Describe \code{tree} here~~
}
}
\details{
The phylogenetic half-life describes the time to move halfway from the ancestral state to the primary optimum (Hansen, 1997).
The multivariate counterpart is computed on the eigenvalues of the "selection" matrix (Bartoszek et al. 2012).
%% ~~ If necessary, more details than the description above ~~
}
\value{
The phylogenetic half-life computed from each eigenvalue (or from alpha in the univariate case)
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
Bartoszek K., Pienaar J., Mostad P., Andersson S., Hansen T.F. 2012. A phylogenetic comparative method for studying multivariate adaptation. J. Theor. Biol. 314:204-215.
Hansen T.F. 1997. Stabilizing selection and the comparative analysis of adaptation. Evolution. 51:1341-1351.
%% ~put references to the literature/web site here ~
}
\author{
Julien Clavel
%% ~~who you are~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{mvMORPH}}
\code{\link{mvOU}}
\code{\link{stationary}}
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
# Simulated dataset
set.seed(14)
# Generating a random tree
tree<-pbtree(n=50)
# Setting the regime states of tip species
sta<-as.vector(c(rep("Forest",20),rep("Savannah",30))); names(sta)<-tree$tip.label
# Making the simmap tree with mapped states
tree<-make.simmap(tree,sta , model="ER", nsim=1)
col<-c("blue","orange"); names(col)<-c("Forest","Savannah")
# Plot of the phylogeny for illustration
plotSimmap(tree,col,fsize=0.6,node.numbers=FALSE,lwd=3, pts=FALSE)
# Simulate the traits
alpha<-matrix(c(2,0.5,0.5,1),2)
sigma<-matrix(c(0.1,0.05,0.05,0.1),2)
theta<-c(2,3,1,1.3)
data<-mvSIM(tree, param=list(sigma=sigma, alpha=alpha, ntraits=2, theta=theta,
names_traits=c("head.size","mouth.size")), model="OUM", nsim=1)
## Fitting the models
# OUM - Analysis with multiple optima
result<-mvOU(tree, data)
halflife(result)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ Ornstein Uhlenbeck }
\keyword{ half-life }
\keyword{ OU }% __ONLY ONE__ keyword per line
|
a1c4c9c8111cedad58d61fd09a23bde1512a78ae | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/expoRkit/examples/orani.Rd.R | 4dc0614ff1731c842a9f3f0d29e5203d277dfe44 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 624 | r | orani.Rd.R | library(expoRkit)
### Name: orani
### Title: Australia Economic Model data, 1968-68.
### Aliases: orani
### Keywords: data datasets
### ** Examples
data(orani) ## Load the data as a 'dgCMatrix' (CCS format)
# Constant vector of ones, one entry per state of the 2529-dimensional system.
v <- rep(1, 2529)
### Solving a system of 2529 coupled linear differential equations
# expv presumably evaluates exp(t*A) %*% v (Expokit-style Krylov method) -- see ?expv
system.time(wCCS <- expv(orani, v = v, t = 10))
oraniCOO <- as(orani, "TsparseMatrix") ## Coerce to COO format
### In this case, the COO format gives a slight increase in
### computational time as reported in Sidje (1998).
system.time(wCOO <- expv(oraniCOO, v = v, t = 10))
# Both storage formats should agree to high precision.
print(cbind(wCCS[1:5], wCOO[1:5]), digits = 14)
|
f1e42e5383e6e5f2b6ded7402980aba155b5aa86 | 135ac4c834cc1b90c48d623d0255fb9711ce24ae | /app.R | b032f211391818b6c5f9858034b4fc087f6c99ab | [] | no_license | JohnCoene/cran | fc5109ae9af7c77e4345dc76899606fa722ba1af | 65831f2fa76cebe2db495c61d3d5640856ccc7ce | refs/heads/master | 2020-08-28T23:06:49.172086 | 2020-02-04T20:34:56 | 2020-02-04T20:34:56 | 217,847,468 | 3 | 1 | null | 2020-02-04T20:34:57 | 2019-10-27T11:51:13 | R | UTF-8 | R | false | false | 4,107 | r | app.R | library(shiny)
library(waiter)
library(pushbar)
library(grapher)
# Serve ./assets (css/js/data) under the /assets URL prefix.
shiny::addResourcePath("assets", "./assets")

# Full text of the build script, displayed in the "Source code" pushbar.
code <- readLines("./script/script.R") %>%
  paste0(collapse = "\n")

# Loads `pkgs`, the vector of package names fed to the autocomplete input.
load("pkgs.RData")
ui <- fluidPage(
title = "CRAN Dependency Network",
tags$head(
tags$link( rel="stylesheet", type="text/css", href = "./assets/css/prism.css"),
tags$link( rel="stylesheet", type="text/css", href = "./assets/css/styles.css")
),
use_waiter(),
pushbar_deps(),
tags$script(src = "./assets/js/prism.js"),
show_waiter_on_load(
color = "#000",
tagList(
spin_folding_cube(),
span("Loading dependency graph", style = "color:white;")
)
),
div(
dqshiny::autocomplete_input("search", "Package", pkgs, placeholder = "e.g.: dplyr, data.table"),
graphOutput("g", height = "100vh"),
uiOutput("clicked"),
div(
id = "buttons",
actionLink("code", "", icon = icon("code fa-lg")),
actionLink("about", "", icon = icon("question fa-lg"))
)
),
pushbar(
id = "code_bar",
from = "left",
class = "bars",
h1("Source code"),
p(
"The visualisation is powered by the",
tags$a("grapher package", href = "https://grapher.network/")
),
style = "width:30%;",
tags$pre(tags$code(class = "language-r", code))
),
pushbar(
id = "about_bar",
class = "bars",
from = "right",
h1("CRAN Dependency Graph"),
p(
"Each node is an R package on CRAN, connections represent dependencies",
tags$code("Depends", class = "language-r"), tags$code("Imports", class = "language-r"),
"and", tags$code("LinkingTo.", class = "language-r")
),
p(
"You can navigate the graph with the", tags$kbd("w"), tags$kbd("a"),
tags$kbd("s"), tags$kbd("d"), "and the arrow keys (",
tags$kbd(HTML("←")), tags$kbd(HTML("↑")), tags$kbd(HTML("→")),
tags$kbd(HTML("↓")), ") to rotate the camera", tags$kbd("q"), tags$kbd("e"),
"will rotate it."
),
p("Click on a node to reveal more information about it."),
p("Type the name of a package in the search box in the top left corner to zoom in on it."),
p(
"While all packages are visualised not all dependencies are, to avoid",
"a hairball graph edges that are over a certain length are hidden. This",
"allows keeping sight of smaller communities."
),
p("You view the source used to build the visualisation", actionLink("code2", "here")),
p(tags$a("with 💕 by John Coene", id = "footer", href = "https://john-coene.com")),
style = "width:30%;"
),
hide_waiter_on_drawn("g"),
tags$script(src = "./assets/js/mobile.js"),
)
# Shiny server: renders the dependency graph, wires up the search box,
# the pushbars, node selection, and the desktop-only guard.
server <- function(input, output, session){
  setup_pushbar()

  # Main 3D force-directed graph, loaded from the pre-built JSON file.
  output$g <- render_graph({
    graph("./assets/data/graph.json")
  })

  # When a package name is typed in the autocomplete box, zoom the camera in
  # on its node and ask the widget to send the node's data back.
  # (The original registered two separate observers on input$search doing one
  # proxy call each; they are merged here, preserving the call order.)
  observeEvent(input$search, {
    graph_proxy("g") %>%
      graph_focus_node(input$search, dist = -40)
    graph_proxy("g") %>%
      retrieve_node(input$search)
  })

  # Both the corner icon (`code`) and the "here" link inside the about bar
  # (`code2`) open the source-code pushbar.
  observeEvent(input$code, {
    pushbar_open(id = "code_bar")
  })

  observeEvent(input$code2, {
    pushbar_open(id = "code_bar")
  })

  observeEvent(input$about, {
    pushbar_open(id = "about_bar")
  })

  # Currently selected package: set either by clicking a node or by the data
  # the widget returns after a search.
  focus <- reactiveValues(pkg = NULL)

  observeEvent(input$g_node_click, {
    focus$pkg <- input$g_node_click
  })

  observeEvent(input$g_retrieve_node, {
    focus$pkg <- input$g_retrieve_node
  })

  # Overlay showing the selected package and its reverse-dependency count
  # (links other than those originating from the node itself).
  output$clicked <- renderUI({
    sel <- focus$pkg

    if(is.null(sel))
      return(span())

    deps <- sel$links %>%
      dplyr::filter(fromId != sel$id) %>%
      nrow()

    tagList(
      strong(sel$id, style = "color:white;"),
      br(),
      span("Reverse Dependencies:", prettyNum(deps, big.mark = ","), style = "color:white;")
    )
  })

  # Desktop-only app: block small screens with a modal (width reported by
  # assets/js/mobile.js).
  observeEvent(input$screen_width, {
    if(input$screen_width < 760)
      showModal(
        modalDialog(
          title = NULL,
          "Apologies, this website is only available on desktop 🖥️",
          footer = NULL,
          fade = FALSE
        )
      )
  })
}
shinyApp(ui, server)
|
893043d6bf579e99f98f6762c66e655524230a8b | 72d03ec10b4955bcc7daac5f820f63f3e5ed7e75 | /input/gcam-data-system/aglu-processing-code/level1/LB152.ag_GTAP_R_C_GLU_irr.R | c72ff17fbca5225cb4c8dbf63002ae40f35d684e | [
"ECL-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | bgmishra/gcam-core | 54daddc3d037571bf745c4cf0d54c0d7a77f493f | bbfb78aeb0cde4d75f307fc3967526d70157c2f8 | refs/heads/master | 2022-04-17T11:18:25.911460 | 2020-03-17T18:03:21 | 2020-03-17T18:03:21 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,096 | r | LB152.ag_GTAP_R_C_GLU_irr.R | # Before we can load headers we need some paths defined. They
# may be provided by a system environment variable or just
# having already been set in the workspace
if( !exists( "AGLUPROC_DIR" ) ){
if( Sys.getenv( "AGLUPROC" ) != "" ){
AGLUPROC_DIR <- Sys.getenv( "AGLUPROC" )
} else {
stop("Could not determine location of aglu processing scripts, please set the R var AGLUPROC_DIR to the appropriate location")
}
}
# Universal header file - provides logging, file support, etc.
source(paste(AGLUPROC_DIR,"/../_common/headers/GCAM_header.R",sep=""))
source(paste(AGLUPROC_DIR,"/../_common/headers/AGLU_header.R",sep=""))
logstart( "LB152.ag_GTAP_R_C_GLU_irr.R" )
adddep(paste(AGLUPROC_DIR,"/../_common/headers/GCAM_header.R",sep=""))
adddep(paste(AGLUPROC_DIR,"/../_common/headers/AGLU_header.R",sep=""))
printlog( "Agricultural good data from LDS/GTAP, split into irrigated and rainfed from MIRCA, assigned to GCAM region / commodity / GLU" )
# -----------------------------------------------------------------------------
# 1. Read files
sourcedata( "COMMON_ASSUMPTIONS", "A_common_data", extension = ".R" )
sourcedata( "COMMON_ASSUMPTIONS", "unit_conversions", extension = ".R" )
sourcedata( "AGLU_ASSUMPTIONS", "A_aglu_data", extension = ".R" )
iso_GCAM_regID <- readdata( "COMMON_MAPPINGS", "iso_GCAM_regID" )
FAO_ag_items_PRODSTAT <- readdata( "AGLU_MAPPINGS", "FAO_ag_items_PRODSTAT" )
L151.ag_irrHA_ha_ctry_crop <- readdata( "AGLU_LEVEL1_DATA", "L151.ag_irrHA_ha_ctry_crop" )
L151.ag_rfdHA_ha_ctry_crop <- readdata( "AGLU_LEVEL1_DATA", "L151.ag_rfdHA_ha_ctry_crop" )
L151.ag_irrProd_t_ctry_crop <- readdata( "AGLU_LEVEL1_DATA", "L151.ag_irrProd_t_ctry_crop" )
L151.ag_rfdProd_t_ctry_crop <- readdata( "AGLU_LEVEL1_DATA", "L151.ag_rfdProd_t_ctry_crop" )
# -----------------------------------------------------------------------------
# 2. Perform computations
#add lookup vectors to each of the tables
printlog( "Adding region and crop lookup vectors to GTAP tables" )
with( iso_GCAM_regID, {
L151.ag_irrHA_ha_ctry_crop[[R]] <<- GCAM_region_ID[ match ( L151.ag_irrHA_ha_ctry_crop$iso, iso ) ]
L151.ag_rfdHA_ha_ctry_crop[[R]] <<- GCAM_region_ID[ match ( L151.ag_rfdHA_ha_ctry_crop$iso, iso ) ]
L151.ag_irrProd_t_ctry_crop[[R]] <<- GCAM_region_ID[ match( L151.ag_irrProd_t_ctry_crop$iso, iso ) ]
L151.ag_rfdProd_t_ctry_crop[[R]] <<- GCAM_region_ID[ match( L151.ag_rfdProd_t_ctry_crop$iso, iso ) ]
} )
with( FAO_ag_items_PRODSTAT, {
L151.ag_irrHA_ha_ctry_crop[[C]] <<- GCAM_commodity[ match ( L151.ag_irrHA_ha_ctry_crop$GTAP_crop, GTAP_crop ) ]
L151.ag_rfdHA_ha_ctry_crop[[C]] <<- GCAM_commodity[ match ( L151.ag_rfdHA_ha_ctry_crop$GTAP_crop, GTAP_crop ) ]
L151.ag_irrProd_t_ctry_crop[[C]] <<- GCAM_commodity[ match( L151.ag_irrProd_t_ctry_crop$GTAP_crop, GTAP_crop ) ]
L151.ag_rfdProd_t_ctry_crop[[C]] <<- GCAM_commodity[ match( L151.ag_rfdProd_t_ctry_crop$GTAP_crop, GTAP_crop ) ]
} )
#build tables collapsed by GCAM regions and crop names
printlog( "Collapsing ag commodity data into GCAM regions and commodities, and converting to appropriate units (bm2 and Mt)" )
L152.ag_irrHA_bm2_R_C_GLU <- aggregate( L151.ag_irrHA_ha_ctry_crop[ "irrHA" ] * conv_Ha_bm2,
by = L151.ag_irrHA_ha_ctry_crop[ R_C_GLU ], sum )
L152.ag_rfdHA_bm2_R_C_GLU <- aggregate( L151.ag_rfdHA_ha_ctry_crop[ "rfdHA" ] * conv_Ha_bm2,
by = L151.ag_rfdHA_ha_ctry_crop[ R_C_GLU ], sum )
L152.ag_irrProd_Mt_R_C_GLU <- aggregate( L151.ag_irrProd_t_ctry_crop[ "irrProd" ] * conv_t_Mt,
by = L151.ag_irrProd_t_ctry_crop[ R_C_GLU ], sum )
L152.ag_rfdProd_Mt_R_C_GLU <- aggregate( L151.ag_rfdProd_t_ctry_crop[ "rfdProd" ] * conv_t_Mt,
by = L151.ag_rfdProd_t_ctry_crop[ R_C_GLU ], sum )
# -----------------------------------------------------------------------------
# 3. Output
#Add comments to tables
comments.L152.ag_irrHA_bm2_R_C_GLU <- c( "Irrigated harvested area by GCAM region / commodity / GLU","Unit = bm2" )
comments.L152.ag_rfdHA_bm2_R_C_GLU <- c( "Rainfed harvested area by GCAM region / commodity / GLU","Unit = bm2" )
comments.L152.ag_irrProd_Mt_R_C_GLU <- c( "Irrigated crop production by GCAM region / commodity / GLU","Unit = Mt" )
comments.L152.ag_rfdProd_Mt_R_C_GLU <- c( "Rainfed crop production by GCAM region / commodity / GLU","Unit = Mt" )
#export final tables as CSV files
writedata( L152.ag_irrHA_bm2_R_C_GLU, domain="AGLU_LEVEL1_DATA",fn="L152.ag_irrHA_bm2_R_C_GLU", comments=comments.L152.ag_irrHA_bm2_R_C_GLU )
writedata( L152.ag_rfdHA_bm2_R_C_GLU, domain="AGLU_LEVEL1_DATA",fn="L152.ag_rfdHA_bm2_R_C_GLU", comments=comments.L152.ag_rfdHA_bm2_R_C_GLU )
writedata( L152.ag_irrProd_Mt_R_C_GLU, domain="AGLU_LEVEL1_DATA",fn="L152.ag_irrProd_Mt_R_C_GLU", comments=comments.L152.ag_irrProd_Mt_R_C_GLU )
writedata( L152.ag_rfdProd_Mt_R_C_GLU, domain="AGLU_LEVEL1_DATA",fn="L152.ag_rfdProd_Mt_R_C_GLU", comments=comments.L152.ag_rfdProd_Mt_R_C_GLU )
# Every script should finish with this line
logstop()
|
4ce69baeeebfcc8d3b713fb2466da4b5e06721e8 | ebbe08d58a57ae2e9d308a12df500e1e0ef8d098 | /wgk/age/step6b_compControl.R | dc3c18b8c17bafad85d761864fd0c0822c21ec1e | [] | no_license | Drizzle-Zhang/bioinformatics | a20b8b01e3c6807a9b6b605394b400daf1a848a3 | 9a24fc1107d42ac4e2bc37b1c866324b766c4a86 | refs/heads/master | 2022-02-19T15:57:43.723344 | 2022-02-14T02:32:47 | 2022-02-14T02:32:47 | 171,384,799 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 15,891 | r | step6b_compControl.R | #########
## Fig2主图: 比较儿童和成年人年龄段(C4/C6 vs C1/C2/C3/C5)
#########
library(tibble)
library(dplyr)
library(Seurat)
library(pheatmap)
library(ggplot2)
library(ggrepel)
#.libPaths(c("/home/yzj/R/x86_64-pc-linux-gnu-library/4.0","/home/zy/tools/R-4.0.0/library"))
########################
#### step2.1 DEG画heatmap,参照卵巢衰老的文章
########################
# Save a pheatmap object to a PDF file.
#
# Args:
#   x:        a pheatmap result (list carrying the drawable in `x$gtable`).
#   filename: output PDF path.
#   width, height: device size in inches.
# The original called dev.off() as the last statement, which leaked an open
# graphics device whenever grid.draw() errored; on.exit() now guarantees the
# device is closed on every exit path.
save_pheatmap_pdf <- function(x, filename, width=4, height=5) {
  stopifnot(!missing(x))
  stopifnot(!missing(filename))
  pdf(filename, width = width, height = height)
  on.exit(dev.off(), add = TRUE)
  grid::grid.newpage()
  grid::grid.draw(x$gtable)
  invisible(filename)
}
# Chondrocyte-lineage Seurat object; keep only the 'Normal' (control) samples.
pbmc_chond <- readRDS('JingMA_NEW/res/Harmony/ALL/RDS/seurat_celltype_Chond.Rdata')
Idents(pbmc_chond) <- pbmc_chond$type
pbmc_C <- subset(pbmc_chond,idents = 'Normal')
# Label donors C4/C6 as children, every other batch as adult.
pbmc_C@meta.data$Phase <- 'Adults'
pbmc_C$Phase[pbmc_C$batch %in% c('C4','C6')] <- 'Children'
pbmc_C$Phase <- factor(pbmc_C$Phase,levels = c('Children','Adults'))
# Per-cell-type children-vs-adults DEG tables (one data frame per population).
MK.lst <- readRDS('/home/yzj/JingMA_NEW/res/Harmony/ALL/RDS/DEGs_inChond_inChildrenAdults.RDS')
print(names(MK.lst))
# Split each table into up-/down-regulated genes at |FC| > 1.5, adj. p < 0.05.
# NOTE(review): log(1.5) implies avg_logFC is natural-log based; the sign
# convention (which group is the reference) should be confirmed against how
# the DEG tables were generated.
data <- MK.lst[['CSC']]
up_CSC <- rownames(data)[data$avg_logFC > log(1.5) & data$p_val_adj < 0.05]
dn_CSC <- rownames(data)[data$avg_logFC < -log(1.5) & data$p_val_adj < 0.05]
data <- MK.lst[["TC"]]
up_TC <- rownames(data)[data$avg_logFC > log(1.5) & data$p_val_adj < 0.05]
dn_TC <- rownames(data)[data$avg_logFC < -log(1.5) & data$p_val_adj < 0.05]
data <- MK.lst[["C1"]]
up_C1 <- rownames(data)[data$avg_logFC > log(1.5) & data$p_val_adj < 0.05]
dn_C1 <- rownames(data)[data$avg_logFC < -log(1.5) & data$p_val_adj < 0.05]
data <- MK.lst[["C2"]]
up_C2 <- rownames(data)[data$avg_logFC > log(1.5) & data$p_val_adj < 0.05]
dn_C2 <- rownames(data)[data$avg_logFC < -log(1.5) & data$p_val_adj < 0.05]
# Build a gene-by-cell-type membership matrix for the DEG lists of the four
# chondrocyte populations.
#
# Args:
#   sigCSC, sigTC, sigC1, sigC2: character vectors of significant gene names.
# Returns:
#   A numeric genes x 4 matrix (columns CSC/TC/C1/C2) of 0/1 indicators, with
#   rows ordered so genes shared by the most cell types come first.
# Fixes vs. the original: `drop = FALSE` keeps the matrix shape when only one
# gene is supplied (the original silently collapsed to a plain vector, which
# broke the downstream pheatmap calls); all-empty input now yields a 0-row
# matrix instead of erroring in a `1:0` loop.
get_values <- function(sigCSC, sigTC, sigC1, sigC2) {
  sig_sets <- list(CSC = sigCSC, TC = sigTC, C1 = sigC1, C2 = sigC2)
  sig_genes <- unique(unlist(sig_sets, use.names = FALSE))
  values <- matrix(0, nrow = length(sig_genes), ncol = length(sig_sets),
                   dimnames = list(sig_genes, names(sig_sets)))
  for (ct in names(sig_sets)) {
    values[sig_genes %in% sig_sets[[ct]], ct] <- 1
  }
  # Most-shared genes first; order() is stable, so ties keep input order
  # exactly as the original loop-based version did.
  values[order(rowSums(values), decreasing = TRUE), , drop = FALSE]
}
## 对成人来说,下调矩阵
upValues_mtx <- get_values(up_CSC,up_TC,up_C1,up_C2)
up_sum <- apply(upValues_mtx,1,sum)
up_df <- upValues_mtx[-(which(up_sum>1)),]
annotation_col = data.frame(CellType = factor(c("CSC", "TC","C1","C2")))
rownames(annotation_col) <- colnames(upValues_mtx)
annotation_row = data.frame(GeneClass = factor(rep(c("Common", "CSC", "TC","C1","C2"),
c(length(which(up_sum>1)),
length(which(up_df[,1]==1)),
length(which(up_df[,2]==1)),
length(which(up_df[,3]==1)),
length(which(up_df[,4]==1))))))
rownames(annotation_row) = rownames(upValues_mtx)
ann_colors = list( CellType = c(CSC="#EE9572",TC="#B2DF8A",C1="#A6CEE3",C2="#9999FF"),
GeneClass = c(Common='grey',CSC="#EE9572",TC="#B2DF8A",C1="#A6CEE3",C2="#9999FF"))
p_UP <- pheatmap(upValues_mtx,cluster_rows = F,cluster_cols = F,color = colorRampPalette(c("#EFEFEF", "white", "#7F99CE"))(100),
border_color ='transparent',show_rownames = F,angle_col='45',
annotation_row = annotation_row,annotation_colors = ann_colors,legend=F,annotation_legend = FALSE)
save_pheatmap_pdf(p_UP,'JingMA_NEW/res/compControl/ChildrenvsAdults/FIG/DEGHeatmap_UP.pdf',height = 4,width = 2)
saveRDS(upValues_mtx,'JingMA_NEW/res/compControl/ChildrenvsAdults/FIG/DEGHeatmap_UPmtx.RDS')
## 对成人来说上调矩阵
dnValues_mtx <- get_values(dn_CSC,dn_TC,dn_C1,dn_C2)
dn_sum <- apply(dnValues_mtx,1,sum)
dn_df <- dnValues_mtx[-(which(dn_sum>1)),]
annotation_col = data.frame(CellType = factor(c("CSC", "TC","C1","C2")))
rownames(annotation_col) <- colnames(dnValues_mtx)
annotation_row = data.frame(GeneClass = factor(rep(c("Common", "CSC", "TC","C1","C2"),
c(length(which(dn_sum>1)),
length(which(dn_df[,1]==1)),
length(which(dn_df[,2]==1)),
length(which(dn_df[,3]==1)),
length(which(dn_df[,4]==1))))))
rownames(annotation_row) = rownames(dnValues_mtx)
ann_colors = list( CellType = c(CSC="#EE9572",TC="#B2DF8A",C1="#A6CEE3",C2="#9999FF"),
GeneClass = c(Common='grey',CSC="#EE9572",TC="#B2DF8A",C1="#A6CEE3",C2="#9999FF"))
p_DN <- pheatmap(dnValues_mtx,cluster_rows = F,cluster_cols = F,color = colorRampPalette(c("#EFEFEF", "white","#B15E72"))(100),
border_color ='transparent',show_rownames = F,legend=F,angle_col='45',
annotation_row = annotation_row,annotation_colors = ann_colors,annotation_legend = FALSE)
save_pheatmap_pdf(p_DN,'JingMA_NEW/res/compControl/ChildrenvsAdults/FIG/DEGHeatmap_DN.pdf',height = 4,width = 2)
saveRDS(dnValues_mtx,'JingMA_NEW/res/compControl/ChildrenvsAdults/FIG/DEGHeatmap_DNmtx.RDS')
up_sum <- apply(upValues_mtx,1,sum)
length(which(up_sum==4))
length(which(up_sum>1))
up_df <- upValues_mtx[-(which(up_sum>1)),]
length(which(up_df[,1]==1))
length(which(up_df[,2]==1))
length(which(up_df[,3]==1))
length(which(up_df[,4]==1))
dn_sum <- apply(dnValues_mtx,1,sum)
length(which(dn_sum==4))
length(which(dn_sum>1))
dn_df <- dnValues_mtx[-(which(dn_sum>1)),]
length(which(dn_df[,1]==1))
length(which(dn_df[,2]==1))
length(which(dn_df[,3]==1))
length(which(dn_df[,4]==1))
#########
### 2.2 挑选term组合画图
#########
term_down_BP <- c("cartilage development","chondrocyte differentiation","cartilage condensation","chondrocyte morphogenesis",
"extracellular matrix organization","collagen fibril organization",
"NAD metabolic process")
term_down_MF <- c("cartilage development","chondrocyte differentiation","cartilage condensation","chondrocyte morphogenesis",
"extracellular matrix organization","collagen fibril organization",
"NAD metabolic process")
library(xlsx)

# Directory holding the per-cell-type GO-BP enrichment workbooks; each has an
# "UP" sheet (terms from genes higher in adults) and a "DN" sheet (higher in
# children) -- matching the Group labels assigned to bar_UP/bar_DN below.
go_dir <- '/home/yzj/JingMA_NEW/res/compControl/ChildrenvsAdults/ClusterPro/FC1.5_adjP0.05'

# Read one direction sheet for one cell type and keep terms with adjusted
# p-value below 0.1.  (Replaces eight copy-pasted read/filter chunks.)
read_go_sheet <- function(celltype, sheet) {
  res <- read.xlsx(file.path(go_dir, paste0(celltype, '_BP.xlsx')), sheetName = sheet)
  res[res$p.adjust < 0.1, ]
}

# Unique genes behind the hand-picked terms; column 8 is the "/"-separated
# geneID field of the clusterProfiler output.  (Replaces the original
# vector-growing for-loops.)
collect_genes <- function(picked) {
  unique(unlist(strsplit(picked[, 8], '/')))
}

## CSC ----
CSC_DN <- read_go_sheet('CSC', 'DN')
print(CSC_DN$Description)  # inspected interactively to choose the indices below
pickCSC_DN <- CSC_DN[c(8, 17, 22, 24, 29, 49), ]
geneCSC_DN <- collect_genes(pickCSC_DN)
print(length(geneCSC_DN))

CSC_UP <- read_go_sheet('CSC', 'UP')
print(CSC_UP$Description)
pickCSC_UP <- CSC_UP[c(12, 14, 27, 30, 35, 89, 123, 145, 153, 154, 193, 217, 302), ]
geneCSC_UP <- collect_genes(pickCSC_UP)
print(length(geneCSC_UP))

## TC ----
Trans_DN <- read_go_sheet('TC', 'DN')
print(Trans_DN$Description)
pickTrans_DN <- Trans_DN[c(11, 13, 18, 27, 29), ]
geneTrans_DN <- collect_genes(pickTrans_DN)
print(length(geneTrans_DN))

Trans_UP <- read_go_sheet('TC', 'UP')
print(Trans_UP$Description)
pickTrans_UP <- Trans_UP[c(12, 28, 73, 118, 123, 130), ]
geneTrans_UP <- collect_genes(pickTrans_UP)
print(length(geneTrans_UP))

## C1 ----
Chond1_DN <- read_go_sheet('C1', 'DN')
print(Chond1_DN$Description)
pickChond1_DN <- Chond1_DN[c(40), ]
geneChond1_DN <- collect_genes(pickChond1_DN)
print(length(geneChond1_DN))

Chond1_UP <- read_go_sheet('C1', 'UP')
print(Chond1_UP$Description)
pickChond1_UP <- Chond1_UP[c(12, 48, 90, 138, 143, 165, 200), ]
geneChond1_UP <- collect_genes(pickChond1_UP)
print(length(geneChond1_UP))

## C2 ----
Chond2_DN <- read_go_sheet('C2', 'DN')
print(Chond2_DN$Description)
pickChond2_DN <- Chond2_DN[c(63), ]
geneChond2_DN <- collect_genes(pickChond2_DN)
print(length(geneChond2_DN))

Chond2_UP <- read_go_sheet('C2', 'UP')
print(Chond2_UP$Description)
pickChond2_UP <- Chond2_UP[c(13, 14, 43, 51, 74, 144, 166, 199, 204), ]
geneChond2_UP <- collect_genes(pickChond2_UP)
print(length(geneChond2_UP))
bar_DN <- rbind(pickCSC_DN,pickTrans_DN,pickChond1_DN,pickChond2_DN)
bar_DN$CellType <- c(rep('CSC',nrow(pickCSC_DN)),rep('TC',nrow(pickTrans_DN)),
rep('C1',nrow(pickChond1_DN)),rep('C2',nrow(pickChond2_DN)))
bar_DN$CellType <- factor(bar_DN$CellType,levels = c('CSC','TC','C1','C2'))
bar_DN$Group <- 'Children'
bar_DN$log10Pval <- -log(bar_DN$p.adjust,10)
bar_UP <- rbind(pickCSC_UP,pickTrans_UP,pickChond1_UP,pickChond2_UP)
bar_UP$CellType <- c(rep('CSC',nrow(pickCSC_UP)),rep('TC',nrow(pickTrans_UP)),
rep('C1',nrow(pickChond1_UP)),rep('C2',nrow(pickChond2_UP)))
bar_UP$CellType <- factor(bar_UP$CellType,levels = c('CSC','TC','C1','C2'))
bar_UP$Group <- 'Adults'
bar_UP$log10Pval <- log(bar_UP$p.adjust,10)
bar_df <- rbind(bar_DN,bar_UP)
levels_DN=rev(c("cartilage development","chondrocyte differentiation","cartilage condensation","chondrocyte morphogenesis",
"extracellular matrix organization","collagen fibril organization",
"NAD metabolic process"))
setdiff(unique(bar_DN$Description),levels_DN)
levels_UP=rev(c("negative regulation of stem cell differentiation","cell cycle arrest",
"autophagy","aging","cellular senescence",
"response to oxidative stress","reactive oxygen species metabolic process","reactive oxygen species biosynthetic process",
"cell death in response to oxidative stress",
"DNA damage response, signal transduction by p53 class mediator",
"ERK1 and ERK2 cascade", 'p38MAPK cascade',
# "positive regulation of p38MAPK cascade",
# "response to interleukin-6","extrinsic apoptotic signaling pathway",
"intrinsic apoptotic signaling pathway"))
setdiff(unique(bar_UP$Description),levels_UP)
bar_df$Description <- factor(bar_df$Description,levels = rev(c(levels_UP,levels_DN)))
bar_df$Count <- as.numeric(bar_df$Count)
library(reshape2)
mat.plot <- bar_df[,c('Description','CellType','Group','log10Pval')]
mat.plot <- dcast(mat.plot,Description~CellType+Group)
mat.plot[is.na(mat.plot)] <- 0
rownames(mat.plot) <- mat.plot$Description
mat.plot <- mat.plot[,-1]
colNames <- c('CSC_Children','TC_Children','C1_Children','C2_Children','CSC_Adults','TC_Adults','C1_Adults','C2_Adults')
mat.plot <- dplyr::select(mat.plot,colNames)
# col annotation
annotation_col = data.frame(
CellType = factor(c(rep('CSC', 2),rep('TC', 2),rep('C1', 2),rep('C2', 2)),
levels = c('CSC', 'TC','C1', 'C2')),
Phase = factor(rep(c('Children', 'Adults'), 4), levels = c('Children', 'Adults')),
row.names = colNames
)
annotation_col = data.frame(
CellType = factor(rep(c('CSC', 'TC','C1', 'C2'), 2),
levels = c('CSC', 'TC','C1', 'C2')),
Phase = factor(rep(c('Children', 'Adults'), each=4), levels = c('Children', 'Adults')),
row.names = colNames
)
ann_colors = list(
CellType = c(CSC="#EE9572",TC="#B2DF8A",C1="#A6CEE3",C2="#9999FF"),
Phase = c(Children = "#6C6C6C", Adults = "#637FBF")
)
bk <- c(seq(-8,-0.1,by=0.01),seq(0,8,by=0.01))
plot.heatmap <- pheatmap::pheatmap(mat.plot,
cluster_rows = F, cluster_cols = F, scale = "none",
display_numbers = F,
annotation_col = annotation_col ,annotation_colors = ann_colors,
show_rownames = T, show_colnames = F, legend = T,
gaps_col = c(4),
color = c(colorRampPalette(colors = c("red","white"))(length(bk)/2),colorRampPalette(colors = c("white","blue"))(length(bk)/2)),
legend_breaks=seq(-8,8,2),
breaks=bk
)
ggsave('/home/yzj/JingMA_NEW/res/compControl/ChildrenvsAdults/FIG/Fig3C_pickHeatmap.pdf',plot.heatmap,width = 8,height = 5)
########################
#### 2.2 挑选基因画vlnplot
########################
library(ggpubr)
# Violin + boxplot of one gene's expression per cell type, split by Phase
# (Children vs Adults), one facet per cell type with a significance label
# from stat_compare_means().
# NOTE(review): relies on the globals `EXP` (gene x cell expression matrix)
# and `pbmc_C` (the control Seurat object), which are defined *after* this
# function in the script -- only call it once those exist.
get_vlnplot <- function(gene){
  # One row per cell: expression of `gene` plus its cell-type / phase labels.
  pickEXP <- data.frame(cells=colnames(EXP),exp=as.numeric(EXP[rownames(EXP) ==gene,]),celltype=pbmc_C$celltype,phase=pbmc_C$Phase)
  p <- ggplot(pickEXP, aes(x=celltype, y=exp,fill=phase)) +
    geom_violin(trim=FALSE,color="white") +
    geom_boxplot(width=0.2,position=position_dodge(0.9))+
    # grey = Children, blue = Adults (matches the Fig3C heatmap annotation)
    scale_fill_manual(values = c("#6C6C6C", "#637FBF"))+
    theme_bw()+
    labs(title=gene)+
    theme(axis.text.x=element_blank(),axis.ticks.x =element_blank(),
          axis.text.y=element_text(size=12,colour="black"),axis.title.y=element_text(size = 12,colour="black"), axis.ticks.y =element_line(colour="black"),
          legend.text=element_text(colour="black", size=12),legend.title=element_blank(),
          panel.grid.major = element_blank(),panel.grid.minor = element_blank(),
          plot.title = element_text(hjust = 0.5,size = 12,face = 'bold.italic'))+
    ylab("")+xlab("")+
    # one facet per cell type, so each panel keeps its own x position
    facet_wrap(~celltype,ncol = 4,scales= "free_x")+
    theme(strip.background = element_rect(color = "black", fill = "#8C90C6",size = 1.2),
          strip.text.x = element_text(size = 10, color = "black",face = 'bold'),
          panel.grid = element_blank(),panel.border = element_rect(color = 'black',size = 2))+
    # per-facet Children-vs-Adults significance stars (ggpubr)
    stat_compare_means(label = "p.signif",label.x=1.5)
  return(p)
}
EXP <- as.data.frame(pbmc_C@assays$RNA@data)
pick_genes <- c('COL2A1','COL11A1','COL11A2','COL9A1','COL11A2','COL9A2','COL9A2','ELN','TIMP4',
'MATN3','VIT','CYTL1','PTX3','PTGS2','GPX3','SOD2','MGP','MMP3','CDKN1A','IL6')
pdf('JingMA_NEW/res/compControl/ChildrenvsAdults/pickGene_vlnplot.pdf',width = 6,height = 3)
for(i in 1:length(pick_genes)){
gene=pick_genes[i]
p <- get_vlnplot(gene)
print(p)
}
dev.off()
|
249e2219f8bfc09a219e2896df24bcffcbcb6ce1 | d5863e788438a0994e43455a52e9496ba6146a72 | /manipdata.R | fb551f9a4c48c6976d093ba018bab6b05bc67ca0 | [] | no_license | Mouzri/Reproducible_proj | 22c905dae8b78aeeca859e76177451ff49eed783 | 124355596eb3bca2eddee87e0fd19d47a15f4023 | refs/heads/master | 2020-09-21T16:28:44.190540 | 2019-11-29T11:50:21 | 2019-11-29T12:03:41 | 224,848,822 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,592 | r | manipdata.R | #calculating the mean per day
# Total number of steps per day (NAs ignored).
sumperday <- with(raw_data, tapply(steps, date, sum, na.rm = TRUE))
#histogram per day
# (the original recomputed `sumperday` identically right here; the redundant
# second assignment has been removed)
hist(sumperday, col = "gold", main = "sum per day", xlim = c(0, 25000))
# mean of the daily totals
mean(sumperday)
# median of the daily totals
median(sumperday)
## What is the average daily activity pattern?
#removing missing values
omitted_NA_data <- na.omit(raw_data)
#using the by() function to split the data
# NOTE(review): after the named `simplify`, the remaining arguments are matched
# positionally, so `omitted_NA_data` binds to `data` -- works, but fragile.
int_mean <- by(simplify = FALSE,omitted_NA_data,INDICES = omitted_NA_data$interval,function(x){mean(x$steps)})
#reforming the data to a data frame
res <- do.call("rbind",int_mean)
nw_res <- data.frame(mean=res,interval=as.numeric(row.names(res)))
par(mar=c(2,2,2,2))
plot(nw_res$interval,nw_res$mean,type = "l",xlab = "Interval",ylab = "mean of steps",main = "average daily activity pattern")
#return interval with the max of mean
subset(nw_res,mean==max(mean),select = "interval")
#Calculate and report the total number of missing values in the dataset.
sum(is.na(raw_data$steps))
#Imputing the NA
# first we determine the intervals where steps is NA
na_interval <- raw_data[is.na(raw_data$steps),"interval"]
#we give index to na_interval of the match in the second vector, which are intervals in the nw_res data frame
index <- match(na_interval,nw_res$interval)
searched_mean <- nw_res[index,"mean"]
#fill the NAs values in the original data (each NA gets the mean of its interval)
raw_data[is.na(raw_data$steps),"steps"] <- searched_mean
#Creating a new data frame
new_df <- raw_data
head(new_df)
#Make a histogram of the total number of steps taken each day and Calculate and report the mean and median total
#number of steps taken per day. Do these values differ from the estimates from the first part of the assignment? What is the impact of imputing missing data on the estimates of the total daily number of steps?
# NOTE(review): `x$step` below only resolves to the `steps` column via `$`
# partial matching -- confirm, and prefer the full name `x$steps`.
splt_new_df <- by(new_df,new_df$date,function(x){sum(x$step)},simplify = FALSE)
nw_sumperday <- do.call("rbind",splt_new_df)
#plotting the histogram
hist(nw_sumperday,col="gold",main="total steps per day after imputing the NAs",xlab = "days")
#Calculate and report the mean and median total number of steps taken per day.
mean(tapply(new_df$steps, new_df$date, sum))
median(tapply(new_df$steps, new_df$date, sum))
#Create a new factor variable in the dataset with two levels - "weekday" and "weekend" indicating whether a given date is a weekday or weekend day.
ind <- match(raw_data$date,c("Saturday","Sunday"))
my_dy <- sapply(raw_data$date, function(x){if (weekdays(x)=="Saturday"|weekdays(x)=="Sunday"){
day_vect <- "weekend"
}
else {
day_vect <- "weekday"
}
day_vect
})
raw_data$day_type <- factor(my_dy)
#Make a panel plot containing a time series plot (i.e. type = "l") of the 5-minute interval (x-axis) and the average number of steps taken, averaged across all weekday days or weekend days (y-axis). See the README file in the GitHub repository to see an example of what this plot should look like using simulated data.
#let's split the data first based on the interval and day_type
spl_dt <- aggregate(steps~interval+day_type,data=raw_data,FUN = mean, na.rm=TRUE)
#create the subsets
weekdaydt <- subset(spl_dt,day_type=="weekday" ,select = c("interval","steps"))
weekendt <- subset(spl_dt,day_type=="weekend",c(1,3))
par(mfrow=c(2,1),mar=c(3,3,3,4))
plot(weekdaydt$interval,weekdaydt$steps,type = 'l',col="darkblue",xlab = "Interval",ylab = "mean of the steps",main = "Weekdays")
plot(weekendt$interval,weekendt$steps,type = 'l',col="red",xlab = "Interval",ylab = "mean of steps",main = "Weekend") |
4f2ec017cf11d48d970d65a9fd53ae7e9a3f0c3e | 8f04d44d2393d300c247eb36ecb1dd6e377badbe | /R/url_non_dominant_arm_data.R | 9f8cf6b505a350a97d68b56407fd84df149a4143 | [] | no_license | wathenmj/marpalSVD | 4234df6ce8d15e68a041f993ff9db5ef5c2213fc | ebd98d011ff0d4d29bfb67a8135f4729e54903c3 | refs/heads/master | 2021-03-22T03:33:40.006560 | 2018-08-06T21:31:28 | 2018-08-06T21:31:28 | 89,888,745 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,080 | r | url_non_dominant_arm_data.R | # url_non_dominant_arm_data
# Genotype and allele-frequency walkthrough on the FMS data set, following
# "Applied Statistical Genetics with R" (Andrea S. Foulkes).
fmsURL <- "http://www.stat-gen.org/book.e1/data/FMS_data.txt"
# stringsAsFactors = TRUE restores the pre-R-4.0 behaviour the book assumes:
# summary() must yield per-genotype counts, which only happens for factors.
fms <- read.delim(file = fmsURL, header = TRUE, sep = "\t",
                  stringsAsFactors = TRUE)
# Cache a local copy of the data set.
write.table(fms, "fms",
            quote = FALSE,      # was quote = F; T/F are reassignable, never use them
            row.names = FALSE,
            col.names = TRUE)
# NOTE: attach(fms) was removed; columns are now referenced explicitly via
# fms$..., which avoids search-path masking surprises.
# see page 21 of Applied Statistical Genetics with R. Andrea S. Foulkes
colnames(fms)
# Genotype counts for SNP actn3_rs540874 (AA, GA, GG plus NA's).
GenoCount <- summary(fms$actn3_rs540874)
GenoCount
NumbObs <- sum(!is.na(fms$actn3_rs540874))
NumbObs
# genotype frequencies for AA, GA, GG, and NA's are given respectively
GenoFreq <- as.vector(GenoCount/NumbObs)
GenoFreq
# frequencies of A and G alleles are calulated as follows
FreqA <- (2*GenoFreq[1] + GenoFreq[2])/2
FreqA
FreqG <- (2*GenoFreq[3] + GenoFreq[2])/2
FreqG
# so A is the minor Allele with a frequency of 0.431
library(genetics)
library(coin)
# Cochran-Armitage (C-A) trend test p.42
Geno <- genotype(fms$actn3_rs540874, sep = "")
summary(Geno)
Geno <- fms$esr1_rs1042717
# Binary trait: overweight at baseline (pre-study BMI above 25).
Trait <- as.numeric(fms$pre.BMI > 25)
GenoOrd <- ordered(Geno)
independence_test(Trait ~ GenoOrd, teststat = "quad",
                  scores = list(GenoOrd = c(0, 1, 2)))
|
534b85b6e278f3a999c2651641abe7be941305bb | 02f053ce70b065724d4a02619fb402adcc0ec997 | /analysis/boot/boot924.R | 3c218c700cfc156bd0ed7181f812be3e17d574c6 | [] | no_license | patperry/interaction-proc | 27950482929240bba55c7d0f2f8c5235d770feea | cf8dfd6b5e1d0684bc1e67e012bf8b8a3e2225a4 | refs/heads/master | 2021-01-01T06:11:47.125853 | 2012-12-04T20:01:42 | 2012-12-04T20:01:42 | 673,564 | 1 | 3 | null | null | null | null | UTF-8 | R | false | false | 3,755 | r | boot924.R | seed <- 924
# Machine-generated fit summary for one bootstrap replicate -- do not edit
# the numeric values by hand.
log.wt <- 0.0
# Regularization penalty used in the fit.
penalty <- 2.8115950178536287e-8
# Interval boundaries (presumably seconds, roughly doubling each step --
# TODO confirm units against the estimation code that wrote this file).
# No send-side intervals for this model.
intervals.send <- c()
intervals.recv <- c(56, 112, 225, 450, 900, 1800, 3600, 7200, 14400, 28800, 57600, 115200, 230400, 460800, 921600, 1843200, 3686400, 7372800, 14745600, 29491200, 58982400)
# Null and residual deviance with their degrees of freedom.
dev.null <- 358759.0022669336
df.null <- 35567
dev.resid <- 225630.84087658627
df.resid <- 35402
# Number of fitted model terms.
df <- 165
# Fitted coefficient vector (generated output).
coefs <- c(6.504440233216872, 5.979280224987526, 5.857389339388361, 5.294899748104516, 4.99509057015, 4.887164375867232, 4.8548582847207165, 4.6736544184368025, 4.392971521222499, 4.307160873468095, 4.3219929834362025, 4.172239075316716, 4.02880491412071, 3.99111364095116, 3.783470535293636, 3.5530714873712594, 3.2797708055668027, 2.953163916505277, 2.558150365270266, 2.1310900593004702, 1.6799266307634695, 0.8994123842407146, 0.9297313005357085, 0.2749783404873936, 0.2823919136696772, -1.0282565596258697, -0.14386934605608323, 0.901518000693679, 1.0516171422839857, -2.0244405878563647, -2.368110334257145, -2.527275217181585, -0.3894914298973378, 0.7806581564137982, 1.2105482407065897, -1.103694185008514, -0.3146736654802764, -1.1838570731150826, -9.497332033515107e-2, -0.24115424231597493, 0.9169831637438138, 0.8171259208588406, -0.8718884820306466, -1.7233624136670285, -0.9918682851288608, -0.8911701085773486, -0.5765337494967294, 0.36471779998916537, -0.10041064265439628, -0.8323639923080093, -0.19255187459779102, 0.9890271242363629, -2.479120262527345, 1.6204130272831696, 0.7416233249815383, 1.0947569350935724, -2.0960863536398997, -0.17463743208625565, -0.3419715501402861, 1.1149624206185982, 0.8626716865285223, 0.7804173854550487, -1.7886196289919756, -1.3232215864886094, -0.9374103615814988, 0.16763938357254682, 0.697436689453054, -0.5540091919116011, -1.0033310149473502, -0.46913984538240144, -1.3261291812874973, -0.5056505843235146, 0.691184896218048, 1.0787606765393623, 0.7047523208339482, -0.6677092317884535, -1.035994323171949, -1.600933717366849, 5.225215008357486e-2, 0.6657451468024832, 1.1662343284442094, -7.591781605074852e-4, 6.415319364090956e-2, -1.9966765673543636, -0.25669116366274175, 0.5797018539643797, 1.1881312606053074, 0.17520738402179556, 0.9191274759265573, -1.3433533032894451, 0.47675186322114865, 0.7678920619599768, 0.7778435651437129, 0.4385979929294574, 3.195145816910965e-2, 1.2807212183503016, -0.6065737223584898, 
0.6845456241124895, 5.9858261649406455e-2, -0.23173757627513328, 0.4439362203401004, -0.28222600478895177, 0.8512723711550085, -0.18926593964094704, 0.7703856776478271, 0.8204898032848603, 1.0768526424853406, -0.6104262334651188, -5.6028584520540436e-2, -1.6595744809507398, 0.3924771921841039, 0.5713815941585575, 1.5650627437017053, -0.3856177469880528, -0.10884717792489115, -0.7241887113999199, 0.7048752305470668, -0.23802754557866246, 0.38243294672585343, 0.6781767427795291, -0.5067270794730939, -0.5123066643927137, -1.022838195628307, -0.404152381080632, 0.42412543605431513, 0.9416258182610856, -8.538296397481604e-2, 0.9688135431417813, -0.6986351254937591, -0.38927335145156067, 0.21666372943360593, 0.9385183207964739, 0.801538319976856, 0.6059781629348485, -7.625351435114069e-3, 1.026697563731493, -0.3169550832823232, 1.053360195711055, 0.7413131150761798, 1.0029104285450217, 0.7611613420803116, -0.6639754526900341, -1.4093614087829385, 0.7504982481234941, 0.32966468691998074, 0.6090131894625713, -0.24072186675927917, -0.42351473347933366, -2.082185975627921, 1.2706804015932416, 0.15694469195146996, 1.208509983848645, -0.16106592857031343, -0.11217542294217203, -0.2583376219896294, -1.1861255209934645, -1.126346645163544, 0.8779418779237168, 1.1459805485308643, -0.3723419154137388, 1.5308138241404614, -0.23427338699017833, -0.28336524773447785, 9.473374409504813e-3, 1.1599779938143304)
|
891d5e620eaeadb35a494e381c6b39e984f1e19f | 75ce364ad9f9946cda3e437ba094103fd6b55f6f | /spammer.R | 6ea926d43f72950ab08dee6caa89c7423a8fbff9 | [] | no_license | LiShengHZ/R-spam | a3cdab41f1069ac2e7813affbe58fd886f6d03ed | c492649afeec618f8b3dee4123ee056201108d5c | refs/heads/master | 2021-01-15T13:33:57.207000 | 2014-05-09T13:20:09 | 2014-05-09T13:20:09 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,985 | r | spammer.R | library(tm)
library(ggplot2)
# train classifier
# Corpus directories: the *_2 folders hold the second (held-out) data sets.
spam.path <- "data/spam/"
spam2.path <- "data/spam_2/"
easyham.path <- "data/easy_ham/"
easyham2.path <- "data/easy_ham_2/"
hardham.path <- "data/hard_ham/"
hardham2.path <- "data/hard_ham_2/"
get.msg <- function(path) {
  # Read one raw email file and return its body as a single string.
  # The body starts right after the first fully blank line (the RFC 822
  # header/body separator).
  con <- file(path, open="rt", encoding="latin1")
  # Guarantee the connection is closed even if readLines() errors
  # (the original leaked it on error).
  on.exit(close(con), add = TRUE)
  text <- readLines(con)
  # email body beginds after first full line break
  from <- which(text=="")[1]+1
  to <- length(text)
  # Robustness: a message with no blank separator (or nothing after it)
  # previously crashed seq(); treat its body as empty instead.
  if (is.na(from) || from > to) {
    return("")
  }
  msg <- text[seq(from, to, 1)]
  return(paste(msg, collapse="\n"))
}
# create one vector w/ all text content
spam.docs <- dir(spam.path)
# ignore some dataset files
spam.docs <- spam.docs[which(spam.docs!="cmds")]
# will create one huge vector and set filenames as names
all.spam <- sapply(spam.docs, function(p) get.msg(paste(spam.path, p, sep="")))
all.spam <- all.spam[seq(1, 500, 1)]
# get Term document matrix (TDM) [n terms; m docs]
get.tdm <- function(doc.vec) {
  # Build a tm TermDocumentMatrix (n terms x m documents) from a character
  # vector of documents, dropping stopwords, punctuation and numbers, and
  # keeping terms that occur in at least two documents.
  tdm.control <- list(stopwords = TRUE,
                      removePunctuation = TRUE,
                      removeNumbers = TRUE,
                      minDocFreq = 2)
  corpus <- Corpus(VectorSource(doc.vec))
  TermDocumentMatrix(corpus, tdm.control)
}
# Build the spam training frame: one row per term with its raw count,
# density (share of all term occurrences) and occurrence (share of spam
# documents containing the term).
spam.tdm <- get.tdm(all.spam)
spam.matrix <- as.matrix(spam.tdm)
spam.counts <- rowSums(spam.matrix)
spam.df <- data.frame(cbind(names(spam.counts), as.numeric(spam.counts)), stringsAsFactors = FALSE)
names(spam.df) <- c("term", "frequency")
spam.df$frequency <- as.numeric(spam.df$frequency)
# Fraction of spam documents in which each term appears.
# seq_len() replaces 1:nrow() (safe for zero rows); vapply pins the numeric
# return type; sum(x > 0) replaces length(which(x > 0)).
spam.occurrence <- vapply(seq_len(nrow(spam.matrix)), function(i) {
  sum(spam.matrix[i, ] > 0) / ncol(spam.matrix)
}, numeric(1))
# Share of all term occurrences accounted for by each term.
spam.density <- spam.df$frequency/sum(spam.df$frequency)
# The classifier uses occurrence rather than density: raw densities are
# dominated by boilerplate tokens such as HTML tags ("tr" etc.).
spam.df <- transform(spam.df, density=spam.density, occurrence=spam.occurrence)
# second approach is better, because some chars like tr appear
# often (html tags), they would destroy the filter weighting, so
# therefore we use occurrence instead of density
# now balance classifier with ham messages
# 2. create training data from ham
# Balance the classifier with the easy-ham corpus: same pipeline as for
# spam (load, cap at 500, TDM, per-term density and occurrence).
easyham.docs <- dir(easyham.path)
easyham.docs <- easyham.docs[easyham.docs != "cmds"]
all.easyham <- vapply(easyham.docs, function(p) get.msg(paste(easyham.path, p, sep="")), character(1))
all.easyham <- all.easyham[seq_len(500)]
easyham.tdm <- get.tdm(all.easyham)
easyham.matrix <- as.matrix(easyham.tdm)
easyham.counts <- rowSums(easyham.matrix)
easyham.df <- data.frame(cbind(names(easyham.counts), as.numeric(easyham.counts)), stringsAsFactors = FALSE)
names(easyham.df) <- c("term", "frequency")
easyham.df$frequency <- as.numeric(easyham.df$frequency)
# Fraction of ham documents containing each term (seq_len/vapply as above).
easyham.occurrence <- vapply(seq_len(nrow(easyham.matrix)), function(i) {
  sum(easyham.matrix[i, ] > 0) / ncol(easyham.matrix)
}, numeric(1))
easyham.density <- easyham.df$frequency/sum(easyham.df$frequency)
easyham.df <- transform(easyham.df, density=easyham.density, occurrence=easyham.occurrence)
# print sorted by strongest indicators
# print(head(spam.df[with(spam.df, order(-occurrence)),], 20))
# print(head(easyham.df[with(easyham.df, order(-occurrence)),], 20))
# maybe there's something wrong with the data so far
classify.email <- function(path, training.df, prior = 0.5, c = 1e-6) {
  # Score one message under the naive-Bayes-style model: prior times the
  # product of per-term occurrence probabilities, with unseen terms
  # contributing the small constant `c` each.
  body <- get.msg(path)
  term.freq <- rowSums(as.matrix(get.tdm(body)))
  # Terms the message shares with the training vocabulary.
  known.terms <- intersect(names(term.freq), training.df$term)
  # Guard clause: nothing in common -- every term gets the unseen penalty.
  if (length(known.terms) == 0) {
    return(prior * c^(length(term.freq)))
  }
  occ <- training.df$occurrence[match(known.terms, training.df$term)]
  # NOTE: this raw product can underflow to 0 for long messages;
  # summing log-probabilities would be numerically safer.
  prior * prod(occ) * c^(length(term.freq) - length(known.terms))
}
# check classifier
# Sanity-check the classifier on the "hard ham" corpus: legitimate mail that
# superficially resembles spam, so this is where false positives show up.
hardham.docs <- dir(hardham.path)
# NOTE(review): this filters "cmd" while the other corpora filter "cmds" --
# kept as-is to preserve behaviour, but it looks like a typo.
hardham.docs <- hardham.docs[hardham.docs != "cmd"]
hardham.spamtest <- vapply(hardham.docs, function(p) classify.email(paste(hardham.path, p, sep=""), training.df=spam.df), numeric(1))
hardham.hamtest <- vapply(hardham.docs, function(p) classify.email(paste(hardham.path, p, sep=""), training.df=easyham.df), numeric(1))
# The comparison is already logical; ifelse(x, TRUE, FALSE) was a no-op.
hardham.res <- hardham.spamtest > hardham.hamtest
print(summary(hardham.res))
# now test the classifier against all messages
spam.classifier <- function(path) {
  # Score the message at `path` against both training sets and return
  # c(spam score, ham score, 1/0 spam verdict).
  scores <- c(classify.email(path, spam.df),
              classify.email(path, easyham.df))
  verdict <- if (scores[1] > scores[2]) 1 else 0
  c(scores, verdict)
}
#print("a")
#ra <- spam.classifier(easyham2.path)
#print("b")
#rb <- spam.classifier(hardham2.path)
#print("c")
#rc <- spam.classifier(spam2.path)
# print(summary(ra))
# print(summary(rb))
# print(summary(rc)) |
0889ad726494d59cf458fb7b98be778a26859b63 | 6bce4504bc7cc7ea5bff83c6c5b60aea8a39187e | /man/freewaySpeedMap.Rd | 4ffc592947a118acf0eac344a494008c3ef12003 | [] | no_license | bpb824/portalr | 38d10bc4424f629338c1fc03e1759bd444e7fd0b | 26d574a1febfb6c472045d9e33095e734fd06c34 | refs/heads/master | 2020-05-30T11:41:37.127217 | 2015-10-07T22:23:34 | 2015-10-07T22:23:34 | 41,750,697 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 978 | rd | freewaySpeedMap.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/plotting.R
\name{freewaySpeedMap}
\alias{freewaySpeedMap}
\title{freewaySpeedMap}
\usage{
freewaySpeedMap(con, corridorID, startDate, endDate, weekdays = TRUE,
outputPng = NULL)
}
\arguments{
\item{con}{database connection PORTAL PostgreSQL database}
\item{corridorID}{ID number of the corridor to plot. See 'corridors' table to select ID for plotting.}
\item{startDate}{Start date of data to query (YYYY-MM-DD format)}
\item{endDate}{End date of data to query (YYYY-MM-DD format)}
\item{weekdays}{Boolean indicating whether to subset data to weekdays. Defaults to TRUE.}
\item{outputPng}{.png file path of output plot. Default is NULL; if NULL plots to current device (i.e. RStudio plot device).}
}
\value{
None
}
\description{
Produces a speed-based heatmap (AKA brainscan plot) for a corridor with PORTAL's freeway data system. See Freeway Speed Map vignette for example usage.
}
|
4ea7c6a6f7be2e563acc406a367d2b112bcf563a | 0dd7ba5c65f37a4674f6c5f57620af3cb4a28e81 | /apps/CVShiny/refresh.R | a931fcafc32620aa72cc9c1b2e24a78882c825a5 | [] | no_license | uwban/cvapps | 2a7d86096c579392b47fb2e57270a7bad4fcb17d | efdf0f702c1ee53ccb7db4d4a1a30d13f28cc939 | refs/heads/master | 2020-04-16T16:15:43.372554 | 2018-07-05T19:13:34 | 2018-07-05T19:13:34 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,809 | r | refresh.R | library(data.table)
library(magrittr)
library(pool)
library(RPostgreSQL)
library (feather)
# Writable connection pool to the cvponl PostgreSQL database.
# Credentials are intentionally blank here -- presumably stripped before
# commit; they must be supplied for this script to run.
cvponl_write <- dbPool(drv = RPostgreSQL::PostgreSQL(),
                       host = "shiny.hc.local",
                       dbname = "cvponl",
                       user = "",
                       password = "")
#make sure this runs after a database and/or meddra update
write_feather_files <- function() {
  # Regenerate the feather files backing the CVShiny autocomplete inputs
  # (brand names, ingredients, SMQ/PT/SOC reaction terms).
  # NOTE(review): `meddra_and_date` is not defined anywhere in this file;
  # it presumably comes from the calling environment -- confirm before use.
  # `max_date` is extracted but never used below.
  max_date <- meddra_and_date %>%
    `[[`(1)
  max_meddra <- meddra_and_date %>%
    `[[`(2)
  # Lazy dplyr references to the current-schema tables (tbl()/in_schema()
  # are assumed to be attached by the calling app; not loaded here).
  cv_reports <- tbl(cvponl_write, in_schema("current2", "reports_table"))
  cv_report_drug <- tbl(cvponl_write, in_schema("current2", "report_drug" )) #%>%
  #select(report_id, drug_product_id, drugname, druginvolv_eng, indication_name_eng)
  cv_drug_product_ingredients <- tbl(cvponl_write, in_schema("current2", "drug_product_ingredients")) #%>%
  #select(active_ingredient_name, drugname, drug_product_id)
  # MedDRA mapping table for the requested version, e.g. "v.20.1" -> "v_20_1".
  cv_reactions <- tbl(cvponl_write, in_schema("meddra", gsub('\\.', '_', max_meddra)))
  cv_reports_temp <- cv_reports %>%
    select(report_id, seriousness_eng, death)
  cv_report_drug %<>% left_join(cv_reports_temp, "report_id" = "report_id")
  cv_reactions %<>% left_join(cv_reports_temp, "report_id" = "report_id")
  #following Queries are used to generate autocomplete lists
  topbrands <- cv_report_drug %>%
    distinct(drugname) %>%
    as.data.frame() %>%
    `[[`(1) %>%
    sort() %>%
    `[`(-c(1,2))%>% #dropping +ARTHRI-PLUS\u0099 which is problematic
    as.data.frame()
  topings_cv <- cv_drug_product_ingredients %>%
    distinct(active_ingredient_name) %>%
    as.data.frame() %>%
    `[[`(1) %>%
    sort() %>%
    as.data.frame()
  smq_choices <- cv_reactions %>%
    distinct(smq_name) %>%
    as.data.frame() %>%
    filter(!is.na(smq_name)) %>%
    `[[`(1) %>%
    sort()
  # The PT list also includes the SMQ names so users can search either.
  pt_choices <- cv_reactions %>%
    distinct(pt_name_eng) %>%
    as.data.frame() %>%
    `[[`(1) %>%
    c(smq_choices) %>%
    sort() %>%
    as.data.frame()
  smq_choices %<>% as.data.frame()
  soc_choices <- cv_reactions %>%
    distinct(soc_name_eng) %>%
    as.data.frame() %>%
    `[[`(1) %>%
    sort() %>%
    as.data.frame()
  # Write each list under <cwd>/apps/CVShiny/feather_files.
  directory <- getwd()
  topbrands_path <- paste0(directory, '/apps/CVShiny/feather_files/topbrands.feather')
  topings_cv_path <- paste0(directory, '/apps/CVShiny/feather_files/topings_cv.feather')
  smq_choices_path <- paste0(directory, '/apps/CVShiny/feather_files/smq_choices.feather')
  pt_choices_path <- paste0(directory, '/apps/CVShiny/feather_files/pt_choices.feather')
  soc_choices_path <- paste0(directory, '/apps/CVShiny/feather_files/soc_choices.feather')
  dir.create(file.path(directory, 'apps/CVShiny/feather_files'))
  file.create(topbrands_path)
  file.create(topings_cv_path)
  file.create(smq_choices_path)
  file.create(pt_choices_path)
  file.create(soc_choices_path)
  write_feather(topbrands, topbrands_path)
  write_feather(topings_cv, topings_cv_path)
  write_feather(smq_choices, smq_choices_path)
  write_feather(pt_choices, pt_choices_path)
  write_feather(soc_choices, soc_choices_path)
}
#categorizes into age groups, can't use what is in the reports table as is because it has a lot of NULL values
#INPUT: cv_reports: table
age_group_clean <- function(cv_reports){
  # Derive a clean age-group column from age in years (age_y); the raw
  # age_group_eng column contains too many NULL values to use directly.
  # case_when is evaluated top-down, so each band only needs its upper bound.
  cv_reports %>%
    mutate(age_group_clean = case_when(
      is.na(age_y)      ~ "Unknown",
      age_y <= 25 / 365 ~ "Neonate",
      age_y < 1         ~ "Infant",
      age_y < 13        ~ "Child",
      age_y < 18        ~ "Adolescent",
      age_y <= 65       ~ "Adult",
      age_y > 65        ~ "Elderly",
      # Unreachable for numeric age_y, kept for parity with the original
      # fallback; as.character() keeps the output type consistent.
      TRUE              ~ as.character(age_group_eng)
    ))
}
#get the file name of most recent meddra folder. Should be in the form /home/shared/MedDRA/meddra_20_1_english
#parses the version number v.20.1 from the filename
#RETURN: c(meddra_version, meddra_path)
meddra_parse <- function(){
  # Locate the newest MedDRA release folder under /home/shared/MedDRA
  # (folders are named like "meddra_20_1_english") and derive its version
  # strings.  Returns c(version label "v.20.1", folder name, full path,
  # short name "v_20_1").
  # NOTE(review): max() is a lexical maximum, so single-digit major versions
  # would sort incorrectly -- fine while versions stay two-digit.
  newest_dir <- max(list.files(path='/home/shared/MedDRA'))
  # "meddra_20_1_english" -> "v_20_1"
  short_name <- gsub('_english', '', gsub('meddra', 'v', newest_dir))
  # "v_20_1" -> "v.20.1"
  version_label <- gsub('_', '.', short_name)
  full_path <- paste0('/home/shared/MedDRA/', newest_dir)
  c(version_label, newest_dir, full_path, short_name)
}
#creates new meddra into the new schema that was made
#INPUT: meddra_list: list of three objects where the first is the version and the second is the file and third is path
meddra_make <- function(meddra_list, con){
  # Build the meddra.<short name> mapping table (PT -> HLT/SOC/SMQ names)
  # from the raw MedDRA ascii distribution files.
  # `meddra_list` is the vector returned by meddra_parse():
  #   [1] version label, [2] folder name, [3] full path, [4] short name.
  # NOTE(review): some statements below use the global `cvponl_write`
  # rather than the `con` argument -- confirm that is intentional.
  dbGetQuery(con, "CREATE SCHEMA IF NOT EXISTS meddra")
  # get tables from postgresql db. current is the schema used, use format: schema.tablename to access tables
  # NOTE(review): cv_reports is fetched but never used in this function.
  cv_reports <- dbGetQuery(con, "SELECT * FROM remote.reports")
  #as per specifications in dist_file_format_20_1.pdf (tablename: filename), Select only columns necessary
  #meddra_hlt_pref_comp: hlt_pt.asc
  meddra_hlt_pref_comp <- fread(paste0(meddra_list[3], '/MedAscii/hlt_pt.asc'), sep = '$') %>%
    select('V1','V2') %>%
    plyr::rename(c('V1' = 'hlt_code', 'V2' = 'pt_code'))
  meddra_hlt_pref_term <- fread(paste0(meddra_list[3], '/MedAscii/hlt.asc'), sep = '$') %>%
    select('V1','V2') %>%
    plyr::rename(c('V1' = 'hlt_code', 'V2' = 'hlt_name'))
  #meddra_pref_term: pt.asc
  meddra_pref_term <- fread(paste0(meddra_list[3], '/MedAscii/pt.asc'), sep = '$') %>%
    select('V1','V4') %>%
    plyr::rename(c('V1' = 'pt_code', 'V4' = 'pt_soc_code'))
  #meddra_smq_content: smq_content.asc TAKE EXTRA CARE WHEN JOINING SMQ_CONTENT TO OTHER THINGS
  meddra_smq_content <- fread(paste0(meddra_list[3], '/MedAscii/smq_content.asc'), sep = '$') %>%
    select('V1', 'V2') %>%
    plyr::rename(c('V1' = 'smq_code', 'V2' = 'term_code'))
  #meddra_smq_list: smq_list.asc
  meddra_smq_list <- fread(paste0(meddra_list[3], '/MedAscii/smq_list.asc'), sep = '$') %>%
    select('V1', 'V2') %>%
    plyr::rename(c('V1' = 'smq_code', 'V2' = 'smq_name'))
  #map hlt_name and smq_name to pt_soc_code which we can join in reactions table by: soc_code = pt_soc_code
  final_table <- left_join(meddra_hlt_pref_term, meddra_hlt_pref_comp, by = "hlt_code") %>%
    left_join(meddra_pref_term, by = "pt_code") %>%
    left_join(meddra_smq_content, by = c("pt_code" = "term_code")) %>%
    left_join(meddra_smq_list, by = "smq_code")
  #get table to with soc_code to join with final_table (complete map)
  # NOTE(review): reactions_soc is computed but no longer uploaded (see the
  # comment on dbWriteTable below) -- dead work unless re-enabled.
  reactions_soc <- dbGetQuery(cvponl_write, "SELECT reaction_id, report_id, pt_code, pt_name_eng, pt_name_fr, soc_code, soc_name_fr, soc_name_eng FROM remote.reactions") %>%
    left_join(final_table, na_matches = 'never', by = "pt_code")
  #upload table (recently changed from reactions_soc to final_table)
  dbWriteTable(cvponl_write, c("meddra", meddra_list[4]), final_table, overwrite = FALSE, temporary = FALSE, row.names = FALSE)
  #create indices for values used later: this might not be a complete list
  # NOTE(review): final_table has no report_id column, so the first index
  # statement below looks like it would fail -- verify against a live run.
  dbGetQuery(con, paste0("CREATE INDEX ON meddra.", meddra_list[4], " (report_id)"))
  dbGetQuery(con, paste0("CREATE INDEX ON meddra.", meddra_list[4], " (smq_name)"))
  dbGetQuery(con, paste0("CREATE INDEX ON meddra.", meddra_list[4], " (pt_name_eng)"))
  dbGetQuery(con, paste0("CREATE INDEX ON meddra.", meddra_list[4], " (soc_name_eng)"))
}
#updates database table with the maximum date and current meddra version
#INPUT: max_date; max date of a report in the remote table
# : con; a connection/pool
date_update <- function(max_date, con){
  # Record a refresh in date_refresh.history (date, archive-schema name and
  # MedDRA version) and return the dated archive-schema name,
  # e.g. "refresh_2020_10_05".
  schema_name <- paste0("refresh_", gsub("-", "_", toString(max_date)))
  # Current MedDRA version label from the newest release folder on disk.
  meddra_version <- meddra_parse() %>%
    `[`(1)
  dbGetQuery(con, "CREATE SCHEMA IF NOT EXISTS date_refresh")
  # BUG FIX: the column was misspelled "datintreceivede", which broke every
  # later query reading max(datintreceived) from this table.
  history_table <- data.frame(datintreceived=max_date,
                              schema=schema_name,
                              meddra_version=meddra_version,
                              stringsAsFactors = FALSE)
  dbWriteTable(con, c("date_refresh", "history"),
               history_table, overwrite = FALSE, temporary = FALSE, row.names = FALSE, append=TRUE)
  return(schema_name)
}
#get the most recent date of a report published in remote schema, used in check function for reactiveTimer
# TRUE when the remote schema has newer reports than the last recorded
# refresh (used as the reactiveTimer check in the app).
# NOTE(review): `cvponl_pool` is not defined in this file -- every other
# function here uses `cvponl_write`; confirm the read pool is created by
# the calling app before this runs.
dateCheck <- function() {
  #get the most recent date of a report published in remote schema
  remote_date <- dbGetQuery(cvponl_pool, "SELECT * FROM remote.reports") %>%
    dplyr::summarize(max_date = max(datintreceived)) %>%
    `[[`(1)
  # Date of the last refresh we have already processed.
  current_date <- dbGetQuery(cvponl_pool, "SELECT * FROM date_refresh.history") %>%
    dplyr::summarize(max_date = max(datintreceived)) %>%
    `[[`(1)
  if (current_date >= remote_date){
    return(FALSE)
  }
  else{
    return(TRUE)
  }
}
#useful for development
close_all_con <- function() {
  # Development helper: disconnect every open PostgreSQL connection held by
  # the driver.  (A stray console-paste "+" before dbDisconnect was removed.)
  all_cons <- dbListConnections(RPostgreSQL::PostgreSQL())
  for (con in all_cons) {
    dbDisconnect(con)
  }
}
#could break this down into smaller functions, but it only has one use case
#this function is the main function that calls if the check function fails (I GUESS), need to move the if statements
#therefore it is only called if date in remote has changed! Calling refresh() should update
refresh <- function() {
  # Full refresh pipeline: when the remote schema has newer reports, archive
  # the old "current2" schema under a dated name, rebuild it from "remote",
  # rebuild the MedDRA mapping table if a newer dictionary is on disk,
  # create indices, and regenerate the feather autocomplete files.
  # Uses the module-level pool `cvponl_write`.
  #TODO: getting date from here would be the fastest way to find out if the current schema is out of date
  # Date of the last processed refresh, from the tracking schema.
  current_date <- dbGetQuery(cvponl_write, "SELECT * FROM date_refresh.history") %>%
    dplyr::summarize(max_date = max(datintreceived)) %>%
    `[[`(1)
  # Most recent report date available in the remote schema.
  remote_date <- dbGetQuery(cvponl_write, "SELECT * FROM remote.reports") %>%
    dplyr::summarize(max_date = max(datintreceived)) %>%
    `[[`(1)
  # CREATE INDEX statements are accumulated here and executed at the end.
  index_list <- c()
  # Rebuild the data schema only when remote has changed.
  if(current_date != remote_date){
    # Record the refresh and archive the outgoing schema under a dated name.
    schema_new <- date_update(current_date, cvponl_write)
    dbGetQuery(cvponl_write, paste0("ALTER SCHEMA current2 RENAME TO ", schema_new))
    schema_name <- "current2"
    # Every table of the remote schema gets copied into the rebuilt schema.
    remote_table_list <- dbGetQuery(cvponl_write, "SELECT DISTINCT table_name
                                    FROM information_schema.tables WHERE table_schema = 'remote'") %>%
      `[[`(1)
    # BUG FIX: a space was missing after EXISTS, producing the invalid SQL
    # "CREATE SCHEMA IF NOT EXISTScurrent2".
    dbGetQuery(cvponl_write, paste0("CREATE SCHEMA IF NOT EXISTS ", schema_name))
    query_list <- lapply(remote_table_list, function(x) paste0("CREATE TABLE ", schema_name, ".", x, " AS SELECT * FROM remote.", x))
    # Run each CREATE TABLE.  (Previously lapply(..., dbGetQuery, con=...)
    # relied on fragile partial argument matching of "con".)
    lapply(query_list, function(q) dbGetQuery(cvponl_write, q))
    # Edit reports table: add the cleaned age-group column.
    # BUG FIX: a dangling %>% at the end of this assignment fused it with
    # the next statement and made refresh() error at runtime.
    reports <- dbGetQuery(cvponl_write, "SELECT * FROM remote.reports")
    updated_reports <- age_group_clean(reports)
    # Workaround: write the augmented copy as reports_table instead of
    # adding the column upstream, so the schema keeps the original reports
    # table alongside it.
    dbWriteTable(cvponl_write, c(schema_name, "reports_table"), value = updated_reports, append = FALSE, row.names = FALSE)
    dbGetQuery(cvponl_write, paste0("ALTER TABLE ", schema_name, ".reports_table ALTER COLUMN datintreceived TYPE date"))
    # Queue an index on every column of the tables the app queries.
    index_list <- c(index_list, dbGetQuery(cvponl_write, paste0("SELECT DISTINCT column_name
                    FROM information_schema.columns WHERE table_schema = '", schema_name, "' AND table_name = 'reports_table'")) %>%
                      `[[`(1) %>%
                      lapply(function(x) paste0('CREATE INDEX ON ', schema_name, '.reports_table', ' (', x, ')')))
    index_list <- c(index_list, dbGetQuery(cvponl_write, paste0("SELECT DISTINCT column_name
                    FROM information_schema.columns WHERE table_schema = '", schema_name, "' AND table_name = 'report_drug'")) %>%
                      `[[`(1) %>%
                      lapply(function(x) paste0('CREATE INDEX ON ', schema_name, '.report_drug', ' (', x, ')')))
    index_list <- c(index_list, dbGetQuery(cvponl_write, paste0("SELECT DISTINCT column_name
                    FROM information_schema.columns WHERE table_schema = '", schema_name, "' AND table_name = 'drug_product_ingredients'")) %>%
                      `[[`(1) %>%
                      lapply(function(x) paste0('CREATE INDEX ON ', schema_name, '.drug_product_ingredients', ' (', x, ')')))
  }
  # Rebuild the MedDRA mapping table when a newer dictionary is on disk.
  current_meddra <- dbGetQuery(cvponl_write, "SELECT * FROM date_refresh.history") %>%
    dplyr::summarize(max_med = max(meddra_version)) %>%
    `[[`(1)
  meddra <- meddra_parse()
  most_recent_meddra <- meddra[1]
  if (most_recent_meddra > current_meddra) {
    meddra_make(meddra, cvponl_write)
    index_list <- c(index_list, dbGetQuery(cvponl_write, paste0("SELECT DISTINCT column_name
                    FROM information_schema.columns WHERE table_schema = 'meddra' AND table_name = '",meddra[4],"'")) %>%
                      `[[`(1) %>%
                      lapply(function(x) paste0('CREATE INDEX ON meddra.', meddra[4], ' (', x, ')')))
  }
  if (!is.null(index_list)){
    # Create indices on all the accumulated columns (overkill, but cheap).
    lapply(index_list, function(q) dbGetQuery(cvponl_write, q))
  }
  # Finish up by regenerating the autocomplete feather files.
  write_feather_files()
}
# NOTE(review): this redefines close_all_con(), which already appears above
# -- the duplicate is kept (last definition wins) but should be removed.
close_all_con <- function() {
  # Disconnect every open PostgreSQL connection held by the driver.
  # (The stray console-paste "+" before dbDisconnect was removed.)
  all_cons <- dbListConnections(RPostgreSQL::PostgreSQL())
  for (con in all_cons) {
    dbDisconnect(con)
  }
}
# Run the full refresh when this file is sourced.
refresh()
e2c0df639ccc1b918351b3855b3d6a4e71947455 | 785c23c1f961e40a5c4b5c168ed5b43e02806d0b | /plot1.R | 95cc87ba59713a62b0476f1900c33f5f88e56d02 | [] | no_license | jhuno137/ExData_Plotting1 | 4df0539c29e5071828a20739938322d74a09e26b | 59a6a6e6f1eaf1bf2d5a06c674e8c443637fc83d | refs/heads/master | 2020-12-29T18:48:00.227285 | 2016-05-14T14:35:00 | 2016-05-14T14:35:00 | 58,776,675 | 0 | 0 | null | 2016-05-13T22:41:51 | 2016-05-13T22:41:51 | null | UTF-8 | R | false | false | 1,382 | r | plot1.R | # Author : Antonio Camacho
# Dataset : Electric power consumption
# File : https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip
#
# We will only be using data from the dates 2007-02-01 and 2007-02-02.
# In order to get the line number for the fist row regarding 2007-02-01, the
# following bash command has been used:
# $ grep -n "^1/2/2007.*" household_power_consumption.txt | head -1
# 66638:1/2/2007;00:00:00;0.326;0.128;243.150;1.400;0.000;0.000;0.000
# Similarly, for the last line regarding 2007-02-02:
# grep -n "^2/2/2007.*" rdir/data/household_power_consumption.txt | tail -1
# 69517:2/2/2007;23:59:00;3.680;0.224;240.370;15.200;0.000;2.000;18.000
# Therefore, the number of rows is 69517 - 66638 + 1 = 2880 which is the number of
# minutes in two days (60*24*2)
# Read data
# Read only the two target days: 2880 rows (= 60*24*2 minutes) starting at
# line 66638; see the header comment above for how the offsets were derived.
hpc <- read.table(
    "data/household_power_consumption.txt",
    sep=";",
    na.strings = "?",
    skip = 66637, # 66638 - 1 (starting row)
    nrows = 2880) # 60*24*2
# Grab the header row separately (the data read above skipped past it).
names <- read.table(
    "data/household_power_consumption.txt",
    header = FALSE,
    sep=";",
    stringsAsFactors = FALSE,
    nrows = 1)
# NOTE(review): `names` shadows base::names; names(hpc)<- still resolves to
# the replacement function, so this works, but a clearer variable name
# would help.
names(hpc) <- tolower(unlist(names))
# Plot 1: histogram of global active power, written to plot1.png (480x480).
png(
    filename = "./plot1.png",
    width = 480,
    height = 480,
    units = "px")
hist(hpc$global_active_power,col="red",xlab = "Global Active Power (kilowatts)",main = "Global Active Power")
dev.off()
|
ba1cc5696d749ba238b5dfa6dc17c140232eaa87 | ef499e12563a76de0046fea7b0160207758d7f32 | /covid_calcs_oct5_week.R | 642b3c7668a32f8aade136b38b59efc944979ed4 | [] | no_license | benwansell/COVID-activity | 8af5adf8d652d4484f36adb3c06968275b97d585 | 4deec13ff142ec724bee00e820f22ca4273525e7 | refs/heads/master | 2023-03-02T07:04:33.958364 | 2021-02-04T20:26:33 | 2021-02-04T20:26:33 | 255,839,707 | 2 | 1 | null | 2020-04-15T15:07:49 | 2020-04-15T07:33:17 | R | UTF-8 | R | false | false | 3,881 | r | covid_calcs_oct5_week.R | # Use Government API to download COVID case data
remotes::install_github("publichealthengland/coronavirus-dashboard-api-R-sdk")
# Use https://coronavirus.data.gov.uk/developers-guide#sdks
library(tidyverse)
library(ukcovid19)
# Column structure passed to ukcovid19::get_data(): response field ->
# output column name (an identity mapping here).
# (`<-` replaces `=` for top-level assignment, per R convention.)
cases_and_deaths <- list(
  date = "date",
  areaName = "areaName",
  areaCode = "areaCode",
  newCasesBySpecimenDate = "newCasesBySpecimenDate",
  cumCasesBySpecimenDate = "cumCasesBySpecimenDate",
  cumCasesBySpecimenDateRate = "cumCasesBySpecimenDateRate",
  newDeaths28DaysByPublishDate = "newDeaths28DaysByPublishDate",
  cumDeaths28DaysByPublishDate = "cumDeaths28DaysByPublishDate",
  cumDeaths28DaysByPublishDateRate = "cumDeaths28DaysByPublishDateRate"
)
# Fetch one day of lower-tier local authority (ltla) records from the PHE
# dashboard API.  This helper replaces eleven copy-pasted
# filter/get_data pairs.
get_ltla_day <- function(day) {
  get_data(
    filters = c("areaType=ltla", paste0("date=", day)),
    structure = cases_and_deaths
  )
}

# Pull the seven days used downstream (2020-09-29 .. 2020-10-05) and stack
# them, oldest first, exactly as the original rbind() did.
# NOTE: the original script also downloaded 2020-09-24 .. 2020-09-28, but
# those objects were never used anywhere, so the dead downloads are dropped.
week_days <- format(seq(as.Date("2020-09-29"), as.Date("2020-10-05"), by = "day"))
uk_covid_data <- do.call(rbind, lapply(week_days, get_ltla_day))
# Interactive inspection of the stacked daily data -- View() is an RStudio
# artifact and does nothing useful in a batch run.
uk_covid_data %>%
  arrange(areaName, date) %>%
  group_by(areaName) %>% View()
# Per-authority weekly totals; the week rate is the weekly case count per
# 100,000, with population back-derived from cumulative count / cumulative
# rate.  Only the final day's row (Oct 5) is kept per authority.
# NOTE(review): the result stays grouped by areaName (no ungroup()).
uk_covid_data <- uk_covid_data %>%
  group_by(areaName) %>%
  mutate(week_cases = sum(newCasesBySpecimenDate),
         week_cases_rate = week_cases/(cumCasesBySpecimenDate/cumCasesBySpecimenDateRate),
         week_deaths = sum(newDeaths28DaysByPublishDate) ) %>%
  filter(date == "2020-10-05") %>%
  select(-c(newCasesBySpecimenDate, newDeaths28DaysByPublishDate))
# Scatter of weekly vs cumulative case rates, excluding Northern Ireland
# (area codes starting "N"); Russell Group university towns in black.
uk_covid_data %>%
  filter(str_sub(areaCode, 1, 1)!="N") %>%
  mutate(russell = if_else(areaName %in% c("Nottingham", "Newcastle upon Tyne", "Birmingham", "County Durham", "Manchester",
                                           "Leeds", "Sheffield", "Southampton", "Exeter", "Liverpool", "Cardiff",
                                           "City of Edinburgh", "Glasgow City", "Bristol, City of", "Coventry", "York"), "black", "grey")) %>%
  ggplot(aes(x = cumCasesBySpecimenDateRate, y = week_cases_rate))+
  geom_text(aes(label=areaName, color = russell))+
  scale_color_manual(values = c("black", "grey"))+
  xlab("Cumulative Case Rate per 100,000")+ylab("Weekly Case Rate per 100,000 (Oct 5)")+
  theme_classic()+
  theme(legend.position = "none")
|
f5fcd5083fdeb39f7d86047b19e94bc939699b11 | 4900d10bb453f88bc963fde53ebac70da9a4ca23 | /dieroller/R/plot-roll.R | 25fbca481fe674c9aac9ba0314371c6f636dde59 | [] | no_license | lehman-brothers/stat133_sp18 | 3229a8a51c55870a9dc543178a216dec1f2313c5 | bee24e3d1525ba502e17a6f9a55ae4dd813306c7 | refs/heads/master | 2020-04-13T09:39:34.935531 | 2018-12-26T04:54:36 | 2018-12-26T04:54:36 | 163,117,145 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 951 | r | plot-roll.R | #' @title plot-roll function
#' @description returns a barplot with the frequency of each die face appearing over the number of rolls
#' @param x a die-roll object with components \code{rolls} (the rolled faces) and \code{total} (number of rolls)
#' @return (invisibly) the bar midpoints returned by \code{barplot}
plot_roll <- function(x){
  # Relative frequency of each observed face.
  rel_freq <- table(x$rolls) / x$total
  # Draw the plot; return barplot()'s value invisibly, as plotting
  # functions should (the original left two dead local assignments).
  invisible(barplot(rel_freq, main = "Frequencies in a series of die rolls", xlab = "sides of die", ylab = "relative frequencies"))
}
#plot_roll(fair_50)
one_freqs <- function(x) {
cumsum(x$rolls == x$rolls[1]) / x$total
}
two_freqs <- function(x) {
cumsum(x$rolls == x$rolls[2]) / x$total
}
three_freqs <- function(x) {
cumsum(x$rolls == x$rolls[3]) / x$total
}
four_freqs <- function(x) {
cumsum(x$rolls == x$rolls[4]) / x$total
}
five_freqs <- function(x) {
cumsum(x$rolls == x$rolls[5]) / x$total
}
six_freqs <- function(x) {
cumsum(x$rolls == x$rolls[6]) / x$total
}
|
5af5bdb92528a2334be86a67ee25ab371dc1836a | 360df3c6d013b7a9423b65d1fac0172bbbcf73ca | /FDA_Pesticide_Glossary/1,2,3,4,5-pentachlor.R | 40c0d0880732c0aad5110b973cb21faa46a6be78 | [
"MIT"
] | permissive | andrewdefries/andrewdefries.github.io | 026aad7bd35d29d60d9746039dd7a516ad6c215f | d84f2c21f06c40b7ec49512a4fb13b4246f92209 | refs/heads/master | 2016-09-06T01:44:48.290950 | 2015-05-01T17:19:42 | 2015-05-01T17:19:42 | 17,783,203 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 276 | r | 1,2,3,4,5-pentachlor.R | library("knitr")
library("rgl")
#knit("1,2,3,4,5-pentachlor.Rmd")
#markdownToHTML('1,2,3,4,5-pentachlor.md', '1,2,3,4,5-pentachlor.html', options=c("use_xhml"))
#system("pandoc -s 1,2,3,4,5-pentachlor.html -o 1,2,3,4,5-pentachlor.pdf")
knit2html('1,2,3,4,5-pentachlor.Rmd')
|
9b7415c5139fd3bb31fabf825b6d21bc70c1713e | 94b933c02458144f4534d7ac0591f23423c94d3b | /Code/RaceTracking/Basic Model Code/EstCode_PoisRaceBasic.R | 107f5f3b00014e371a7805b8abca36f0a0d961b8 | [
"MIT"
] | permissive | jeff-dotson/mouse-tracking | 41043e960f6976853a0b7165b26a1a1d92ff5d83 | 93633662c4bd300fd8e861679e173444b2d467ee | refs/heads/master | 2020-04-28T18:56:05.602509 | 2020-02-05T22:02:54 | 2020-02-05T22:02:54 | 175,494,384 | 1 | 0 | MIT | 2020-02-05T22:02:56 | 2019-03-13T20:31:14 | R | UTF-8 | R | false | false | 9,284 | r | EstCode_PoisRaceBasic.R | #* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
#
# Poisson race basic reponse time model - Estimation Code
# February 2016
#
#* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
library(gtools)
library(bayesm)
setwd("/Users/rogerbailey/Desktop/Projects/ESM/RaceTracking/Basic Model Code")
set.seed(77)
#Load data
load("Simdata_PoisRaceBasic.RData")
#Set MCMC variables
R=60000 #number of iterations
keep=20 #thinning
accup=50#interval size for updating RW step size
space=10
#Set priors for beta
nub=nbeta+3#dof for cov matrix of partworths
Vb=nub*diag(nbeta)#loc for cov of partworths
Ab=matrix(.01)#prec for betabar
bdoublebar=matrix(double(nbeta),nc=1)#mean of betabar distribution
#Set priors for delta and lambda
nudl=4+3#dof for cov matrix of deltas and lambdas
Vdl=nudl*diag(4)#loc for cov of deltas and lambdas
Adl=matrix(.01)#prec for deltalambdabar
dldoublebar=matrix(double(4),nc=1)#mean of deltalambdbar distribution
#Set priors for hit thresholds, K
lnthetabar=matrix(0)
Ath=matrix(.01)
#Set initial values
oldbbar=matrix(double(nbeta),nc=1)#initial betabar
oldbmat=matrix(double(nbeta*nresp),nr=nresp)#initial betas
olddlbar=matrix(double(4),nc=1)#initial deltalambdabar
olddlmat=matrix(double(4*nresp),nr=nresp)#initial deltas and lambdas
stepbdl=.2 #stepsize for beta, delta, and lambda RW steps
oldVb=diag(nbeta)#initial loc for cov of partworths
oldVbi=backsolve(chol(oldVb),diag(nbeta))#initial inv chol Vbeta
oldVdl=diag(4)#initial loc for cov of deltas and lambdas
oldVdli=backsolve(chol(oldVdl),diag(4))#initial inv chol Vdeltalambda
oldKmat=matrix(double(nresp)+1,nr=nresp)#initial hit thresholds
oldtheta=.01#initial rate for draw of hit thresholds
stepth=.1 #initial stepsize for theta RW
acceptpropbdl=double(accup)+.23 #running record of proportion of beta,lambda,delta draws accepted
acceptpropK=double(accup)+.23 #running record of proportion of K draws accepted
acceptpropth=double(accup)+.23 #running record of proportion of theta draws accepted
llike=matrix(double(nresp),nc=1) #log likelihood
#Setup storage
betadraws=array(double(nbeta*nresp*R/keep),dim=c(R/keep,nresp,nbeta))
betabardraws=matrix(double(nbeta*R/keep),nr=R/keep)
Vbetadraws=matrix(double(nbeta*nbeta*R/keep),nr=R/keep)
deltalambdadraws=array(double(4*nresp*R/keep),dim=c(R/keep,nresp,4))
deltalambdabardraws=matrix(double(4*R/keep),nr=R/keep)
Vdeltalambdadraws=matrix(double(16*R/keep),nr=R/keep)
Kdraws=matrix(double(nresp*R/keep),nr=R/keep)
thetadraws=matrix(double(R/keep),nr=R/keep)
llikes=matrix(double(nresp*R/keep),nr=R/keep)
accpropbdl=matrix(double(R/keep),nr=R/keep)
accpropK=matrix(double(R/keep),nr=R/keep)
accpropth=matrix(double(R/keep),nr=R/keep)
stsizebdl=matrix(double(R/keep),nr=R/keep)
stsizeth=matrix(double(R/keep),nr=R/keep)
#*************************************************************
#Setup functions
#function that returns the value of the prior for norm upper
logprior=function(beta,betabar,Vbi){
return((beta-betabar)%*%Vbi%*%t(beta-betabar)*(-.5))
}
#loglike function for choice/response time
loglikeyt=function(bdl,K,X,y,t){
baserates=exp(X%*%matrix(bdl[1:nbeta],nc=1))
temp1=colSums(matrix(baserates,nr=nalt))^-1#inverted "attractiveness"
temp2=exp(bdl[(nbeta+1)] + bdl[(nbeta+2)]*c(0:(ntask-1)))#"accessibility"
temp3=exp(bdl[(nbeta+3)] + bdl[(nbeta+4)]*c(0:(ntask-1)))#scaling
fvals=temp1*temp2+temp3
rates=baserates*rep(fvals,each=nalt)
choiceind=matrix(diag(nalt)[,y])
tvec=rep(t,each=nalt)
ll=0
for(m in 1:(ntask*nalt)){
if(choiceind[m]==1){
ll=ll+dgamma(tvec[m],shape=K,scale=rates[m],log=T)
}else{ll=ll+log(1-pgamma(tvec[m],shape=K,scale=rates[m]))}
}
return(ll)
}
#logprior function for K given theta
logpriorK=function(K,th){
return(dpois(K-1,th,log=T))
}
#function for determining the change in the step size
stepupdate=function(accprop){
step=1
if(is.na(accprop)){return(step)}else{
if(accprop<.21) {step=.99}
if(accprop<.19) {step=.95}
if(accprop<.15) {step=.85}
if(accprop<.10) {step=.7}
if(accprop>.25) {step=1.01}
if(accprop>.27) {step=1.05}
if(accprop>.3) {step=1.15}
if(accprop>.4) {step=1.35}
return(step)}
}
#*************************************************************
#begin MCMC routine
#set timer
itime = proc.time()[3]
for(r in 1:R){
accept=matrix(double(nresp*2),nr=nresp)
llikevec=matrix(double(nresp))
for(i in 1:nresp){
#draw proposal for betas, deltas, and lambdas
oldbdl=c(oldbmat[i,],olddlmat[i,])
newbdl=c(oldbdl[1:nbeta]+t(chol(Vb))%*%rnorm(nbeta)*stepbdl,
oldbdl[(nbeta+1):(nbeta+4)]+t(chol(Vdl))%*%rnorm(4)*stepbdl)
#calculate likelihood of choices/response times and priors
oldllikebdl=loglikeyt(oldbdl,oldKmat[i,],data[[i]]$X,data[[i]]$y,data[[i]]$time)
newllikebdl=loglikeyt(newbdl,oldKmat[i,],data[[i]]$X,data[[i]]$y,data[[i]]$time)
oldlprb=logprior(t(oldbdl[1:nbeta]),t(oldbbar),oldVbi)
newlprb=logprior(t(newbdl[1:nbeta]),t(oldbbar),oldVbi)
oldlprdl=logprior(t(oldbdl[(nbeta+1):(nbeta+4)]),t(olddlbar),oldVdli)
newlprdl=logprior(t(newbdl[(nbeta+1):(nbeta+4)]),t(olddlbar),oldVdli)
diffvecbdl=newllikebdl+newlprb+newlprdl-(oldllikebdl+oldlprb+newlprdl)
if(is.nan(diffvecbdl)){diffvecbdl=-Inf}
alphabdl=min(exp(diffvecbdl), 1)
#accept or reject new draw of beta,lambda and delta
drawbdl=runif(1)
acceptbdl=0
if(alphabdl>drawbdl){acceptbdl=1}
accept[i,1]=acceptbdl
if(acceptbdl==1){
oldbmat[i,]=newbdl[1:nbeta]
olddlmat[i,]=newbdl[(nbeta+1):(nbeta+4)]
oldbdl=newbdl
}
#draw proposal for K
oldK=oldKmat[i,]
newK=oldKmat[i,]+(rbinom(1,1,.5)*2-1)
#calculate likelihood of choices/response times and priors
oldllikeK=loglikeyt(oldbdl,oldK,data[[i]]$X,data[[i]]$y,data[[i]]$time)
if(newK>0){ #only consider proposals with K>0
newllikeK=loglikeyt(oldbdl,newK,data[[i]]$X,data[[i]]$y,data[[i]]$time)
oldlprK=logpriorK(oldK,oldtheta)
newlprK=logpriorK(newK,oldtheta)
diffvecK=newllikeK+newlprK-(oldllikeK+oldlprK)
if(is.nan(diffvecK)){diffvecK=-Inf}
alphaK=min(exp(diffvecK), 1)
#accept or reject new draw of beta,lambda and delta
drawK=runif(1)
acceptK=0
if(alphaK>drawK){acceptK=1}
}else{acceptK=0}
accept[i,2]=acceptK
llikevec[i]=oldllikeK
if(acceptK==1){
oldKmat[i,]=newK
llikevec[i]=newllikeK
}
}
#draw new proposal for theta
newtheta=exp(log(oldtheta)+rnorm(1)*stepth)
#calculate lieklihood and prior for thetas(efficieny can be
#increased by doing this as part of the above respondent-level loop)
newlliketh=0
oldlliketh=0
for(i in 1:nresp){
oldlliketh=oldlliketh+logpriorK(oldKmat[i,],oldtheta)
newlliketh=newlliketh+logpriorK(oldKmat[i,],newtheta)
}
oldlprth=logprior(matrix(log(oldtheta)),matrix(lnthetabar),Ath)
newlprth=logprior(matrix(log(newtheta)),matrix(lnthetabar),Ath)
diffvecth=newlliketh+newlprth-(oldlliketh+oldlprth)
if(is.nan(diffvecth)){diffvecth=-Inf}
alphath=min(exp(diffvecth), 1)
#accept or reject new draw of theta
drawth=runif(1)
acceptth=0
if(alphath>drawth){acceptth=1}
if(acceptth==1){
oldtheta=newtheta
}
#draw new values of beta hyperparameters
outbetahyp=rmultireg(oldbmat,matrix(1,nr=nresp,nc=1),matrix(bdoublebar,nr=1),Ab,nub,Vb)
oldbbar=matrix(outbetahyp$B)
oldVb=outbetahyp$Sigma
oldVbi=chol2inv(chol(oldVb))
#draw new values of delta and lambda hyperparameters
outdeltalambdahyp=rmultireg(olddlmat,matrix(1,nr=nresp,nc=1),matrix(dldoublebar,nr=1),Adl,nudl,Vdl)
olddlbar=matrix(outdeltalambdahyp$B)
oldVdl=outdeltalambdahyp$Sigma
oldVdli=chol2inv(chol(oldVdl))
#Store acceptance proportions
acceptpropbdl=c(acceptpropbdl[2:accup],mean(accept[,1]))
acceptpropK=c(acceptpropK[2:accup],accept[,2])
acceptpropth=c(acceptpropth[2:accup],acceptth)
#Store values
if(r%%keep==0){
#Setup storage
betadraws[r/keep,,]=oldbmat
betabardraws[r/keep,]=oldbbar
Vbetadraws[r/keep,]=oldVb
deltalambdadraws[r/keep,,]=olddlmat
deltalambdabardraws[r/keep,]=olddlbar
Vdeltalambdadraws[r/keep,]=oldVdl
Kdraws[r/keep,]=oldKmat
thetadraws[r/keep]=oldtheta
llikes[r/keep,]=llikevec
accpropbdl[r/keep]=mean(acceptpropbdl)
accpropK[r/keep]=mean(acceptpropK)
accpropth[r/keep]=mean(acceptpropth)
stsizebdl[r/keep]=stepbdl
stsizeth[r/keep]=stepth
}
#print progress
#print tte and chart current draw progress
if(r%%(keep*space)==0){
par(mfrow=c(4,1))
ctime = proc.time()[3]
tuntilend = ((ctime - itime)/r) * (R + 1 - r)
cat(" ", r, " (", round(tuntilend/60, 1), ")",
fill = TRUE)
plot(rowSums(llikes),type="l",ylab="Log Likelihood")
matplot(betabardraws,type="l",ylab="Betabar Draws")
matplot(deltalambdabardraws,type="l",ylab="Delta Lambda Draws")
plot(thetadraws,type="l", col="blue",ylab="Theta Draws")
fsh()
}
#update stepsizes
if(r%%accup==0&&r<(.3*R)){
stepbdl=stepbdl*stepupdate(mean(acceptpropbdl))
stepth=stepth*stepupdate(mean(acceptpropth))
}
}
plot.bayesm.mat(betabardraws,tvalue=tbetabar,burnin=500)
plot.bayesm.mat(deltalambdabardraws,tvalue=tdeltalambdabar,burnin=500)
plot.bayesm.mat(thetadraws,tvalue=ttheta,burnin=500)
|
85b2b40d69b9e86b5e7c80534bf20b190323d057 | 737ecfe52d53a672681ce10b204c1d1d9c9ab31f | /man/selectPCs.Rd | c4321ebe768944ec3756f46681cb0606a7f20806 | [] | no_license | debruine/frlgmm | bed684455138d9c8621debab8399daca4d8a7c15 | b3e7cea304e5e905da9f7b055053a3557a8d15cd | refs/heads/master | 2021-01-10T11:55:52.745156 | 2017-08-23T10:22:40 | 2017-08-23T10:22:40 | 43,598,322 | 4 | 1 | null | null | null | null | UTF-8 | R | false | true | 942 | rd | selectPCs.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/selectPCs.R
\name{selectPCs}
\alias{selectPCs}
\title{Select PCs using different criteria}
\usage{
selectPCs(data, method = "broken-stick", total.var = 0.95)
}
\arguments{
\item{data}{Data structure from geomorph or created by readTem()}
\item{method}{The method to use to choose PCs (Default "broken-stick")}
\item{total.var}{Total variance to choose for "total variance" method (deafult .95)}
}
\value{
A list of chosen PCs
}
\description{
\code{selectPCs} returns a list of significant PCs as chosen by one of 3 methods
(from D.A. Jackson (1993). Ecology, 74, 2204-2214). Defaults to the most accurate
and conservative "broken-stick"/"bs" method. Other methods are "Kaiser-Guttman"/"kg"
(PCs with eigenvalues greater than the mean eigenvalue) and "total variance"/"tv"
(PCs explaining at least total.var variance)
}
\examples{
chosen.pcs <- selectPCs(data)
}
|
09a3610d28cd9d2c9911f747a6061b2a08602c29 | 669da083b9392b8b9bc878ab7e544cd78c962475 | /man/available.catalogs.Rd | 7c566c46d9f3b73540006cfc25a5d749fcd63ac6 | [] | no_license | mingjerli/rWBData | d82659b20dd46bd7d0043d7b8a8dc6cd5d60eb5c | 605ec50af53fe1b050480a49f3346e98f7327a71 | refs/heads/master | 2016-09-05T22:36:49.397190 | 2014-08-22T20:01:07 | 2014-08-22T20:01:07 | 23,237,937 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 623 | rd | available.catalogs.Rd | \name{available.catalogs}
\alias{available.catalogs}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Get available data catalog
}
\description{
This function will return all available data catalog listed in world bank open data.
}
\usage{
available.catalogs()
}
\value{
This function returns a data frame with three columns.
\item{ID}{ID of the data catalog}
\item{name}{name of the data catalog}
\item{acronym}{acronym of the data catalog}
}
\references{
http://datacatalog.worldbank.org/
}
\author{
Ming-Jer Lee <mingjerli@gmail.com>
}
\examples{
available.catalogs()
available.catalogs()[1:5,]
}
|
834957292eb5ebb6e76db75c5e4eea7df87ebecc | fc47c5de300bda96f6f5415a7467bc5f3f2e5553 | /man/data_manip.Rd | 0603c1c337ce29a5bc082dc242dfb3c442fa9a82 | [] | no_license | nverno/iclean | b606f461454675969c3049370e6982ebe84d567e | 3ffac0d85851b4e4cb79e7085371a59bb1c1a26e | refs/heads/master | 2021-01-10T05:48:50.563246 | 2016-01-09T14:31:54 | 2016-01-09T14:31:54 | 49,243,439 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 480 | rd | data_manip.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_manip.R
\name{data_manip}
\alias{data_manip}
\title{Interactively manipulate master files.}
\usage{
data_manip(use_afs = TRUE, update = FALSE, data = NULL)
}
\arguments{
\item{use_afs}{Use master files from AFS}
\item{update}{Update AFS files before grabbing.}
\item{data}{Data to use (default NULL, and use AFS data)}
}
\description{
Mess around with data interactively (clean/reshape etc.)
}
|
312b5efaf40d19ae511c7d7b35c5ccf9050cbcac | b2e4162472b1b488f99ca925f2687ac59712aee9 | /scripts/reads_to_list.R | eaafd72536482845b8817e37822f9a48554201c1 | [] | no_license | OanaCarja/RiboViz | 572a875dd935c3fa23b11590f2a80228ce65a70e | d712fbd60ab09bc6e6af326857867f442006fcb1 | refs/heads/master | 2021-01-11T21:22:50.456789 | 2017-08-09T08:30:39 | 2017-08-09T08:30:39 | 70,090,070 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,965 | r | reads_to_list.R | reads_to_list <- function(gene, gene_location, bamFile, read_range=10:50, left_buffer=250, right_buffer=247,mult_exon=TRUE)
{
flank <- right_buffer+3
if(!mult_exon)
{ gene_location <- gene_location[1]
}
read_range_len <- length(read_range)
start_cod <- (left_buffer+1):(left_buffer+3)
# Specify output matrix
output <- matrix(0,nrow=read_range_len,ncol=(sum(sum(coverage(gene_location)))+2*flank))
# Check for introns
if(length(gene_location)==1)
{ start(gene_location) <- start(gene_location)-flank
end(gene_location) <- end(gene_location)+flank
}else{
start(gene_location)[start(gene_location)==min(start(gene_location))] <- start(gene_location)[start(gene_location)==min(start(gene_location))]-flank
end(gene_location)[end(gene_location)==max(end(gene_location))] <- end(gene_location)[end(gene_location)==max(end(gene_location))]+flank
}
# Read in bam data
what <- c("strand", "pos", "qwidth")
param <- ScanBamParam(which = gene_location, what = what)
bam <- scanBam(bamFile, param=param)
read_strand <- unlist(lapply(bam,function(x)x$strand))
read_location <- unlist(lapply(bam,function(x)x$pos))[read_strand==as.factor(strand(gene_location)[1])]
read_width <- unlist(lapply(bam,function(x)x$qwid))[read_strand==as.factor(strand(gene_location)[1])]
# Column numbers based on genomic position
column_pos <- unlist(which(coverage(gene_location)[seqnames(gene_location)[1]]==1))
if(start(gene_location)<min(column_pos))
{ column_pos <- c(start(gene_location):0,column_pos)
}
if(all(strand(gene_location)=="+"))
{ j <- 1
for(i in read_range)
{ x <- read_location[read_width==i]
ty <- table(factor(x,levels=column_pos))
output[j,] <- c(ty)
j <- j+1
}
}
if(all(strand(gene_location)=="-"))
{ j <- 1
for(i in read_range)
{ x <- read_width[read_width==i]+read_location[read_width==i]-1
ty <- table(factor(x,levels=column_pos))
output[j,] <- c(rev(ty))
j <- j+1
}
}
return(output);
}
|
876d21753b0eb4653bc7ff706aef023dd61b1aea | dd0e56aa9789495c0226746e77300d4a666aa88a | /onsetofEHextracode.R | 400dc7275442dfdf3571b1bcf8ac6ee6cf81af90 | [] | no_license | drjuliejung/ontogeny-of-escape-hatching-manuscript | 9242d2c2b3efc80866106bcec6e48b9909d0e9fa | 90876069a8043acdb3a03ccf8d065cf2529bcb13 | refs/heads/master | 2021-06-14T05:00:51.237609 | 2017-02-24T16:32:25 | 2017-02-24T16:32:25 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 43,702 | r | onsetofEHextracode.R | onsetofEH.R -- extra code
# Analysis for "onset of escape-hatching"
# title: Developmental onset of the escape-hatching response in red-eyed treefrogs depends on cue type
# May 2016
# Julie Jung
ls()
rm(list=ls())
ls()
setwd('/Users/juliejung/Desktop/2cues m.s.')
getwd()
# read.csv(file="my.csv.filename")
onset.df<-read.csv(file="ontogeny.csv")
###################### Q1 ##########################
## Does latency to hatch after the stimulus begins differ between
## hypoxia-cued and mechanically (tactile) cued embryos?
####################################################

# Split trials by stimulus type.  (The na.rm argument is silently ignored by
# subset(); kept only to mirror the original call.)
hypoxic <- subset(onset.df, Stimulus == "H", na.rm = TRUE)
tactile <- subset(onset.df, Stimulus == "T", na.rm = TRUE)

# Latency (TtoH) distributions -- all strongly right-skewed.
hist(hypoxic$TtoH)   # shape suggests Poisson or negative binomial
hist(tactile$TtoH)   # shape suggests geometric or negative binomial
hist(onset.df$TtoH)  # pooled: geometric-like

############# visual evidence that yes, very different ##############
# Descriptive statistics for latency to hatch by stimulus.
mean(hypoxic$TtoH, na.rm = TRUE)
mean(tactile$TtoH, na.rm = TRUE)
sd(hypoxic$TtoH, na.rm = TRUE)
sd(tactile$TtoH, na.rm = TRUE)
min(tactile$TtoH, na.rm = TRUE)
max(tactile$TtoH, na.rm = TRUE)
sd(tactile$TtoH, na.rm = TRUE)
summary(hypoxic$TtoH, na.rm = TRUE)
summary(tactile$TtoH, na.rm = TRUE)

# Side-by-side latency distributions.
boxplot(hypoxic$TtoH, tactile$TtoH,
        xlab = "Stimulus", ylab = "Latency to hatch (min)")
axis(1, at = 1:2, labels = c("hypoxic", "tactile"))
# How different are these?  Significantly?
############# visual evidence that yes, very different ##############
############# STRI strategy: GLMs on latency (less appropriate) ##############
# Model latency as a function of stimulus type.
# NOTE(review): the binomial family expects a 0/1 (or proportion) response;
# TtoH is a latency in minutes -- confirm this second model ran as intended.
glm1 <- glm(TtoH ~ Stimulus, family = "poisson", data = onset.df)
plot(glm1)
summary(glm1)  # P = 0.002
glm2 <- glm(TtoH ~ Stimulus, family = "binomial", data = onset.df)
plot(glm2)
summary(glm2)  # P = 0.0005
############# end STRI strategy (less appropriate) ###########################
###################### ANS 1 ##########################
# H0: the median latency to hatch (TtoH) is the same for both stimuli.
# Unpaired, two-sided Wilcoxon rank-sum test -- nonparametric, so appropriate
# for the skewed latency distributions seen above.
# <http://www.r-tutor.com/elementary-statistics/non-parametric-methods/wilcoxon-signed-rank-test>
# Result: P < 2.2e-16 -- latencies differ very significantly between cue types.
wilcox.test(hypoxic$TtoH, tactile$TtoH, mu = 0, alt = "two.sided",
            paired = FALSE, conf.int = TRUE, conf.level = 0.99)
#######################################################
# --- Per-clutch summaries: tactile-cued embryos ------------------------------
# For each clutch, compare the first pair of embryos to hatch ("F") with the
# last pair ("L") on latency, age, and size/stage measures.  The original code
# was 11 near-identical copy-paste blocks that differed only in the clutch ID
# and the row indices of the first/last pairs; those indices are recorded in a
# lookup table here and the same subsets and means are produced in the same
# order.  Consistency fix: every clutch subset now carries the Stimulus column
# (originally only c254t did), so later rbind() calls over these subsets work.
tactile_pairs <- list(
  "254" = list(first = 1:2, last = 7:8),
  "255" = list(first = 3:4, last = 7:8),
  "256" = list(first = 5:6, last = 7:8),
  "257" = list(first = 1:2, last = 5:6),
  "258" = list(first = 5:6, last = 9:10),
  "259" = list(first = 1:2, last = 7:8),
  "262" = list(first = 7:8, last = 17:18),
  "263" = list(first = 5:6, last = 11:12),
  "264" = list(first = 3:4, last = 13:14),
  "265" = list(first = 3:4, last = 7:8),
  "266" = list(first = 5:6, last = 11:12))
# Variables summarized for each first/last pair, in the original print order.
summary_cols <- c("TtoHhours", "AgeBlock", "HatchAge", "TadLength",
                  "SUM.of.trait.values")
for (cl in names(tactile_pairs)) {
  clutch_sub <- subset(tactile, Clutch == cl,
                       select = c(Clutch, Stimulus, AgeBlock, HatchAge,
                                  Response, NumH, TtoHhours, TadLength,
                                  SUM.of.trait.values))
  first_pair <- clutch_sub[tactile_pairs[[cl]]$first, ]
  last_pair  <- clutch_sub[tactile_pairs[[cl]]$last, ]
  # Keep the cXXXt / cXXXtF / cXXXtL objects that later sections reference.
  assign(paste0("c", cl, "t"),  clutch_sub)
  assign(paste0("c", cl, "tF"), first_pair)
  assign(paste0("c", cl, "tL"), last_pair)
  # print() makes the means visible even when the script is source()d.
  for (v in summary_cols) {
    print(mean(first_pair[[v]]))
    print(mean(last_pair[[v]]))
  }
}
# --- Per-clutch summaries: hypoxia-cued embryos ------------------------------
# Same structure as the tactile section: for each clutch, compare the first
# pair of embryos to hatch ("F") with the last pair ("L").  The original 11
# copy-paste blocks are replaced by a lookup table of the per-clutch first/last
# row indices plus a loop producing the same subsets and means in the same
# order.  Consistency fix: every clutch subset now carries the Stimulus column
# (originally only c254h did), so later rbind() calls over these subsets work.
hypoxic_pairs <- list(
  "254" = list(first = 1:2, last = 5:6),
  "255" = list(first = 3:4, last = 9:10),
  "256" = list(first = 1:2, last = 7:8),
  "257" = list(first = 7:8, last = 11:12),
  "258" = list(first = 5:6, last = 9:10),
  "259" = list(first = 5:6, last = 9:10),
  "262" = list(first = 5:6, last = 9:10),
  "263" = list(first = 3:4, last = 11:12),
  "264" = list(first = 1:2, last = 9:10),
  "265" = list(first = 7:8, last = 9:10),
  "266" = list(first = 7:8, last = 9:10))
# Variables summarized for each first/last pair, in the original print order.
hyp_summary_cols <- c("TtoHhours", "AgeBlock", "HatchAge", "TadLength",
                      "SUM.of.trait.values")
for (cl in names(hypoxic_pairs)) {
  clutch_sub <- subset(hypoxic, Clutch == cl,
                       select = c(Clutch, Stimulus, AgeBlock, HatchAge,
                                  Response, NumH, TtoHhours, TadLength,
                                  SUM.of.trait.values))
  first_pair <- clutch_sub[hypoxic_pairs[[cl]]$first, ]
  last_pair  <- clutch_sub[hypoxic_pairs[[cl]]$last, ]
  # Keep the cXXXh / cXXXhF / cXXXhL objects that later sections reference.
  assign(paste0("c", cl, "h"),  clutch_sub)
  assign(paste0("c", cl, "hF"), first_pair)
  assign(paste0("c", cl, "hL"), last_pair)
  # print() makes the means visible even when the script is source()d.
  for (v in hyp_summary_cols) {
    print(mean(first_pair[[v]]))
    print(mean(last_pair[[v]]))
  }
}
######### CLUTCH MEANS of latency to hatch #########
# One mean latency per clutch per stimulus (11 clutches x 2 stimuli).
# NOTE(review): the per-clutch subsets only carry a "TtoHhours" column, so the
# original `$TtoH` resolved to TtoHhours via R's partial name matching -- the
# explicit column name is used here; confirm hours (not minutes) was intended.
# Bug fix: the original matrix duplicated c265h as its last entry, so the
# "c266" hypoxia row actually held clutch 265's mean; it is c266h here.
tactile_sets <- list(c254t, c255t, c256t, c257t, c258t, c259t,
                     c262t, c263t, c264t, c265t, c266t)
hypoxic_sets <- list(c254h, c255h, c256h, c257h, c258h, c259h,
                     c262h, c263h, c264h, c265h, c266h)
clutch_mean <- function(d) mean(d$TtoHhours, na.rm = TRUE)
means <- matrix(c(vapply(tactile_sets, clutch_mean, numeric(1)),
                  vapply(hypoxic_sets, clutch_mean, numeric(1))),
                ncol = 2, byrow = FALSE)
colnames(means) <- c("tactile", "hypoxia")
rownames(means) <- c("c254", "c255", "c256", "c257", "c258", "c259",
                     "c262", "c263", "c264", "c265", "c266")
means <- as.table(means)
means[, 1]

###### Paired (by clutch) comparison of mean latencies between stimuli.
wilcox.test(means[, 1], means[, 2], mu = 0, alt = "two.sided", paired = TRUE,
            conf.int = TRUE, conf.level = 0.99)
# Hand-entered per-clutch values (one per clutch per stimulus), compared with a
# paired Wilcoxon test below.
# NOTE(review): the meaning/units of these values come from the lab records --
# confirm against the raw data sheets.
ramp <- matrix(c(9, 6, 3, 6, 6, 9, 15, 9, 15, 6, 9,
                 6, 9, 9, 6, 6, 6, 6, 12, 12, 3, 3),
               ncol = 2, byrow = FALSE)
colnames(ramp) <- c("tactile", "hypoxia")
rownames(ramp) <- c("c254", "c255", "c256", "c257", "c258", "c259",
                    "c262", "c263", "c264", "c265", "c266")
# Bug fix: the original called as.dataframe(), which does not exist and would
# error; the intended function is as.data.frame().
ramp <- as.data.frame(ramp)
wilcox.test(ramp[, 1], ramp[, 2], mu = 0, alt = "two.sided", paired = TRUE,
            conf.int = TRUE, conf.level = 0.99)
###################### Q3 ##########################
## When hatching starts, are the embryos that hatch first developmentally
## ahead of their siblings that do not?
####################################################

# Developmental-stage data: one row per embryo pair.
devstages.df <- read.csv(file = "devstages.csv")

# Split pairs by how many of the two embryos hatched (0, 1, or 2).
noH  <- subset(devstages.df, NumH == 0, na.rm = TRUE)
oneH <- subset(devstages.df, NumH == 1, na.rm = TRUE)
twoH <- subset(devstages.df, NumH == 2, na.rm = TRUE)

# Body length by number hatched.
boxplot(noH$MeanLength, oneH$MeanLength, twoH$MeanLength,
        xlab = "# hatched (out of 2)", ylab = "Mean Length (mm)")
axis(1, at = 1:3, labels = 0:2)

# ANOVA + Tukey HSD on mean length.
# NOTE(review): NumH enters aov() as a numeric covariate here; if a three-group
# comparison is intended, factor(NumH) would be the usual choice -- confirm.
meanleng <- aov(MeanLength ~ NumH, data = devstages.df)
library(agricolae)
results <- HSD.test(meanleng, "NumH", group = TRUE)
results
# All groups significantly different (Tukey test).

# Developmental stage by number hatched.
boxplot(noH$Stage, oneH$Stage, twoH$Stage,
        xlab = "# hatched (out of 2)", ylab = "Developmental Stage")
axis(1, at = 1:3, labels = 0:2)
develstage <- aov(Stage ~ NumH, data = devstages.df)
library(agricolae)
results2 <- HSD.test(develstage, "NumH", group = TRUE)
results2
## Tukey groups -- 0 hatched: a, 1 hatched: b, 2 hatched: b
##################################
####Show karen 6/4/16
# Within pairs where exactly one embryo hatched (NumH == 1): is the hatched
# embryo developmentally ahead of its unhatched sibling?
# NOTE(review): the paired tests below rely on HfirstHt / NHfirstHt rows being
# in matching sibling order within each subset -- confirm against the data.
# of the ones that hatch first ("oneH") are the ones that hatched developmentally ahead of their sibs that didn't hatch?
# --- Tactile: developmental stage (sum of trait values) ---
firstHt <- subset(tactile, NumH ==1, na.rm=T)
HfirstHt <- subset(firstHt, Response == "Hatched")
NHfirstHt <- subset(firstHt, Response == "Not Hatched")
boxplot(HfirstHt$SUM.of.trait.values, NHfirstHt$SUM.of.trait.values, xlab="Response", ylab="Developmental Stage")
axis(1, at=1:2, labels=c("Hatched", "Not Hatched"))
# Paired two-sided Wilcoxon signed-rank test on developmental stage.
wilcox.test(HfirstHt$SUM.of.trait.values, NHfirstHt$SUM.of.trait.values, mu = 0, alt="two.sided", paired = T, conf.int=T, conf.level=0.99)
##Of the ones that hatched first (1 of 2 hatched), the ones that hatched are not significantly ahead of their sibs that didn’t hatch (P=0.6448).
# --- Hypoxia: developmental stage (sum of trait values) ---
firstHh <- subset(hypoxic, NumH ==1, na.rm=T)
HfirstHh <- subset(firstHh, Response == "Hatched")
NHfirstHh <- subset(firstHh, Response == "Not Hatched")
boxplot(HfirstHh$SUM.of.trait.values, NHfirstHh$SUM.of.trait.values, xlab="Response", ylab="Developmental Stage")
axis(1, at=1:2, labels=c("Hatched", "Not Hatched"))
wilcox.test(HfirstHh$SUM.of.trait.values, NHfirstHh$SUM.of.trait.values, mu = 0, alt="two.sided", paired = T, conf.int=T, conf.level=0.99)
##Of the ones that hatched first (1 of 2 hatched), the ones that hatched are not significantly ahead of their sibs that didn’t hatch (P=0.6448).
# --- Tactile: tadpole body length ---
# (firstHt is rebuilt here; the commented-out lines below were an exploratory
# removal of two outlier lengths, left disabled.)
firstHt <- subset(tactile, NumH ==1, na.rm=T)
#firstHt$TadLength[firstHt$TadLength==8.852] <- NA #outlier
#firstHt$TadLength[firstHt$TadLength==7.942] <- NA
HfirstHt <- subset(firstHt, Response == "Hatched")
NHfirstHt <- subset(firstHt, Response == "Not Hatched")
boxplot(HfirstHt$TadLength, NHfirstHt$TadLength, xlab="Response", ylab="Tad Length")
axis(1, at=1:2, labels=c("Hatched", "Not Hatched"))
# Paired t-test used here (Wilcoxon version left commented out above it).
#wilcox.test(HfirstHt$TadLength, NHfirstHt$TadLength, mu = 0, alt="two.sided", paired = T, conf.int=T, conf.level=0.99)
t.test(HfirstHt$TadLength, NHfirstHt$TadLength,paired=TRUE)
##Of the ones that hatched first (1 of 2 hatched), the ones that hatched are not significantly ahead of their sibs that didn’t hatch (P=0.6448).
# Scatterplot of hatched vs. unhatched sibling lengths.
plot(HfirstHt$TadLength,NHfirstHt$TadLength)
# --- Hypoxia: tadpole body length (both Wilcoxon and paired t-test run) ---
firstHh <- subset(hypoxic, NumH ==1, na.rm=T)
HfirstHh <- subset(firstHh, Response == "Hatched")
NHfirstHh <- subset(firstHh, Response == "Not Hatched")
boxplot(HfirstHh$TadLength, NHfirstHh$TadLength, xlab="Response", ylab="Tad Length")
axis(1, at=1:2, labels=c("Hatched", "Not Hatched"))
wilcox.test(HfirstHh$TadLength, NHfirstHh$TadLength, mu = 0, alt="two.sided", paired = T, conf.int=T, conf.level=0.99)
t.test(HfirstHh$TadLength, NHfirstHh$TadLength,paired=TRUE)
plot(HfirstHh$TadLength, NHfirstHh$TadLength)
##Of the ones that hatched first (1 of 2 hatched), the ones that hatched are not significantly ahead of their sibs that didn’t hatch (P=0.6448).
###########################################
# "of the ones that hatched first" = first hatch of the clutch (1 time point per clutch)
# Hand-picked pairs of rows (one hatched + one unhatched sibling at the
# first-hatch time point) from each clutch data frame (c254t..c266h,
# defined earlier; suffix t = tactile, h = hypoxia trials).
# NOTE(review): the row indices are hard-coded per clutch -- verify against
# the raw data if clutch files change.
dat <- rbind(c254t[1:2,],
c255t[3:4,],
c256t[5:6,],
c257t[1:2,],
c258t[5:6,],
c259t[1:2,],
c262t[7:8,],
c263t[5:6,],
c264t [3:4,],
c265t [3:4,],
c266t [5:6,],
c254h [1:2,] ,
c255h [3:4,],
c256h [1:2,],
c257h [7:8,],
c258h [5:6,],
c259h [5:6,],
c262h [5:6,],
c263h [3:4,],
c264h [1:2,],
c265h [7:8,],
c266h [7:8,])
# compare hatched vs. not hatched (unpaired)
Hat <- subset(dat, Response == "Hatched")
NotHat <- subset(dat, Response == "Not Hatched")
wilcox.test(Hat$SUM.of.trait.values, NotHat$SUM.of.trait.values, mu = 0, alt="two.sided", paired = F, conf.int=T, conf.level=0.99)
## does developmental stage predict latency to hatch after stimulus begins.
# scatterplot() here is car::scatterplot (grouped scatter with regression lines).
library(car)
hist(onset.df$SUM.of.trait.values)
hist(hypoxic$SUM.of.trait.values)
hist(tactile$SUM.of.trait.values)
scatterplot (onset.df$SUM.of.trait.values, onset.df$TtoH, log = "y", ylab="Latency to hatch (h)", xlab="Developmental Stage")
# Same relationship split by stimulus type (hypoxia vs. tactile).
scatterplot (onset.df$TtoH ~ onset.df$SUM.of.trait.values | onset.df$Stimulus, reg.line=TRUE, col.lab="black", by.groups=T, pch=c(16,1), boxplots=F, lwd=2, lty=1, legend.title="Stimulus", levels=c("hypoxia", "tactile"), legend.coords="topright", ylab="Latency to hatch (h)", reset.par=T, xlab="Developmental Stage")
library(ggplot2)
#normal scale
ggplot(onset.df, aes(x = SUM.of.trait.values, y = TtoH, shape = Stimulus)) +
geom_point(size=3) +
geom_smooth(method=lm, se=FALSE) +
scale_shape_manual(values=c(16,1)) +
ylab("Latency to Hatch (h)\n") +
theme_bw(20) +
xlab("\nDevelopmental Stage")
#log scale
ggplot(onset.df, aes(x = SUM.of.trait.values, y = TtoH, shape = Stimulus)) +
scale_y_log10() +
geom_point(size=3) +
scale_shape_manual(values=c(15,0)) +
geom_smooth(method=lm, se=FALSE) +
ylab("Latency to Hatch (h)\n") +
theme_bw(20) +
xlab("\nDevelopmental Stage")
# Grouped scatterplot of latency vs. stage via the GitHub-only easyGgplot2
# helper package.  Installs are guarded so re-running the script does not hit
# the network (and CRAN/GitHub) every time.
if (!requireNamespace("devtools", quietly = TRUE)) {
  install.packages("devtools")
}
library(devtools)
if (!requireNamespace("easyGgplot2", quietly = TRUE)) {
  # Fixed: modern devtools takes a single "username/repo" string; the old
  # two-argument form install_github(repo, username) was deprecated and
  # later removed, so the original call errors on current devtools.
  install_github("kassambara/easyGgplot2")
}
library(easyGgplot2)
ggplot2.scatterplot(data=onset.df, xName='SUM.of.trait.values', yName='TtoH',
                    groupName='Stimulus', size=30, backgroundColor="white",
                    groupColors=c('black', 'black'), addRegLine=TRUE,
                    addConfidenceInterval=F, setShapeByGroupName=FALSE,
                    removePanelGrid=TRUE)
#yScale="log10"  # option for a log-scaled y axis, currently disabled
# Pearson correlations + scatterplots: does developmental stage predict
# latency to hatch, overall and within each stimulus type?
cor.test(onset.df$SUM.of.trait.values, onset.df$TtoH)
scatterplot (hypoxic$SUM.of.trait.values, hypoxic$TtoH, main = "hypoxia hatch", ylab="Latency to hatch (h)", xlab="Developmental Stage")
cor.test(hypoxic$SUM.of.trait.values, hypoxic$TtoH)
scatterplot (tactile$SUM.of.trait.values, tactile$TtoH, main = "tactile hatch", ylab="Latency to hatch (h)", xlab="Developmental Stage")
cor.test(tactile$SUM.of.trait.values, tactile$TtoH)
## does tadpole length predict Latency to hatch after stimulus begins.
hist(onset.df$TadLength)
hist(hypoxic$TadLength)
hist(tactile$TadLength)
scatterplot (onset.df$TadLength, onset.df$TtoH, ylab="Latency to hatch (h)", xlab="Tadpole Length (mm)")
cor.test(onset.df$TadLength, onset.df$TtoH)
scatterplot (hypoxic$TadLength, hypoxic$TtoH, main = "hypoxia hatch", ylab="Latency to hatch (h)", xlab="Tadpole Length (mm)")
cor.test(hypoxic$TadLength, hypoxic$TtoH)
scatterplot (tactile$TadLength, tactile$TtoH, main = "tactile hatch", ylab="Latency to hatch (h)", xlab="Tadpole Length (mm)")
cor.test(tactile$TadLength, tactile$TtoH)
# Plain base-graphics versions of the same scatterplots.
plot (onset.df$SUM.of.trait.values, onset.df$TtoH, ylab="Latency to hatch (h)", xlab="Developmental Stage")
plot (hypoxic$SUM.of.trait.values, hypoxic$TtoH, main = "hypoxia hatch", ylab="Latency to hatch (h)", xlab="Developmental Stage")
plot (tactile$SUM.of.trait.values, tactile$TtoH, main = "tactile hatch", ylab="Latency to hatch (h)", xlab="Developmental Stage")
## tadpole length does not predict Latency to hatch after stimulus begins.
plot (onset.df$TadLength, onset.df$TtoH, ylab="Latency to hatch (h)", xlab="Tadpole Length (mm)")
plot (hypoxic$TadLength, hypoxic$TtoH, main = "hypoxia hatch", ylab="Latency to hatch (h)", xlab="Tadpole Length (mm)")
plot (tactile$TadLength, tactile$TtoH, main = "tactile hatch", ylab="Latency to hatch (h)", xlab="Tadpole Length (mm)")
###################################
###################################
####### NEW FIGURE oct26, 2016 ####
###################################
###################################
# Multiple plot function
#
# ggplot objects can be passed in ..., or to plotlist (as a list of ggplot objects)
# - cols: Number of columns in layout
# - layout: A matrix specifying the layout. If present, 'cols' is ignored.
#
# If the layout is something like matrix(c(1,2,3,3), nrow=2, byrow=TRUE),
# then plot 1 will go in the upper left, 2 will go in the upper right, and
# 3 will go all the way across the bottom.
#
# Lay out multiple ggplot objects on a single page.
#
# Plots may be supplied directly via ... or as a list via `plotlist`.
# `cols` gives the number of layout columns; alternatively `layout` is a
# matrix of plot indices (in which case `cols` is ignored), e.g.
# matrix(c(1,2,3,3), nrow=2, byrow=TRUE) puts plots 1 and 2 on the top row
# and plot 3 across the whole bottom row.
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
  library(grid)
  # Collect everything to draw: positional plots first, then the list.
  all_plots <- c(list(...), plotlist)
  n_plots <- length(all_plots)
  # Derive a column-wise layout matrix from `cols` when none was supplied.
  if (is.null(layout)) {
    n_rows <- ceiling(n_plots / cols)
    layout <- matrix(seq(1, cols * n_rows), ncol = cols, nrow = n_rows)
  }
  if (n_plots == 1) {
    print(all_plots[[1]])
  } else {
    # Open a fresh page with one grid viewport cell per layout slot.
    grid.newpage()
    pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
    for (idx in seq_len(n_plots)) {
      # Find the (row, col) cell(s) assigned to plot `idx` and draw it there;
      # a plot listed in several cells spans all of them.
      pos <- as.data.frame(which(layout == idx, arr.ind = TRUE))
      print(all_plots[[idx]],
            vp = viewport(layout.pos.row = pos$row,
                          layout.pos.col = pos$col))
    }
  }
}
# par(mfrow=c(1,2))
#
# LtoHdata<-read.csv(file="LtoHdata.csv")
# LtoHdata<-na.omit(LtoHdata)
# str(LtoHdata)
# boxplot(log10(LtoHmins)~FirstVsConsistent*Stimulus, data=LtoHdata, notch=TRUE, ylab="Log of Latency to Hatch in Minutes", xlab="Stimulus")
# Left panel: boxplot of log latency-to-hatch by stimulus, grouped by
# first vs. consistent hatching.
# Fixed: ggplot2 aesthetic names are lowercase; `Colour=` is not a
# recognised aesthetic and was silently ignored, so the boxes were never
# split/coloured by FirstVsConsistent as intended.
p1 <- ggplot(LtoHdata, aes(Stimulus, log10(LtoHmins), colour = FirstVsConsistent)) +
  geom_boxplot() +
  ylab("Log of Latency to Hatch (mins)\n") +
  theme_bw(16) +
  xlab("\nStimulus")
#log scale
# Right panel: latency vs. developmental stage, one point shape per stimulus.
LtoHfig<-read.csv(file="LtoHfig.csv")
str(LtoHfig)
p2 <- ggplot(LtoHfig, aes(x = Developmental.Stage, y = log10(Latency.to.Hatch.in.Minutes), shape = Stimulus)) + # latency is in mins
  geom_point(size=2) +
  # Fixed: the original stacked geom_smooth(method=lm, se=FALSE) AND
  # stat_smooth(method = lm) -- these are the same smoothing layer, so the
  # regression line was drawn twice.  A single stat_smooth() keeps the
  # fitted line plus its confidence ribbon.
  stat_smooth(method = lm) +
  scale_shape_manual(values=c(15,0)) + # use c(16,1) for filled/open circles
  ylab("Log of Latency to Hatch (mins)\n") +
  theme_bw(16) +
  # y-axis title/labels suppressed because p1 (left panel) carries the axis
  theme(axis.title.y=element_blank(),
        axis.text.y=element_blank(),
        axis.ticks.y=element_blank()) +
  xlab("\nDevelopmental Stage")
# Draw both panels side by side (multiplot defined above).
multiplot(p1, p2, cols=2)
#scatter plot and boxplot overlay
max(LtoHfig$Developmental.Stage, na.rm=T)
LtoHfig$AvgLtoHmins<-as.numeric(LtoHfig$AvgLtoHmins)
library(MASS) # to access Animals data sets
library(scales) # to access break formatting functions
# Boxplots + jittered points of mean latency by developmental stage, on a
# log10 y axis with 10^x tick labels.
ggplot(LtoHfig, aes(x = Developmental.Stage, y = AvgLtoHmins, shape = Stimulus, colour=FirstVsConsistent, na.rm=T))+
geom_boxplot(aes(fill = FirstVsConsistent))+
geom_jitter(position=position_dodge(width=0.5))+
scale_y_log10(breaks = trans_breaks("log10", function(x) 10^x),
labels = trans_format("log10", math_format(10^.x))) +
#scale_y_log10(breaks=c(.01,.1,1),labels=c(.01,.1,1))+
# NOTE(review): scale_x_discrete assumes Developmental.Stage is a
# factor/discrete variable -- confirm; if it is numeric this scale is a no-op.
scale_x_discrete(breaks=c(1,2,3,4,5,6,7),labels=c(1,2,3,4,5,6,7))+
#xlim(min(LtoHfig$Developmental.Stage, na.rm=T), max(LtoHfig$Developmental.Stage, na.rm=T))+
scale_shape_manual(values=c(15,0)) +
scale_color_manual(values=c("black", "black")) +
scale_fill_manual(values=c("azure3", "white")) +
stat_smooth(method = lm, se=FALSE, color="blue", lty=2)+
ylab("Latency to Hatch (min)\n") +
theme_bw(20) +
xlab("\nDevelopmental Stage")
## Order 156, 220 -->NAs
# Data ranges (for axis limits / sanity checks).
min(LtoHfig$AvgLtoHmins, na.rm=T)
max(LtoHfig$AvgLtoHmins, na.rm=T)
min(LtoHfig$Developmental.Stage, na.rm=T)
max(LtoHfig$Developmental.Stage, na.rm=T)
#normal scale
# NOTE(review): the log column computed here is not used by the plot below
# (which plots the raw Latency.to.Hatch.in.Minutes) -- confirm intent.
LtoHfig$Log.Latency.to.Hatch.in.Minutes<- log10(LtoHfig$AvgLtoHmins)
ggplot(LtoHfig, aes(x = Developmental.Stage, y = Latency.to.Hatch.in.Minutes, shape = Stimulus)) +
geom_point(size=3) +
scale_shape_manual(values=c(15,0)) +
stat_smooth(method = lm, se=FALSE)+
ylab("Latency to Hatch (h)\n") +
theme_bw(20) +
xlab("\nDevelopmental Stage")
library(reshape2)
# Faceted boxplots of mean latency, one facet per stimulus.
# Fixed: `Colour=` is not a ggplot2 aesthetic (aesthetics are lowercase), so
# the top-level mapping was silently ignored; corrected to `colour=`.  The
# geom-level aes(colour=...) below takes precedence either way.
ggplot(LtoHfig, aes(Stimulus, AvgLtoHmins, colour=FirstVsConsistent)) +
  geom_boxplot(aes(colour=FirstVsConsistent), show.legend=FALSE) +
  facet_grid(Stimulus ~ .) +
  theme_bw()
hypLtoHfig<-subset(LtoHfig, LtoHfig$Stimulus=="H")
tacLtoHfig<-subset(LtoHfig, LtoHfig$Stimulus=="T")
# Fixed: boxplot(x, y) drew two unrelated boxes -- one of them of the
# grouping factor itself, which is meaningless.  The intended plot (latency
# grouped by first-vs-consistent hatching) needs the formula interface.
boxplot(AvgLtoHmins ~ FirstVsConsistent, data=hypLtoHfig)
###################################
###################################
###################################
###################################
###################################
#calculate means by hand from each clutch.
# read.csv(file="my.csv.filename")
# Per-clutch mean latencies; one row per clutch, H = hypoxia, T = tactile.
latencymeans.df<-read.csv(file="latencymeans.csv")
hyplatencymeans <- subset(latencymeans.df, Stimulus == "H")
meclatencymeans <- subset(latencymeans.df, Stimulus == "T")
###################### Q 2 ##########################
## Does latency to hatch after stimulus begins (TtoH) change from first hatch to consistent hatching (end criteria)
###################### Q 2 ##########################
#hist(latencymeans.df$First) #poisson or negative binomial
#hist(latencymeans.df$Last) #geometric or negative binomial
mean(latencymeans.df$FirstLtoHhours)
mean(latencymeans.df$ConsistentLtoHhours, na.rm=T)
boxplot(latencymeans.df$ConsistentLtoHhours, latencymeans.df$FirstLtoHhours, xlab="hatching", ylab="Latency to hatch (h)")
# NOTE(review): boxes are drawn consistent-first but labelled first/consistent
# left-to-right -- confirm the label order matches the data order.
axis(1, at=1:2, labels=c("first", "consistent"))
############# visual evidence of how different ##############
###################### ANS 2 ##########################
# Ho: Midean change in TtoH is 0
# two.sided test
wilcox.test(latencymeans.df$FirstLtoHhours, latencymeans.df$ConsistentLtoHhours, mu = 0, alt="two.sided", paired = T, conf.int=T, conf.level=0.99)
##nonparametric -->
##tests median <http://www.r-tutor.com/elementary-statistics/non-parametric-methods/wilcoxon-signed-rank-test>
###################### ANS 2 ##########################
hist(latencymeans.df$AgeLag)
# Same question split by stimulus type.
hyp <- subset(latencymeans.df, Stimulus == "H")
tac <- subset(latencymeans.df, Stimulus == "T")
mean(hyp$FirstLtoHhours)
mean(hyp$ConsistentLtoHhours)
boxplot(hyp$FirstLtoHhours, hyp$ConsistentLtoHhours, xlab="hatching", ylab="Latency to hatch (h)")
axis(1, at=1:2, labels=c("first", "consistent"))
wilcox.test(hyp$FirstLtoHhours, hyp$ConsistentLtoHhours, mu = 0, alt="two.sided", paired = T, conf.int=T, conf.level=0.99)
mean(tac$FirstLtoHhours)
mean(tac$ConsistentLtoHhours, na.rm=T)
boxplot(tac$FirstLtoHhours, tac$ConsistentLtoHhours, xlab="hatching", ylab="Latency to hatch (h)")
axis(1, at=1:2, labels=c("first", "consistent"))
wilcox.test(tac$FirstLtoHhours, tac$ConsistentLtoHhours, mu = 0, alt="two.sided", paired = T, conf.int=T, conf.level=0.99)
## Age block when first vs. consistent // hyp vs. tactile
mean(hyp$FirstAgeBlock)
mean(hyp$ConsistentAgeBlock)
boxplot(hyp$FirstAgeBlock, hyp$ConsistentAgeBlock, xlab="hatching", ylab="Age Block (h)")
axis(1, at=1:2, labels=c("first", "consistent"))
wilcox.test(hyp$FirstAgeBlock, hyp$ConsistentAgeBlock, mu = 0, alt="two.sided", paired = T, conf.int=T, conf.level=0.99)
mean(tac$FirstAgeBlock)
mean(tac$ConsistentAgeBlock, na.rm=T)
boxplot(tac$FirstAgeBlock, tac$ConsistentAgeBlock, xlab="hatching", ylab="Latency to hatch (h)")
axis(1, at=1:2, labels=c("first", "consistent"))
wilcox.test(tac$FirstAgeBlock, tac$ConsistentAgeBlock, mu = 0, alt="two.sided", paired = T, conf.int=T, conf.level=0.99)
# Lag (consistent minus first age block): tactile vs. hypoxia across clutches.
wilcox.test(tac$AgeLag, hyp$AgeLag, mu = 0, alt="two.sided", paired = T, conf.int=T, conf.level=0.99)
t.test(tac$AgeLag, hyp$AgeLag, mu = 0, alt="two.sided", paired = T, conf.int=T, conf.level=0.99)
sum(tac$AgeLag)
sum(hyp$AgeLag)
##########
##########
## Across clutches, HYP hatching began and became consistent earlier than MEC hatching
## --> Age
## --> Embryo size
## --> Dev stage
##########
##########
#calculate means by hand from each clutch.
# read.csv(file="my.csv.filename")
# NOTE(review): re-reads and re-subsets the same CSV already loaded above.
latencymeans.df<-read.csv(file="latencymeans.csv")
hyplatencymeans <- subset(latencymeans.df, Stimulus == "H")
meclatencymeans <- subset(latencymeans.df, Stimulus == "T")
# AGE ANALYSIS (significant)
# #hypoxia
# hist(hyplatencymeans$Agefirst) # normal --> t - test??
# hist(hyplatencymeans$Agelast) # normal --> t - test??
# #wilcox.test(hyplatencymeans$Agefirst, hyplatencymeans$Agelast, mu = 0, alt="two.sided", paired = T, conf.int=T, conf.level=0.99)
# t.test(hyplatencymeans$Agefirst, hyplatencymeans$Agelast, mu = 0, alt="two.sided", paired = T, conf.int=T, conf.level=0.99)
# plot(hyplatencymeans$Agefirst ~ hyplatencymeans$Agelast)
# Paired t-tests (clutches paired across stimulus types): age at first and
# at last (consistent) hatching, hypoxia vs. mechanosensory.
t.test(hyplatencymeans$Agefirst, meclatencymeans$Agefirst, mu = 0, alt="two.sided", paired = T, conf.int=T, conf.level=0.99)
plot(Agefirst ~ Stimulus, data=latencymeans.df)
t.test(hyplatencymeans$Agelast, meclatencymeans$Agelast, mu = 0, alt="two.sided", paired = T, conf.int=T, conf.level=0.99)
plot(Agelast ~ Stimulus, data=latencymeans.df)
# EMBRYO SIZE ANALYSIS
t.test(hyplatencymeans$Embryosizefirst, meclatencymeans$Embryosizefirst, mu = 0, alt="two.sided", paired = T, conf.int=T, conf.level=0.99)
plot(Embryosizefirst ~ Stimulus, data=latencymeans.df)
t.test(hyplatencymeans$Embryosizelast, meclatencymeans$Embryosizelast, mu = 0, alt="two.sided", paired = T, conf.int=T, conf.level=0.99)
plot(Embryosizelast ~ Stimulus, data=latencymeans.df)
# DEV STAGE ANALYSIS
t.test(hyplatencymeans$Devstagefirst, meclatencymeans$Devstagefirst, mu = 0, alt="two.sided", paired = T, conf.int=T, conf.level=0.99)
plot(Devstagefirst ~ Stimulus, data=latencymeans.df)
t.test(hyplatencymeans$Devstagelast, meclatencymeans$Devstagelast, mu = 0, alt="two.sided", paired = T, conf.int=T, conf.level=0.99)
plot(Devstagelast ~ Stimulus, data=latencymeans.df)
# #mecanosensory
# hist(meclatencymeans$Agefirst) # normal --> t - test??
# hist(meclatencymeans$Agelast) # normal --> t - test??
# #wilcox.test(meclatencymeans$Agefirst, meclatencymeans$Agelast, mu = 0, alt="two.sided", paired = T, conf.int=T, conf.level=0.99)
# t.test(meclatencymeans$Agefirst, meclatencymeans$Agelast, mu = 0, alt="two.sided", paired = T, conf.int=T, conf.level=0.99)
# plot(meclatencymeans$Agefirst ~ meclatencymeans$Agelast)
#
# #first
# hist(hyplatencymeans$Agefirst) # normal --> t - test??
# hist(hyplatencymeans$Agelast) # normal --> t - test??
# #wilcox.test(hyplatencymeans$Agefirst, hyplatencymeans$Agelast, mu = 0, alt="two.sided", paired = T, conf.int=T, conf.level=0.99)
# t.test(hyplatencymeans$Agefirst, hyplatencymeans$Agelast, mu = 0, alt="two.sided", paired = T, conf.int=T, conf.level=0.99)
# plot(hyplatencymeans$Agefirst ~ hyplatencymeans$Agelast)
#
# #consistent
# hist(meclatencymeans$Agefirst) # normal --> t - test??
# hist(meclatencymeans$Agelast) # normal --> t - test??
# #wilcox.test(meclatencymeans$Agefirst, meclatencymeans$Agelast, mu = 0, alt="two.sided", paired = T, conf.int=T, conf.level=0.99)
# t.test(meclatencymeans$Agefirst, meclatencymeans$Agelast, mu = 0, alt="two.sided", paired = T, conf.int=T, conf.level=0.99)
# plot(meclatencymeans$Agefirst ~ meclatencymeans$Agelast)
#DONE STAGE ANALYSIS (not significantly different between 2015 and 2016)
# NOTE(review): compyrs.df, dat2015 and dat2016 are defined elsewhere in the file.
hist(compyrs.df$DoneStage) #normalish.
wilcox.test(dat2015$DoneStage, dat2016$DoneStage, mu = 0, alt="two.sided", paired = T, conf.int=T, conf.level=0.99)
t.test(dat2015$DoneStage, dat2016$DoneStage, mu = 0, alt="two.sided", paired = T, conf.int=T, conf.level=0.99)
compyrs.df$Year <- as.factor(compyrs.df$Year)
plot(DoneStage~Year, data=compyrs.df)
### "Stage at the onset of hatching was quite consistent under hypoxia and more variable in response to the mechanosensory cue (Fig. 3B; Levene’s test, F1,20 = 15.116, P = 0.0009). "
# Variances of stage at first/last hatching, per stimulus type.
var(hyplatencymeans$Devstagefirst)
var(meclatencymeans$Devstagefirst)
var(hyplatencymeans$Devstagelast)
var(meclatencymeans$Devstagelast)
# load leveneTest function
library(car)
# run the levene test centered around the mean
leveneTest(latencymeans.df$Devstagefirst, latencymeans.df$Stimulus, center=mean)
#data frame with two columns, height (in inches) and sex (Male or Female)
#and I want to run levene's test to see if the variance is the same for
#Male and Female height.
# NOTE(review): this whole section is a verbatim duplicate of the "Q 2"
# analysis earlier in the file -- consider deleting one copy.
###################### Q 2 ##########################
## Does latency to hatch after stimulus begins (TtoH) change from first hatch to consistent hatching (end criteria)
###################### Q 2 ##########################
#hist(latencymeans.df$First) #poisson or negative binomial
#hist(latencymeans.df$Last) #geometric or negative binomial
mean(latencymeans.df$FirstLtoHhours)
mean(latencymeans.df$ConsistentLtoHhours, na.rm=T)
boxplot(latencymeans.df$ConsistentLtoHhours, latencymeans.df$FirstLtoHhours, xlab="hatching", ylab="Latency to hatch (h)")
axis(1, at=1:2, labels=c("first", "consistent"))
############# visual evidence of how different ##############
###################### ANS 2 ##########################
# Ho: Midean change in TtoH is 0
# two.sided test
wilcox.test(latencymeans.df$FirstLtoHhours, latencymeans.df$ConsistentLtoHhours, mu = 0, alt="two.sided", paired = T, conf.int=T, conf.level=0.99)
##nonparametric -->
##tests median <http://www.r-tutor.com/elementary-statistics/non-parametric-methods/wilcoxon-signed-rank-test>
###################### ANS 2 ##########################
hist(latencymeans.df$AgeLag)
hyp <- subset(latencymeans.df, Stimulus == "H")
tac <- subset(latencymeans.df, Stimulus == "T")
mean(hyp$FirstLtoHhours)
mean(hyp$ConsistentLtoHhours)
boxplot(hyp$FirstLtoHhours, hyp$ConsistentLtoHhours, xlab="hatching", ylab="Latency to hatch (h)")
axis(1, at=1:2, labels=c("first", "consistent"))
wilcox.test(hyp$FirstLtoHhours, hyp$ConsistentLtoHhours, mu = 0, alt="two.sided", paired = T, conf.int=T, conf.level=0.99)
mean(tac$FirstLtoHhours)
mean(tac$ConsistentLtoHhours, na.rm=T)
boxplot(tac$FirstLtoHhours, tac$ConsistentLtoHhours, xlab="hatching", ylab="Latency to hatch (h)")
axis(1, at=1:2, labels=c("first", "consistent"))
wilcox.test(tac$FirstLtoHhours, tac$ConsistentLtoHhours, mu = 0, alt="two.sided", paired = T, conf.int=T, conf.level=0.99)
## Age block when first vs. consistent // hyp vs. tactile
mean(hyp$FirstAgeBlock)
mean(hyp$ConsistentAgeBlock)
boxplot(hyp$FirstAgeBlock, hyp$ConsistentAgeBlock, xlab="hatching", ylab="Age Block (h)")
axis(1, at=1:2, labels=c("first", "consistent"))
wilcox.test(hyp$FirstAgeBlock, hyp$ConsistentAgeBlock, mu = 0, alt="two.sided", paired = T, conf.int=T, conf.level=0.99)
mean(tac$FirstAgeBlock)
mean(tac$ConsistentAgeBlock, na.rm=T)
boxplot(tac$FirstAgeBlock, tac$ConsistentAgeBlock, xlab="hatching", ylab="Latency to hatch (h)")
axis(1, at=1:2, labels=c("first", "consistent"))
wilcox.test(tac$FirstAgeBlock, tac$ConsistentAgeBlock, mu = 0, alt="two.sided", paired = T, conf.int=T, conf.level=0.99)
wilcox.test(tac$AgeLag, hyp$AgeLag, mu = 0, alt="two.sided", paired = T, conf.int=T, conf.level=0.99)
t.test(tac$AgeLag, hyp$AgeLag, mu = 0, alt="two.sided", paired = T, conf.int=T, conf.level=0.99)
sum(tac$AgeLag)
sum(hyp$AgeLag)
min(LtoHfig$Latency.to.Hatch.in.Minutes, na.rm=T)
### checking if same results when use these 2 diff
# Cross-check: repeat the age-block comparisons on two versions of the
# summarised dataset and confirm the conclusions agree.
latencymeansKW.df<-read.csv(file="SmallDatasetforJulie.csv")
newlatencymeansKW.df<-read.csv(file="NewSmallDatasetforJulie.csv")
hyplatencymeansKW <- subset(latencymeansKW.df, Stimulus == "H")
meclatencymeansKW <- subset(latencymeansKW.df, Stimulus == "T")
newhyplatencymeansKW <- subset(newlatencymeansKW.df, Stimulus == "H")
newmeclatencymeansKW <- subset(newlatencymeansKW.df, Stimulus == "T")
# AGE ANALYSIS (significant)
# #hypoxia
# Coerce the factor/character column to numeric (factor -> character first
# so factor levels are not turned into level codes).
hyplatencymeansKW$FirstAgeBlock<-as.numeric(as.character(hyplatencymeansKW$FirstAgeBlock))
# Fixed: the original called numeric(x), which allocates a vector of zeros
# of length x (and destroys the data); the intended conversion mirrors the
# line above.
newhyplatencymeansKW$FirstAgeBlock<-as.numeric(as.character(newhyplatencymeansKW$FirstAgeBlock))
# Paired t-tests, clutches paired across stimulus types.
# NOTE(review): conf.int is not a t.test() argument and is silently ignored.
t.test(hyplatencymeansKW$FirstAgeBlock, meclatencymeansKW$FirstAgeBlock, mu = 0, alt="two.sided", paired = TRUE, conf.level=0.99)
#different
t.test(hyplatencymeansKW$ConsistentAgeBlock, meclatencymeansKW$ConsistentAgeBlock, mu = 0, alt="two.sided", paired = TRUE, conf.level=0.99)
#same
t.test(newhyplatencymeansKW$FirstAgeBlock, newmeclatencymeansKW$FirstAgeBlock, mu = 0, alt="two.sided", paired = TRUE, conf.level=0.99)
#different
t.test(newhyplatencymeansKW$ConsistentAgeBlock, newmeclatencymeansKW$ConsistentAgeBlock, mu = 0, alt="two.sided", paired = TRUE, conf.level=0.99)
#same
#wilcox.test(hyplatencymeansKW$FirstAgeBlock, meclatencymeansKW$FirstAgeBlock, mu = 0, alt="two.sided", paired = T, conf.int=T, conf.level=0.99)
#wilcox.test(hyplatencymeansKW$ConsistentAgeBlock, meclatencymeansKW$ConsistentAgeBlock, mu = 0, alt="two.sided", paired = T, conf.int=T, conf.level=0.99)
# EMBRYO SIZE ANALYSIS
# NOTE(review): bothsize is assigned twice (identically) and never used below.
bothsize <- rbind(latencymeansKW.df$Embryosizefirst, latencymeansKW.df$Embryosizelast)
t.test(hyplatencymeansKW$Embryosizefirst, meclatencymeansKW$Embryosizefirst, mu = 0, alt="two.sided", paired = T, conf.int=T, conf.level=0.99)
t.test(hyplatencymeansKW$Embryosizelast, meclatencymeansKW$Embryosizelast, mu = 0, alt="two.sided", paired = T, conf.int=T, conf.level=0.99)
bothsize <- rbind(latencymeansKW.df$Embryosizefirst, latencymeansKW.df$Embryosizelast)
t.test(newhyplatencymeansKW$Embryosizefirst, newmeclatencymeansKW$Embryosizefirst, mu = 0, alt="two.sided", paired = T, conf.int=T, conf.level=0.99)
t.test(newhyplatencymeansKW$Embryosizelast, newmeclatencymeansKW$Embryosizelast, mu = 0, alt="two.sided", paired = T, conf.int=T, conf.level=0.99)
# DEV STAGE ANALYSIS
# wilcox.exact (exactRankTests) handles ties/zeroes exactly, unlike wilcox.test.
library(exactRankTests)
wilcox.exact(hyplatencymeansKW$Devstagefirst, meclatencymeansKW$Devstagefirst, mu = 0, alt="two.sided", paired = T, conf.int=T, conf.level=0.99)
#first stage - with correction factor (V=0, P=0.0009766)
wilcox.exact(hyplatencymeansKW$Devstagelast, meclatencymeansKW$Devstagelast, mu = 0, alt="two.sided", paired = T, conf.int=T, conf.level=0.99)
#last stage - with correction factor (V=0, P=0.0009766)
wilcox.exact(newhyplatencymeansKW$Devstagefirst, newmeclatencymeansKW$Devstagefirst, mu = 0, alt="two.sided", paired = T, conf.int=T, conf.level=0.99)
#first stage - with correction factor (V=0, P=0.0009766)
wilcox.exact(newhyplatencymeansKW$Devstagelast, newmeclatencymeansKW$Devstagelast, mu = 0, alt="two.sided", paired = T, conf.int=T, conf.level=0.99)
#last stage - with correction factor (V=0, P=0.0009766)
# Same comparisons without the continuity correction.
wilcox.exact(hyplatencymeansKW$Devstagefirst, meclatencymeansKW$Devstagefirst, mu = 0, alt="two.sided", paired = T, correct=F, conf.int=T, conf.level=0.99)
wilcox.exact(hyplatencymeansKW$Devstagelast, meclatencymeansKW$Devstagelast, mu = 0, alt="two.sided", paired = T, correct=F, conf.int=T, conf.level=0.99)
#same without correction factor
wilcox.test(hyplatencymeansKW$Devstagefirst, meclatencymeansKW$Devstagefirst, mu = 0, alt="two.sided", paired = T, correct=F, conf.int=T, conf.level=0.99)
wilcox.test(hyplatencymeansKW$Devstagelast, meclatencymeansKW$Devstagelast, mu = 0, alt="two.sided", paired = T, conf.int=T, conf.level=0.99)
### "Stage at the onset of hatching was quite consistent under hypoxia and more variable in response to the mechanosensory cue (Fig. 3B; Levene’s test, F1,20 = 15.116, P = 0.0009). "
# load leveneTest function
library(car)
# run the levene test centered around the mean
leveneTest(latencymeansKW.df$Devstagefirst, latencymeansKW.df$Stimulus, center=mean)
#data frame with two columns, height (in inches) and sex (Male or Female)
#and I want to run levene's test to see if the variance is the same for
#Male and Female height.
# Same Levene test on the revised dataset.
leveneTest(newlatencymeansKW.df$Devstagefirst, newlatencymeansKW.df$Stimulus, center=mean)
#### "However the lag time from first until consistent hatching did not differ between stimulus types
#### (t10 = 1.047, P = 0.32)."
# Lag = age block at consistent hatching minus age block at first hatching.
latencymeansKW.df$AgeLag<-latencymeansKW.df$ConsistentAgeBlock - latencymeansKW.df$FirstAgeBlock
meclatencymeansKW$AgeLag<-meclatencymeansKW$ConsistentAgeBlock - meclatencymeansKW$FirstAgeBlock
hyplatencymeansKW$AgeLag<-hyplatencymeansKW$ConsistentAgeBlock - hyplatencymeansKW$FirstAgeBlock
hist(latencymeansKW.df$AgeLag)
hist(meclatencymeansKW$AgeLag)
hist(hyplatencymeansKW$AgeLag)
# if want non-parametric
wilcox.test(meclatencymeansKW$AgeLag, hyplatencymeansKW$AgeLag, mu = 0, alt="two.sided", paired = T, conf.int=T, conf.level=0.99)
# currently reporting parametric test
t.test(meclatencymeansKW$AgeLag, hyplatencymeansKW$AgeLag, mu = 0, alt="two.sided", paired = T, conf.int=T, conf.level=0.99)
# NEW ##
#### "However the lag time from first until consistent hatching did not differ between stimulus types
#### (t10 = 1.047, P = 0.32)."
# Same lag analysis on the revised dataset.
newlatencymeansKW.df$AgeLag<-newlatencymeansKW.df$ConsistentAgeBlock - newlatencymeansKW.df$FirstAgeBlock
newmeclatencymeansKW$AgeLag<-newmeclatencymeansKW$ConsistentAgeBlock - newmeclatencymeansKW$FirstAgeBlock
newhyplatencymeansKW$AgeLag<-newhyplatencymeansKW$ConsistentAgeBlock - newhyplatencymeansKW$FirstAgeBlock
hist(newlatencymeansKW.df$AgeLag)
hist(newmeclatencymeansKW$AgeLag)
hist(newhyplatencymeansKW$AgeLag)
# currently reporting parametric test
t.test(newmeclatencymeansKW$AgeLag, newhyplatencymeansKW$AgeLag, mu = 0, alt="two.sided", paired = T, conf.int=T, conf.level=0.99)
|
ea3c67ac2b79bf64ace3f178548a1c39269bc50e | 643248857926aa16523e6b941cbe73e1bf9cf2c8 | /Temp/ktemp.R | 7ade0ebdd340e223a401c5de0baf346e36990da2 | [] | no_license | ksrikanthcnc/Data-Mining | 004135123e6c6d83d0a84bf99f38c4764f598bf0 | 1fdc62de42f8fb80e0dd2f645737317f5cfdb9fe | refs/heads/master | 2020-03-16T17:56:06.725758 | 2019-05-23T13:58:15 | 2019-05-23T13:58:15 | 132,852,902 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 978 | r | ktemp.R | km1 = kmeans(m, 2, nstart=100)
# Visualise the 2-cluster k-means fit (km1 computed above); +1 shifts the
# cluster ids so colour 1 (black) is not used.
plot(m, col =(km1$cluster +1) , main="K-Means result with 2 clusters", pch=20, cex=2)
# NOTE(review): console-transcript residue -- a trailing `$` continues the
# expression, so these two lines parse as `km1$m` (NULL). Probably meant to
# inspect km1 and m separately.
km1$
m
km1
# Elbow method: total within-cluster SS for k = 1..15 on the first 1000 rows.
mydata <- m[1:1000,]
# k = 1 value: (n-1) * sum of column variances (analytic total SS).
wss <- (nrow(mydata)-1)*sum(apply(mydata,2,var))
for (i in 2:15) wss[i] <- sum(kmeans(mydata,
centers=i)$withinss)
plot(1:15, wss, type="b", xlab="Number of Clusters",
ylab="Within groups sum of squares",
main="Assessing the Optimal Number of Clusters with the Elbow Method",
pch=20, cex=2)
# Refit with the elbow-selected k = 6 and visualise.
km2 = kmeans(m, 6, nstart=100)
plot(m, col =(km2$cluster +1) , main="K-Means result with 6 clusters", pch=20, cex=2)
km2
head(km2)
# Elbow plot for choosing k in k-means: plots the total within-group sum of
# squares for k = 1..nc clusters of `data`.  k = 1 uses the analytic value
# (n-1) * sum of column variances; each k >= 2 re-seeds the RNG first so the
# k-means fits are reproducible.
wssplot <- function(data, nc=15, seed=1234) {
  wss <- numeric(nc)
  wss[1] <- (nrow(data) - 1) * sum(apply(data, 2, var))
  for (k in 2:nc) {
    set.seed(seed)
    wss[k] <- sum(kmeans(data, centers = k)$withinss)
  }
  plot(1:nc, wss, type = "b",
       xlab = "Number of Clusters",
       ylab = "Within groups sum of squares")
}
# Inspect the 6-cluster assignments.
km2$cluster
# NOTE(review): pc.comp1, pc.comp2 and cl are not defined anywhere in this
# file -- presumably PCA scores and a clustering from another session.
plot(pc.comp1, pc.comp2,col=cl$cluster)
|
125ff6d88fe827b5a991acd31f63c5650d34bda7 | 919fd296ac269d455a7d995aeb5b9d918cbfc058 | /lessons/r/shiny/5/ui.r | f202ed7f3dce6c0b816bd8ad659fd4cabc71db91 | [
"Apache-2.0"
] | permissive | aays/studyGroup | 9681427897d30bcddf2162ccdd3b410c4f2cb9e0 | e7d7bb03e70e32c7ca2525ce826e366810d3e9a0 | refs/heads/gh-pages | 2023-04-07T18:18:10.952550 | 2023-02-27T15:45:13 | 2023-02-27T15:45:13 | 171,192,931 | 0 | 0 | Apache-2.0 | 2019-02-18T01:15:35 | 2019-02-18T01:15:34 | null | UTF-8 | R | false | false | 1,075 | r | ui.r | library(shiny) #First load shiny library
load("../pcas.RDATA") # Load PCA results used to populate the UI
# Define the overall UI for the "PCA of Metrics" app; server-side code
# (server.R) fills the uiOutput placeholders below.
shinyUI(
# Use a fluid Bootstrap layout
fluidPage(
# Give the page a title
titlePanel("PCA of Metrics"),
# Define page with a sidebar panel and main panel
sidebarLayout(
# Sidebar Panel
sidebarPanel(
# Drop-down menu to select which variable's PCA to plot; display names
# map to the internal variable ids used server-side.
selectInput(inputId='var', label = h3('Variable'),
choices = c(
"Colless" = "colless",
"Species Pool Size" = "numsp",
"Spatially Contiguous" = "spatial"
)
),
# Check box group rendered by the server, contingent upon the drop-down selection
uiOutput(outputId="paramchkbxgrp"),
# Slider that adjusts the cex (text/point size) value of the plot
sliderInput(inputId = "cexSlider", label=h4("Adjust cex"),
min = 0, max=5, value=1),
# Axis-range sliders, also rendered server-side
uiOutput(outputId="sliderX"),
uiOutput(outputId="sliderY")
),
# Main panel: the PCA plot itself
mainPanel(
plotOutput(outputId="pcaplot", width="500px",height="500px")
)
)
)
)
|
087e68f569fa490e3e406d46cdbd7e9add9a419d | d226838e64a1d55fdaf797893f7468651b725183 | /man/bowtie2Build.Rd | 6510cc04ad49e11631afbe13b5b966d6f988e8a4 | [] | no_license | HenrikBengtsson/aroma.seq | 5fd673cc449d9c3b89daf1125e8cc95556d0641d | 6464f1e5e929c423978cf7dcb11ac7018d179a6d | refs/heads/master | 2021-06-21T13:53:21.618898 | 2021-02-10T02:57:15 | 2021-02-10T02:57:15 | 20,848,327 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,591 | rd | bowtie2Build.Rd | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% bowtie2Build.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{bowtie2Build}
\alias{bowtie2Build.default}
\alias{bowtie2Build}
\title{Creates index on reference genome using bowtie2-build}
\description{
Creates index on reference genome using bowtie2-build.
}
\usage{
\method{bowtie2Build}{default}(pathnameFAs, bowtieRefIndexPrefix, optionsVec=NULL, ...,
command="bowtie2-build", verbose=FALSE)
}
\arguments{
\item{pathnameFAs}{A \code{\link[base]{character}} \code{\link[base]{vector}} of FASTA reference files.}
\item{bowtieRefIndexPrefix}{A \code{\link[base]{character}} string specifying the bowtie2
reference index to be built (partial pathname, minus the .*.bt2 suffix).}
\item{optionsVec}{(optional) A named \code{\link[base]{character}} \code{\link[base]{vector}}.}
\item{...}{...}
\item{command}{The name of the external executable.}
\item{verbose}{See \code{\link[R.utils]{Verbose}}.}
}
\section{Support for compressed input files}{
If gzipped FASTA files are used, this function will temporarily decompress
before passing them to the bowtie2-build external software (which only
support non-compressed FASTA files).
}
\section{Known issues}{
The FASTA pathnames must not contain commas.
If detected, this method generates an informative error.
}
\author{Henrik Bengtsson, Taku Tokuyasu}
\keyword{internal}
|
c928cef028f878f4899529caf9a7b1660e27e6d5 | c513316dce29b9fb75e8302d1d2c475e180785bb | /man/plot_point_prediction_quality.Rd | ff87feb44c540352a77852114396670eda6502ec | [] | no_license | russelnelson/freelunch | 033f9774c44256d0f464519277851132bb0d4e61 | 59f31e1ae36459269c115c9c2413684c333f839d | refs/heads/master | 2023-03-19T18:27:20.858058 | 2021-03-08T07:05:15 | 2021-03-08T07:05:15 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,187 | rd | plot_point_prediction_quality.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/setup.R
\name{plot_point_prediction_quality}
\alias{plot_point_prediction_quality}
\title{Produces a scatterplot of real parameters (y axis) and estimated parameters (x axis) to quickly
look at quality of point prediction. A perfect method would have all these dots on the 45 degree line
which is highlighted here with a red dashed line.}
\usage{
plot_point_prediction_quality(estimation)
}
\arguments{
\item{estimation}{the output of one of the \code{fit_} or \code{cross_validate_} calls in this package}
}
\value{
A list of ggplots
}
\description{
I find this plot particularly useful to spot partial identification: when estimation is only
possible for a sub-interval of the parameter range.
}
\examples{
##generate some fake data where paramone,paramtwo ---> ssone,sswto;
## notice that paramtwo is basically unidentifiable!
paramone<-rnorm(n=5000)
paramtwo<-runif(n=5000,min=2,max=5)
ssone<-2*paramone + rnorm(n=5000)
sstwo<- paramone/paramtwo + rnorm(n=5000)
training_data<-
data.frame(
paramone,
paramtwo,
ssone,
sstwo
)
## this would be the "real" data, what we want to estimate our model with!
testing_data<-data.frame(
ssone=2,
sstwo=0.25
)
### fit a gam
estimation<- fit_gam(training_runs = training_data,
target_runs = training_data,
parameter_colnames = c("paramone","paramtwo"),
summary_statistics_colnames = c("ssone","sstwo"))
## we can check how the prediction match real parameters
## because target_runs were just the training_data again this is showing IN-SAMPLE errors
plot_point_prediction_quality(estimation)
## notice that basically GAM for paramtwo just returns the average (i.e. "I don't know!")
## we can theme it, but we need to map (since this is patchwork output)
## we can do the same plot for cross-validations results; which
## would make it OUT-OF-SAMPLE
cv_results<-
cross_validate_rejection_abc(training_data,ngroup = 5,
parameter_colnames = c("paramone","paramtwo"),
summary_statistics_colnames = c("ssone","sstwo"))
}
|
23202fde0b1a7ca42a3f63e83b107de03901006a | b457ede5c2d4d5065896c612a331b0988a297a30 | /Chevalier_etal_MD962048/figures/makeFigDR6.R | dfb4cd72ba8ee7f2c6cd207396c6c1e8764ffe39 | [] | no_license | mchevalier2/Papers | 6556e6ccd19bd71a11be2865477e436f5f3eb62c | 73d24cda42f86fe1b9bf2ffebe031603260e45d4 | refs/heads/master | 2023-05-27T20:58:51.029066 | 2023-05-19T09:08:27 | 2023-05-19T09:08:27 | 187,044,389 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,039 | r | makeFigDR6.R | ## Figure DR6: Morlet nalysis of pollen diversity
##
# Ask the user where to write the figure; an empty answer keeps the default
# (the current working directory).
OUTPUT_FOLDER <- getwd()
s <- readline(prompt=paste0("Where should the figure be saved?\nDefault is current working directory (",OUTPUT_FOLDER,"): "))
if(s != '') OUTPUT_FOLDER <- s
# Collect the required packages that are not installed yet (checked in this
# fixed order so the behaviour matches the original three separate tests).
pkg2install <- c()
for (pkg in c('rio', 'dplR', 'plot3D')) {
  if (! (pkg %in% rownames(installed.packages()))) pkg2install <- c(pkg2install, pkg)
}
makePlot <- TRUE
if (length(pkg2install) > 0){
  # Require an explicit yes/no answer before installing anything; any other
  # input re-prompts the user.
  s <- ''
  while (! s %in% c('y', 'yes', 'Y', 'YES', 'n', 'no', 'N', 'NO')){
    s <- readline(prompt=paste0("The following are required: ", paste(pkg2install, collapse=', '),". Do you want to install them? [yes/no] "))
  }
  if(s %in% c('y', 'yes', 'Y', 'YES')){
    install.packages(pkg2install)
  }else{
    # Fixed typo in the user-facing message ("aborded" -> "aborted").
    print("Script aborted.")
    makePlot <- FALSE
  }
}
if (makePlot) {
## Calculate the Gaussian density of Probability
## defined by xbar and sigma, at x
gauss=function(x, xbar, sigma){return(1/sqrt(2*pi*sigma**2)*exp(-(x-xbar)**2/sigma**2))}
## Apply the Gaussian smoothing kernel on dat, using sigma
## as a kernel width. xout defines the output axis
gausmooth=function(dat, xout, sigma, interp=TRUE){
yout=rep(NA,length(xout))
for(i in 1:length(xout)){
if((xout[i] >= min(dat[,1]) & xout[i] <= max(dat[,1])) | interp){
yout[i]=sum(dat[,2]*gauss(dat[,1], xout[i], sigma))/sum(gauss(dat[,1], xout[i], sigma))
}
}
return(yout)
}
makeTransparent <- function(..., alpha=0.5) {
if(alpha>1) alpha=1
if(alpha<0) alpha=0
alpha = floor(255*alpha)
newColor = col2rgb(col=unlist(list(...)), alpha=FALSE)
.makeTransparent = function(col, alpha) {
rgb(red=col[1], green=col[2], blue=col[3], alpha=alpha, maxColorValue=255)
}
newColor = apply(newColor, 2, .makeTransparent, alpha=alpha)
return(newColor)
}
  # Download the published input data sets straight from GitHub:
  # MAT       - sheet 2, first 181 rows (temperature reconstruction for MD96-2048),
  # POLLEN    - sheet 4, first 181 rows, without columns 2-3,
  # POLLENSUM - sheet 4, first 181 rows, columns 1-3 only,
  # ECC       - sheet 2, first 800 rows, columns 1-2 (orbital eccentricity).
  MAT=rio::import('https://github.com/mchevalier2/ClimateReconstructions/raw/master/MD96-2048_MAT_01.xlsx', which=2)[1:181,]
  POLLEN=rio::import('https://github.com/mchevalier2/Papers/raw/master/Chevalier_etal_MD962048/data/IndependentRecords.xlsx', which=4)[1:181,-c(2,3)]
  POLLENSUM=rio::import('https://github.com/mchevalier2/Papers/raw/master/Chevalier_etal_MD962048/data/IndependentRecords.xlsx', which=4)[1:181,c(1,2,3)]
  ECC=rio::import('https://github.com/mchevalier2/Papers/raw/master/Chevalier_etal_MD962048/data/IndependentRecords.xlsx', which=2)[1:800,c(1,2)]
  ## Species richness (S)
  S <- vegan::specnumber(POLLEN[,-1]) ## rowSums(BCI > 0) does the same... # Richness
  ## Margalef's diversity index, (S - 1) / log(N). (The previous label
  ## "Pielou's evenness" was inaccurate: this is the formula for Margalef's
  ## index, which is also how the figure axis is labelled below.)
  DMG=(S-1) / log(POLLENSUM[,2])
  # Interpolate the index onto a regular 1-kyr time axis for the wavelet.
  DMG.interp=approx(MAT[,1],DMG, xout=seq(0,790,1))
  # Morlet continuous wavelet transform (99% significance level).
  morlet=dplR::morlet(DMG.interp$y, DMG.interp$x, siglvl=0.99, p2=8.7, dj=0.1)
  # log2 power with the period axis reversed for plotting; clip low power at -4.
  morletP=log2(morlet$Power)[,ncol(morlet$Power):1]
  morletP[morletP < -4] = -4
  # Power-to-significance ratio: values > 1 are significant at siglvl.
  Signif <- t(matrix(morlet$Signif, dim(morlet$Power)[2], dim(morlet$Power)[1]))
  Signif <- morlet$Power/Signif
pdf(paste0(OUTPUT_FOLDER, "/Chevalier_etal_MD962048_FigDR6.pdf"), width=7.54, height=7.54/2, useDingbats=FALSE) ; {
par(ps=7,bg=makeTransparent("white",alpha=0),mar=rep(0,4),cex=1,cex.main=1)
layout(matrix(1:2, ncol=2, byrow=TRUE), width=c(1.2, 0.8), height=1)
COL='black'
COL2='red'
plot.new() ; { ## SSTs
plot.window(xlim=c(-100,900),ylim=range(DMG)+diff(range(DMG))*c(-0.1,0.02),main='',ylab='',xlab='') ; {
points(MAT[,1], DMG, col=makeTransparent(COL, alpha=1), type='l', cex=0.3)
for(i in seq(16.5,21.5,0.5)) segments(-20,i,-9,i, lwd=0.5, col=COL)
for(i in seq(17,21.5,1)) text(-25,i,i, adj=c(1,0.5), col=COL)
text(-115, min(DMG)+diff(range(DMG))/2, "Margalef's Index", adj=c(0.5,1), srt=90, col=COL, cex=8/7)
rect(-9,min(DMG)-0.02*diff(range(DMG)),809,max(DMG)+0.02*diff(range(DMG)),lwd=0.5)
for(i in seq(0,800,25)){ segments(i,min(DMG)-0.02*diff(range(DMG)),i,min(DMG)-ifelse(i%%50 == 0, 0.03,0.025)*diff(range(DMG)), lwd=0.5) }
for(i in seq(0,800,100)){ text(i,min(DMG)-0.04*diff(range(DMG)), i, cex=1, adj=c(0.5, 1)) }
text(400, min(DMG)-0.1*diff(range(DMG)), 'Age (calendar yr BP x1000)', adj=c(0.5,0.5), cex=8/7)
}
plot.window(xlim=c(-100,900),ylim=range(ECC[,2])+diff(range(ECC[,2]))*c(-0.1,0.02),main='',ylab='',xlab='') ; {
points(ECC, col=makeTransparent(COL2, alpha=1), type='l', cex=0.3)
for(i in seq(0.005,0.05,0.005)) segments(820,i,809,i, lwd=0.5, col=COL2)
for(i in seq(0.005,0.05,0.01)) text(825,i,i, adj=c(0,0.5), col=COL2)
text(920, min(ECC[,2])+diff(range(ECC[,2]))/2, 'Eccentricty', adj=c(0.5,0), srt=90, col=COL2, cex=8/7)
}
}
par(mar=c(3.5,2.2,3,.2))
plot3D::image2D(z=morletP[,1:65],y=rev(morlet$period)[1:65], x=morlet$x, ylim=rev(range(rev(morlet$period)[1:65])), col = plot3D::jet.col(100), cex.axis=7/7, colkey=FALSE, resfac=2, tck=-.013, mgp=c(1.3, .3, 0), las=1, hadj=c(1,1), xlab='Age (calendar yr BP x1000)', ylab='Periods (in thousand of years)', cex.lab=8/7, contour=FALSE, log='y', lwd=1.5)
contour(morlet$x, morlet$period, Signif, levels = 1, labels = morlet$siglvl, drawlabels = FALSE, axes = FALSE, frame.plot = FALSE, add = TRUE, lwd = 1, col = "black")
polygon(c(0,morlet$x, 792,0),c(max(morlet$Scale),2**log2(morlet$coi), max(morlet$period),max(morlet$period)), col=makeTransparent('white', alpha=0.6), lwd=0.2)
plot3D::colkey(side=3, length=0.8, dist=-0.01, lwd=0.1, cex.axis=8/7, clim=range(morletP), col=plot3D::jet.col(100), clab='log2(power)', font.clab=1, line.clab=1.3, adj.clab=0.5, add=TRUE, tck=-0.4, mgp=c(3, .25, 0), lwd.tick=0.7)
} ; dev.off()
}
#-;
|
fc365d90f5db0fef41816ea0657d0de2104a3ec4 | 247946f5456e093a7fe49f57e722477ac9dc010e | /R/signed_rankprod.R | e735e318cfa77e486e3909e6ca0e30a66b97c28c | [
"MIT"
] | permissive | jdreyf/jdcbioinfo | b718d7e53f28dc15154d3a62b67075e84fbfa59b | 1ce08be2c56688e8b3529227e166ee7f3f514613 | refs/heads/master | 2023-08-17T20:50:23.623546 | 2023-08-03T12:19:28 | 2023-08-03T12:19:28 | 208,874,588 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,864 | r | signed_rankprod.R | #' Signed rank products of a two column matrix where larger statistics have stronger rank in the same or opposite direction
#'
#' Signed rank products of a two column matrix. Larger statistics have stronger rank in the same or opposite direction.
#'
#' @details The observed signed rank product of each row is compared to an
#' empirical null distribution built by resampling each column's signed ranks
#' independently (\code{nsim} draws), yielding permutation-style p-values.
#' The parameter name \code{same.dirction} is kept as-is (including the
#' spelling) for backward compatibility with existing callers.
#' @param mat Numeric matrix with two columns holding statistics per comparison & rows are analytes.
#' @param same.dirction Logical indicates whether the two ranks should be in the same direction.
#' @inheritParams rankprod
#' @return Data frame with statistics from signed rank products test.
#' @export
signed_rankprod <- function(mat, nsim=1e7-2, same.dirction=FALSE, reorder.rows=TRUE, prefix=NULL, seed=100){
  stopifnot(ncol(mat)==2, !is.null(colnames(mat)))
  # Cap nsim so the +/-Inf-padded ECDF below can still resolve small p-values.
  if(nsim > 1e7-2) stop("nsim too large to have enough precision")
  # Signed ranks: rank each column by absolute value, then restore the sign.
  rmat <- apply(mat, 2, function(v) {
    r <- rank(abs(v))
    r[v < 0] <- -r[v < 0]
    return(r)
  })
  # Scale ranks to (0, 1] so products are comparable across matrix sizes.
  rmat <- rmat/nrow(mat)
  colnames(rmat) <- paste(gsub("\\..$", "", colnames(mat)), "Signed.Rank.Prop", sep=".")
  # Build the null distribution by resampling each column independently.
  set.seed(seed)
  rmat.sim <- apply(rmat, 2, function(v, nsim) sample(v, size=nsim, replace=TRUE), nsim)
  rankprod <- apply(rmat, 1, prod)
  rankprod.sim <- apply(rmat.sim, 1, prod)
  # Pad with +/-Inf so observed products never land on p = 0 or p = 1 exactly.
  Fn <- stats::ecdf(c(rankprod.sim, Inf, -Inf))
  pval <- Fn(rankprod)
  if(same.dirction) {
    # One-sided: large positive products (both ranks in the same direction).
    pval <- 1 - pval
  } else {
    # Two-sided: extreme products in either direction.
    pval <- 2 * pmin(pval, 1 - pval)
  }
  fdr <- stats::p.adjust(pval, method="BH")
  # Classify each row by the signs of its two scaled ranks.
  direction <- rep("", nrow(rmat))
  direction[rankprod < 0] <- "Opposite"
  direction[rmat[, 1] > 0 & rmat[, 2] > 0] <- "Up"
  direction[rmat[, 1] < 0 & rmat[, 2] < 0] <- "Down"
  res <- data.frame(rmat, Direction=direction, Signed.Rankprod.p=pval, Signed.Rankprod.FDR=fdr, row.names=rownames(mat))
  if(reorder.rows) res <- res[order(res$Signed.Rankprod.p), ]
  if(!is.null(prefix)) colnames(res) <- paste(prefix, colnames(res), sep=".")
  return(res)
}
|
99c8da987734f7d355b528b4f9f615e9489f30e7 | 98b2af819fda96cdefb326aa24be7a424367369b | /src/5-merge_manual_data.R | c74b59bfbeb343ca3ec974bbd309b679f46a019a | [] | no_license | bgulbis/Risk_Score_Validation | 9eba5a2a09c45a2648da321c8b7bcbbd700b418e | bd708b28c515bf5ed0982fc0b3b12b6fc76e1af5 | refs/heads/master | 2020-05-21T20:24:38.091811 | 2016-11-30T21:56:44 | 2016-11-30T21:56:44 | 65,042,606 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,139 | r | 5-merge_manual_data.R | # merge_manual_data
library(readxl)
library(edwr)
library(tidyverse)
library(stringr)
# Root folder holding the externally supplied data files.
data.external <- "data/external"
# Patient identifiers read from the external data (fin -> pie.id mapping,
# per the join and select below).
identifiers <- read_data(data.external, "^identifiers") %>%
    as.id()
# Lookup table mapping the spreadsheet's free-text co-morbidity labels to
# short codes. Note "Encephalopathy" and "Coma" intentionally share a code.
comorbid <- c("Cirrhosis" = "cirrhosis",
              "Upper GI bleeding" = "upper_gi_bleed",
              "Hepatic failure" = "hepatic_failure",
              "Encephalopathy" = "encephalopathy_coma",
              "Coma" = "encephalopathy_coma",
              "Heart failure" = "chf",
              "Chronic restrictive, obstructive, or vascular disease" = "pulmonary",
              "Chronic hypoxia" = "hypoxia",
              "Hypercapnia" = "hypercapnia",
              "Secondary polycythemia" = "polycythemia",
              "Pulmonary hypertension" = "pulm_htn",
              "Respiratory dependency" = "resp_depend",
              "Acute renal failure" = "arf",
              "Receiving chronic dialysis" = "chronic_hd",
              "Metastatic cancer" = "cancer_mets",
              "Immunosuppression" = "immunosuppress",
              "Chemotherapy" = "chemo",
              "Radiation" = "radiation",
              "Long-term or high-dose steroids" = "steroids",
              "Leukemia" = "leukemia",
              "Multiple myeloma" = "mult_myeloma",
              "Lymphoma" = "lymphoma",
              "AIDS" = "aids")
# Read the manually abstracted data, normalise column names, recode the
# numeric value column to logical (1 -> TRUE, otherwise FALSE, NA kept),
# map co-morbidity labels to codes, attach pie.id, and keep one row per
# patient/co-morbidity -- sorted with TRUE first so a positive finding
# wins over a duplicate negative one.
manual_data <- read_excel(paste(data.external, "2016-10-23_manual_data.xlsx", sep = "/"),
                          col_types = c("text", "text", "numeric", "text")) %>%
    rename(fin = `Patient ID`,
           comorbidity = `Co-morbidity`,
           value = Value,
           comments = Comments) %>%
    mutate(value = if_else(value == 1, TRUE, FALSE, NA),
           comorbidity = str_replace_all(comorbidity, comorbid)) %>%
    filter(!is.na(fin)) %>%
    left_join(identifiers, by = "fin") %>%
    select(pie.id, comorbidity, value) %>%
    arrange(pie.id, comorbidity, desc(value)) %>%
    distinct(pie.id, comorbidity, .keep_all = TRUE)
# One row per patient that has any manually abstracted data.
manual_patients <- distinct(manual_data, pie.id)
saveRDS(manual_data, "data/tidy/manual_data.Rds")
saveRDS(manual_patients, "data/final/manual_patients.Rds")
|
5a1aa32a59c69199f5204986466a7cc88ed11c3e | 57f81f0e33aff4c3d1d074438ffaf6b7b8636ac3 | /man/kmbayes_diagnose.Rd | fef0c8fb8f8aadef220136632a332b6c2f5d5d41 | [] | no_license | yadevi/bkmrhat | 679fdd2551c94a8cb70974c487e2408ce4f30049 | 33117def060cb20b3269470564fd03c753701fee | refs/heads/master | 2023-03-10T10:02:38.341174 | 2021-02-17T17:13:33 | 2021-02-17T17:13:33 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,173 | rd | kmbayes_diagnose.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/diag_funs.R
\name{kmbayes_diagnose}
\alias{kmbayes_diagnose}
\alias{kmbayes_diag}
\title{MCMC diagnostics using rstan}
\usage{
kmbayes_diagnose(kmobj, ...)
kmbayes_diag(kmobj, ...)
}
\arguments{
\item{kmobj}{Either an object from \code{\link[bkmr]{kmbayes}} or
from \code{\link[bkmrhat]{kmbayes_parallel}}}
\item{...}{arguments to \code{\link[rstan]{monitor}}}
}
\description{
Give MCMC diagnostics from the \code{rstan} package
using the \code{\link[rstan]{Rhat}}, \code{\link[rstan]{ess_bulk}},
and \code{\link[rstan]{ess_tail}} functions. Note that r-hat is only
reported for \code{bkmrfit.list} objects from \code{\link[bkmrhat]{kmbayes_parallel}}
}
\examples{
\donttest{
set.seed(111)
dat <- bkmr::SimData(n = 50, M = 4)
y <- dat$y
Z <- dat$Z
X <- dat$X
set.seed(111)
Sys.setenv(R_FUTURE_SUPPORTSMULTICORE_UNSTABLE="quiet")
future::plan(strategy = future::multiprocess)
fitkm.list <- kmbayes_parallel(nchains=2, y = y, Z = Z, X = X, iter = 1000,
verbose = FALSE, varsel = TRUE)
kmbayes_diag(fitkm.list)
kmbayes_diag(fitkm.list[[1]]) # just the first chain
closeAllConnections()
}
}
|
0342678e33b503d005d4d61199d6b3860463a153 | 18f8d1bbc50f09d048297c7685f2c32be1598a76 | /man/NSBM.estimate.Rd | 0731ca9e9e6aad0323ffa191d769a674c620d716 | [] | no_license | cran/randnet | d349ef02db7a200d9562c1d221147744e5c7636b | 9dcbcd5ef79d868a830a4a4be8d86507439e363e | refs/heads/master | 2023-06-02T07:27:44.475063 | 2023-05-20T06:30:02 | 2023-05-20T06:30:02 | 103,827,813 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,561 | rd | NSBM.estimate.Rd | \name{NSBM.estimate}
\alias{NSBM.estimate}
\title{
estimates nomination SBM parameters given community labels by the method of moments
}
\description{
estimates NSBM parameters given community labels
}
\usage{
NSBM.estimate(A,K,g,reg.bound=-Inf)
}
\arguments{
\item{A}{
adjacency matrix of a directed where Aij = 1 iff i -> j
}
\item{K}{
number of communities
}
\item{g}{
a vector of community labels
}
\item{reg.bound}{
the regularity lower bound of the lambda values. By default, -Inf, meaning no constraint is applied. When the network is sparse, using certain constraints may improve stability.
}
}
\details{
The method of moments is used for estimating the edge nomination SBM, so the strategy can be used for both unweighted and weighted networks. The details can be found in Li et. al. (2020).
}
\value{
a list of
\item{B }{estimated block connection probability matrix}
\item{lambda }{estimated lambda values for nomination intensity}
\item{theta }{estimated theta values for nomination preference}
\item{P.tilde }{estimated composite probability matrix after nomination}
\item{g }{community labels}
}
\references{
T. Li, E. Levina, and J. Zhu. Community models for networks observed through edge nominations. arXiv preprint arXiv:2008.03652 (2020).
}
\author{
Tianxi Li, Elizaveta Levina, Ji Zhu\cr
Maintainer: Tianxi Li \email{tianxili@virginia.edu}
}
\seealso{
\code{\link{SBM.estimate}}
}
\examples{
dt <- NSBM.Gen(n=200,K=2,beta=0.2,avg.d=10)
A <- dt$A
sc <- RightSC(A,K=3)
est <- NSBM.estimate(A,K=3,g=sc$cluster)
}
\keyword{ NSBM}
|
0745dcc8f5da88753a151742eb6265f4115a89ca | a85b5937213fdb7f82ca9db40346c90df9efa017 | /code/codeAttachment/hcGap.R | ec81e3295651062129b0b8c4c733f128214d098d | [] | no_license | simonlehmannknudsen/Parameterless-clustering-by-dynamic-tree-cutting | 17524465d545f0b39609fd8ee0bf7b9fb8e576b9 | 57289f56f9a8706e4320dc2f18276652b79901ab | refs/heads/master | 2021-09-05T23:27:14.940873 | 2018-01-31T16:10:48 | 2018-01-31T16:10:48 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 22,122 | r | hcGap.R | setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
library(transclustr)
library(ggplot2)
library(plyr)
library(cluster)
library(gtools)
library(ClusterR)
library(stringi)
require(graphics)
source("utilities.R")
source("randomization.R")
# NOTE!
# The hierarchical clustering approach used here is divisive.
# The dendrogram information is converted into an agglomerative representation so that the plotting methods defined for a hclust() object can be reused.
# To do this, a hclust() object is created, a clustering is run with this method, and the resulting variables are reassigned onto that object.
####################################################################################################
# Hierarchical Method
####################################################################################################
# Returns a hclust() object: ?hclust
# simMatrix: Similarity matrix
# proteins: All protein names
# step: The steps of thresholds used for clustering
# minSplit: Minimum number of splits when doing a binary search
# maxSplit: Maximum number of splits when doing a binary search
# minTreshold: The starting threshold, gets incremented by 'step'
# maxThreshold: The maximum threshold where all clusters are singletons, max(simMatrix) + 1
# binarySearch: TRUE will enable the binary search
hclustDivisiveGap <- function(simMatrix, proteins, step = 1, minSplit = 2, maxSplit = 10, minThreshold, maxThreshold, binarySearch = FALSE, GAP = FALSE, dimensions = 5, seed = 42, randomAp = 4) {
if (binarySearch && step == 1) { stop("Binary search requires step >= 2.") }
dfGap <- data.frame(costOriginal = integer(0), costRandom = integer(0), threshold = integer(0), numberOfProteins = integer(0))
# List of all clusters that needs to be clustered with tclust
# cid: Cluster id
# proteins: Proteins in the given cluster
# startIndex, endIndex: The range in 'order' for the proteins belonging to this cluster.
# height: The height where the children clusters would be merged into this cluster. Meaning that this cluster is split into its children at height-1.
# nextThreshold: Next threshold to run tclust on this cluster.
proteinLabelsOfEachCluster <- list(c1 = list(cid = "c1", proteins = c(proteins), startIndex = 1, endIndex = length(proteins), height = -1, parent = "", nextThreshold = minThreshold))
# Total number of clusters which has occured during the run. (Includes singleton clusters). Used to get proper cid on all clusters.
amountOfTotalClusters <- 1
# Holds all information for every cluster during the run. totalClusters$c1 is information about the first cluster. Required to make the merge ordering for the dendrogram.
totalClusters <- proteinLabelsOfEachCluster
countSplits <- 0 # Total number of splits for the entire run
countSingletons <- 0 # Equals the amount of proteins when run is done
charC <- "c" # used to make var cid
# Variables used to create the dendrogram with a hclust() object
mergeMatrix <- matrix(0, nrow = 0, ncol = 2)
merge <- list() # Holds the order of all splits. merge[[1]] returns which clusters was made from the first split.
height <- c() # maxThreshold - threshold
order <- proteins # Holds the ordered proteins for the dendrogram
labels <- proteins # The labels of all proteins
while (countSingletons < length(proteins)) {
clustersLargerThanOne <- list() # All clusters returned from this iteration of tclust(Not singletons). These will be added to 'proteinLabelsOfEachCluster' to be clustered in the next iteration.
# For every level: Do tclust on all clusters with current threshold
if (length(proteinLabelsOfEachCluster) > 0) {
for (i in 1:length(proteinLabelsOfEachCluster)) {
currentStep <- step
currentTclustCluster <- proteinLabelsOfEachCluster[[i]]# Cluster to tclust
print(paste0("Clustering ", currentTclustCluster$cid, " with ", length(currentTclustCluster$proteins), " proteins | threshold = ", currentTclustCluster$nextThreshold))
if (length(currentTclustCluster$proteins) > 1) { # Not a singleton. This check might not be necessary, as we dont add singletons for the next iteration
if (length(proteins) != length(currentTclustCluster$proteins)) { # We need to match the similarity matrix to the proteins in the cluster
simMatrixTemp <- simMatrix[currentTclustCluster$proteins, currentTclustCluster$proteins]
}
else { # Initial cluster
simMatrixTemp <- simMatrix
}
# tclustResultDataFrame <- clusteringWithTclust(simMatrixTemp, currentTclustCluster$proteins, currentTclustCluster$nextThreshold)
tclustResult <- tclust(simmatrix = simMatrixTemp, convert_dissimilarity_to_similarity = FALSE, threshold = currentTclustCluster$nextThreshold)
tclustResultCost <- tclustResult$costs
tclustResultDataFrame <- data.frame(protein = currentTclustCluster$proteins, cluster = tclustResult$clusters[[1]])
tclustResultDataFrame$cluster = tclustResultDataFrame$cluster + 1
amountOfClustersInTclustResult <- length(table(tclustResultDataFrame$cluster))
# Get labels for each new cluster
fallBack <- FALSE # If binary search did not minimize the number of splits, reset to the initial threshold/result
if (amountOfClustersInTclustResult > 1) {
### BEGIN binary search
if (binarySearch && amountOfClustersInTclustResult >= maxSplit) {
tempTclustResultDataFrame <- tclustResultDataFrame
tempTclustResultCost <- tclustResultCost
tempAmountOfClustersInTclustResult <- amountOfClustersInTclustResult
upperBound <- step
lowerBound <- 0
while(amountOfClustersInTclustResult >= maxSplit) {
currentStep <- lowerBound + ceiling(abs(upperBound - lowerBound) / 2)
# tclustResultDataFrame <- clusteringWithTclust(simMatrixTemp, currentTclustCluster$proteins, currentTclustCluster$nextThreshold - step + currentStep)
tclustResult <- tclust(simmatrix = simMatrixTemp, convert_dissimilarity_to_similarity = FALSE, threshold = (currentTclustCluster$nextThreshold - step + currentStep))
tclustResultCost <- tclustResult$costs
tclustResultDataFrame <- data.frame(protein = currentTclustCluster$proteins, cluster = tclustResult$clusters[[1]])
tclustResultDataFrame$cluster = tclustResultDataFrame$cluster + 1
amountOfClustersInTclustResult <- length(table(tclustResultDataFrame$cluster))
while (amountOfClustersInTclustResult < minSplit && currentStep > 1 && currentStep != step) {
lowerBound <- currentStep
currentStep <- lowerBound + ceiling(abs(upperBound - lowerBound) / 2)
if (currentStep == upperBound) { # Did not find a better threshold
fallBack = TRUE
break
}
# tclustResultDataFrame <- clusteringWithTclust(simMatrixTemp, currentTclustCluster$proteins, currentTclustCluster$nextThreshold - step + currentStep)
tclustResult <- tclust(simmatrix = simMatrixTemp, convert_dissimilarity_to_similarity = FALSE, threshold = (currentTclustCluster$nextThreshold - step + currentStep))
tclustResultCost <- tclustResult$costs
tclustResultDataFrame <- data.frame(protein = currentTclustCluster$proteins, cluster = tclustResult$clusters[[1]])
tclustResultDataFrame$cluster = tclustResultDataFrame$cluster + 1
amountOfClustersInTclustResult <- length(table(tclustResultDataFrame$cluster))
}
if (amountOfClustersInTclustResult >= maxSplit && (upperBound - lowerBound == 1)) {
fallBack = TRUE
}
if (fallBack) {
break
}
if (amountOfClustersInTclustResult >= maxSplit) { # currentStep did not descrease the amount of clusters enough
# Decrease upperBound
upperBound <- currentStep
}
}
if (fallBack) { # Resetting back to first result
if ((amountOfClustersInTclustResult > tempAmountOfClustersInTclustResult) ||
(amountOfClustersInTclustResult == 1 && (amountOfClustersInTclustResult < tempAmountOfClustersInTclustResult))) {
# Either the first tclustResult was better, or the first tclustResult gave the only split
tclustResultDataFrame <- tempTclustResultDataFrame
tclustResultCost <- tempTclustResultCost
amountOfClustersInTclustResult <- tempAmountOfClustersInTclustResult
}
}
}
### END binary search
if (GAP && length(currentTclustCluster$proteins) - 1 >= dimensions) {
threshold <- currentTclustCluster$nextThreshold - step + currentStep
if (randomAp == 3) {
simMatrixRandom <- buildRandomSimMatrixAp3(currentTclustCluster$proteins, simMatrixTemp, k = dimensions, seed = seed)
}
if (randomAp == 4) {
simMatrixRandom <- buildRandomSimMatrixAp4(currentTclustCluster$proteins, simMatrixTemp, k = dimensions, seed = seed)
}
tclustResultRandom <- tclust(simmatrix = simMatrixRandom, convert_dissimilarity_to_similarity = FALSE, threshold = threshold)
tclustResultRandomCost <- tclustResultRandom$costs
df <- data.frame(costOriginal = tclustResultCost, costRandom = tclustResultRandomCost, threshold = threshold, numberOfProteins = length(currentTclustCluster$proteins))
dfGap <- rbind(dfGap, df)
}
tempProteinLabelsOfEachCluster <- getProteinLabelsFromClustering(tclustResultDataFrame)
for (j in 1:amountOfClustersInTclustResult) { # Update height on cluster object
tempProteinLabelsOfEachCluster[[j]]$height <- maxThreshold - (currentTclustCluster$nextThreshold - step + currentStep - 1)
amountOfTotalClusters <- amountOfTotalClusters + 1
cid <- paste0(charC, amountOfTotalClusters)
tempProteinLabelsOfEachCluster[[j]]$cid <- cid
tempProteinLabelsOfEachCluster[[j]]$parent <- currentTclustCluster$cid
nextThreshold <- currentTclustCluster$nextThreshold + currentStep
if (nextThreshold > maxThreshold) {
nextThreshold <- maxThreshold
}
tempProteinLabelsOfEachCluster[[j]]$nextThreshold <- nextThreshold
}
}
else { # Same cluster as currentTclustCluster, no split occured
tempProteinLabelsOfEachCluster <- getProteinLabelsFromClustering(tclustResultDataFrame)
tempProteinLabelsOfEachCluster[[1]]$height <- currentTclustCluster$height
tempProteinLabelsOfEachCluster[[1]]$cid <- currentTclustCluster$cid
tempProteinLabelsOfEachCluster[[1]]$parent <- currentTclustCluster$parent
nextThreshold <- currentTclustCluster$nextThreshold + currentStep
if (nextThreshold > maxThreshold) {
nextThreshold <- maxThreshold
}
tempProteinLabelsOfEachCluster[[1]]$nextThreshold <- nextThreshold
}
# Update order
currentStartIndex <- currentTclustCluster$startIndex # Set start of first new cluster = start of parent cluster
for (k in 1:amountOfClustersInTclustResult) { # For every new cluster in parent cluster
labelsK <- tempProteinLabelsOfEachCluster[[k]]$proteins
lengthLabelsK <- length(labelsK)
startLabelsK <- currentStartIndex
endLabelsK <- currentStartIndex + lengthLabelsK - 1
tempProteinLabelsOfEachCluster[[k]]$startIndex <- startLabelsK
tempProteinLabelsOfEachCluster[[k]]$endIndex <- endLabelsK
for (n in 1:lengthLabelsK) { # Update positions in order
order[startLabelsK + n - 1] <- tempProteinLabelsOfEachCluster[[k]]$proteins[n]
}
# Done with labelsK, update currentStartIndex so start at labelsK.endIndex + 1
currentStartIndex <- endLabelsK + 1
}
# Add clusters for next iteration
if (amountOfClustersInTclustResult == 1) {
tempCurrentCluster <- tempProteinLabelsOfEachCluster[[1]]
clustersLargerThanOne <- c(clustersLargerThanOne, list(tempCurrentCluster))
}
else {
height <- c(height, c(maxThreshold - (currentTclustCluster$nextThreshold - step + currentStep - 1)))
countSplits <- countSplits + 1
currentMerge <- c()
for (j in 1:amountOfClustersInTclustResult) { # For each new cluster
tempCurrentCluster <- tempProteinLabelsOfEachCluster[[j]]
# Update totalClusters
tempCurrentClusterList <- list(tempCurrentCluster)
names(tempCurrentClusterList) <- tempCurrentCluster$cid
totalClusters <- c(totalClusters, tempCurrentClusterList)
currentMerge <- c(currentMerge, c(tempCurrentCluster$cid))
if (length(tempCurrentCluster$proteins) == 1) { # singleton found
countSingletons <- countSingletons + 1
}
if (length(tempCurrentCluster$proteins) > 1) { # Add cluster for next iteration
clustersLargerThanOne <- c(clustersLargerThanOne, list(tempCurrentCluster)) # We lose name at this line
}
}
merge <- c(merge, list(currentMerge))
}
}
}
}
# All clusters to tclust on next iteration
proteinLabelsOfEachCluster <- clustersLargerThanOne
}
singletons <- c()
clusters <- c()
for (i in 1:length(totalClusters)) {
if (length(totalClusters[[i]]$proteins) == 1) {singletons <- c(singletons, totalClusters[[i]]$cid)}
else {clusters <- c(clusters, totalClusters[[i]]$cid)}
}
####################################################################################################
# hc$merge
####################################################################################################
mergeMatrix <- matrix(0, nrow = 0, ncol = 2) # Negative values are singletons. Positive values are clusters, where the value corresponds to the row at which this cluster came from.
mergeLookUpList <- list() # Holds the row numbers for 'merge' for a given merge. mergeLookUpList$c1 returns the row in 'merge' where cluster c1 was made.
mergeHeights <- c() # Holds the height for all merges
# 'merge' in ascending order corresponds to the ordering of the splits (Divisive)
# 'merge' in descending order corresponds to the ordering of the merges (Agglomerative)
# The hierarchical clustering is done divisive, but in order to use the plotting function for a 'hclust() object' we need to see it as agglomerative.
for (i in length(merge):1) { # For all parents
parent <- ""
for (j in 2:length(merge[[i]])) { # For all children of parent i
# length(merge[[i]]) == 2, A two-split occured -> merging 2 clusters into one
# length(merge[[i]]) > 2, split resulted in more than 2 clusters -> merging multiple cluster into one
mergeHeights <- c(mergeHeights, height[i])
if (j == 2) {
c1 <- merge[[i]][j-1]
c2 <- merge[[i]][j]
c1.isSingleton <- length(totalClusters[[c1]]$proteins) == 1
c2.isSingleton <- length(totalClusters[[c2]]$proteins) == 1
c1.isCluster <- !c1.isSingleton
c2.isCluster <- !c2.isSingleton
parent <- totalClusters[[c1]]$parent
if (c1.isSingleton && c2.isSingleton) { # (s,s), on first iteration there are no clusters, only singletons
rowLabelsC1 <- - which(labels == totalClusters[[c1]]$proteins[1])
rowLabelsC2 <- - which(labels == totalClusters[[c2]]$proteins[1])
mergeMatrix <- rbind(mergeMatrix, c(rowLabelsC1, rowLabelsC2))
}
if (c1.isCluster && c2.isSingleton) { # (c,s)
mergeMatrixRowOfC1 <- mergeLookUpList[[c1]]
rowLabelsC2 <- - which(labels == totalClusters[[c2]]$proteins[1])
mergeMatrix <- rbind(mergeMatrix, c(mergeMatrixRowOfC1, rowLabelsC2))
}
if (c1.isSingleton && c2.isCluster) { # (s,c)
mergeMatrixRowOfC2 <- mergeLookUpList[[c2]]
rowLabelsC1 <- - which(labels == totalClusters[[c1]]$proteins[1])
mergeMatrix <- rbind(mergeMatrix, c(rowLabelsC1, mergeMatrixRowOfC2))
}
if (c1.isCluster && c2.isCluster) { # (c,c)
mergeMatrixRowOfC1 <- mergeLookUpList[[c1]]
mergeMatrixRowOfC2 <- mergeLookUpList[[c2]]
mergeMatrix <- rbind(mergeMatrix, c(mergeMatrixRowOfC1, mergeMatrixRowOfC2))
}
}
if (j > 2) { # Merge next element with cluster from previous line in mergeMatrix
c1 <- nrow(mergeMatrix)
c2 <- merge[[i]][j]
c1.isCluster <- TRUE # cluster from previous line in mergeMatrix. This will always be a cluster, since we handle merging of 2 singletons above(j == 2, initalize run)
c2.isSingleton <- length(totalClusters[[c2]]$proteins) == 1
c2.isCluster <- !c2.isSingleton
if (c1.isCluster && c2.isSingleton) {
mergeMatrixRowOfC1 <- c1
rowLabelsC2 <- - which(labels == totalClusters[[c2]]$proteins[1])
mergeMatrix <- rbind(mergeMatrix, c(mergeMatrixRowOfC1, rowLabelsC2))
}
if (c1.isCluster && c2.isCluster) {
mergeMatrixRowOfC1 <- c1
mergeMatrixRowOfC2 <- mergeLookUpList[[c2]]
if (is.null(mergeMatrixRowOfC2)) {
stop("j > 2, c1 = c2 = isCluster. c2 was not found in mergeLookUpList. All clusters must be singletons at this state, otherwise this error can occur.")
}
mergeMatrix <- rbind(mergeMatrix, c(mergeMatrixRowOfC1, mergeMatrixRowOfC2))
}
}
}
# Update mergeLookUpList
newCluster <- list(nrow(mergeMatrix)) # Make a counter, this is too many ops
names(newCluster) <- parent # cid for parent. The children have been merged into its parent.
mergeLookUpList <- c(mergeLookUpList, newCluster)
}
# Can't plot without making a hclust() object and reassign
hc <- hclust(dist(USArrests), "ave")
hc$merge <- mergeMatrix
hc$height <- mergeHeights
hc$order <- order
hc$labels <- labels
# returnList <- list(hc = hc, gap = gapList)
returnList <- list(hc = hc, gap = dfGap)
return(returnList)
} |
4db2d249ab0e89f743eb8658e94234e6a8457211 | 20f7c5c60c635e2839d71dd88da9d57741d7daf1 | /man/cube.Rd | 2c1309eb0a1f92f429c75b0a364cb8119ace4961 | [
"MIT"
] | permissive | nuno-agostinho/poweR | 8aa0c3ffa35b2eabf408f540f85543ba2107b45d | a51d04e9748cd1644be2878ea7f189da8b46397f | refs/heads/master | 2023-06-20T21:29:27.323702 | 2021-07-23T11:03:14 | 2021-07-23T11:03:14 | 388,748,802 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 311 | rd | cube.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cube.R
\name{cube}
\alias{cube}
\title{Calculate cube of a number}
\usage{
cube(x)
}
\arguments{
\item{x}{Numeric}
}
\value{
Number
}
\description{
In other words, return the third power (cube) of a given number.
}
\examples{
cube(5)
}
|
e913e6a0c3e1aff6930d7cdc478a645c1e3213ea | 03dcc7edef3ea915ae4da30b24d78b3a982f6909 | /ui.R | e54967bbda0d406d3db84717de628ca369e1141c | [] | no_license | camfin/camfin1 | 358557cc70b8283240172816979cda3b50db13d0 | 224dab443759ff8ef8caff74b22845735c8e13d9 | refs/heads/master | 2021-01-10T13:27:46.320316 | 2016-02-04T08:41:36 | 2016-02-04T08:41:36 | 50,800,653 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,829 | r | ui.R | shinyUI(pageWithSidebar(
headerPanel("Campaign Finance Contributions for 2015-2016 Election Cycle through December 2015"),
sidebarPanel(
width = 2,
textInput('name', 'Search NAME', value = '') ,
textInput('city', 'Search CITY', value = '') ,
textInput('state', 'Search STATE', value = '') ,
textInput('employer', 'Search EMPLOYER', value = '') ,
textInput('cmte_nm', 'Search CMTE_NM', value = '') ,
textInput('prty', 'Search PRTY', value = '') ,
textInput('candidate', 'Search CANDIDATE', value = 'CLINTON') ,
textInput('occupation', 'Search OCCUPATION', value = '') ,
radioButtons("xsort", "Sort by",
choices = c("LAST_DATE (ASC)","LAST_DATE (DESC)","TOTAL_CONTRIB","N_CONTRIB"), selected = c("TOTAL_CONTRIB"), inline = TRUE),
checkboxGroupInput("xshow", "Show",
choice = c("NAME","CITY","STATE","EMPLOYER","CMTE_NM","PRTY","CANDIDATE","OCCUPATION","LAST_DATE","TOTAL_CONTRIB","N_CONTRIB"),
selected = c("NAME","CITY","STATE", "CMTE_NM","PRTY","CANDIDATE", "LAST_DATE","TOTAL_CONTRIB","N_CONTRIB"),
inline = TRUE),
textInput('colwidth', 'Maximum Column Width', value = '40') ,
textInput('totwidth', 'Maximum Total Width', value = '240') ,
textInput('totrows', 'Maximum Total Rows', value = '900') ),
mainPanel(
div(
tabsetPanel(
tabPanel("Output",
width = 10,
verbatimTextOutput('myText')
),
tabPanel("Usage",
width = 10,
includeMarkdown("camfin.Rmd")
)
)
),
width = 10)
)
) |
b1e0b2057dcff2350d79a6180100566fb3a8e1f6 | 218e33874d0352a4ad9e96bf9c362246883b5d9e | /man/hqreg.Rd | 2ba5c20f7289639899e39d1ecb2b39f319fc5c40 | [] | no_license | Sandy4321/hqreg | 0a53c892687b5dece5a3adf61490ccbc6bcfcba2 | 9ce7a756cccc446b2aa396f33bbaa1a9d07f6d56 | refs/heads/master | 2022-02-16T13:33:29.110672 | 2019-08-18T06:23:47 | 2019-08-18T06:23:47 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,388 | rd | hqreg.Rd | \name{hqreg}
\alias{hqreg}
\title{Fit a robust regression model with Huber or quantile loss penalized by lasso or elastic-net}
\description{Fit solution paths for Huber loss regression or quantile regression penalized
by lasso or elastic-net over a grid of values for the regularization parameter lambda.}
\usage{
hqreg(X, y, method = c("huber", "quantile", "ls"),
gamma = IQR(y)/10, tau = 0.5, alpha = 1, nlambda = 100, lambda.min = 0.05, lambda,
preprocess = c("standardize", "rescale"), screen = c("ASR", "SR", "none"),
max.iter = 10000, eps = 1e-7, dfmax = ncol(X)+1, penalty.factor = rep(1, ncol(X)),
message = FALSE)
}
\arguments{
\item{X}{Input matrix.}
\item{y}{Response vector.}
\item{method}{The loss function to be used in the model. Either "huber" (default),
"quantile", or "ls" for least squares (see \code{Details}).}
\item{gamma}{The tuning parameter of Huber loss, with no effect for the other loss
functions. Huber loss is quadratic for absolute values less than gamma and linear for those
greater than gamma. The default value is IQR(y)/10.}
\item{tau}{The tuning parameter of the quantile loss, with no effect for the other loss
functions. It represents the conditional quantile of the response to be estimated, so
must be a number between 0 and 1. It includes the absolute loss when tau = 0.5 (default).}
\item{alpha}{The elastic-net mixing parameter that controls the relative contribution
from the lasso and the ridge penalty. It must be a number between 0 and 1. \code{alpha=1}
is the lasso penalty and \code{alpha=0} the ridge penalty.}
\item{nlambda}{The number of lambda values. Default is 100.}
\item{lambda.min}{The smallest value for lambda, as a fraction of lambda.max, the data
derived entry value. Default is 0.05.}
\item{lambda}{A user-specified sequence of lambda values. Typical usage is to leave
blank and have the program automatically compute a \code{lambda} sequence based on
\code{nlambda} and \code{lambda.min}. Specifying \code{lambda} overrides this. This
argument should be used with care and supplied with a decreasing sequence instead of
a single value. To get coefficients for a single \code{lambda}, use \code{coef} or
\code{predict} instead after fitting the solution path with \code{hqreg} or performing
k-fold CV with \code{cv.hqreg}.}
\item{preprocess}{Preprocessing technique to be applied to the input. Either
"standardize" (default) or "rescale"(see \code{Details}). The coefficients
are always returned on the original scale.}
\item{screen}{Screening rule to be applied at each \code{lambda} that discards variables
for speed. Either "ASR" (default), "SR" or "none". "SR" stands for the strong rule,
and "ASR" for the adaptive strong rule. Using "ASR" typically requires fewer iterations
to converge than "SR", but the computing time are generally close. Note that the option
"none" is used mainly for debugging, which may lead to much longer computing time.}
\item{max.iter}{Maximum number of iterations. Default is 10000.}
\item{eps}{Convergence threshold. The algorithms continue until the maximum change in the
objective after any coefficient update is less than \code{eps} times the null deviance.
Default is \code{1E-7}.}
\item{dfmax}{Upper bound for the number of nonzero coefficients. The algorithm exits and
returns a partial path if \code{dfmax} is reached. Useful for very large dimensions.}
\item{penalty.factor}{A numeric vector of length equal to the number of variables. Each
component multiplies \code{lambda} to allow differential penalization. Can be 0 for
some variables, in which case the variable is always in the model without penalization.
Default is 1 for all variables.}
\item{message}{If set to TRUE, hqreg will inform the user of its progress. This argument
is kept for debugging. Default is FALSE.}
}
\details{
The sequence of models indexed by the regularization parameter \code{lambda} is fit
using a semismooth Newton coordinate descent algorithm. The objective function is defined
to be \deqn{\frac{1}{n} \sum loss_i + \lambda\textrm{penalty}.}{\sum loss_i /n + \lambda*penalty.}
For \code{method = "huber"},
\deqn{loss(t) = \frac{t^2}{2\gamma} I(|t|\le \gamma) + (|t| - \frac{\gamma}{2}) I(|t|>
\gamma);}{loss(t) = t^2/(2*\gamma) I(|t|\le \gamma) + (|t| - \gamma/2) I(|t|>\gamma);}
for \code{method = "quantile"}, \deqn{loss(t) = t (\tau - I(t<0));}
for \code{method = "ls"}, \deqn{loss(t) = \frac{t^2}{2}}{loss(t) = t^2/2.}
In the model, "t" is replaced by residuals.
The program supports different types of preprocessing techniques. They are applied to
each column of the input matrix \code{X}. Let x be a column of \code{X}. For
\code{preprocess = "standardize"}, the formula is
\deqn{x' = \frac{x-mean(x)}{sd(x)};}{x' = (x-mean(x))/sd(x);}
for \code{preprocess = "rescale"},
\deqn{x' = \frac{x-min(x)}{max(x)-min(x)}.}{x' = (x-min(x))/(max(x)-min(x)).}
The models are fit with preprocessed input, then the coefficients are transformed back
to the original scale via some algebra. To fit a model for raw data with no preprocessing, use \code{hqreg_raw}.
}
\value{
The function returns an object of S3 class \code{"hqreg"}, which is a list containing:
\item{call}{The call that produced this object.}
\item{beta}{The fitted matrix of coefficients. The number of rows is equal to the number
of coefficients, and the number of columns is equal to \code{nlambda}. An intercept is included.}
\item{iter}{A vector of length \code{nlambda} containing the number of iterations until
convergence at each value of \code{lambda}.}
\item{saturated}{A logical flag for whether the number of nonzero coefficients has reached \code{dfmax}.}
\item{lambda}{The sequence of regularization parameter values in the path.}
\item{alpha}{Same as above.}
\item{gamma}{Same as above. \code{NULL} except when \code{method = "huber"}.}
\item{tau}{Same as above. \code{NULL} except when \code{method = "quantile"}.}
\item{penalty.factor}{Same as above.}
\item{method}{Same as above.}
\item{nv}{The variable screening rules are accompanied with checks of optimality
conditions. When violations occur, the program adds in violating variables and re-runs
the inner loop until convergence. \code{nv} is the number of violations.}
}
\references{Yi, C. and Huang, J. (2016)
\emph{Semismooth Newton Coordinate Descent Algorithm for
Elastic-Net Penalized Huber Loss Regression and Quantile Regression},
\url{https://arxiv.org/abs/1509.02957} \cr
\emph{Journal of Computational and Graphical Statistics, accepted in Nov 2016} \cr
\url{http://www.tandfonline.com/doi/full/10.1080/10618600.2016.1256816}}
\author{Congrui Yi <congrui-yi@uiowa.edu>}
\seealso{\code{\link{plot.hqreg}}, \code{\link{cv.hqreg}}}
\examples{
X = matrix(rnorm(1000*100), 1000, 100)
beta = rnorm(10)
eps = 4*rnorm(1000)
y = drop(X[,1:10] \%*\% beta + eps)
# Huber loss
fit1 = hqreg(X, y)
coef(fit1, 0.01)
predict(fit1, X[1:5,], lambda = c(0.02, 0.01))
# Quantile loss
fit2 = hqreg(X, y, method = "quantile", tau = 0.2)
plot(fit2)
# Squared loss
fit3 = hqreg(X, y, method = "ls", preprocess = "rescale")
plot(fit3, xvar = "norm")
}
\keyword{models}
\keyword{regression}
|
79d49d927241793b72db1b575380e0cd871234bb | cacb0d6c51a9dcd9bf24f8b5265910d72ed230ef | /man/estimate.theta.Rd | 92fc051ff64c3789497897bc5b386629fd19558f | [] | no_license | vasuagg/smp | 49fee91a32f101a91cb34ce5d8a81ffa208aa812 | e1b52e0f3d7b1ed71cbdd0bb40d4f36c314bdf01 | refs/heads/master | 2021-01-22T08:18:03.936816 | 2014-01-04T17:43:18 | 2014-01-04T17:43:18 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 804 | rd | estimate.theta.Rd | \name{estimate.theta}
\alias{estimate.theta}
\title{Estimate best estimate of theta.}
\usage{
estimate.theta(in.data, n = max(in.data), v = TRUE)
}
\arguments{
  \item{in.data}{input data drawn from a binomial
  distribution}
  \item{n}{number of subunits, default is max(in.data)}
}
\description{
Returns the optimal point estimate of theta for the input
data set. By default, theta is optimized with n set to
the largest value observed in the data.
}
|
019713a933ceef4100360cdc9378df697ea50ac6 | 71c6d3e8051ee850a56ecd42b6bf95a2913618f3 | /R/SweaveTools.R | 3e8ef02befe4b8290a4d41d7fe2b542f2ffd46e8 | [] | no_license | cran/cxxPack | 03d21cde8f98b40931040bcbb23660aefd8bfe89 | 956af335f418a489f226555c550359c7b887d577 | refs/heads/master | 2021-01-11T19:44:10.811111 | 2010-07-21T00:00:00 | 2010-07-21T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 539 | r | SweaveTools.R | # Used to dynamically compile and load C++ functions in a vignette.
#' Compile (optionally) and load a C++ chunk's shared library.
#'
#' Runs the external ./makedll.sh build script for `name` (unless
#' `compile = FALSE`) and then loads the resulting shared library from
#' the ./cpp/ directory via dyn.load().
#'
#' @param name Base name of the C++ chunk (no path, no extension).
#' @param compile Should the shell build script be run first?
#' @param logfile File the build script writes its compiler output to.
#' @return The result of dyn.load(), as in base R.
loadcppchunk <- function(name, compile = TRUE, logfile = "compile.log") {
    if (compile) {
        # Under Windows this should use Rtools/bin/sh.exe
        cmd <- paste0("sh ./makedll.sh ", name, ' "', logfile, '"')
        # system() returns the shell's exit status; non-zero means the
        # build failed, so stop before loading a stale/missing library.
        xstat <- system(cmd)
        if (xstat != 0) {
            stop("loadcppchunk() failed for ", name,
                 " (exit status ", xstat, "); see ", logfile,
                 call. = FALSE)
        }
    }
    dyn.load(paste0("./cpp/", name, .Platform$dynlib.ext))
}
# Unload a previously loaded C++ chunk's shared library from ./cpp/.
unloadcppchunk <- function(name) {
    dllpath <- paste("./cpp/", name, .Platform$dynlib.ext, sep = "")
    dyn.unload(dllpath)
}
|
7354862a19bd2b1c8e9651267e02b869395cfd2e | 712c71892a6edd61227e2c0c58bbc1e9b43893e4 | /man/checkFileHashSource.Rd | 971fe53eed358df1a58825a366ec36a7422318b5 | [] | no_license | gelfondjal/adapr | 130a6f665d85cdfae7730196ee57ba0a3aab9c22 | b85114afea2ba5b70201eef955e33ca9ac2f9258 | refs/heads/master | 2021-01-24T10:20:14.982698 | 2020-01-28T22:56:18 | 2020-01-28T22:56:18 | 50,005,270 | 33 | 3 | null | 2018-10-18T16:09:57 | 2016-01-20T04:48:49 | R | UTF-8 | R | false | true | 891 | rd | checkFileHashSource.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Check_file_hash_source.R
\name{checkFileHashSource}
\alias{checkFileHashSource}
\title{Checks the consistency of the dependency directory with the files within the file system.
Reports the source scripts that need to be updated.}
\usage{
checkFileHashSource(dependency.dir = NULL, dependency.object = NULL)
}
\arguments{
\item{dependency.dir}{Directory with dependency information files}
\item{dependency.object}{data frame with dependency information}
}
\value{
list of information about file hash mismatches
}
\description{
Checks the consistency of the dependency directory with the files within the file system.
Reports the source scripts that need to be updated.
}
\details{
Only needs one or the other argument.
}
\examples{
\dontrun{
checkFileHashSource(pullSourceInfo("adaprHome")$dependency.dir)
}
}
|
c167a3369178e7f1099b11123dc7de12f87de2d1 | f93ceb8f3fed76f17d4cc26018aaa0c45e37aad1 | /scripts/cointegration.r | 1647f88932de7a26e764c0e8f51e87df6a0261c1 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | ssh352/nxcore | 5bc9b4df983fa8c85272cae8b39e225d2541b2ba | 56811368b22e954083a765bb9d4946c15d83fa40 | refs/heads/master | 2021-05-29T06:49:13.537820 | 2015-09-25T21:44:30 | 2015-09-25T21:44:30 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,978 | r | cointegration.r | library(devtools)
library(doMC)
library(iotools)
library(xts)
library(devtools)
library(doMC)
#registerDoMC(cores=16)
#registerDoMC(cores=10)
document("..")
# Column schema of the raw TAQ trades CSV:
# symbol, date, time (character); price, size, corr (numeric); cond, ex (character).
col_types=c(rep("character",3), rep("numeric", 3), rep("character", 2))
# Read the whole file into RAM and split it into a data frame (iotools).
# NOTE(review): hard-coded local path -- not portable.
x = dstrsplit(readAsRaw(
  "/Users/mike/projects/jackson_lecture/may_trades/taq_20100506_trades_all.csv"),
  sep=",", skip=1, col_types=col_types)
names(x) = c("symbol", "date", "time", "price", "size", "corr", "cond", "ex")
# Remove bunk trades: drop rows whose sale condition is in the exclusion
# list, keep only the first five columns, and drop incomplete rows.
x = na.omit(x[!(x$cond %in% c("L", "N", "O", "Z", "P")),1:5])
# Trades with non-positive size carry no volume information.
x = x[x$size > 0,]
# Row indices grouped by ticker symbol.
sym_split = split(1:nrow(x), x$symbol)
# Build POSIX timestamps from the separate date and time fields.
x$time_stamp = paste(x$date, x$time)
x$date_time=strptime(x$time_stamp, format="%Y%m%d %H:%M:%S", tz=Sys.timezone())
# Restrict to the symbols in the bundled 'sp' data set.
data(sp)
x = x[x$symbol %in% sp$symbol,]
# Consolidation resolution: 30-minute bars.
on="minutes"
k=30
# Recompute the per-symbol split after the filtering above.
sym_split = split(1:nrow(x), x$symbol)
# Create the consolidated trade data.
cat("Consolidating trade data\n")
# One consolidated (binned) price series per symbol, stacked row-wise;
# symbols with no surviving trades return NULL and are dropped by rbind.
taq = foreach (sym = sp$symbol, .combine=rbind, .inorder=FALSE) %dopar% {
  registerDoSEQ()
  d = x[sym_split[[sym]],]
  ret = NULL
  if (nrow(d) > 0) {
    ret = as.data.frame(consolidate_prices(d$date, d$time, d$price, d$size,
      time_format="%H:%M:%S", date_format="%Y%m%d",
      on=on, k=k))
    ret$symbol = sym
  }
  ret
}
# Bar timestamps come back as row names; parse them into POSIXlt.
taq$date_time = strptime(rownames(taq), "%Y-%m-%d %H:%M:%S")
# Create the xts matrix of stock values.
sym_split = split(1:nrow(x), x$symbol)
# One xts column of raw trade prices per symbol, merged on time.
prices = foreach(sym_inds=sym_split, .combine=cbind) %dopar% {
  xs = x[sym_inds,]
  # NOTE(review): 'xst' is assigned but never used -- the second xts()
  # call below is the value actually returned to foreach.
  xst = xts(xs$price, order.by=xs$date_time)
  xts(xs$price, order.by=xs$date_time)
}
colnames(prices) = names(sym_split)
# Carry prices forward for each column.
cat("Carrying prices forward.\n")
prices = carry_prices_forward(prices)
prices = na.omit(prices)
# Make sure that we are dealing with the right resolution after combining:
# re-bin to the on/k resolution, taking the per-column mean within each bar.
prices = period.apply(prices, endpoints(prices, on=on, k=k),
  function(ps) {
    xts(matrix(apply(as.matrix(ps), 2, mean, na.rm=TRUE), nrow=1),
      order.by=time(ps[1]))
  })
# x is a data frame with a price, size, and symbol column.
# Collapses raw trades into a regular-resolution volume-weighted price
# matrix: one column per symbol, NA gaps filled by carrying the last
# observed price forward.  'on'/'k' set the bar resolution.
clean_and_normalize_transactions = function(x, on="minutes", k=1) {
  # Row indices per ticker symbol.
  sp_split = split(1:nrow(x), x$symbol)
  # Two adjacent xts columns (price, size) per symbol, merged on time.
  x = foreach(inds=sp_split, .combine=cbind) %dopar% {
    xts(cbind(x$price[inds], x$size[inds]), order.by=x$date_time[inds])
  }
  # Row i of psp holds the (price, size) column indices for symbol i.
  psp = matrix(1:ncol(x), ncol=2, byrow=TRUE)
  # The following is a hog and it needs to be better. Should vwap return an
  # xts object or would it be better as a data frame?
  # Volume-weighted average price per bar, one column per symbol.
  x = foreach(i = 1:nrow(psp), .combine=cbind) %dopar% {
    vwap(as.vector(x[,psp[i, 1]]), as.vector(x[,psp[i,2]]),
      time(x), on=on, k=k)
  }
  # Fill bars with no trades by propagating the last observed price.
  x = foreach(j=1:ncol(x), .combine=cbind) %dopar% {
    carry_price_forward(x[,j])
  }
  colnames(x) = names(sp_split)
  x
}
# Second-resolution VWAP series for the filtered trades.
sp_trades = clean_and_normalize_transactions(x, on="seconds", k=1)
document("..")
# For each rolling volume window, run the cointegration test and collect
# (p-value, test statistic) -- one row per window.
# NOTE(review): the result is not assigned to anything; presumably meant to
# be captured -- confirm.
foreach(it = volume_window_gen(time(x)), .combine=rbind) %dopar% {
  ci = cointegration_info(x[it,])
  c(ci$p_value, ci$p_stat)
}
|
ace57a70b02f76fa553691bacf4b46b16d66a3d5 | d062547f9bb1f93dab3c84dc37257d17fce790f0 | /server.R | 307976f727d24d9d8347bb508c673ea2869aea9b | [] | no_license | kfaranet/DevelopingDataProductsWeek4 | a1aec7c441e4319a92fefb8b04340ab165532c84 | ade037bc4707323cd908b2d6e6d52be10764a146 | refs/heads/master | 2022-12-13T23:50:39.760763 | 2020-09-08T01:31:22 | 2020-09-08T01:31:22 | 292,845,064 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 253 | r | server.R | #
library(shiny)
# Server logic: two reactive temperature conversions rendered as text.
shinyServer(function(input, output) {
  # Fahrenheit -> Celsius, rounded to one decimal place.
  output$CText <- renderText(round((input$Ftemp - 32) * (5/9),1))
  # Celsius -> Fahrenheit, rounded to one decimal place.
  output$FText <- renderText(round((input$Ctemp * (9/5) + 32),1))
})
|
a05b2b260700eab4f1853b56b00057a94c4ec0a7 | 5b2f016f1298c790224d83c1e17a425640fc777d | /array/CononicalCorrelationAnalysis.R | 3a2e1430b7fc700a8bafde77127c2d853990cd97 | [] | no_license | Shicheng-Guo/methylation2020 | b77017a1fc3629fe126bf4adbb8f21f3cc9738a0 | 90273b1120316864477dfcf71d0a5a273f279ef9 | refs/heads/master | 2023-01-15T20:07:53.853771 | 2020-02-28T03:48:13 | 2020-02-28T03:48:13 | 243,668,721 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 521 | r | CononicalCorrelationAnalysis.R | #!/usr/bin/R
# Exploratory canonical correlation analysis (CCA) scratch script.
# NOTE(review): setwd("") is a leftover placeholder and errors at runtime.
setwd("");
# NOTE(review): installing inside a script is a side effect; normally done
# once interactively rather than on every run.
install.packages("CCA")
library("CCA")
# Simulated stand-in matrices (rows = samples).
# NOTE(review): the rnorm() count 46858*5791 does not equal the requested
# 5790 x 46857 shape -- confirm the intended dimensions.
y<-matrix(rnorm(46858*5791),5790,46857) # methylation array
x<-matrix(rnorm(5790*200,1,10),5790,200)
dim(x)
dim(y)
# Regularized CCA with fixed ridge parameters lambda1 = lambda2 = 1.
res<-rcc(x,y,1,1)
# Worked example on the CCA package's bundled nutrimouse data set.
data(nutrimouse)
x=as.matrix(nutrimouse$gene)
y=as.matrix(nutrimouse$lipid)
# Grid search for the regularization parameters.
estim.regul(x,y)
# Within/between correlation matrices, plotted two ways.
correl=matcor(x,y)
img.matcor(correl,type=1)
img.matcor(correl,type=2)
# NOTE(review): bare symbol below is a leftover file name, not valid code --
# evaluating it raises "object not found".
Chr21_450kMerge.txt.trans
setwd("/home/sguo/methylation")
# Load the real methylation matrix (probes x samples) and transpose it.
data<-read.table("Chr21_450kMerge.txt.trans",head=T,row.names=1,sep="\t")
data<-t(data)
|
79db1c5e3fdd47f788fdb78b8c851676ab3b1149 | 23a572adade5e5682a38580d5c46f9ee27f6a16b | /man/FormatMCLFastas.Rd | 9e3377e759b5f037be724f8e7530ec9b6a1fd3ec | [] | no_license | cran/MAGNAMWAR | 600a2865205c3f719bac6b55c60f9c53099a8ca2 | b78d31591665dfcabe8f635d1b6e3d07f8c71d2e | refs/heads/master | 2021-01-20T03:12:07.293148 | 2018-07-12T06:20:17 | 2018-07-12T06:20:17 | 89,508,282 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,009 | rd | FormatMCLFastas.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FormatMCLFastas.R
\name{FormatMCLFastas}
\alias{FormatMCLFastas}
\title{Format all raw GenBank fastas to single OrthoMCL compatible fasta file}
\usage{
FormatMCLFastas(fa_dir, genbnk_id = 4)
}
\arguments{
\item{fa_dir}{Path to the directory where all raw GenBank files are stored. Note, all file names must be changed to a 4-letter code representing each species and have '.fasta' file descriptor}
\item{genbnk_id}{(Only necessary for the deprecated version of fasta headers) The index of the sequence ID in the GenBank pipe-separated annotation line (default: 4)}
}
\value{
Returns nothing, but prints the path to the final OrthoMCL compatible fasta file
}
\description{
Creates the composite fasta file for use in running OrthoMCL and/or submitting to www.orthomcl.org
}
\examples{
\dontrun{
dir <- system.file('extdata', 'fasta_dir', package='MAGNAMWAR')
dir <- paste(dir,'/',sep='')
formatted_file <- FormatMCLFastas(dir)
}
}
|
2321a2f4235082ea6455fe330eda5e4d14729855 | 84ab6a0816222d0ba645712e30ab189fd3b58351 | /R/geocodeUSCB.R | 57a66674b64d7477dfd1807f708296c1bb6fe884 | [] | no_license | ajyoshizumi/geocodeR | 23591aaef8bb20dac2fede20c7b2a5d96a7d4a70 | 2dec0870a87394d71d6e76bc221a1561a2bb2a64 | refs/heads/master | 2021-05-21T05:37:49.521663 | 2020-04-08T17:47:33 | 2020-04-08T17:47:33 | 252,569,980 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,729 | r | geocodeUSCB.R | #' A Function for Geocoding Using the US Census Bureau API
#'
#' This function allows you to geocode addresses.
#' @param Address Used for one line addresses.
#' @param Street Used for separated addresses. Format is "#### RoadName RoadType".
#' @param City Used for separated addresses.
#' @param State Used for separated addresses.
#' @param Zip Used for seperated addresses.
#' @param Benchmark Indicates what version of the locater should be used. Defaults to "Public_AR_Current". For a list of other options please consult https://geocoding.geo.census.gov/geocoder/Geocoding_Services_API.pdf.
#' @param SearchType Indicates whether the search type is for a single line address ("onelineaddress") or a seperated address ("address"). Defaults to "onelineaddress".
#' @export
#' @examples geocodeUSCB(Address = "2800 Faucette Boulevard Raleigh NC 27607", SearchType = "onelineaddress")
#' @examples geocodeUSCB(Street = "2800 Faucette Boulevard", City = "Raleigh", State = "NC", Zip = "27607", SearchType = "address")
#'
#' geocodeUSCB()
# Geocoding function that leverages the US Census Bureau geocoding API.
# Returns a list(Longitude=, Latitude=) for the first address match (each
# element is NULL/NA when the service returns no matches).
geocodeUSCB <- function(Address,Street,City,State,Zip,
                        Benchmark = "Public_AR_Current",
                        SearchType = "onelineaddress"){
  # Define url to be contacted; the search type is part of the URL path.
  urlAddress <- paste0(
    "https://geocoding.geo.census.gov/geocoder/locations/",
    SearchType,
    "?"
  )
  # Build the query according to the search type.
  if (SearchType == "address") {
    # Separated address: street / city / state / zip supplied individually.
    r <- httr::GET(url = urlAddress,
                   query = list(
                     street = Street,
                     city = City,
                     state = State,
                     zip = Zip,
                     benchmark = Benchmark
                   ))
  } else if (SearchType == "onelineaddress") {
    # Single-line address.
    r <- httr::GET(url = urlAddress,
                   query = list(
                     address = Address,
                     benchmark = Benchmark
                   ))
  } else {
    # Fixed: the original tested
    #   SearchType != "address" | SearchType != "onelineaddress"
    # which is always TRUE; a plain else expresses the intent directly.
    stop("Invalid search type specified.")
  }
  # Store content of the response as text (renamed from 'c', which shadowed
  # base::c) and parse the JSON structure into a list.
  resp_text <- httr::content(x = r, type = "text", encoding = "UTF-8")
  geoList <- jsonlite::fromJSON(txt = resp_text)
  # First address match wins.
  lon <- geoList$result$addressMatches$coordinates$x[1]
  lat <- geoList$result$addressMatches$coordinates$y[1]
  list(Longitude = lon, Latitude = lat)
}
|
742eee56272d4b8326a888fbce0c7c0e84852926 | 8cdc42c6520f267e1ffdd6a69ab7445076311f16 | /R/speeddist.R | 5a01fcb833ccab082f17cadebb4906b946f5db76 | [] | no_license | holaanna/pprmcmc | 2d5238f56b48d4d550ef7d209bcc62b2ac151306 | 4084532d5c6de8883d9786b416dc6d7208a1363d | refs/heads/master | 2020-04-26T15:02:27.232596 | 2019-03-04T01:11:23 | 2019-03-04T01:11:23 | 173,634,657 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,241 | r | speeddist.R | #' Simulaton of the epidemic process to find the speed of the disease
#'
#' Epidemic simulation and maximum distance the wave can travel using gillespsie algorithm.
#'
#'\code{simul} provide the simulation of the epidemic process and the
#' maximum distance the wave can travel at each particular obaservation date.
#'
#' @param alpha, beta, gama indicating the dispersal kernel paremeter,
#' the contact parameter and the infectious period.
#' @param tim A vector of observation times
#' @param Tmax Final observation time
#' @param l Takes values 1, 2, 3.
#' \enumerate{
#' \item indicates the rayleigh kernerl
#' \item indicates the exponential kernel
#' \item indicates the cauchy kernel
#' }
#' @return A list with components:
#' \describe{
#' \item{epidem}{A five-column matrix giving the dynamics of the process.
#' The columns are, respectively, the event time, the x-coordinate, the
#' y-coordinate, the current number of infectives, and the index of the
#' individual removed (0 if the event is an infection).}
#' \item{maxdist}{A vector of maximum distances travelled by the wave on each time in tim.}
#' }
#' @import VGAM
#' @examples
#'# Simulation with rayleigh kernel
#' alpha=0.00012
#' beta=0.012
#' tim=10:325
#' gama=110
#' Tmax=325
#' l=1
#' res=simul(alpha,beta,tim,gama,Tmax,l)
#'
#' @export
# Gillespie-style stochastic simulation of a spatial epidemic.
# Event-log layout ('dat'): each row is
#   (event time, x, y, current number of infectives,
#    index of the removed individual -- 0 when the event is an infection).
# 'inf_lis' holds the dat row indices of the current infectives and
# 'rem_tim' the time remaining until each of them is removed.
simul=function(alpha,beta,tim,gama,Tmax,l){
  t=0
  inf=0
  i=1
  S=0
  size=0
  # Initial state: a single infective at the origin.
  dat=c(0,0,0,1,0)
  init=c(0,0,0)
  inf_lis=1
  ni=1
  indx=0
  # ii flags whether 'dat' is still the initial vector (0) or a matrix (1).
  ii=0
  remt=rem_tim=gama
  # rp tracks the furthest distance reached from the initial focus.
  rp=0
  vec=vecx=vecy=NULL
  while(t<=Tmax){
    # Dispersal distance from the chosen kernel
    # (1 = Rayleigh, 2 = exponential, 3 = Cauchy) and a uniform direction.
    if(l==1){
      r=rrayleigh(1,1/sqrt(2*alpha))
    }
    if(l==2){
      r=rexp(1,1/alpha)
    }
    if(l==3){
      r=rcauchy(1,alpha)
    }
    theta=runif(1,0,2*pi)
    # Exponential waiting time to the next infection (total rate ni*beta).
    rate=ni*beta
    dt=log(1/runif(1))/rate
    if(length(inf_lis)==1){
      # Only one infective left, so the next event has to be an infection:
      # redraw the waiting time until it beats the pending removal.
      while(min(rem_tim)<dt){
        dt=log(1/runif(1))/rate
      }
      j=inf_lis[1]
      size=c(size,ni)
      if(ii==0){
        # Very first event: 'dat' is still a plain vector, so the source
        # coordinates are dat[2]/dat[3].
        S=c(S,j)
        x=dat[2]+ r*cos(theta)
        y=dat[3]+ r*sin(theta)
        ii=1
      }
      else{
        S=c(S,j)
        x=dat[j,2]+ r*cos(theta)
        y=dat[j,3]+ r*sin(theta)
      }
      ni=ni+1
      dat=rbind(dat,c(t+dt,x,y,ni,0))
      inf_lis=c(inf_lis,nrow(dat))
      indx=c(indx,nrow(dat)-1)
      # Age every pending removal clock and start one for the new case.
      rem_tim=rem_tim-dt
      rem_tim=c(rem_tim,gama)
      remt=c(remt,gama)
      # Update the maximum distance reached from the initial focus.
      dd=sqrt((x-init[1])^2+(y-init[2])^2)
      if(dd>rp){
        rp=dd
      }
    }
    else{
      if(min(rem_tim)<dt){ # removal fires before the next infection
        dt=min(rem_tim)
        ni=ni-1
        j=which(rem_tim==min(rem_tim))
        dat=rbind(dat,c(t+dt,0,0,ni,inf_lis[j]))
        inf_lis=inf_lis[-j]
        rem_tim=rem_tim[-j]-rem_tim[j]
      }
      else{ # infection: a uniformly chosen infective seeds a new case
        ni=ni+1
        j=sample(inf_lis,1)
        x=dat[j,2]+ r*cos(theta)
        y=dat[j,3]+ r*sin(theta)
        dat=rbind(dat,c(t+dt,x,y,ni,0))
        inf_lis=c(inf_lis,nrow(dat))
        indx=c(indx,nrow(dat)-1)
        rem_tim=rem_tim-dt
        rem_tim=c(rem_tim,gama)
        remt=c(remt,gama)
        size=c(size,ni-1)
        S=c(S,j)
        dd=sqrt((x-init[1])^2+(y-init[2])^2)
        if(dd>rp){
          rp=dd
        }
      }
    }
    # Record the running maximum distance at each observation time in 'tim'
    # that falls inside the interval (t, t+dt].
    if(length(tim)!=0){
      if(t+dt>Tmax){
        vec=c(vec,rep(rp,length(tim)))
      }
      else{
        while(tim[1]>t&&tim[1]<=(t+dt)){
          if(t==0){
            vec=c(vec,0)
          }
          else{
            vec=c(vec,rp)
          }
          tim=tim[-1]
        }
      }
    }
    # Stop once simulated time steps past Tmax; log a final bookkeeping row.
    if(t<Tmax && t+dt>Tmax){
      dat[nrow(dat),]=c(t+dt,0,0,ni,0)
      break
    }
    t=t+dt
  }
  return(list(maxdist=vec, epidem=dat))
}
#' Posterior distribution of Wave speed
#'
#'\code{speed} provide sample from posterior wave speed using the posterior
#' distribution of the model parameters.
#'
#' @param alpha, beta, gama indicating sample from the posterior distribution
#' of the dispersal kernel paremeter, the contact parameter and the
#' infectious period.
#' @param tim A vector of observation times, i.e. the sequence of times at which the system's progress is observed.
#' @param Tmax Final observation time
#' @param samp A vector of indices selecting which posterior samples to use.
#' @param l Takes values 1, 2, 3.
#' \enumerate{
#' \item indicates the rayleigh kernerl
#' \item indicates the exponential kernel
#' \item indicates the cauchy kernel
#' }
#'
#' @details The parameterisation of distributions used are:
#' \enumerate{
#' \item Rayleigh kernerl: \eqn{f(r;\alpha)=2\alpha*r*exp(\alpha*r^2)}
#' \item Exponential kernel: \eqn{f(r;\alpha)=\alpha*exp(\alpha*r)}
#' \item Cauchy kernel: \eqn{f(r;\alpha)=1/(\pi\alpha(1 + r^2/\alpha^2))}
#' }
#'
#' @return A vector indicating the speed of propagation.
#'@examples
#'# Simulation with Rayleigh kernel
#' data(postray) # Posterior distribution of the model parameters obtained from the MCMC
#' samp=sample(10000:100000,1000)
#' alpha=postray[,1][samp]
#' beta=postray[,2][samp]
#' Tmax=325 # Increase Tmax to 600 for ex for more sample and better estimate of the speed.
#' tim=10:Tmax
#' gama=postray[,3][samp]
#' l=1
#' speed=speed(alpha,beta,tim,gama,Tmax,samp,l)
#' # plot speed with 95\% credible interval along with the median
#' hist(speed,breaks=50,col='blue',main=' ',xlab='Wave speed in any direction',xaxt='n')
#' abline(v=round(median(speed),2),col='red',lwd=2)
#' abline(v=round(quantile(speed,.025),2),col='red',lty=2)
#' abline(v=round(quantile(speed,.975),2),col='red',lty=2)
#' axis(side=1,at=c(round(quantile(speed,.025),2),round(median(speed),2),round(quantile(speed,.975),2)),las=2)
#' box()
#' @export
# Posterior wave-speed distribution: one simulated epidemic per posterior
# draw, then the pointwise maximum front distance divided by elapsed time.
speed <- function(alpha, beta, tim, gama, Tmax, samp, l) {
  n_draws <- length(alpha)
  # One row per posterior draw, one column per observation time.
  # Fixed: the original sized the matrix by length(samp) while looping over
  # length(alpha); on a mismatch the unfilled all-zero rows silently entered
  # the column maxima.  'samp' is kept for interface compatibility.
  Mat <- matrix(0, nrow = n_draws, ncol = length(tim))
  for (i in seq_len(n_draws)) {
    sim <- simul(alpha[i], beta[i], tim, gama[i], Tmax, l)
    Mat[i, ] <- sim$maxdist
  }
  # Envelope of the wavefront over all draws, converted to a speed.
  apply(Mat, 2, max) / tim
}
|
5a63fb79ffcf5d7194c2efcf3f00be482eed17ca | e87fad089f466bc80d22816a532e288d6c119df6 | /main/creating_4grams.R | 811e9f85219907eb3433d13a3d487f9c2dbc1741 | [] | no_license | cypee/Capstone | 46d72129e7d8cd7351757a5b1d371b56a1da2c03 | 227cfb31a3554d787766cbe6cf98b789ed1c4a0b | refs/heads/master | 2021-01-20T20:05:22.071365 | 2016-05-30T05:16:29 | 2016-05-30T05:16:29 | 59,980,690 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,220 | r | creating_4grams.R | library("tm")
library("stringi")
library("gsubfn")
library("qdap")
library("beepr")
library("audio")
library("ngram")
workingfolder <- "d:/Users/henry/Desktop/capstone/Data"
setwd(workingfolder)
#ngram of 4
load("clean_data.RData")
data_sz <- length(clean_data)
block <- 1000000
# block <- 5000 # testing
n_rep <- ceiling(data_sz/block)
for (n in 1:n_rep) {
load("clean_data.RData")
if (n < n_rep) {
clean_data1 <- matrix(clean_data[((n-1)*block):(n*block)])
} else {
clean_data1 <- matrix(clean_data[((n-1)*block):data_sz])
}
rm(clean_data)
ngram_list <- apply(clean_data1, 1, function(x) tryCatch({ngram(x , n =4)}, error=function(e){}))
rm(clean_data1)
ngram_sub <- rapply(ngram_list, function(x) as.matrix(get.ngrams(x)))
rm(ngram_list)
#removing leftover strange characters
ngram_sub <- gsub("^[-]+", "", ngram_sub)
ngram_sub <- gsub("^[[:blank:]]+", "", ngram_sub)
ngram_sub <- gsub("[[:blank:]]+", " ", ngram_sub)
#Removing lines with less than 4 words
ngram_sub <- ngram_sub[wc(ngram_sub)==4]
save(ngram_sub, file=paste("ngram4_", n, ".RData", sep=""))
cat(paste(paste("ngram4.",n,sep=""), "completed\n", sep=" "))
}
# load ngram data: one "ngram4_<n>" vector per chunk saved above.
for(n in 1:n_rep){
  load(paste("ngram4_", n,".RData", sep=""))
  assign(paste("ngram4_",n,sep=""), ngram_sub)
}
# Bucket every 4-gram by the first letter of its first word into global
# vectors With4_a .. With4_z, accumulating across all chunks.
for(i in letters){
  for(n in 1:n_rep){
    test <- get(paste("ngram4_", n, sep=""))
    #if the bucket does not exist yet, create it from this chunk's matches
    if(!exists(paste("With4_",i,sep=""))){
      assign(paste("With4_",i,sep=""), test[grepl(test, pattern=paste("^[",i,"]", sep=""))])
    }
    #if the bucket already exists, append this chunk's matches to it
    else if(exists(paste("With4_",i,sep=""))){
      assign(paste("With4_",i,sep=""), c(get(paste("With4_",i,sep="")), test[grepl(test, pattern=paste("^[",i,"]", sep=""))]))
    }
  }
}
# Persist each per-letter 4-gram bucket to its own .RData file.
# save(list = <name>) stores the object under the same name as the original
# 26 repeated save(With4_x, ...) calls did, so the files are interchangeable.
for (l in letters) {
  obj_name <- paste0("With4_", l)
  save(list = obj_name, file = paste0(obj_name, ".RData"))
}
## Write results to .csv files: one frequency table per starting letter.
n <- 4
for(l in letters){
  load(paste("With",n,"_",l,".RData", sep=""))
  ngram <- get(paste("With",n,"_",l, sep=""))
  # table() counts occurrences of each distinct 4-gram; data.frame gives
  # columns (ngram, Freq).
  ngram <- data.frame(table(ngram))
  # character_count() is from qdap; drops entries with at most n characters,
  # which filters out degenerate/near-empty strings.
  ngram <- ngram[character_count(ngram$ngram) > n,]
  write.csv(ngram, file = paste("ngram",n,"_DF_",l,".csv", sep=""))
  # Free the bucket before moving to the next letter.
  rm(ngram)
  rm(list = paste("With",n,"_",l, sep=""))
  print(paste("ngram:",n, ", letter:", l, sep=" "))
}
|
a18330e4db6a65950b316ee3181a71b509996179 | 83522af0f32648e6181af5a841394e8813e365d5 | /man/repair_encoding.Rd | 13eebc537c1974ffc0ca29ff6ee8c5aeb0ac20ec | [
"MIT"
] | permissive | glecaro/rvest | 414fa1aa0255028f86a26d6c16a5700875229d47 | d8abe0482e4d0d41458b5a010c8fe84c6aa0c5d1 | refs/heads/master | 2023-02-24T05:08:14.220762 | 2021-01-29T13:27:38 | 2021-01-29T13:27:38 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 603 | rd | repair_encoding.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/encoding.R
\name{repair_encoding}
\alias{repair_encoding}
\title{Repair faulty encoding}
\usage{
repair_encoding(x, from = NULL)
}
\arguments{
\item{from}{The encoding that the string is actually in. If \code{NULL},
\code{guess_encoding} will be used.}
}
\description{
\ifelse{html}{\figure{lifecycle-deprecated.svg}{options: alt='Deprecated lifecycle'}}{\strong{Deprecated}}
This function has been deprecated because it doesn't work. Instead
re-read the HTML file with correct \code{encoding} argument.
}
\keyword{internal}
|
eb01b6af23d341ea975d3cf67ecc99b7f13e8a11 | fc5a514940766e67d47a1fc5e7e02db3c2022953 | /CTDL>_R/B16_SapXep_NoiBot&B17_SapXep_Nhanh.R | 6ca8191c5127674f483653071bed99eb00da71bd | [] | no_license | TranThiDieuHien/Do_An_CTDL-GT | b5493aba933ffe305dad6fbbda95578bb8b34a4e | 3b11bb7e731d7801cdda754a7b73e0231f5e413b | refs/heads/main | 2023-06-21T00:21:48.342733 | 2021-07-17T15:40:28 | 2021-07-17T15:40:28 | 386,695,973 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 656 | r | B16_SapXep_NoiBot&B17_SapXep_Nhanh.R | vec = c(5, 0, -10, 15, 34, 8, 23, -2)
# Sort a vector in increasing order with bubble sort.
# Repeatedly sweeps the vector swapping adjacent out-of-order pairs; after
# sweep j the j largest elements are already in their final positions.
bubble <- function(x){
  n <- length(x)
  # Guard: for n < 2 the original 1:(n-1) counted backwards (e.g. 1:0),
  # indexing past the end of the vector; such inputs are already sorted.
  if (n < 2) {
    return(x)
  }
  for(j in 1:(n-1)){
    for(i in 1:(n-j)){
      if(x[i]>x[i+1]){
        temp<-x[i]
        x[i]<-x[i+1]
        x[i+1]<-temp
      }
    }
  }
  return(x)
}
bubble(vec)
#Sắp xếp nhanh
# Sort a vector in increasing order with recursive quicksort.
#
# Fixes two defects in the original:
#  * `arr[arr != mid]` removed EVERY copy of the pivot value but only one
#    copy was added back, so duplicates were silently dropped
#    (e.g. quickSort(c(2, 2, 3)) returned c(2, 3)).
#  * `sample(arr, 1)` on a length-1 numeric vector samples from 1:arr
#    instead of returning arr itself.
# Using the first element as pivot keeps the function deterministic and
# also removes the `<<-` accumulation inside lapply().
quickSort <- function(arr) {
  # Base case: vectors of length 0 or 1 are already sorted.
  if (length(arr) <= 1) {
    return(arr)
  }
  pivot <- arr[1]
  rest <- arr[-1]
  smaller <- rest[rest < pivot]
  larger <- rest[rest >= pivot]   # ties go right, preserving duplicates
  c(quickSort(smaller), pivot, quickSort(larger))
}
quickSort(vec)
|
cbc94f5291b70834342e162dfdfa7d7077e8989b | 74ce34dfcd0971aa389b379b7484fddde4cdffc9 | /man/randomRows.Rd | c5571c96567ca0fd139cf35ca3176b8b39d7c495 | [] | no_license | cran/stackoverflow | 294b5425c89167d3278faa19d88905f821ef194f | 3bd6c79acafa3ba9caa681a740cae22da2c18416 | refs/heads/master | 2020-04-04T03:44:41.465303 | 2020-01-10T03:50:02 | 2020-01-10T03:50:02 | 35,567,770 | 2 | 1 | null | null | null | null | UTF-8 | R | false | true | 892 | rd | randomRows.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/randomRows.R
\name{randomRows}
\alias{randomRows}
\title{Sample rows from a dataframe or matrix}
\usage{
randomRows(x, size, replace = FALSE, prob = NULL)
}
\arguments{
\item{x}{a data frame or matrix}
\item{size}{a non-negative integer giving the number of items to choose.}
\item{replace}{Should sampling be with replacement?}
\item{prob}{A vector of probability weights for obtaining the elements of the vector being sampled.}
}
\description{
Sample rows from a dataframe or matrix
}
\section{Changes}{
Matched parameters to sample -- njf, May 18, 2015
}
\references{
\url{http://stackoverflow.com/questions/8273313/random-rows-in-dataframe-in-r}
}
\seealso{
\code{\link{sample}}
\code{\link[dplyr]{sample_n}} for dplyr users
}
\author{
\href{http://stackoverflow.com/users/211116/spacedman}{Spacedman}
}
|
f3247a96e9f73d2c6036377ad6f66bd8ea7c6e41 | 892b01bf9174b0200a1d49075aec42a0dfed934c | /man/source.survscan.Rd | ab95ede36fe809f2770223f8cfca9af85f5080eb | [] | no_license | anfederico/cbmrscripts | fd7962a18aa0b791067debdc7daf9c6556c730c0 | dc0c242444411a684db7ad84143e338fa4ae3931 | refs/heads/master | 2020-06-06T14:13:37.447962 | 2019-06-19T20:29:47 | 2019-06-19T20:29:47 | 192,760,943 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 747 | rd | source.survscan.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sourcerer.R
\name{source.survscan}
\alias{source.survscan}
\title{Extensive survival analysis for a series of signatures}
\usage{
source.survscan(path.to.cbmrscripts,
path.to.tcgadump = "/restricted/projectnb/montilab-p/personal/anthony/tcgadump",
path.to.timer = "/restricted/projectnb/montilab-p/CBMrepositoryData/TCGA/tumorInfiltration/timer/TableS2.13059_2016_1028_MOESM3_ESM.txt")
}
\arguments{
\item{path.to.cbmrscripts}{Absolute path to cbmrscripts}
\item{path.to.tcgadump}{Absolute path to tcgadump}
\item{path.to.timer}{Absolute path to timer data}
}
\value{
A series of functions
}
\description{
Extensive survival analysis for a series of signatures
}
|
138eec85da2d7c2b83fb6476e287ae823c22b367 | 69f66af951cfeeeb124b6bf76a1a6b9674f71a8b | /man/assert_sane_character_vector.Rd | d3fc45913aca5fbab60f277b5d3ef68c223c1740 | [
"MIT"
] | permissive | coolbutuseless/btnsystem | 421880b97f0fef2fa851d1ef24c3ee4aec888a60 | a9573448a7e70955a78e91d257196fe3d01e5ad0 | refs/heads/master | 2022-04-15T08:12:11.169553 | 2020-04-12T23:00:41 | 2020-04-12T23:00:41 | 254,867,680 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 514 | rd | assert_sane_character_vector.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/run.R
\name{assert_sane_character_vector}
\alias{assert_sane_character_vector}
\title{Assert character vector is sane.}
\usage{
assert_sane_character_vector(args)
}
\arguments{
\item{args}{Character vector of arguments to check.}
}
\value{
Logical value. TRUE if all tests pass, otherwise throw an error.
}
\description{
Zero-length vector allowed. No NAs allowed. Must be fewer than 1000 arguments
and fewer than 200000 characters.
}
|
49509167e62ce8ab94389dcde6854798689ff255 | 1da1269745b6ce6806ffd7a15668fc27470cd921 | /R/ghg_ss_ghg_information.R | ffa316a065756dc547355275eea48e6314c9f1d3 | [] | no_license | markwh/envirofacts | d0c3bb7495060fd00b825c1e72602479f8a92b72 | 815ba95808a37f552d9a7041be532817e4766b90 | refs/heads/master | 2021-01-10T07:14:32.874354 | 2019-03-27T02:28:15 | 2019-03-27T02:28:15 | 50,798,175 | 5 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,079 | r | ghg_ss_ghg_information.R | #' Retrieve ss ghg information data from ghg database
#'
#' @param FACILITY_ID e.g. '1000039'. See Details.
#' @param REPORTING_YEAR e.g. '2013'. See Details.
#' @param FACILITY_NAME e.g. 'Alstom Grid Inc'. See Details.
#' @param GAS_NAME e.g. 'Sulfur hexafluoride'. See Details.
#' @param GHG_EMISSIONS_UNROUNDED e.g. '2625.64'. See Details.
#' @param TOTAL_EMISSIONS_UNROUNDED e.g. '0'. See Details.
#' @param MFG_EMISSIONS_UNROUNDED e.g. '2625.64'. See Details.
#' @export
ghg_ss_ghg_information <- function(FACILITY_ID = NULL, REPORTING_YEAR = NULL, FACILITY_NAME = NULL,
    GAS_NAME = NULL, GHG_EMISSIONS_UNROUNDED = NULL, TOTAL_EMISSIONS_UNROUNDED = NULL,
    MFG_EMISSIONS_UNROUNDED = NULL) {
    # Bundle the (possibly NULL) query filters and delegate to the shared
    # Envirofacts fetcher for the "ss_ghg_information" table.
    query_args <- list(
        FACILITY_ID = FACILITY_ID,
        REPORTING_YEAR = REPORTING_YEAR,
        FACILITY_NAME = FACILITY_NAME,
        GAS_NAME = GAS_NAME,
        GHG_EMISSIONS_UNROUNDED = GHG_EMISSIONS_UNROUNDED,
        TOTAL_EMISSIONS_UNROUNDED = TOTAL_EMISSIONS_UNROUNDED,
        MFG_EMISSIONS_UNROUNDED = MFG_EMISSIONS_UNROUNDED
    )
    envir_get("ss_ghg_information", query_args)
}
|
190c68af8065683c1627c4fcf44e1fc4dc8a73ad | 4470537beaf3cf750e91721dd3d19413da6df5e8 | /family_Anoran/findog_s02_tabulate_v01.R | 59c60344ee0895ee7b6b8c47b9956b5b556bdd53 | [] | no_license | xgrau/auto-orthology | 22fa81342dfaa1ceed7099c28230ee13925bb636 | 6ea30d86969a6e57ee245e59b499c5f256233298 | refs/heads/master | 2020-08-01T01:09:37.250658 | 2020-03-03T11:47:21 | 2020-03-03T11:47:21 | 210,808,952 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,769 | r | findog_s02_tabulate_v01.R | # libraries
library(igraph)
library(scales)
setwd("/home/xavi/Documents/auto-orthology/family_Anoran/")
#### Input ####
# input parameters: edge list, node list and reference dictionary produced
# by the upstream orthology pipeline; `man` reuses the dictionary as the
# manual annotation table.
edg_fn = "OG0000000.out_ete.sos_0.00.edges"
nod_fn = "OG0000000.out_ete.nodes"
dic_fn = "OG0000000.dict"
man_fn = dic_fn
# alternative input set kept for reference (ADAR family)
# edg_fn = "family_ADAR/adar.out_ete.sos_0.00.edges"
# nod_fn = "family_ADAR/adar.out_ete.nodes"
# dic_fn = "family_ADAR/adar.dict"
# minimum branch support an edge must have to be kept
sup_th = 0
# load data
edg = read.table(edg_fn, header = T)
nod = read.table(nod_fn, header = T)
dic = read.table(dic_fn, header = T)
man = read.table(man_fn, header = T)
#### Graph info ####
# flag each node by whether its gene appears in the reference dictionary
nod[nod$gene %in% as.character(dic$gene),"ref"] = "ref"
nod[!nod$gene %in% as.character(dic$gene),"ref"] = "not"
nod = merge(nod,dic, by.x = "gene", by.y = "gene", all.x=T)
# remove edges with support under threshold
edg = edg[edg$branch_support >= sup_th,]
# create network from the filtered edge list, keeping all annotated nodes
net = graph_from_data_frame(edg, vertices = nod, directed = F)
net_layout_mds = layout_with_mds(net) # MDS layout
net_layout_nic = layout_components(net) # nice layout
# add gene names as labels, colors and sizes for nodes
V(net)$label = as.character(nod$gene)
net_nod_siz = c(ref = 2, not = 1) # node sizes
net_nod_col = c(ref = "blue", not = "slategray4") # node colors
igraph::V(net)$color = net_nod_col[igraph::V(net)$ref]
igraph::V(net)$size = net_nod_siz[igraph::V(net)$ref]
# plot the network twice (MDS layout, then component layout) into one PDF
pdf(paste(edg_fn,".network.pdf",sep=""),height=5,width=5)
plot.igraph(net,
            vertex.label=V(net)$family,
            vertex.label.family="sans", vertex.frame.color=NA, vertex.label.cex=0.7,
            edge.color = alpha("slategray3",0.5),
            layout=net_layout_mds)
plot.igraph(net,
            vertex.label=V(net)$family,
            vertex.label.family="sans", vertex.frame.color=NA, vertex.label.cex=0.7,
            edge.color = alpha("slategray3",0.5),
            layout=net_layout_nic)
dev.off()
#### Assign families ####
# find components
# assign family (same as ref sequence with which it is sharing component)
# PROBLEMATIC: SAME SEQ CAN SHARE COMPONENT WITH MORE THAN ONE REF
# for (rei in 1:length(nod_ref$component)) {
#   nod[nod$component == nod_ref[rei,"component"], "family_inferred"] = nod_ref[rei,"family"]
# }
# identify all families each seq is linked to (by orthology): for every
# node, collect its direct neighbours in the edge list, look up which
# reference families those neighbours belong to, and store the
# comma-joined family string.
for (noi in 1:nrow(nod)) {
  noi_bool = nod[noi,"gene"] == edg$in_gene | nod[noi,"gene"] == edg$out_gene
  noi_comp_elements = unique(c(as.character(edg[noi_bool,c("in_gene")]),as.character(edg[noi_bool,c("out_gene")])))
  noi_comp_refvec = as.character(dic[dic$gene %in% noi_comp_elements,"family"])
  noi_comp_refstr = paste(noi_comp_refvec, collapse = ',')
  nod[noi,"family_inferred"] = noi_comp_refstr
}
nod$family_inferred_factor= as.factor(nod$family_inferred)
# fixed palette indexed by inferred-family factor level
# NOTE(review): 28 colors hard-coded; presumably matches the number of
# observed factor levels for this dataset -- confirm before reuse.
factor_colors = c("slategray4",
                  "red1","red4","purple1","purple4",
                  "blue1","blue4","olivedrab1","olivedrab4",
                  "darkgreen","orange1","orange3",
                  "cyan4","cyan2","gold","limegreen",
                  "violetred1","violetred4",
                  "springgreen1","springgreen4",
                  "slateblue2","slateblue4","sienna2","sienna4",
                  "paleturquoise1","paleturquoise4",
                  "turquoise3","cyan")
faminf_colors = factor_colors[nod$family_inferred_factor]
# replot, with lots of colors (one per inferred family combination)
V(net)$color = faminf_colors
pdf(paste(edg_fn,".network_colorfams.pdf",sep=""),height=5,width=5)
plot.igraph(net,
            vertex.label=V(net)$family,vertex.size=2,
            vertex.label.family="sans", vertex.frame.color=NA, vertex.label.cex=0.7,
            edge.color = alpha("slategray3",0.5),
            layout=net_layout_mds)
plot.igraph(net,
            vertex.label=V(net)$family,vertex.size=2,
            vertex.label.family="sans", vertex.frame.color=NA, vertex.label.cex=0.7,
            edge.color = alpha("slategray3",0.5),
            layout=net_layout_nic)
legend("topright", legend = levels(nod$family_inferred_factor), col=factor_colors, pch=20, cex=0.3, bty = "n")
dev.off()
#### Compare manual ####
# compare inferred family assignments against the manual annotation table
# with one Venn diagram per reference family.
source("../helper_scripts/geneSetAnalysis.R")
pdf(paste(edg_fn,".venns.pdf",sep=""),height=4,width=4)
for (rei in 1:nrow(dic)) {
  man_list = as.character(man[man$family == dic[rei,"family"],"gene"])
  nod_list = as.character(nod[nod$family_inferred == dic[rei,"family"],"gene"])
  nod_list = nod_list[!is.na(nod_list)]
  # plot venn (venn.two comes from the sourced geneSetAnalysis.R helper)
  # TODO: report lists of intersections, disjoint, etc. (in ven object!)
  ven = venn.two(list1 = nod_list , list2 = man_list, catname1 = "inferred", catname2 = "manual", main = as.character(dic[rei,"family"]))
}
dev.off()
|
1d0317a55b5cb245f4d15cbab33102099169a12d | f8eb55c15aec611480ede47d4e15e5a6e472b4fa | /analysis/0352_house_prices_expensive.R | 36c869b086e9e8dac486459d192f15a13c7de0ee | [] | no_license | nmaggiulli/of-dollars-and-data | a4fa71d6a21ce5dc346f7558179080b8e459aaca | ae2501dfc0b72d292314c179c83d18d6d4a66ec3 | refs/heads/master | 2023-08-17T03:39:03.133003 | 2023-08-11T02:08:32 | 2023-08-11T02:08:32 | 77,659,168 | 397 | 32 | null | null | null | null | UTF-8 | R | false | false | 1,916 | r | 0352_house_prices_expensive.R | cat("\014") # Clear your console
rm(list = ls()) #clear your environment
########################## Load in header file ######################## #
setwd("~/git/of_dollars_and_data")
source(file.path(paste0(getwd(),"/header.R")))
########################## Load in Libraries ########################## #
library(scales)
library(readxl)
library(lubridate)
library(stringr)
library(ggrepel)
library(survey)
library(lemon)
library(mitools)
library(Hmisc)
library(tidyverse)
# Output directory for this analysis, created under the shared export root
# (exportdir/importdir come from the sourced header.R).
folder_name <- "0352_house_prices_expensive"
out_path <- paste0(exportdir, folder_name)
dir.create(file.path(paste0(out_path)), showWarnings = FALSE)

########################## Start Program Here ######################### #

date_string <- date_to_string(Sys.Date())

# Read the Shiller home-price workbook; keep only the first two columns
# (date and the inflation-adjusted index).
shiller_housing <- read_excel(paste0(importdir, "0352_shiller_hpi_data/shiller_house_data_2023_06_05.xls"),
                              sheet = "Data",
                              skip = 6) %>%
                    select(1, 2)

colnames(shiller_housing) <- c("date", "real_housing_index")

to_plot <- shiller_housing

file_path <- paste0(out_path, "/shiller_hpi_real_", date_string, ".jpeg")
source_string <- str_wrap(paste0("Source: Shiller HPI data (OfDollarsAndData.com)"),
                          width = 85)
note_string <- str_wrap(paste0("Note: Index value is adjusted for inflation."),
                        width = 80)

# Line chart of the real housing index since 1890.
plot <- ggplot(to_plot, aes(x=date, y=real_housing_index)) +
  geom_line() +
  scale_y_continuous(label = comma, breaks = seq(0, 225, 25)) +
  scale_x_continuous(breaks = seq(1900, 2020, 20)) +
  of_dollars_and_data_theme +
  ggtitle(paste0("Real U.S. Housing Index Since 1890")) +
  labs(x="Year", y="Real Housing Index",
       caption = paste0(source_string, "\n", note_string))

# Save the plot
ggsave(file_path, plot, width = 15, height = 12, units = "cm")
# ############################ End ################################## # |
181ce2063f255201ae49aefa5cf508edeb77e6e6 | f7a6a9a231b8a0faeaddb75bf4aab4bad1ccd2dc | /man/mplus_check_params.Rd | ba719f6cbd34472898fb6fc917288157da5feb61 | [
"MIT"
] | permissive | d-vanos/MplusReadR | ad55f5d3ab384a6078ac11d643e4fc4ba052e289 | aeae510119cd9694f14ef9ac7b302e94b0af2ca7 | refs/heads/main | 2023-03-20T03:00:33.245289 | 2021-03-15T01:42:22 | 2021-03-15T01:42:22 | 313,146,673 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,358 | rd | mplus_check_params.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mplus_check_parameter_options.R
\name{mplus_check_params}
\alias{mplus_check_params}
\title{Check Parameters}
\usage{
mplus_check_params(
Mplus_file,
parameter_type,
standardized = TRUE,
project = "other"
)
}
\arguments{
\item{Mplus_file}{An mplus object generated by the Mplus Automation package from Mplus output using the \code{\link[MplusAutomation:readModels]{MplusAutomation::readModels()}} function.}
\item{parameter_type}{One of 'parameters', 'paramheader' or 'display'. These are the display options that can be entered in 'param_header', 'parameter', or 'display' in \code{\link[=mplus_compile]{mplus_compile()}}. It is also possible to select 'outcomes' and 'variables' for Dejonckheere project models.}
\item{standardized}{Whether standardized or unstandardized output should be used for univariate and bivariate models. Defaults to TRUE.}
\item{project}{Whether the parameters are for the Dejon project or another project. One of 'dejon', 'other'. Defaults to 'other'.}
}
\value{
A list of available options for \code{\link[=mplus_compile]{mplus_compile()}} or \code{\link[=dejon_compile]{dejon_compile()}}.
}
\description{
Checks the options available to select in \code{\link[=mplus_compile]{mplus_compile()}} or \code{\link[=dejon_compile]{dejon_compile()}}.
}
|
a89d6ef790f99497cb1049a2f7a4e3ca71f762d2 | 0d826056dce249700a0c6b26517163920c94c249 | /irkernel.R | 111e9dd31c9ec3057cf470aea3272d720e27139c | [
"Apache-2.0"
] | permissive | lmorandini/docker-jupyterlab | 5e842d8682071592a0d0f8370c6a12fe28b72478 | e303204f449d10dc5f0676429485dccc26101221 | refs/heads/master | 2021-09-04T19:52:47.094379 | 2018-01-21T22:21:15 | 2018-01-21T22:21:15 | 71,970,649 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 223 | r | irkernel.R | install.packages(c('repr', 'IRdisplay', 'crayon', 'pbdZMQ', 'devtools'), repos='https://cran.ms.unimelb.edu.au')
devtools::install_github('IRkernel/IRkernel')
IRkernel::installspec(name = 'ir33', displayname = 'R 3.3')
|
0311dfac7a73c6da26e9ace3330c58339b0fab2f | 6f45028de8e2b23a123d1e1e2a2b76618a6c472b | /tools/atari.R | 4a81dea83f0bf18e362f60aaf07a8a2b84b16a44 | [] | no_license | RL-code-lib/dapo | 103434e5e9b9a2a2a16dde6dff23c14ba1beb9fb | bd640bac818cf4a551d6ba8426cc31c9e3af6ab9 | refs/heads/master | 2022-03-26T04:55:16.268174 | 2019-11-28T14:05:48 | 2019-11-28T14:05:48 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,725 | r | atari.R | #!/usr/bin/R
library(Rcpp)
sourceCpp("~/darl/tools/lib.cpp")
# Reset the two global y-range accumulators that parse_res()/parse_multi()
# grow via <<- so the next figure starts from an empty range.
prepare_plot <- function() {
  plot_ylim <<- NULL        # c() with no arguments is identical to NULL
  confidence_ylim <<- NULL
}
grp_avg <- function(e) {
x = e$dt-e$dt[1]
y = e[,4] # 4 for Score, 5 for Policy Performance
return(group_avg(x,y,60))
}
parse_res <- function(dirname) {
e <- read.csv(paste(dirname, 'out/logfile', sep='/'), header=FALSE)
d <- read.csv(paste(dirname, 'out/progress.csv', sep='/'))
e$dt <- strptime(e[,1], "%Y-%m-%d %H:%M:%OS")
e <- e[e$dt >= e$dt[1], ]
plot_ylim <<- range(c(plot_ylim, range(grp_avg(e)$y)))
return(list(e=e,d=d))
}
parse_csv <- function(filename) {
e <- read.csv(filename, header=FALSE)
e$dt <- strptime(e[,1], "%Y-%m-%d %H:%M:%OS")
e <- e[e$dt >= e$dt[1], ]
return(e)
}
parse_multi <- function(dirbasename) {
dirnames <- paste(dirbasename, c('_t1', '_t2', '_t3', '_t4', '_t5'), sep='')
d <- NULL
for(dir in dirnames) {
e <- parse_csv(paste(dir,'out/logfile',sep='/'))
rt <- grp_avg(e)
newd <- data.frame(x=rt$x, y=rt$y)
colnames(newd)[2] <- paste(dir,'y',sep='.')
if(is.null(d)) {
d <- newd
} else {
d <- merge(d, newd, by='x', all=TRUE)
}
}
# Post-process
selected <- !is.na(rowMeans(d[,2:ncol(d)]))
d <- d[selected,]
apply(d[,2:ncol(d)],1,quantile,0.25,na.rm=TRUE) -> lower
apply(d[,2:ncol(d)],1,median,na.rm=TRUE) -> med
apply(d[,2:ncol(d)],1,quantile,0.75,na.rm=TRUE) -> upper
confidence_ylim <<- range(c(confidence_ylim, range(lower), range(upper)), na.rm=TRUE)
return(data.frame(x=d$x, lower=lower, med=med, upper=upper))
}
make_plot <- function(res, ...) {
rt = grp_avg(res$e)
ylab='Score' # e[,4]
#ylab='Performance' # e[,5]
if(TRUE) {
plot(x=rt$x/3600, y=rt$y, type='n', xlab='Training Time/hour', ylab=ylab, ylim=plot_ylim, ...)
abline(v=seq(0,10,0.5), col='grey', lty=2)
}
if(FALSE) {
xlim = c(-max(res$d$policy_entropy), 0)
plot(x=rt$x/3600, y=rt$y, type='n', xlab='Negative Entropy', ylab=ylab, ylim=plot_ylim, xlim=xlim, ...)
abline(v=seq(xlim[1],0,length.out=4), col='grey', lty=2)
}
abline(h=seq(plot_ylim[1], plot_ylim[2], length.out=5), col='grey', lty=2)
prepare_plot()
}
# Open an empty plot frame for quantile-band curves from parse_multi(),
# sized by the global confidence_ylim, then reset the accumulators.
make_confidence_plot <- function(d, ...) {
  ylab='Score' # e[,4]
  #ylab='Performance' # e[,5]
  # d has four columns: x, lower, med, upper
  plot(x=d$x/3600, y=d$med, type='n', xlab='Training Time/hour', ylab=ylab, ylim=confidence_ylim, ...)
  abline(v=seq(0,3,0.5), col='grey', lty=2)
  abline(h=seq(confidence_ylim[1], confidence_ylim[2], length.out=5), col='grey', lty=2)
  prepare_plot()
}
# Add one run's score curve to the current plot. Score and entropy are
# bucket-averaged separately and inner-joined on the time bucket so the
# alternative entropy x-axis (the if(FALSE) branch, manual toggle) lines up.
draw_line <- function(res, ...) {
  #lines(filter(e[,4], rep(1/300, 300)), x=(e$dt-e$dt[1])/3600, ...)
  rt = grp_avg(res$e)
  ent = group_avg(res$d$time_elapsed, res$d$policy_entropy, 60)
  d1 <- data.frame(x=rt$x, rt=rt$y)
  d2 <- data.frame(x=ent$x, ent=ent$y)
  d <- merge(x=d1, y=d2, by="x")
  if(TRUE) {
    lines(d$rt, x=d$x/3600, ...)
  }
  if(FALSE) {
    lines(d$rt, x=-d$ent, ...)
  }
}
# Add a median curve plus a translucent interquartile band (alpha = 50/255)
# for one parse_multi() result to the current plot.
draw_confidence_line <- function(d, col, ...) {
  alpha_col = rgb(t(col2rgb(col)), alpha=50, maxColorValue = 255)
  polygon(x=c(rev(d$x), d$x)/3600, y=c(rev(d$upper),d$lower), col=alpha_col, border=FALSE)
  lines(x=d$x/3600, d$med, col=col, ...)
}
old_get_dir_name <- function() {
pwds <- unlist(strsplit(getwd(), split='/'))[5]
return(pwds[length(pwds)])
}
get_dir_name <- function() {
return(basename(getwd())[1])
}
get_starting_score <- function(res) {
rt = grp_avg(res$e)
return(mean(rt$y[1], na.rm=TRUE))
}
get_ending_score <- function(res) {
rt = grp_avg(res$e)
return(mean(rev(rt$y)[1:min(length(rt$y),1)], na.rm=TRUE))
}
get_relative_score <- function(proposed, baseline, random, human=NA) {
# The formula follows from Z.Wang et al., Dueling network architectures for deep reinforcement learning
return ((proposed-baseline) / (ifelse(is.na(human),baseline,max(human,baseline)) - random))
}
get_human_score <- function() {
game <- get_dir_name()
dat <- read.table("../atari_human_score.txt", header=FALSE, stringsAsFactors=FALSE)
ret <- dat[dat[,1]==game,]
if(nrow(ret)==0) {
return(NA)
} else {
return(ret[,2])
}
}
get_game_type <- function() {
game <- get_dir_name()
dat <- read.table("../atari_human_score.txt", header=FALSE, stringsAsFactors=FALSE)
ret <- dat[dat[,1]==game,]
if(nrow(ret)==0) {
return('Unknown')
} else {
return(ret[,3])
}
}
main <- function(f, exp_name, make_pdf=TRUE, make_png=FALSE, raw=FALSE) {
if(raw) { f() }
if(make_pdf) { pdf(file=paste(get_dir_name(), '_', exp_name, ".pdf", sep=''), width=5, height=5); f(); dev.off() }
if(make_png) { png(file=paste(get_dir_name(), '_', exp_name, ".png", sep=''), width=300, height=300); f(); dev.off() }
}
|
194cae27bb6eab5abe902f89ebac785bfb0a5726 | 379e403848af05b2bb4fa92184cff4c0ec5ac102 | /code/5lda.R | 42a474dada11321e8603b402bbf8c019ebdc2f0b | [] | no_license | JonasRieger/fringes | 570d339f73ce5d46d30bc838a1b7a1b2d98696f3 | 345c743cb782ef593d44be8579c2d0388b5966fa | refs/heads/main | 2023-07-15T08:38:03.285146 | 2021-08-26T06:29:50 | 2021-08-26T06:29:50 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,141 | r | 5lda.R | library(ldaPrototype)
library(batchtools)
reg = makeExperimentRegistry(file.dir = "Batch", packages = "ldaPrototype")
sapply(seq(20, 75, 5),
function(K) addProblem(paste0("AT", K), data = list(i = "AT", K = K, ncpus = 4)))
sapply(seq(20, 40, 5),
function(K) addProblem(paste0("CH", K), data = list(i = "CH", K = K, ncpus = 4)))
sapply(seq(45, 75, 5),
function(K) addProblem(paste0("CH", K), data = list(i = "CH", K = K, ncpus = 2)))
sapply(seq(20, 45, 5),
function(K) addProblem(paste0("DK", K), data = list(i = "DK", K = K, ncpus = 2)))
sapply(seq(50, 75, 5),
function(K) addProblem(paste0("DK", K), data = list(i = "DK", K = K, ncpus = 1)))
sapply(seq(20, 55, 5),
function(K) addProblem(paste0("ESP", K), data = list(i = "ESP", K = K, ncpus = 1)))
sapply(seq(60, 75, 5),
function(K) addProblem(paste0("ESP", K), data = list(i = "ESP", K = K, ncpus = 2)))
sapply(seq(20, 40, 5),
function(K) addProblem(paste0("FR", K), data = list(i = "FR", K = K, ncpus = 1)))
sapply(seq(45, 75, 5),
function(K) addProblem(paste0("FR", K), data = list(i = "FR", K = K, ncpus = 2)))
sapply(seq(20, 40, 5),
function(K) addProblem(paste0("GER", K), data = list(i = "GER", K = K, ncpus = 2)))
sapply(seq(45, 75, 5),
function(K) addProblem(paste0("GER", K), data = list(i = "GER", K = K, ncpus = 1)))
sapply(seq(20, 75, 5),
function(K) addProblem(paste0("IT", K), data = list(i = "IT", K = K, ncpus = 1)))
sapply(seq(20, 75, 5),
function(K) addProblem(paste0("NL", K), data = list(i = "NL", K = K, ncpus = 1)))
sapply(seq(20, 40, 5),
function(K) addProblem(paste0("UK", K), data = list(i = "UK", K = K, ncpus = 2)))
sapply(seq(45, 75, 5),
function(K) addProblem(paste0("UK", K), data = list(i = "UK", K = K, ncpus = 1)))
# Algorithm applied to every problem: load the country's corpus, fit
# repeated LDA runs (parallel socket backend when the problem granted more
# than one CPU), save the result to data/<country>/lda/<K>.rds, and return
# the elapsed wall time in hours as the job result.
addAlgorithm("LDARepAlgo",
             fun = function(job, data, instance, seed, ...){
               i = data$i
               K = data$K
               ncpus = data$ncpus
               starttime = Sys.time()
               message("### ", i, " Topics: ", K, " ###")
               docs = readRDS(file.path("data", i, "docs.rds"))
               vocab = readRDS(file.path("data", i, "vocab.rds"))
               if(ncpus > 1){
                 lda = LDARep(docs, vocab, K = K, pm.backend = "socket", ncpus = ncpus)
               }else{
                 lda = LDARep(docs, vocab, K = K)
               }
               saveRDS(lda, file.path("data", i, "lda", paste0(K, ".rds")))
               # Force a garbage collection before the next job reuses the slave.
               gc(verbose = TRUE, reset = TRUE)
               time = as.numeric(difftime(Sys.time(), starttime, units = "hours"))
               message(round(time, 2), " hours")
               return(time)
             })
addExperiments()
# Recover country (i) and topic count (K) from the problem name, then attach
# per-job scheduler resources (walltime in seconds, memory in MB, ncpus).
# The K < 41 / K > 41 style thresholds fall between the seq(..., 5) grid
# points, so they cleanly split e.g. {20..40} from {45..75}.
ids = getJobTable()[, .(job.id, problem)]
ids[, K := as.integer(gsub("[A-Z]", "", problem))]
ids[, i := gsub("[0-9]", "", problem)]
ids[i == "AT", walltime := 2*60*60]
ids[i == "AT", memory := 32*1024]
ids[i == "AT", ncpus := 4]
ids[i == "CH" & K < 41, walltime := 2*60*60]
ids[i == "CH" & K < 41, ncpus := 4]
ids[i == "CH" & K > 41, walltime := 8*60*60]
ids[i == "CH" & K > 41, ncpus := 2]
ids[i == "CH", memory := 32*1024]
ids[i == "DK" & K < 46, walltime := 8*60*60]
ids[i == "DK" & K < 46, ncpus := 2]
ids[i == "DK" & K > 46, walltime := 48*60*60]
ids[i == "DK" & K > 46, ncpus := 1]
ids[i == "DK", memory := 32*1024]
ids[i == "ESP" & K < 56, walltime := 48*60*60]
ids[i == "ESP" & K < 56, ncpus := 1]
ids[i == "ESP" & K < 56, memory := 32*1024]
ids[i == "ESP" & K > 56, walltime := 48*60*60]
ids[i == "ESP" & K > 56, ncpus := 2]
ids[i == "ESP" & K > 56, memory := 64*1024]
ids[i == "FR" & K < 41, walltime := 48*60*60]
ids[i == "FR" & K < 41, ncpus := 1]
ids[i == "FR" & K < 41, memory := 32*1024]
ids[i == "FR" & K > 41, walltime := 48*60*60]
ids[i == "FR" & K > 41, ncpus := 2]
ids[i == "FR" & K > 41, memory := 64*1024]
ids[i %in% c("GER", "UK") & K < 41, walltime := 48*60*60]
ids[i %in% c("GER", "UK") & K < 41, ncpus := 2]
ids[i %in% c("GER", "UK") & K < 41, memory := 64*1024]
ids[i %in% c("GER", "UK") & K > 41, walltime := 7*24*60*60]
ids[i %in% c("GER", "UK") & K > 41, ncpus := 1]
ids[i %in% c("GER", "UK") & K > 41, memory := 40*1024]
ids[i %in% c("IT", "NL"), walltime := 48*60*60]
ids[i %in% c("IT", "NL"), memory := 32*1024]
ids[i %in% c("IT", "NL"), ncpus := 1]
# Drop helper columns so only job.id plus resource columns are submitted.
ids[, problem := NULL]
ids[, K := NULL]
ids[, i := NULL]
submitJobs(ids)
5e05c70a648b06a0434da1135f9d17c49cea2b4d | bcdfb8f7ac27dcb48f7667e14c543043247d2f58 | /cachematrix.R | b213f8a7a58b927af80d686775e243f95cf7deab | [] | no_license | mardup/ProgrammingAssignment2 | 322ea2e2e4712ce02a5caf0b8d5b44e52b015108 | a00c722574bddc736620313998ff125587eeb5ab | refs/heads/master | 2020-11-28T05:16:07.050841 | 2019-12-23T14:45:29 | 2019-12-23T14:45:29 | 229,713,451 | 0 | 0 | null | 2019-12-23T08:50:11 | 2019-12-23T08:50:10 | null | UTF-8 | R | false | false | 1,222 | r | cachematrix.R | ## This assignement is to create 2 functions to cach the inverse of a matrix
## similarly to the example to cache mean
## This function is to set matrix value (setM), get matrix value (getM)
## set the matrix inverse/solve (setSolve) and get the matrix inverse/solve (getSolve)
## Wrap a matrix together with a cache slot for its inverse.
## Returns a list of four accessors sharing one enclosing environment:
## setM/getM replace and read the matrix, setSolve/getSolve store and read
## the cached inverse. Replacing the matrix clears the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  setM <- function(y) {
    # A new matrix invalidates any previously cached inverse.
    x <<- y
    cached_inverse <<- NULL
  }
  getM <- function() {
    x
  }
  setSolve <- function(solve) {
    cached_inverse <<- solve
  }
  getSolve <- function() {
    cached_inverse
  }
  list(
    setM = setM,
    getM = getM,
    setSolve = setSolve,
    getSolve = getSolve
  )
}
## This function will check if inverse of the matrix has been already calculated
## if not it will calculate, if it has it will send the result
## with print message "getting cached data"
## Return the inverse of a "cache matrix" built by makeCacheMatrix().
## The first call computes the inverse with solve() and stores it in the
## object's cache; later calls print "getting cached data" and return the
## stored value without recomputing.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  stored <- x$getSolve()
  if (!is.null(stored)) {
    message("getting cached data")
    return(stored)
  }
  inverse <- solve(x$getM(), ...)
  x$setSolve(inverse)
  inverse
}
|
5049d9d2ed68ed68a6c40445b57f5a2c70da4723 | d4db709669719f84558afb6165079f1f4947669c | /man/calcShift.Rd | 5d9e851cd07cfd095bd39800cbd185d0e7f8c856 | [
"MIT"
] | permissive | shulp2211/SeeCiTe | a4daa3b757499851862ec6a3de8f31d289ea12af | 745c53db36ee4a0544280c4bceb96ec8989eb7ee | refs/heads/master | 2023-02-21T00:15:28.867295 | 2021-01-18T17:06:57 | 2021-01-18T17:06:57 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 572 | rd | calcShift.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calcShift.R
\name{calcShift}
\alias{calcShift}
\title{Create a decile plot and a scatter for CNV versus flanking region in an offspring. Uses rogme package.}
\usage{
calcShift(lrr_dt, single = F)
}
\arguments{
\item{lrr_dt}{A data table with probe level LRR values for all individuals in a trio.}

\item{single}{Defaults to \code{FALSE}; behaviour undocumented in the roxygen source -- see \code{calcShift} for details.}
}
\value{
A list with a data table with sampling results and a graph object
}
\description{
Create a decile plot and a scatter for CNV versus flanking region in an offspring. Uses rogme package.
}
|
366e5d0e1d7d7238938f8d84a3371f9d1b1f48b0 | d2a1402ec7225f160436fa9997c22dfbc98b2c2b | /TESTmultiComparison.R | 92c25d823f5f301d28ce3736833c241cd19e58ae | [] | no_license | ashar799/SBC | d9fe9e6a02ab6b70a3b3d0532b45b76ac1846cd9 | 731d73821ad27944f0767957ff5205554702ad4b | refs/heads/master | 2021-01-20T20:32:35.588709 | 2019-04-11T11:42:16 | 2019-04-11T11:42:16 | 61,547,525 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,066 | r | TESTmultiComparison.R | ########## This file compares prediction on test cases with different methods
####### So we use k-means + SVM to predict labels on new data points ###########
####### Then fit penalized Cox- Models with the predicted Labels
# Benchmarks baseline prediction pipelines on held-out test data:
# (1) k-means + linear SVM and (2) k-means + kNN for cluster-label recovery
# (scored by adjusted Rand index against c.true.new), then (3) penalized Cox
# and (4) penalized AFT models for survival prediction (scored by C-index).
# Relies entirely on globals (Y1, Y2, Y1.test, Y2.test, time, censoring,
# time.new, censoring.new, c.true.new, F) and writes its four results into
# the global environment via `<<-`:
#   predRandIndex.svm, predRandIndex.knear, predCIndex.cox, predCIndex.aft
# NOTE(review): `F` is used below as the number of clusters for kmeans() and
# as `k` for knn(). Unless a global numeric `F` is defined by the calling
# script, this resolves to FALSE (= 0) -- confirm F is set upstream.
predictionGroundTruth = function(){
  ############ Predicting New Class Labels #################################
  # Training and test feature matrices are the two global feature sets
  # concatenated column-wise.
  Y <- cbind(Y1,Y2)
  Y.new <- cbind(Y1.test,Y2.test)
  # k-means on training data provides surrogate class labels for the SVM.
  gr.km <- kmeans(Y, F, nstart =10)
  label.train <- gr.km$cluster
  # Grid-search the SVM cost over 2^-10..2^14 by 5-fold cross-validation
  # error, then refit at the best cost (kernlab::ksvm, linear kernel).
  svms <- sapply(2^(-10:14), function(cost) cross(ksvm(Y, factor(label.train), C=cost, kernel="vanilladot", kpar=list(), cross=5)))
  mysvm <- ksvm(Y, factor(label.train), C=2^(-10:14)[which.min(svms)], kernel="vanilladot", kpar=list(), cross=10) # accuracy ~97%
  pred.svm <- predict(mysvm, Y.new)
  # Agreement of SVM-predicted labels with the true test labels.
  predRandIndex.svm <<- adjustedRandIndex(c.true.new, pred.svm)
  ############ Predicting the New Class Labels using kNN #############################
  # Fresh k-means labels (stochastic -- may differ from the SVM run above).
  gr.km <- kmeans(Y, F, nstart =10)
  label.train <- gr.km$cluster
  # class::knn with k = F neighbours (see NOTE above about `F`).
  knear <- knn(train = Y, test = Y.new, cl = label.train, k = F)
  predRandIndex.knear <<- adjustedRandIndex(c.true.new, knear)
  ######## Predicting New C-Indices based on a Penalized Cox or AFT model####################
  ######## Penalized Cox PH ###########################################
  linear.pred.cox <- c(0)
  # Cross-validated elastic-net Cox model; exp(time) suggests the global
  # `time` holds log survival times -- TODO confirm.
  reg.pcox <- cv.glmnet(x = Y, y = Surv(exp(time), censoring), family = "cox")
  linear.pred.cox <- predict(object =reg.pcox, newx = Y.new, s= "lambda.min")
  smod <- Surv(exp(time.new), censoring.new)
  # Concordance of the Cox linear predictor with observed test survival.
  predCIndex.cox <<- as.numeric(survConcordance(smod ~ linear.pred.cox)[1])
  ##### Penalized AFT Model #############################################
  linear.pred.paft <- c(0)
  # Gaussian lasso on (log) time approximates a penalized AFT fit.
  reg.paft <- cv.glmnet(x = Y, y = time, family = "gaussian")
  linear.pred.paft <- predict(object = reg.paft, newx = Y.new, s= "lambda.min")
  smod <- Surv(exp(time.new), censoring.new)
  # exp(-prediction) converts a longer-predicted-survival score into a risk
  # ordering for concordance.
  predCIndex.aft <<- as.numeric(survConcordance(smod ~ exp(-linear.pred.paft))[1])
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.