blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b782e56be49e9d3a96a591448575634fed19aaa6
|
6f720f61a5cb26de1d70ce3a191779216a3b89ff
|
/Replication Files for Why Does the ANES Overestimate Voter Turnout/useSampledExport.R
|
e55c071be0cee5324f7b44a41ad1d21469e5d1a2
|
[] |
no_license
|
bspahn/ANES-Research
|
4deeaec218b186a3b99eaad00490d30f319101f0
|
ecd39e0d8a6bcaeb2a08770521d213cab41d9641
|
refs/heads/master
| 2020-04-03T09:58:29.031274
| 2018-10-29T08:54:59
| 2018-10-29T08:54:59
| 155,180,648
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,404
|
r
|
useSampledExport.R
|
##############################################
## make data set for individual-level analysis
##
## merges the household-level sampling/export file with the ANES sample
## file, flags late registrants and responder households, and inspects
## concatenated respondent names for the name-match checks.
##
## simon jackman
## july 2014
###############################################
library(stringr)
###############################################
## this is a household level attribute
sdata <- read.delim(file="sampexp_nodupes.txt",
sep="\t")
##sdata <- read.delim(file="../data/sampexp_20160314.txt",
## sep="\t")
##drop <- sdata$col0_abtid=="14337" & sdata$vendorid != "LALTN2176182"
##sdata <- sdata[!drop,]
## registered after 2012-09-08 (regdate compared as numeric yyyymmdd)
sdata$latereg <- sdata$regdate > 20120908
## responder hh (completed interview)
responder.hh <- sdata$dispo=="Complete"
## completed interview AND the name matched the sample record
ok <- responder.hh & sdata$namematch==1
anes_sample <- read.csv(file="anes_sample.csv",
stringsAsFactors=FALSE)
## merge key plus the household variables not already in the ANES file
theVars <- c("col0_abtid",
names(sdata)[!(names(sdata) %in% names(anes_sample))])
## left join: keep every ANES sample row, attaching household data only
## for completed, name-matched households (key case differs across files)
foo <- merge(anes_sample,
sdata[ok,theVars],
by.x="COL0_ABTID",
by.y="col0_abtid",
all.x=TRUE)
## drop merge artifacts / unwanted columns
foo$col0_abtid.1 <- NULL
foo$e2012new <- NULL
## TRUE when any of the three name fields is non-empty
foo$haveName <- foo$n1!="" | foo$n2!="" | foo$n3!=""
## completes with a missing match flag are explicit non-matches, not NA
foo$namematch[foo$dispo=="Complete" & is.na(foo$namematch)] <- 0
## extra Brad code: look at respondents with short concatenated names
name <- paste(anes_sample$n1, anes_sample$n2, anes_sample$n3)
ok <- anes_sample$complete & str_length(name)<12 ##& str_length(name)>3
table(str_length(name))
name[ok]
|
f5afed3aa8b24443c51197a52004d845b0e484b7
|
421366a39299a1a82bd0f2a42e667da7fc602b62
|
/man/DateAfterReachValue.Rd
|
5e045f912050cbb03287355c52e24d73c7b4162e
|
[] |
no_license
|
thomasferte/PredictCovidOpen
|
363ef4cc9006696d5fa16c2ac4bdf9b58882a476
|
2468b0006a6f19310a9f8c7de6aa46979f19d627
|
refs/heads/main
| 2023-03-17T19:28:52.863817
| 2023-01-26T09:51:38
| 2023-01-26T09:51:38
| 496,952,863
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 493
|
rd
|
DateAfterReachValue.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DateAfterReachValue.R
\name{DateAfterReachValue}
\alias{DateAfterReachValue}
\title{DateAfterReachValue}
\usage{
DateAfterReachValue(value, df, feature, date = "START_DATE")
}
\arguments{
\item{value}{A numeric value}
\item{df}{a dataframe}
\item{feature}{the feature column name}
\item{date}{the date column name}
}
\value{
A date
}
\description{
Obtain the date after a feature exceeds a certain threshold value.
}
|
47230892170778db750bbcb155d4f77855ece87f
|
a83c07192848b855e2324cb59ee7c289b6ac76fc
|
/programs/master.R
|
af2302bfafb08d8437162309788f3a50f9035c3d
|
[
"MIT"
] |
permissive
|
michaelkotrous/daedalus
|
1ca27b9998020848f6ec5510a5b7b16b4d563ac4
|
445160040e12dbf976f4e1638fed77a3ab9bd706
|
refs/heads/master
| 2020-12-30T12:11:45.247015
| 2019-02-26T19:02:07
| 2019-02-26T19:02:07
| 91,404,794
| 1
| 1
|
MIT
| 2019-02-26T19:02:09
| 2017-05-16T02:24:16
|
R
|
UTF-8
|
R
| false
| false
| 892
|
r
|
master.R
|
# master.R: entry point that prepares the workspace and then runs every
# daedalus analysis script in order.

# Point the working directory at the daedalus project root (edit this path).
setwd("/path/to/daedalus")

# Make sure the output directory exists (no warning if it already does).
dir.create("exports", showWarnings = FALSE)

# Configuration scripts: load and clean the NTSB dataset (order matters).
config_scripts <- c("settings.R", "data-cleanup.R", "dataframes.R")
for (script in config_scripts) {
  source(file.path("programs", "config", script))
}

# Analysis scripts: aggregate statistics, run tests, and export csvs for
# analysis; csv output files will be written to the exports directory.
analysis_scripts <- c(
  "accident-timeseries.R",
  "occurrence-code-percentages.R",
  "timeseries-autocorrelations.R",
  "accident-activity-change-correlations.R",
  "fatality-top5counts.R",
  "fatalityrate-coderate-change-correlations.R",
  "durbinwatson.R",
  "chisquare.R"
)
for (script in analysis_scripts) {
  source(file.path("programs", "tests", script))
}
|
3dc0c25f08045828618b4fa9176f6064ac68a8a2
|
e1816403b93f741a38b58263bac160971ea28ef4
|
/R/get_bid.R
|
475d59c97085dd5438d0debc77d3c2fb88c1e1be
|
[] |
no_license
|
elmstedt/autograder
|
53cce0cb69669c0f112221e14fe71531b1e40595
|
b1ca92c776090212085429dbdc781e5292a3f34a
|
refs/heads/master
| 2021-05-23T15:02:27.756454
| 2020-05-03T10:49:34
| 2020-05-03T10:49:34
| 253,351,490
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 335
|
r
|
get_bid.R
|
#' Extract 9-digit bid identifiers from text
#'
#' Scans the input for every run of exactly nine digits that is immediately
#' followed by a forward slash, flattens the matches, and trims surrounding
#' whitespace. Warnings from the extraction are suppressed.
#'
#' @param r A character vector to scan for identifiers.
#'
#' @return A character vector of the matched 9-digit strings (zero-length
#'   when there are no matches).
#' @export
#'
#' @examples
#' # get_bid("123456789/section")  # returns "123456789"
#' @importFrom stringr str_extract_all str_trim
#' @importFrom dplyr "%>%"
get_bid <- function(r) {
suppressWarnings(r %>%
stringr::str_extract_all("\\d{9}(?=\\/)") %>%
unlist() %>%
stringr::str_trim())
}
|
ffe2f8966db38cc4ed37258032453f3898226bf5
|
4b9955701ca424c19bec17f0bc4b36f72cfcbcc4
|
/man/NULLCPO.Rd
|
ea113660effed57d470e7c65ad2b6dc117dce300
|
[
"BSD-2-Clause"
] |
permissive
|
mlr-org/mlrCPO
|
c238c4ddd72ece8549f8b48a79f02f543dac60e5
|
e6fc62a4aeb2001a3760c9d1126f6f2ddd98cc54
|
refs/heads/master
| 2022-11-21T17:30:54.108189
| 2022-11-16T16:08:10
| 2022-11-16T16:08:10
| 100,395,368
| 39
| 4
|
NOASSERTION
| 2022-10-18T23:46:13
| 2017-08-15T16:08:30
|
R
|
UTF-8
|
R
| false
| true
| 3,122
|
rd
|
NULLCPO.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/NULLCPO.R
\docType{data}
\name{NULLCPO}
\alias{NULLCPO}
\title{CPO Composition Neutral Element}
\format{
An object of class \code{NULLCPO} (inherits from \code{CPOPrimitive}, \code{CPORetrafo}, \code{CPOInverter}, \code{CPOTrained}, \code{CPO}) of length 0.
}
\usage{
NULLCPO
}
\description{
\code{NULLCPO} is the neutral element of \code{\link{CPO}} and \code{\link{CPOTrained}} composition when using
\code{\link{\%>>\%}} or \code{\link{composeCPO}}. It is furthermore no effect when attached to a \code{\link[mlr:makeLearner]{Learner}}
using \code{\link{attachCPO}} (or \code{\link{\%>>\%}}), or when applied to data using \code{\link{applyCPO}}, \code{\link{invert}},
or \code{\link[stats]{predict}} (or, again, \code{\link{\%>>\%}}).
\code{NULLCPO} works as a stand-in for certain operations that have an "empty" return value:
It is returned when \code{\link{retrafo}} and \code{\link{inverter}} are applied to an object that has no retrafo or inverter
associated with it, and by \code{\link{pipeCPO}} when applied to an empty list.
\code{NULLCPO} can be checked using \code{\link{is.nullcpo}}, and converted from or to \code{NULL} using \code{\link{nullToNullcpo}} and
\code{\link{nullcpoToNull}}. Otherwise it behaves very similarly to other \code{\link{CPO}} or \code{\link{CPOTrained}} objects.
}
\seealso{
Other retrafo related:
\code{\link{CPOTrained}},
\code{\link{\%>>\%}()},
\code{\link{applyCPO}()},
\code{\link{as.list.CPO}},
\code{\link{clearRI}()},
\code{\link{getCPOClass}()},
\code{\link{getCPOName}()},
\code{\link{getCPOOperatingType}()},
\code{\link{getCPOPredictType}()},
\code{\link{getCPOProperties}()},
\code{\link{getCPOTrainedCPO}()},
\code{\link{getCPOTrainedCapability}()},
\code{\link{getCPOTrainedState}()},
\code{\link{is.retrafo}()},
\code{\link{makeCPOTrainedFromState}()},
\code{\link{pipeCPO}()},
\code{\link{print.CPOConstructor}()}
Other inverter related:
\code{\link{CPOTrained}},
\code{\link{\%>>\%}()},
\code{\link{applyCPO}()},
\code{\link{as.list.CPO}},
\code{\link{clearRI}()},
\code{\link{getCPOClass}()},
\code{\link{getCPOName}()},
\code{\link{getCPOOperatingType}()},
\code{\link{getCPOPredictType}()},
\code{\link{getCPOProperties}()},
\code{\link{getCPOTrainedCPO}()},
\code{\link{getCPOTrainedCapability}()},
\code{\link{getCPOTrainedState}()},
\code{\link{is.inverter}()},
\code{\link{makeCPOTrainedFromState}()},
\code{\link{pipeCPO}()},
\code{\link{print.CPOConstructor}()}
Other CPO lifecycle related:
\code{\link{CPOConstructor}},
\code{\link{CPOLearner}},
\code{\link{CPOTrained}},
\code{\link{CPO}},
\code{\link{\%>>\%}()},
\code{\link{attachCPO}()},
\code{\link{composeCPO}()},
\code{\link{getCPOClass}()},
\code{\link{getCPOConstructor}()},
\code{\link{getCPOTrainedCPO}()},
\code{\link{identicalCPO}()},
\code{\link{makeCPO}()}
Other NULLCPO related:
\code{\link{is.nullcpo}()},
\code{\link{nullToNullcpo}()},
\code{\link{nullcpoToNull}()}
}
\concept{CPO lifecycle related}
\concept{NULLCPO related}
\concept{inverter related}
\concept{retrafo related}
\keyword{datasets}
|
602cec897e2c595c43b51476c12c0e1cdfb913b5
|
444936c27b6a76d40c100bdfdbcd6917ec136840
|
/scripts/readConfig.r
|
97d6cb9a4b6561f67b95ca2f9dcbde5406204984
|
[] |
no_license
|
pickledplum/mpg
|
bb1c09b2393006513d6120a01284ccde059e182e
|
8b0f376ba7088164bb2273dbc6083dd9659e6979
|
refs/heads/master
| 2016-09-10T20:10:54.655702
| 2014-03-07T23:51:23
| 2014-03-07T23:51:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,724
|
r
|
readConfig.r
|
# Convert a namelist file (key = value(s) pairs) into an environment.
#
# The namelist may contain #-led comment lines. Each value string has
# embedded blanks removed and is then split on commas: a single token is
# stored as a length-1 character string, a comma-separated list as a
# character vector. All values are stored as character; callers coerce
# to numeric themselves if needed.
#
# An example of a namelist file:
#
# OUTPUT_DIR = "D:/home/honda/mpg/dummy/fs_output"
# PORTFOLIO_OFDB = "PERSONAL:HONDA_MSCI_ACWI_ETF"
# #PORTFOLIO_LIST = "list_of_constituents.txt" # this line will be ignored.
# PREFIX = "dummy"
# T0 = "19800101"
# T1 = "0"
# DEFAULT_CURRENCY = USD
# FF_ASSETS=Q
# FF_CACH_ONLY=Q,LOCAL
#
# Args:
#   config_file: path to the namelist file.
# Returns:
#   An environment with one binding per key.
readConfig <- function( config_file ) {
    ## Named "config" rather than "table" so we do not shadow base::table().
    config <- read.table(config_file, sep="=", comment.char="#",
                         strip.white=TRUE, as.is=TRUE)
    rownames(config) <- config[,1]
    env <- new.env(hash=TRUE)
    for( para in rownames(config) ) {
        ## Remove embedded blanks, then split comma-separated lists.
        val_str <- gsub(" ", "", config[para,2])
        tokens <- strsplit(val_str, ",")[[1]]
        if( length(tokens) == 1 ){
            ## Single value: store the bare string.
            assign(para, tokens[1], envir=env)
        }
        else{
            ## List value (or empty): store the character vector as-is.
            assign(para, tokens, envir=env)
        }
    }
    return(env)
}
|
cb0ad7e8393a48ee0ad3592e11de9d74b1349231
|
06e14a5bea07a3fb3a434eb484875165410fe30e
|
/eval/plotting/output_protocol_stats.R
|
6ae9241ee1b743a90258e251061830a86bac8ecf
|
[
"MIT"
] |
permissive
|
tiago-peres/ipfs-crawler
|
2c8335239ee310959d6f216bed798fcd3accc480
|
79bb82506f1a2e8c544f710bf4872fac46396c2a
|
refs/heads/master
| 2023-07-17T19:52:40.084224
| 2021-08-25T15:49:09
| 2021-08-25T15:49:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,130
|
r
|
output_protocol_stats.R
|
### protocolStats.R: outputs basic statistics about the used protocols in the found multiaddresses.
### The data is assumed to reside in a .csv where it is simply read and output into a LaTeX table.
source("includes.R")
######### CONSTANTS ###########
sourceFile = "plot_data/protocol_stats.csv"
########## FUNCTIONS ###########
## Write a LaTeX tabular to `path` with one row per protocol.
## `inputdt` must have three columns -- protocol name, absolute count and
## percentage -- which are renamed in place (setnames) to prot/count/perc.
createProtocolTable = function(path, inputdt) {
  setnames(inputdt, c("prot", "count", "perc"))
  fileConn = path
  ## Table header; this first cat() truncates any existing file.
  cat(c("\\begin{tabular}{| c | c | c |}\n", "\\hline\n", "Protocol & Perc. of peers & Abs. count\\\\\n", "\\hline\n"),
      file = fileConn)
  ## One row per protocol. seq_len() (unlike seq(1, nrow, by=1)) is safe
  ## for zero-row input, and TRUE is spelled out instead of reassignable T.
  for (i in seq_len(nrow(inputdt))) {
    cat(c(paste(inputdt[i]$prot, round(inputdt[i]$perc, digits = 4), inputdt[i]$count, sep = " & "), "\\\\\n", "\\hline\n"),
        append = TRUE, file = fileConn)
  }
  cat(c("\\end{tabular}\n"), append = TRUE, file = fileConn)
}
####### COMPUTATION ##############
dt = LoadDT(sourceFile, header = TRUE)
## Count occurrences of each protocol across all multiaddresses.
countDT = dt[, .(count = .N), .(protocol)]
## Percentages are relative to the number of distinct peers, not rows.
numPeers = length(unique(dt$nodeid))
countDT$percentage = countDT$count * 100 / numPeers
createProtocolTable(paste(outTabPath, "protocol_stats.tex", sep = ""), countDT)
|
f2300d31e90ceb790737a0dd64137fdfbfb61257
|
2fb4cc8514d1a472a96eec3ce9160841de177603
|
/bin/toot.R
|
63b16728808ada8ab71827b81356d177b45c260a
|
[] |
no_license
|
georoen/standbildNews
|
f36aacfd5acdb5ef9e237a02210274c0d877aa9b
|
ec351e7f09dd3ef7892b3bae27d23f4fcf9184f2
|
refs/heads/master
| 2022-10-30T23:56:26.530842
| 2022-10-11T20:01:46
| 2022-10-11T20:01:46
| 83,040,162
| 3
| 0
| null | 2018-07-16T20:51:06
| 2017-02-24T12:40:37
|
R
|
UTF-8
|
R
| false
| false
| 523
|
r
|
toot.R
|
#' Make the RaspberryPi post a status update to Mastodon (social.tchncs.de).
#' This bot allows followers to actually stay in touch with the instrument.
#' Dependency:
#'   devtools::install_github('ThomasChln/mastodon')
#'   library(mastodon)
#' Mastodon user:
#'   https://social.tchncs.de/@standbildNews
## Inputs (expected to already exist in the calling environment):
#'   msg       - the status text to post
#'   mediaPath - path of an image to attach, or NULL for a text-only post
## Init API: loads credentials; defines `token` (and presumably the
## `opt_social` switch used below) -- TODO confirm against that script.
source("extra/mastodon_credentials.R")
## Toot! Post the status, attaching media when a path was supplied.
if(opt_social){
if (is.null(mediaPath)) {
dump <- post_status(token, msg)
} else {
dump <- post_media(token, msg, file = mediaPath)
}
}
|
c035547e468ebad3d94091e2db5fdc9611bc8d18
|
3f462dbc928053f63602724aa6e1c783c0a8cc6b
|
/tests/shortTest.R
|
e8b252a4af6552b3fba132fd3379ffdfd0689ca2
|
[] |
no_license
|
wadeschulz/sciclone-meta
|
8e614e7bfe650096fe5823db3376e70429029d9c
|
0ee660f0ca6aab3c5a406020e52e08a7d42b74e9
|
refs/heads/master
| 2020-04-29T14:18:26.981794
| 2014-05-14T20:40:06
| 2014-05-14T20:40:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,851
|
r
|
shortTest.R
|
#this is a short test of the sciClone clustering pipeline:
#a single-sample run, a two-sample run, and two three-sample runs
#(one expected to fail, one expected to succeed)
library(sciClone)
#read in vaf data
v = read.table("data/vafs.dat",header=T);
#keep the first 100 sites; columns 1,2 are chr/pos, the remaining three
#columns are one tumor sample's ref/var/vaf triple
v1 = v[1:100,c(1,2,8,9,10)]
v2 = v[1:100,c(1,2,11,12,13)]
v3 = v[1:100,c(1,2,14,15,16)]
#read in regions to exclude (only used by the commented-out case below)
regions = read.table("data/exclude.chr1")
#read in copy number data, keeping chr, start, stop, copy-number columns
cn1 = read.table("data/copy_number_tum1")
cn1 = cn1[,c(1,2,3,5)]
cn2 = read.table("data/copy_number_tum2")
cn2 = cn2[,c(1,2,3,5)]
cn3 = read.table("data/copy_number_tum3")
cn3 = cn3[,c(1,2,3,5)]
#set sample names
names = c("Sample1","Sample2","Sample3")
#regions to exclude
reg1 = read.table("data/regionsToExclude")
#make an output directory, deleting old results first if they exist
suppressWarnings(dir.create("results"))
unlink("results/*", recursive=TRUE)
cat("\n")
cat("=========================================================\n")
cat("Test 1 - single sample - shortTest1\n")
cat("\n")
print("")
#run one sample
sc = sciClone(vafs=v1,
copyNumberCalls=cn1,
sampleNames=names[1],
regionsToExclude=reg1)
writeClusterTable(sc, "results/clusters1")
sc.plot1d(sc,"results/clusters1.1d.pdf")
## #run only one sample, but all sites are removed by excluded regions
## #should fail with "can't do clustering - no copy number 2 regions to operate on in sample 1"
## sciClone(vafs=v1,
## regionsToExclude=regions,
## copyNumberCalls=cn1,
## sampleNames=names,
## outputPrefix="test.results/shortTest1",
## overlayClusters=TRUE)
cat("\n")
cat("=========================================================\n")
cat("Test 2 - two samples - shortTest2\n")
cat("\n")
#run two samples
sc = sciClone(vafs=list(v1,v2),
copyNumberCalls=list(cn1,cn2),
sampleNames=names[1:2])
writeClusterTable(sc, "results/clusters2")
sc.plot1d(sc,"results/clusters2.1d.pdf")
sc.plot2d(sc,"results/clusters2.2d.pdf")
cat("\n")
cat("=========================================================\n")
cat("Test 3.0 - three samples - should fail")
cat("\n")
#run three samples; expected to fail: the third sample's copy number
#leaves no cn-neutral points shared by all three samples
sc = sciClone(vafs=list(v1,v2,v3),
copyNumberCalls=list(cn1,cn2,cn3),
sampleNames=names,
regionsToExclude=list(reg1,reg1))
if(!(is.null(sc))){
print("ERROR - this should have failed, because there are no cn-neutral points in all three samples")
}
cat("\n")
cat("=========================================================\n")
cat("Test 3.1 - three samples - should succeed")
cat("\n")
#run three samples (cn2 reused for the third sample so clustering succeeds)
sc = sciClone(vafs=list(v1,v2,v3),
copyNumberCalls=list(cn1,cn2,cn2),
sampleNames=names,
regionsToExclude=list(reg1,reg1))
writeClusterTable(sc, "results/clusters3")
sc.plot1d(sc,"results/clusters3.1d.pdf")
sc.plot2d(sc,"results/clusters3.2d.pdf")
sc.plot3d(sc, sc@sampleNames, size=700, outputFile="results/clusters3.3d.gif")
|
fb7383e4c7579be25d0daaa1846b87e45076e1c2
|
1d6fc79a7fe1b7457a061d4af23a82e809dea509
|
/cachematrix.R
|
e1ea5b5a53da1d1aff02655cdd5afdc34847c466
|
[] |
no_license
|
fsimmz23/datasciencecoursera
|
a3e4ef13dc545b2279deed0f5c0971abbaeb90ab
|
4fe22ee70858489a8cd7078440f7cc068045e848
|
refs/heads/master
| 2021-01-10T11:06:33.785603
| 2016-01-23T07:57:08
| 2016-01-23T07:57:08
| 48,659,714
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,455
|
r
|
cachematrix.R
|
## The purpose of creating these two functions is to cache the inverse of a
## matrix. This way we avoid recomputing the inverse if it is already cached;
## without caching, the inversion is usually a costly computation.
## Write a short comment describing this function
## The makeCacheMatrix:
## To be able to cache. Create a special
## matrix that will help us with this by using the
## makeCacheMatrix function. The input into this function
## is simply a variable of type matrix.
makeCacheMatrix <- function(x = matrix()) {
  ## Build a caching wrapper around a matrix. The returned object is a
  ## list of four accessor functions that share state via this function's
  ## enclosing environment:
  ##   set(y)          -- replace the stored matrix and invalidate the cache
  ##   get()           -- return the stored matrix
  ##   setinverse(inv) -- store a computed inverse
  ##   getinverse()    -- return the cached inverse (NULL until computed)

  ## Cached inverse; NULL means "not computed yet".
  cached_inverse <- NULL

  set <- function(y) {
    ## Replacing the matrix must also drop any stale inverse.
    x <<- y
    cached_inverse <<- NULL
  }

  get <- function() {
    x
  }

  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }

  getinverse <- function() {
    cached_inverse
  }

  ## Expose the four accessors as a named list.
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## cacheSolve:
## after you created the matrix, use the cacheSolve function to compute the inverse and cache the result
## If you use the cacheSolve again on the same matrix, then the pre-computed result will be retrived, hence
## avoiding any recomputation. An informative message
## will be shown in the command prompt when the pre-computed
## result is returned instead.
cacheSolve <- function(x, ...) {
  ## Return the inverse of the special "matrix" created by makeCacheMatrix.
  ## On the first call the inverse is computed with solve() and stored in
  ## the object; later calls return the cached copy (with a message)
  ## instead of recomputing it. Extra arguments are forwarded to solve().

  ## Early return when a cached inverse already exists.
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("Getting cached matrix")
    return(cached)
  }
  ## Cache miss: fetch the matrix, invert it, and store the result.
  m <- x$get()
  inverse <- solve(m, ...)
  x$setinverse(inverse)
  inverse
}
|
55080d65fe7d3d015b85adb28f58190deb158248
|
4147ae1063df8e6d3a1c42154dded3a0bb07fe03
|
/man/default_fonts.Rd
|
e60d629fa0677e4d5a1679c8f6a7fb86832a6764
|
[
"MIT"
] |
permissive
|
steveputman/gt
|
54f6b63104405cbe10376dd27b6315eb244d28db
|
ff050936ef0336ed44249e85c0e27a2f29eb5815
|
refs/heads/master
| 2022-11-14T11:45:48.098930
| 2022-10-21T05:13:58
| 2022-10-21T05:13:58
| 167,079,382
| 0
| 0
| null | 2019-01-22T22:49:02
| 2019-01-22T22:49:02
| null |
UTF-8
|
R
| false
| true
| 2,792
|
rd
|
default_fonts.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers.R
\name{default_fonts}
\alias{default_fonts}
\title{A vector of default fonts for use with \strong{gt} tables}
\usage{
default_fonts()
}
\value{
A character vector of font names.
}
\description{
The vector of fonts given by \code{default_fonts()} should be used with a \strong{gt}
table that is rendered to HTML. We can specify additional fonts to use but
this default set should be placed after that to act as fallbacks. This is
useful when specifying \code{font} values in the \code{\link[=cell_text]{cell_text()}} function (itself
used in the \code{\link[=tab_style]{tab_style()}} function). If using \code{\link[=opt_table_font]{opt_table_font()}} (which also
has a \code{font} argument) we probably don't need to specify this vector of fonts
since it is handled by its \code{add} option (which is \code{TRUE} by default).
}
\section{Examples}{
Use \code{\link{exibble}} to create a \strong{gt} table. Attempting to modify the fonts used
for the \code{time} column is much safer if \code{default_fonts()} is appended to the
end of the \code{font} listing in the \code{cell_text()} call (the \code{"Comic Sansa"} and
\code{"Menloa"} fonts don't exist, but, we'll get the first available font from
the \code{default_fonts()} set).
\if{html}{\out{<div class="sourceCode r">}}\preformatted{exibble \%>\%
dplyr::select(char, time) \%>\%
gt() \%>\%
tab_style(
style = cell_text(
font = c(
"Comic Sansa", "Menloa",
default_fonts()
)
),
locations = cells_body(columns = time)
)
}\if{html}{\out{</div>}}
\if{html}{\out{
<img src="https://raw.githubusercontent.com/rstudio/gt/master/images/man_default_fonts_1.png" alt="This image of a table was generated from the first code example in the `default_fonts()` help file." style="width:100\%;">
}}
}
\section{Function ID}{
7-28
}
\seealso{
Other helper functions:
\code{\link{adjust_luminance}()},
\code{\link{cell_borders}()},
\code{\link{cell_fill}()},
\code{\link{cell_text}()},
\code{\link{cells_body}()},
\code{\link{cells_column_labels}()},
\code{\link{cells_column_spanners}()},
\code{\link{cells_footnotes}()},
\code{\link{cells_grand_summary}()},
\code{\link{cells_row_groups}()},
\code{\link{cells_source_notes}()},
\code{\link{cells_stub_grand_summary}()},
\code{\link{cells_stub_summary}()},
\code{\link{cells_stubhead}()},
\code{\link{cells_stub}()},
\code{\link{cells_summary}()},
\code{\link{cells_title}()},
\code{\link{currency}()},
\code{\link{escape_latex}()},
\code{\link{google_font}()},
\code{\link{gt_latex_dependencies}()},
\code{\link{html}()},
\code{\link{md}()},
\code{\link{pct}()},
\code{\link{px}()},
\code{\link{random_id}()},
\code{\link{stub}()}
}
\concept{helper functions}
|
f4cfe4ba743d43bbf3037ffd3cc9673a5d3647c0
|
8d3d3cb60740ab51a318d130cc8ec21014098a76
|
/plot3.R
|
a478fbae668c5016b0218a605186ac51017dd052
|
[] |
no_license
|
ritesh256/ExData_Plotting1
|
18ad2876e613864d92674a3892f208b80c3127a6
|
1ffe406a06cecda6696a33072d0c92c9cf2efb2e
|
refs/heads/master
| 2022-05-14T18:40:49.930538
| 2022-05-05T12:32:25
| 2022-05-05T12:32:25
| 22,800,486
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 986
|
r
|
plot3.R
|
## plot3: reproduce the "Energy sub metering" line plot from the UCI
## household power consumption data for 2007-02-01 and 2007-02-02, then
## save it as plot3.png via dev.copy (so the screen plot is kept too).
## Assumes household_power_consumption.txt is in the working directory.
plot3 <- function() {
  dataread <- read.table("household_power_consumption.txt", sep=";", header=TRUE, na.strings=c("?"), colClasses="character")
  dataread$Date <- as.Date(dataread$Date, "%d/%m/%Y")
  ## Keep only the two target days.
  finaldata <- dataread[which(dataread$Date >= "2007-02-01" & dataread$Date <= "2007-02-02"),]
  ## Combine date and time into one timestamp column for the x axis.
  finaldata$Date <- strptime(paste(finaldata$Date,finaldata$Time), format = "%Y-%m-%d %H:%M:%S")
  with(finaldata,plot(finaldata$Date, as.numeric(finaldata$Sub_metering_1), type="n", xlab="", ylab="Energy sub metering"))
  with(finaldata, lines(finaldata$Date, as.numeric(finaldata$Sub_metering_1), col = "grey"))
  with(finaldata, lines(finaldata$Date, as.numeric(finaldata$Sub_metering_3), col = "blue"))
  with(finaldata, lines(finaldata$Date, as.numeric(finaldata$Sub_metering_2), col = "red"))
  ## Fixed: removed the stray empty argument (", ,") from the original
  ## legend() call and normalized "Grey" to match the lowercase used above.
  legend("topright", lty="solid", cex=0.64, col = c("grey", "blue", "red"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
  dev.copy(png, file = "plot3.png")
  dev.off()
}
|
ec62c6757880b1b8aaf4a7287df2cf08109fea0f
|
88dfe929a29807725a5f645838292c50cce6dfd2
|
/tests/testthat/test_polygon.R
|
d53a2be945d431a845d52952061bd5c96f15d59a
|
[] |
no_license
|
flooose/AlpacaforR
|
d1a5cd435fa0e93d5c7cfd06b8a3ac369e097646
|
dd6a3317fd16edfeb094d9c06b9357c2b5704101
|
refs/heads/master
| 2023-08-09T21:00:50.748078
| 2021-09-10T19:16:17
| 2021-09-10T19:16:17
| 405,643,999
| 0
| 0
| null | 2021-09-12T13:35:55
| 2021-09-12T13:05:24
| null |
UTF-8
|
R
| false
| false
| 11,225
|
r
|
test_polygon.R
|
#' @include internal.R
#' @include Polygon.R
## testthat suite for the polygon() wrapper around the Polygon.io API.
## HTTP traffic is replayed from vcr cassettes (matched on request path
## only), so the assertions below run offline against recorded responses.
vcr::vcr_configure(dir = file.path(dirname(.log_path), "polygon"))
# CHANGED 2021-03-19T17:43:02 Commented out endpoints no longer accessible to Alpaca users
vcr::use_cassette("Tickers_is_accessible_and_returns_the_appropriate_data", match_requests_on = c("path"), {
test_that("Tickers is accessible and returns the appropriate data", {
.resp <- polygon("Tickers", search = "Tesla", market = "stocks")
expect_true(any(stringr::str_detect(.resp$ticker, "TSLA")))
expect_true(any(purrr::map_lgl(.resp, ~"POSIXct" %in% class(.x))))
})
})
vcr::use_cassette("Ticker_Types", match_requests_on = c("path"), {
test_that("Ticker Types is accessible and returns appropriate data", {
.resp <- polygon("Ticker Types")
expect_type(.resp, "list")
expect_type(attr(.resp, "query"), "list")
})
})
vcr::use_cassette("Ticker_Details", match_requests_on = c("path"), {
test_that("Ticker Details is accessible and returns appropriate data", {
.resp <- polygon("Ticker Details", symbol = "AMZN")
expect_s3_class(do.call(c, .resp[, c("listdate","updated")]), "POSIXct")
expect_identical(.resp$sic, 5961L)
expect_identical(.resp$exchangeSymbol, "NGS")
expect_identical(.resp$symbol, "AMZN")
})
})
## News uses the json serializer because the recorded body is not plain text.
vcr::use_cassette("Ticker_News", match_requests_on = c("path"), serialize_with = "json", {
test_that("Ticker News is accessible and returns appropriate data", {
.resp <- polygon("Ticker News", ticker = "AAPL")
expect_s3_class(.resp, "data.frame")
expect_s3_class(.resp$published_utc, "POSIXct")
})
})
vcr::use_cassette("Markets", match_requests_on = c("path"), {
test_that("Markets is accessible and returns appropriate data", {
.resp <- polygon("Markets")
expect_true(any(stringr::str_detect(.resp$market, "STOCKS")))
expect_length(.resp, 2)
})
})
vcr::use_cassette("Locales", match_requests_on = c("path"), {
test_that("Locales is accessible and returns appropriate data", {
.resp <- polygon("Locales")
expect_identical(sum(.resp$locale %in% c("G","US")), 2L)
expect_length(.resp, 2)
})
})
vcr::use_cassette("Stock_Splits", match_requests_on = c("path"), {
test_that("Stock Splits is accessible and returns appropriate data", {
.resp <- polygon("Stock Splits", symbol = "AMD")
expect_s3_class(.resp$exDate, "Date")
expect_identical(.resp$ticker, "AMD")
expect_length(.resp, 4)
expect_type(attr(.resp, "query"), "list")
})
})
vcr::use_cassette("Stock_Dividends", match_requests_on = c("path"), {
test_that("Stock Dividends is accessible and returns appropriate data", {
.resp <- polygon("Stock Dividends", symbol = "MSFT")
expect_s3_class(.resp$exDate, "Date")
expect_identical(.resp$ticker[[1]], "MSFT")
expect_length(.resp, 6)
expect_identical(attr(.resp, "query")$status, "OK")
})
})
vcr::use_cassette("Stock_Financials", match_requests_on = c("path"), {
test_that("Stock Financials is accessible and returns appropriate data", {
.resp <- polygon("Stock Financials", symbol = "BYND")
expect_s3_class(.resp$calendarDate, "Date")
expect_s3_class(.resp$reportPeriod, "Date")
expect_s3_class(.resp$updated, "Date")
expect_identical(.resp$ticker[[1]], "BYND")
expect_length(.resp, 111)
})
})
vcr::use_cassette("Market_Status", match_requests_on = c("path"), {
test_that("Market Status is accessible and returns appropriate data", {
.resp <- polygon("Market Status")
expect_s3_class(.resp$serverTime, "POSIXct")
expect_length(.resp, 4)
## Cache the market open/closed state globally (<<-) for the snapshot
## tests further down (currently commented out), whose expectations
## depend on whether the market was open when the cassette was recorded.
.ms_open <<- .resp$market
})
})
vcr::use_cassette("Market_Holidays", match_requests_on = c("path"), {
test_that("Market Holidays is accessible and returns appropriate data", {
.resp <- polygon("Market Holidays")
expect_s3_class(.resp$date, "Date")
expect_identical(unique(.resp$status), c("closed", "early-close"))
expect_length(.resp, 6)
})
})
vcr::use_cassette("Exchanges", match_requests_on = c("path"), {
test_that("Exchanges is accessible and returns appropriate data", {
.resp <- polygon("Exchanges")
expect_true(any(stringr::str_detect(.resp$name, "NYSE America")))
expect_length(.resp, 7)
})
})
# vcr::use_cassette("Historic_Trades", match_requests_on = c("path"), {
# test_that("Historic Trades is accessible and returns appropriate data", {
# .resp <- polygon("Historic Trades", limit = 5)
# expect_s3_class(.resp$t, "POSIXct")
# expect_length(.resp, 9)
# .exp <- list(
# c = list(name = "conditions", type = "[]int"),
# I = list(name = "orig_id", type = "string"),
# e = list(name = "correction",
# type = "int"),
# x = list(name = "exchange", type = "int"),
# r = list(name = "trf_id", type = "int"),
# s = list(name = "size",
# type = "int"),
# t = list(name = "sip_timestamp", type = "int64"),
# f = list(name = "trf_timestamp", type = "int64"),
# i = list(name = "id", type = "string"),
# p = list(name = "price",
# type = "float64"),
# z = list(name = "tape", type = "int"),
# y = list(name = "participant_timestamp", type = "int64"),
# q = list(name = "sequence_number", type = "int")
# ) %>% {.[sort(names(.))]}
# expect_identical(attr(.resp, "query")$map %>% {.[sort(names(.))]}, .exp)
# })
# })
# vcr::use_cassette("Historic_Quotes", match_requests_on = c("path"), {
# test_that("Historic Quotes is accessible and returns appropriate data", {
# .resp <- polygon("Historic Quotes", ticker = "MSFT", date = "2008-04-15", limit = 5)
# expect_equal(.resp$t, structure(c(1208246852.8, 1208246875.777, 1208246877.527, 1208247302.04, 1208247302.04), class = c("POSIXct", "POSIXt")), tolerance = 1)
# expect_identical(attr(.resp,"query")$map %>% {.[sort(names(.))]},list(
# s = list(name = "bid_size", type = "int"),
# x = list(name = "bid_exchange",
# type = "int"),
# P = list(name = "ask_price", type = "float64"),
# S = list(name = "ask_size", type = "int"),
# t = list(name = "sip_timestamp",
# type = "int64"),
# q = list(name = "sequence_number", type = "int"),
# c = list(name = "conditions", type = "[]int"),
# p = list(name = "bid_price",
# type = "float64"),
# X = list(name = "ask_exchange", type = "int"),
# z = list(name = "tape", type = "int"),
# y = list(name = "participant_timestamp",
# type = "int64"),
# f = list(name = "trf_timestamp", type = "int64"),
# i = list(name = "indicators", type = "[]int")
# )%>% {.[sort(names(.))]})
# expect_length(.resp, 10)
# })
# })
# vcr::use_cassette("Last_Trade", match_requests_on = c("path"), {
# test_that("Last Trade is accessible and returns appropriate data", {
# .resp <- polygon("Last trade for a symbol", symbol = "BYND")
# expect_type(attr(.resp,"query"), "list")
# })
# })
# vcr::use_cassette("Last_Quote", match_requests_on = c("path"), {
# test_that("Last Quote is accessible and returns appropriate data", {
# .resp <- polygon("Last quote for a symbol", symbol = "BYND")
# expect_type(attr(.resp,"query"), "list")
# expect_equal(dim(.resp), c(1,7))
# })
# })
vcr::use_cassette("Daily_Open_Close", match_requests_on = c("path"), {
test_that("Daily Open/Close is accessible and returns appropriate data", {
.resp <- polygon("Daily Open/Close", symbol = "BYND", date = "2019-12-04")
expect_equal(.resp, structure(
list(
from = structure(
1575435600,
class = c("POSIXct",
"POSIXt"),
tzone = "America/New_York"
),
symbol = "BYND",
open = 76.75,
high = 77.05,
low = 73.51,
close = 73.89,
volume = 5168416L,
afterHours = 73.90,
preMarket = 76.92
),
class = c("tbl_df",
"tbl", "data.frame"),
row.names = c(NA,-1L),
query = list(status = "OK")
), ignore_attr = TRUE)
})
})
# Recorded (vcr) API test: the "Condition Mappings" endpoint should return a
# list for both tick types. Requests are matched on path only, so query
# parameters are ignored when replaying the cassette.
vcr::use_cassette("Condition_Mappings", match_requests_on = c("path"), {
  test_that("Condition Mappings is accessible and returns appropriate data", {
    .resp <- polygon("Condition Mappings", ticktype = "trades")
    expect_true(is.list(.resp))
    .resp <- polygon("Condition Mappings", ticktype = "quotes")
    expect_true(is.list(.resp))
  })
})
# vcr::use_cassette("Snapshot_All_Tickers", match_requests_on = c("path"), {
# test_that("Snapshot: All Tickers is accessible and returns appropriate data", {
# if (.ms_open) {
# .resp <- polygon("Snapshot: All Tickers")
# expect_s3_class(.resp, "data.frame")
# expect_s3_class(.resp$updated, "POSIXct")
# expect_gt(nrow(.resp), 1)
# } else {
# # if it's a non market day
# expect_warning(.resp <- polygon("Snapshot: All Tickers"), regexp = "(?:Query returned no results)|(?:returns no data when market is closed)")
# }
# expect_identical(attr(.resp, "query")$status_code, 200L)
# })
# })
# vcr::use_cassette("Snapshot_Single_Ticker", match_requests_on = c("path"), {
# test_that("Snapshot: Single Ticker is accessible and returns appropriate data", {
#
#
# if (.ms_open) {
# .resp <- polygon("Snapshot: Single Ticker", ticker = "BYND")
# expect_identical(attr(.resp, "query")$status_code, 200L)
# expect_s3_class(.resp, "tbl")
# expect_identical(unique(.resp$ticker), "BYND")
# expect_s3_class(.resp$updated, "POSIXct")
# expect_equal(nrow(.resp), 1, tolerance = 1.1)
# } else {
# # if not a day the market was open
# expect_warning({.resp <- polygon("Snapshot: Single Ticker", ticker = "BYND")}, regexp = "NotFound")
# }
# })
# })
# vcr::use_cassette("Snapshot_Gainers_Losers", match_requests_on = c("path"), {
# test_that("Snapshot: Gainers/Losers is accessible and returns appropriate data", {
# if (.ms_open) {
# .resp <- polygon("Snapshot: Gainers/Losers", direction = "gainers")
# expect_s3_class(.resp, "data.frame")
# expect_s3_class(.resp$lastQuote.t, "POSIXct")
# } else {
# expect_warning(.resp <- polygon("Snapshot: Gainers/Losers"), regexp = "(?:Query returned no results)|(?:returns no data when market is closed)")
# }
# expect_identical(attr(.resp, "query")$status_code, 200L)
# })
# })
# Recorded (vcr) API test: "Previous Close" for BYND should return status OK,
# echo the ticker in the query attribute, and come back as a 1x9 data frame
# containing at least one Date/POSIXct column.
vcr::use_cassette("Previous_Close", match_requests_on = c("path"), {
  test_that("Previous Close is accessible and returns appropriate data", {
    .resp <- polygon("Previous Close", ticker = "BYND")
    expect_identical(attr(.resp, "query")$status, "OK")
    expect_identical(attr(.resp, "query")$ticker, "BYND")
    expect_s3_class(.resp, "data.frame")
    # at least one column must carry a date/time class
    expect_true(any(purrr::map_lgl(.resp, ~any(c("POSIXct", "Date") %in% class(.x)))))
    expect_equal(dim(.resp), c(1,9))
  })
})
# Recorded (vcr) API test: "Grouped Daily (Bars)" for US stocks on a fixed
# date should return status OK as a data frame with a date/time column.
vcr::use_cassette("Grouped_Daily_Bars", match_requests_on = c("path"), {
  test_that("Grouped Daily (Bars) is accessible and returns appropriate data", {
    .resp <- polygon("Grouped Daily (Bars)", locale = "US", market = "STOCKS", date = "2020-04-16")
    expect_identical(attr(.resp, "query")$status, "OK")
    expect_s3_class(.resp, "data.frame")
    expect_true(any(purrr::map_lgl(.resp, ~any(c("POSIXct", "Date") %in% class(.x)))))
  })
})
|
ab3f87a7e5aaaa7ce1fc68c0ad7203802e08a88e
|
d11dba6dafe5f5204743e03662d8d6d216672393
|
/R/country_ranges.R
|
d2d125fcd64dbc67e33b5d9b91089032c19d9c96
|
[] |
no_license
|
ktargows/iptools
|
d7b6e260296750198444b0edde26a09df4ad3630
|
d3d85680cd85d276672a42f4bbdeb8fac3d8758e
|
refs/heads/master
| 2021-01-11T01:55:03.682784
| 2016-10-06T01:54:41
| 2016-10-06T01:54:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,809
|
r
|
country_ranges.R
|
#' Return CIDR ranges for given ISO 3166-1 alpha-2 country codes
#'
#' Query \url{http://www.iwik.org/ipcountry/} for the CIDR ranges for a given
#' set of ISO 3166-1 alpha-2 country codes and return the results in a named
#' list of character vectors.
#'
#' An internal cache of the CIDR query results are maintained as they only change
#' daily (12:00 CET). The cache can be flushed with \code{flush_country_cidrs} and
#' which codes have been cached can be retrieved with \code{cached_country_cidrs)}.
#'
#' @param countries character vector of ISO 3166-1 alpha-2 country codes (case-insensitive)
#' @return named list of character vectors of CIDR blocks
#' @note This function requires internet connectivity as it pulls daily updated data
#' from \url{http://www.iwik.org/ipcountry/}.
#' @export
#' @examples
#' rng <- country_ranges(c("PW", "UZ"))
country_ranges <- function(countries) {
  # Look up CIDR ranges for the requested ISO 3166-1 alpha-2 codes.
  # Invalid codes are dropped with a warning; valid ones are fetched (and
  # memoised) via get_country_cidrs(). Returns a named list of character
  # vectors, or an empty list when no valid code was supplied.
  ISO_3166_1 <- get("ISO_3166_1", envir = .pkgenv)
  countries <- toupper(countries)
  valid <- countries %in% ISO_3166_1$Alpha_2
  retrieve <- countries[valid]
  if (!all(valid)) {
    # Fix: message previously read "coides"; also name the offending codes
    # so the caller knows exactly what was skipped.
    warning("Skipping invalid country codes: ",
            paste(countries[!valid], collapse = ", "), call. = FALSE)
  }
  if (length(retrieve) > 0) {
    setNames(lapply(retrieve, get_country_cidrs), retrieve)
  } else {
    list()
  }
}
#' Flush the country CIDR cache
#' @export
flush_country_cidrs <- function() {
  # Empty the per-country CIDR cache so the next query re-fetches fresh data.
  assign("cached_country_cidrs", list(), envir = .pkgenv)
  invisible(NULL)
}
#' Inspect cached CIDR countries
#' @export
cached_country_cidrs <- function() {
  # Report which country codes currently have cached CIDR results.
  # Returns the codes, or (invisibly) NULL with a message when empty.
  cached <- names(.pkgenv$cached_country_cidrs)
  if (length(cached) > 0) {
    return(cached)
  }
  message("No entries in country CIDR cache")
  invisible(NULL)
}
#' Fetch all country CIDR blocks
#'
#' Iterates through all the country codes in \code{ISO_3166_1$Alpha_2} and returns
#' a named list of CIDR blocks for all those countries.
#'
#' @return named list of character vectors of CIDR blocks
#' @note This is an expensive operation as it pulls 249 files from
#' \url{http://www.iwik.org/ipcountry/}. Try not to do this too often.
#' @export
get_all_country_ranges <- function() {
  # Fetch CIDR blocks for every ISO 3166-1 alpha-2 code.
  # Expensive: one HTTP request per country (~249 files); avoid running often.
  iso_codes <- get("ISO_3166_1", envir = .pkgenv)$Alpha_2
  ranges <- lapply(iso_codes, get_country_cidrs)
  names(ranges) <- iso_codes
  ranges
}
# fetch CIDR blocks for a country. keeping this in a separate function
# so it's easier to swap out later if the site goes bad
# Memoised: results are stored in .pkgenv$cached_country_cidrs so each
# country is downloaded at most once per session (or until the cache is
# flushed). On download failure the cached value is NA, which prevents
# repeated retries against a broken URL within the same session.
get_country_cidrs <- function(cn) {
  # serve from cache when a previous fetch exists (length 0 means "not cached")
  cn_ret <- .pkgenv$cached_country_cidrs[[cn]]
  if (length(cn_ret) == 0) {
    # download the per-country file and strip "#" comment lines;
    # any network/read error collapses to NA via the tryCatch handler
    suppressWarnings(
      cn_ret <- grep("^#",
        tryCatch(
          readLines(sprintf("http://www.iwik.org/ipcountry/%s.cidr", cn), warn=FALSE),
          error=function(err) { NA }
        ),
        invert=TRUE, value=TRUE)
    )
    .pkgenv$cached_country_cidrs[[cn]] <- cn_ret
  }
  return(cn_ret)
}
|
e754f413878d76a8ba88ef77bb895031576ff43a
|
2654582b75045a9f95e7bd72c4cd200e2a1d3756
|
/analysisFunctions/accPlots.R
|
75f41d6bd3f7b188d5b400c7523a4e8cb63bcd98
|
[] |
no_license
|
nvarnett/analysisScripts
|
b84364e828e9cc0ca26713b650e3c116db7309e9
|
7a55fded5f61e3c9c842c41d969751d1a2186676
|
refs/heads/master
| 2021-01-20T10:38:47.536667
| 2013-12-11T01:55:47
| 2013-12-11T01:55:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 542
|
r
|
accPlots.R
|
# Accuracy plots: summarise and visualise response accuracy (`Corr`) from the
# `work.Acc` data frame, which must already exist in the workspace —
# presumably created by an earlier analysis script (TODO: confirm source).
library(ggplot2)
library(plyr)
# stdErr() comes from this helper file. NOTE(review): hard-coded absolute path.
source("~/Dropbox/Work/Rscripts/HelperFunctions.R")
# mean and standard error of accuracy by verb type and embedded subject
acc.summary <- ddply(work.Acc, .(VType, EmbSubj), summarize,
Mean = mean(Corr), SE = stdErr(Corr))
acc.base <- ggplot(work.Acc)
# mean accuracy by condition
acc.base + geom_bar(aes(y = Corr, x = COND),
stat = "summary", fun.y = "mean",
colour = "darkgrey", alpha = .9)
# mean accuracy by question region
acc.base + geom_bar(aes(y = Corr, x = Qregion),
stat = "summary", fun.y = "mean",
colour = "darkgrey", alpha = .9)
|
5f5d3eba9920b73288fde3a3a8e19505672ea6c9
|
49d5c04c5895cbe6c9a307939126e21a63044945
|
/Dygraph/server.R
|
755bdaa932681887ed1be420521503f53ab5e9e0
|
[] |
no_license
|
edithbird/Homework
|
d4dce490f43721f80ee94fefee43b7671e1f9903
|
4e511da8eb26e1d7edccd41a673f6011e5dd55ca
|
refs/heads/master
| 2021-01-19T22:26:12.068025
| 2017-05-31T19:43:17
| 2017-05-31T19:43:17
| 88,813,433
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,684
|
r
|
server.R
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(dplyr)
library(dygraphs)
library(forecast)
# Load monthly air-travel data and keep month + rounded miles columns.
air <- read.csv("data/CopyOfAirTravel2.csv")
# Fix: the digits argument was outside the round() call (`round(air$Air), 0`),
# which silently passed an unnamed `0` to mutate() (creating a junk column
# that select() then dropped). Refer to the column directly instead of via
# `air$Air`, which after select() only resolved through `$` partial matching
# against "Air_Miles_in_Millions".
air <- air %>%
  mutate(Air_Miles_in_Millions = round(Air, 0)) %>%
  select(Month, Air_Miles_in_Millions)
# scale down and build a monthly time series for Holt-Winters forecasting
air.ts <- ts(air$Air_Miles_in_Millions / 10000, start = c(1990, 1), end = c(2000, 12), frequency = 12)
# Define server logic required to draw a histogram
shinyServer(function(input, output) {
predicted <- reactive({
hw <- HoltWinters(air.ts)
predict(hw, n.ahead = input$months,
prediction.interval = TRUE,
level = as.numeric(input$interval))
})
output$dygraph <- renderDygraph({
dygraph(predicted(), main = "Predicted Air Miles Travelled/Month") %>%
dySeries(c("lwr", "fit", "upr"), label = "Miles") %>%
dyOptions(drawGrid = input$showgrid)
})
output$from <- renderText({
strftime(req(input$dygraph_date_window[[1]]), "%d %b %Y")
})
output$to <- renderText({
strftime(req(input$dygraph_date_window[[2]]), "%d %b %Y")
})
output$clicked <- renderText({
strftime(req(input$dygraph_click$x), "%d %b %Y")
})
output$point <- renderText({
paste0('X = ', strftime(req(input$dygraph_click$x_closest_point), "%d %b %Y"),
'; Y = ', req(input$dygraph_click$y_closest_point))
})
output$tbl <- DT::renderDataTable({
air
# output$accuracy <- renderTable({
# accuracy <- accuracy(HoltWinters(air.ts))
# accuracy
})
})
|
c1b2204093f887395531ee723fca95e111364b96
|
4fddebf5c76cfe938a356019fe08041e52699684
|
/drafts/floraison2017.R
|
124efbb107093ee17e0290f2adc72990820b854f
|
[] |
no_license
|
bastienreyne/cecidomyie
|
83b90cd43d6b233678255f6935b7b91f9e80082c
|
d32cf37e06926def4850c620601950a38114ce2a
|
refs/heads/master
| 2020-05-04T22:13:22.932630
| 2019-11-27T09:29:12
| 2019-11-27T09:29:12
| 179,503,920
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,855
|
r
|
floraison2017.R
|
## Script pour prendre les inflorescences du fichier floraison et les corriger pour les faire
## coincider avec les inflos du fichier piege.
# Packages / data ---------------------------------------------------------
library(tidyverse)
library(magrittr)
library(lubridate)
inflos_piege_ER <- read_csv2("Data/2017_B1_enh.ras.csv") %>% select(date, inflos_vivantes)
inflos_piege_PS <- read_csv2("Data/2017_B1_bache.csv") %>% select(date, inflos_vivantes)
inflos_piege_EH <- read_csv2("Data/2017_B1_enh.haut.csv") %>% select(date, inflos_vivantes)
inflos_floraison <- read_csv2("Data/raw/2017_floraison.csv")
inflos_floraison %<>% mutate_at(c("ApdateC", "ApdateM",
c(paste0("Lat",1:5,"dateC"), paste0("Lat", 1:5, "dateM"))), dmy) %>%
filter(Annee == 2017) %>%
mutate_at("Traitm", as_factor) %>%
filter(Bloc == 1)
# Mise en forme inflo floraison -------------------------------------------
inflos_apic <- inflos_floraison %>% filter(!is.na(ApdateC)) %>%
select(Traitm, ApdateC, ApdateM) %>%
rename(birth = ApdateC, death = ApdateM)
inflos_lat1 <- inflos_floraison %>% filter(!is.na(Lat1dateC)) %>%
select(Traitm, Lat1dateC, Lat1dateM) %>%
rename(birth = Lat1dateC, death = Lat1dateM)
inflos_lat2 <- inflos_floraison %>% filter(!is.na(Lat2dateC)) %>%
select(Traitm, Lat2dateC, Lat2dateM) %>%
rename(birth = Lat2dateC, death = Lat2dateM)
inflos_lat3 <- inflos_floraison %>% filter(!is.na(Lat3dateC)) %>%
select(Traitm, Lat3dateC, Lat3dateM) %>%
rename(birth = Lat3dateC, death = Lat3dateM)
inflos_lat4 <- inflos_floraison %>% filter(!is.na(Lat4dateC)) %>%
select(Traitm, Lat4dateC, Lat4dateM) %>%
rename(birth = Lat4dateC, death = Lat4dateM)
inflos_lat5 <- inflos_floraison %>% filter(!is.na(Lat5dateC)) %>%
select(Traitm, Lat5dateC, Lat5dateM) %>%
rename(birth = Lat5dateC, death = Lat5dateM)
floraison2017 <- rbind(inflos_apic, inflos_lat1, inflos_lat2,
inflos_lat3, inflos_lat4, inflos_lat5)
floraison2017_ER <- floraison2017 %>% filter(Traitm == "Sn")
floraison2017_PS <- floraison2017 %>% filter(Traitm == "B")
floraison2017_EH <- floraison2017 %>% filter(Traitm == "E")
date <- floraison2017 %$% unique(c(birth, death)) %>% na.omit
date <- min(date):max(date) %>% as_date
inflos_floraison_ER <- rep(NA, length(date))
inflos_floraison_PS <- rep(NA, length(date))
inflos_floraison_EH <- rep(NA, length(date))
alive <- rep(NA, length(date))
dead <- rep(NA, length(date))
alive_ER <- rep(NA, length(date))
alive_PS <- rep(NA, length(date))
alive_EH <- rep(NA, length(date))
# Daily counts per treatment, plus daily births/deaths. An inflorescence is
# alive on `day` if it burst on or before that day and either died strictly
# later or has no recorded death date (right-censored).
for (day in seq_along(date)) {
  # count of inflorescences alive on date[day] for one treatment's data
  alive_on <- function(df) {
    length(which(df$birth <= date[day] & df$death > date[day])) +
      length(which(df$birth <= date[day] & is.na(df$death)))
  }
  # Fix: the PS and EH counts previously added the *ER* right-censored total
  # (copy-paste error); each treatment now uses its own data frame.
  inflos_floraison_ER[day] <- alive_on(floraison2017_ER)
  inflos_floraison_PS[day] <- alive_on(floraison2017_PS)
  inflos_floraison_EH[day] <- alive_on(floraison2017_EH)
  # daily births/deaths, overall and per treatment
  alive[day] <- floraison2017 %$% which(birth == date[day]) %>% na.omit %>% length
  dead[day] <- floraison2017 %$% which(death == date[day]) %>% na.omit %>% length
  alive_ER[day] <- floraison2017_ER %$% which(birth == date[day]) %>% na.omit %>% length
  alive_PS[day] <- floraison2017_PS %$% which(birth == date[day]) %>% na.omit %>% length
  alive_EH[day] <- floraison2017_EH %$% which(birth == date[day]) %>% na.omit %>% length
}
inflos_floraison_ER <- cbind(date, inflos = inflos_floraison_ER) %>% as_tibble %>%
mutate_at("date", as_date) %>% filter(date >= "2018-07-18")
inflos_floraison_PS <- cbind(date, inflos = inflos_floraison_PS) %>% as_tibble %>%
mutate_at("date", as_date) %>% filter(date >= "2018-07-18")
inflos_floraison_EH <- cbind(date, inflos = inflos_floraison_EH) %>% as_tibble %>%
mutate_at("date", as_date) %>% filter(date >= "2018-07-18")
inflos_floraison_ER$date <- inflos_floraison_ER$date - 365
inflos_floraison_PS$date <- inflos_floraison_PS$date - 365
inflos_floraison_EH$date <- inflos_floraison_EH$date - 365
# Correction --------------------------------------------------------------
index <- inflos_floraison_ER %$% which(date == "2017-08-01" | date == "2017-09-05" | date == "2017-09-06")
ecart_ER <- (inflos_floraison_ER[50, 2] - inflos_floraison_ER[51, 2]) %>%
as.numeric() / sum(inflos_floraison_ER$inflos)
ecart_PS <- (inflos_floraison_PS[50, 2] - inflos_floraison_PS[51, 2]) %>%
as.numeric() / sum(inflos_floraison_PS$inflos)
ecart_EH <- (inflos_floraison_EH[50, 2] - inflos_floraison_EH[51, 2]) %>%
as.numeric() / sum(inflos_floraison_EH$inflos)
date2017 <- inflos_floraison_ER$date
inflos_piege_ER <- approx(inflos_piege_ER$date %>% as.numeric,
inflos_piege_ER$inflos_vivantes,
xout = date2017 %>% as.numeric)$y
inflos_piege_PS <- approx(inflos_piege_PS$date %>% as.numeric,
inflos_piege_PS$inflos_vivantes,
xout = date2017 %>% as.numeric)$y
inflos_piege_EH <- approx(inflos_piege_EH$date %>% as.numeric,
inflos_piege_EH$inflos_vivantes,
xout = date2017 %>% as.numeric)$y
inflos_target_ER <- inflos_piege_ER / sum(inflos_piege_ER)
inflos_target_PS <- inflos_piege_PS / sum(inflos_piege_PS)
inflos_target_EH <- inflos_piege_EH / sum(inflos_piege_EH)
inflos_current_ER <- inflos_floraison_ER$inflos / sum(inflos_floraison_ER$inflos)
inflos_current_PS <- inflos_floraison_PS$inflos / sum(inflos_floraison_PS$inflos)
inflos_current_EH <- inflos_floraison_EH$inflos / sum(inflos_floraison_EH$inflos)
inflos_ER <- inflos_floraison_ER$inflos
inflos_ER[16:50] <- inflos_ER[16:50] - seq(0, 16, length.out = 35)
inflos_ER <- inflos_ER / sum(inflos_ER)
## Corriger entre le 1er aout et le 5 septembre
# plot(date2017, inflos_target_ER)
# lines(date2017, inflos_ER)
#
# plot(date2017, inflos_target_PS)
# lines(date2017, inflos_current_PS)
my_rmse <- function(x, y) {
  # Root-mean-square error between two equal-length numeric vectors.
  mean_sq_err <- sum((x - y)^2) / length(x)
  sqrt(mean_sq_err)
}
# Objective for the EH treatment: x[1:50] are weights that distribute the
# observed death gap (`ecart_EH`) across the first 50 days; x[51] is a global
# scale factor. Returns the RMSE between the rescaled corrected floraison
# curve and the trap-based target curve. Reads globals: ecart_EH,
# inflos_current_EH, inflos_target_EH.
objectiveEH <- function(x) {
  poids <- x[1:50]
  morts <- poids * ecart_EH / sum(poids)
  inflos <- inflos_current_EH
  # subtract the cumulative reallocated deaths over the first 50 days
  inflos[1:50] <- inflos[1:50] - cumsum(morts)
  my_rmse(x[51] * inflos, inflos_target_EH)
}
library(mco)
resEH <- nsga2(objectiveEH, 51, 1,
lower.bounds = c(rep(0, 51)),
upper.bounds = c(rep(100, 50), 6000),
popsize = 200,
generations = 100)
my_x_EH <- resEH$par[1, 1:50]
my_alpha_EH <- resEH$par[1, 51]
deads <- my_x_EH * ecart_EH / sum(my_x_EH)
inflos_EH <- inflos_current_EH
inflos_EH[1:50] <- inflos_EH[1:50] - cumsum(deads)
inflos_EH <- inflos_EH * my_alpha_EH
plot(date2017, inflos_target_EH)
lines(date2017, inflos_EH)
lines(date2017, inflos_current_EH, col = "red")
# Objective for the ER treatment; same structure as objectiveEH but against
# the ER curves. Reads globals: ecart_ER, inflos_current_ER, inflos_target_ER.
objectiveER <- function(x) {
  poids <- x[1:50]
  morts <- poids * ecart_ER / sum(poids)
  inflos <- inflos_current_ER
  # subtract the cumulative reallocated deaths over the first 50 days
  inflos[1:50] <- inflos[1:50] - cumsum(morts)
  my_rmse(x[51] * inflos, inflos_target_ER)
}
resER <- nsga2(objectiveER, 51, 1,
lower.bounds = c(rep(0, 51)),
upper.bounds = c(rep(100, 50), 6000),
popsize = 200,
generations = 100)
# Apply the optimised ER correction: spread the death deficit over the first
# 50 days with the fitted weights, then rescale by the fitted alpha.
my_x_ER <- resER$par[1, 1:50]
my_alpha_ER <- resER$par[1, 51]
# Fix: `deads` previously used `my_x`, a variable never defined anywhere in
# this script (the parallel EH code uses my_x_EH) — it must be my_x_ER.
deads <- my_x_ER * ecart_ER / sum(my_x_ER)
inflos_ER <- inflos_current_ER
inflos_ER[1:50] <- inflos_ER[1:50] - cumsum(deads)
inflos_ER <- inflos_ER * my_alpha_ER
plot(date2017, inflos_target_ER)
lines(date2017, inflos_ER)
lines(date2017, inflos_current_ER, col = "red")
## PS
## PS
# Objective for the PS treatment; same structure as objectiveEH but against
# the PS curves. Reads globals: ecart_PS, inflos_current_PS, inflos_target_PS.
objectivePS <- function(x) {
  poids <- x[1:50]
  morts <- poids * ecart_PS / sum(poids)
  inflos <- inflos_current_PS
  # subtract the cumulative reallocated deaths over the first 50 days
  inflos[1:50] <- inflos[1:50] - cumsum(morts)
  my_rmse(x[51] * inflos, inflos_target_PS)
}
resPS <- nsga2(objectivePS, 51, 1,
lower.bounds = c(rep(0, 51)),
upper.bounds = c(rep(100, 50), 6000),
popsize = 200,
generations = 100)
# Apply the optimised PS correction, mirroring the ER/EH blocks.
my_x_PS <- resPS$par[1, 1:50]
my_alpha_PS <- resPS$par[1, 51]
# Fix: `deads` previously used the undefined `my_x`; it must be my_x_PS.
deads <- my_x_PS * ecart_PS / sum(my_x_PS)
inflos_PS <- inflos_current_PS
inflos_PS[1:50] <- inflos_PS[1:50] - cumsum(deads)
inflos_PS <- inflos_PS * my_alpha_PS
plot(date2017, inflos_target_PS)
lines(date2017, inflos_PS)
lines(date2017, inflos_current_PS, col = "red")
resultats_ER <- cbind(date = date2017,
corrected = inflos_ER,
piege = inflos_target_ER,
floraison = my_alpha_ER * inflos_current_ER) %>% as_tibble %>%
mutate_at("date", as_date) %>%
gather(corrected, piege, floraison, key = Source, value = Nombre)
resultats_PS <- cbind(date = date2017,
corrected = inflos_PS,
piege = inflos_target_PS,
floraison = my_alpha_PS * inflos_current_PS) %>% as_tibble %>%
mutate_at("date", as_date) %>%
gather(corrected, piege, floraison, key = Source, value = Nombre)
resultats_EH <- cbind(date = date2017,
corrected = inflos_EH,
piege = inflos_target_EH,
floraison = my_alpha_EH * inflos_current_EH) %>% as_tibble %>%
mutate_at("date", as_date) %>%
gather(corrected, piege, floraison, key = Source, value = Nombre)
plot_ER <- resultats_ER %>% ggplot(aes(x = date, y = Nombre, color = Source)) +
geom_point() +
geom_line()
plot_PS <- resultats_PS %>% ggplot(aes(x = date, y = Nombre, color = Source)) +
geom_point() +
geom_line()
plot_EH <- resultats_EH %>% ggplot(aes(x = date, y = Nombre, color = Source)) +
geom_point() +
geom_line()
library(gridExtra)
grid.arrange(plot_ER, plot_PS, plot_EH, nrow = 3)
resultat <- cbind(inflos_ER = inflos_ER * sum(inflos_piege_ER),
inflos_PS = inflos_PS * sum(inflos_piege_PS),
inflos_EH = inflos_EH * sum(inflos_piege_EH))
burst_ER <- (floraison2017_ER %>% arrange(birth) %>% count(birth))
burst_PS <- (floraison2017_PS %>% arrange(birth) %>% count(birth))
burst_EH <- (floraison2017_EH %>% arrange(birth) %>% count(birth))
burst_ER$birth <- burst_ER$birth - 365
burst_PS$birth <- burst_PS$birth - 365
burst_EH$birth <- burst_EH$birth - 365
burstER <- burstPS <- burstEH <- rep(0, length(date2017))
ind_ER <- which(date2017 %in% burst_ER$birth)
ind_PS <- which(date2017 %in% burst_PS$birth)
ind_EH <- which(date2017 %in% burst_EH$birth)
burstER[ind_ER] <- burst_ER$n[-(1:2)]
burstPS[ind_PS] <- burst_PS$n[-(1:3)]
burstEH[ind_EH] <- burst_EH$n[-(1:7)]
burst <- cbind(burstER, burstPS, burstEH)
|
de54b5e0c8d3484e0921d5b5ceba3519e3ec67f5
|
4140e4547307219057a6dd6319e302f44812b475
|
/database_script.R
|
86e2d0e213c15da49bbdf13ce3059d9c981d5341
|
[] |
no_license
|
waterpolymer/DatabaseProject
|
7c16f7d54cc88c4fdc38f16cbd810edc67cfe645
|
b929ca91715f9e5be6bd147b6adfc77870406b96
|
refs/heads/master
| 2020-07-15T04:27:15.759277
| 2019-09-17T20:50:20
| 2019-09-17T20:50:20
| 205,479,125
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,310
|
r
|
database_script.R
|
# Setup: pull the League of Legends challenger ladder from the Riot API and
# write it into a local MySQL database.
library("jsonlite")
library("tidyr")
#manual input of information
# SECURITY(review): database credentials are hard-coded in source; move to
# environment variables or a config file before sharing this script.
sql_user <- "root"
sql_pass <- "cat"
sql_dbname <- "cyes435"
sql_host <- "localhost"
library(RMySQL)
library(dbConnect)
library(httr)
#establish shorthand connection to my input database
con = dbConnect(RMySQL::MySQL(), user = sql_user, password = sql_pass, dbname = sql_dbname, host = sql_host)
#con = dbConnect(RMySQL::MySQL(), user = "root", password = "cat", dbname = "cyes435", host = "localhost")
#c_ stands for challenger
challenger_region <- "na1"
# SECURITY(review): API key committed in plaintext — rotate it and load from
# an environment variable instead.
manual_key <- "RGAPI-ea9dd280-3e04-456c-823d-9ab95eaf76f8"
api_end <- paste("?api_key=", manual_key, sep='')
api_start <- paste("https://", challenger_region, ".api.riotgames.com/lol", sep='')
challenger_link <- paste(api_start, "/league/v4/challengerleagues/by-queue/RANKED_SOLO_5x5", api_end, sep='')
# fetch the challenger ladder and keep name/activity/rank/queue columns
c_json = fromJSON(challenger_link, flatten = TRUE)
c_df = as.data.frame(c_json)
c_vars <- c("entries.summonerName", "entries.inactive", "entries.rank", "queue")
c_data <- c_df[c_vars]
colnames(c_data) <- c("Summoner", "Inactive", "Rank", "Queue")
# replace any previous snapshot of the challengers table
dbWriteTable(con, "challengers", c_data, overwrite = TRUE, append = FALSE)
c_names <- c_df[["entries.summonerName"]]
name_counter <- 1
for(name in c_names){
summoner_link <- paste(api_start, "/summoner/v4/summoners/by-name/", name, api_end, sep='')
summoner_link <- gsub(" ", "", summoner_link, fixed = TRUE)
sum_text <- paste("looking at player: ", name, sep='')
print(sum_text)
if(http_error(GET(summoner_link))){
stop("something is wrong with the summoner_json")
print(http_status(GET(summoner_link)))
}
s_json <- fromJSON(summoner_link, flatten = TRUE)
s_df = as.data.frame(s_json)
s_vars <- c("name", "puuid", "summonerLevel", "accountId", "id")
s_data <- s_df[s_vars]
colnames(s_data) <- c("Summoner", "puuid", "Level", "AccountID", "SummonerID")
if(name_counter == 1){
dbWriteTable(con, "players", s_data, overwrite = TRUE, append = FALSE)
}else{
dbWriteTable(con, "players", s_data, overwrite = FALSE, append = TRUE)
}
accID <- s_df[["accountId"]]
acc_link <- paste(api_start, "/match/v4/matchlists/by-account/", accID, api_end, sep='')
ml_json = fromJSON(acc_link, flatten = TRUE)
ml_df = as.data.frame(ml_json)
ml_vars <- c("matches.lane", "matches.gameId", "matches.champion", "matches.platformId", "matches.season", "matches.queue", "matches.role")
ml_data <- ml_df[ml_vars]
colnames(ml_data) <- c("Lane", "GameID", "ChampionID", "PlatformID", "Season", "Queue", "Role")
if(name_counter == 1){
dbWriteTable(con, "matches", ml_data, overwrite = TRUE, append = FALSE)
}else{
dbWriteTable(con, "matches", ml_data, overwrite = FALSE, append = TRUE)
}
match_list <- ml_df[["matches.gameId"]]
game_number <- 1
for(gameID in match_list){
match_link <- paste(api_start, "/match/v4/matches/", gameID, api_end, sep = '')
match_text <- paste("getting information from gameId: ", gameID, sep='')
print(match_text)
if(http_error(GET(match_link))){
stop("something failed in getting the match data")
print(http_status(GET(match_link)))
}
m_json = fromJSON(match_link, flatten = TRUE)
m_df = as.data.frame(m_json)
season <- m_df[["seasonId"]]
#print(as.character(season[[1]]))
if(!(identical(as.character(season[[1]]), "13"))){
#print("AM LESS THAN 13")
break
}
gameMode <- m_df[["gameMode"]]
if(identical(as.character(gameMode[[1]]), "CLASSIC"))
{
m_var = c("gameId", "participantIdentities.player.accountId", "participantIdentities.participantId", "participants.championId", "participants.stats.visionScore", "participants.stats.longestTimeSpentLiving", "participants.stats.kills", "participants.stats.wardsKilled", "teams.dragonKills", "participants.stats.assists", "participants.timeline.role","participants.timeline.lane")
m_data <- m_df[m_var]
colnames(m_data) <- c("GameID", "AccountID", "ParticipantID", "ChampionID", "Vision_Score", "Longest_Time_Spent_Alive", "Kills", "Wards_Killed", "Dragons_Killed", "Assists", "Role", "Lane")#, "cs_Diff_Per_Min", "gold_Diff_Per_Min", "xp_Diff_Per_Min")
if(name_counter == 1){
dbWriteTable(con, "match_information", m_data, overwrite = TRUE, append = FALSE)
}else{
dbWriteTable(con, "match_information", m_data, overwrite = FALSE, append = TRUE)
}
}
#only checks last 10 games played by each challenger
if(game_number >= 3){
break
}
game_number <- game_number + 1
Sys.sleep(3)
}
name_counter <- name_counter + 1
if(name_counter >= 10){
break
}
}
#last 3 columns have 2 children, it complicates selecting specific columns
#dbGetQuery(con, "drop table if exists c_averages")
#dbGetQuery(con, "create table c_averages (Vision_Score int, Longest_Time_Spent_Alive int, Kills int, Wards_Killed int, Dragons_Killed int, Assists int)")
#dbGetQuery(con, "insert into c_averages select avg(Vision_Score),avg(Longest_Time_Spent_Alive), avg(Kills), avg(Wards_Killed), avg(Dragons_Killed), avg(Assists) from match_information;")
|
4cd90c32861a0d7e15f6b694371394cf126221b6
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/SyNet/R/mst.r
|
9d61ac05b05d884900d6b4768a4b564e1271c791
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,268
|
r
|
mst.r
|
mst <- function(x) {
  # Minimum spanning tree of a symmetric distance matrix (Kruskal's algorithm).
  # Returns: path  - total edge weight of the tree,
  #          wght  - per-vertex mean incident-edge weight, normalised to sum 1,
  #          xy0/xy1 - endpoint indices of each tree edge.
  n <- nrow(x)
  if (n == 1) {
    return(list(path = 0, wght = 1, xy0 = 1, xy1 = 1))
  }
  # rank the lower-triangle distances from smallest to largest
  edge_rank <- order(x[lower.tri(x)])
  # decode each linear lower-triangle position back into (row, column)
  col_id <- ceiling(-0.5 + n - sqrt(0.25 + n*(n - 1) - 2*edge_rank))
  row_id <- n*(1 - col_id) + 0.5*col_id*(col_id + 1) + edge_rank
  comp <- vector("integer", n)    # component label per vertex (union-find)
  in_tree <- vector("logical", n) # TRUE once a vertex joined the tree
  n_comp <- 0
  total <- 0
  n_edges <- 0
  k <- 0
  from <- to <- c()
  weight_sum <- edge_count <- array(0, n)
  while (n_edges < (n - 1)) {
    k <- k + 1
    a <- col_id[k]
    b <- row_id[k]
    membership <- 2*in_tree[a] + in_tree[b]
    # both endpoints already in the same component: edge would close a cycle
    if (membership == 3 & comp[a] == comp[b]) next
    if (membership == 0) {
      # neither endpoint seen yet: start a brand-new component
      in_tree[c(a, b)] <- TRUE
      n_comp <- n_comp + 1
      comp[c(a, b)] <- n_comp
    }
    if (membership == 1) {
      in_tree[a] <- TRUE
      comp[a] <- comp[b]
    }
    if (membership == 2) {
      in_tree[b] <- TRUE
      comp[b] <- comp[a]
    }
    # merge the two components under a single label
    if (comp[a] != comp[b]) comp[comp == comp[b]] <- comp[a]
    total <- total + x[b, a]
    n_edges <- n_edges + 1
    weight_sum[c(b, a)] <- weight_sum[c(b, a)] + x[b, a]
    edge_count[c(b, a)] <- edge_count[c(b, a)] + 1
    from <- c(from, b)
    to <- c(to, a)
  }
  mean_weight <- weight_sum/edge_count
  return(list(path = total, wght = mean_weight/sum(mean_weight), xy0 = from, xy1 = to))
}
|
08464b1ec407c844285613524af5bc4b3ce24d6a
|
edf8e116805d1160e5e1a87640da66c16a7c9611
|
/Etezova_Liuaza/1/task_05.R
|
28e383cdb630869c31b4c67b88006a882d2b20c0
|
[] |
no_license
|
LabraMabra/M2019_4135
|
65645def3493078e485cfb7a71608ec62c18dc1e
|
34b74668976cee2770fa4db1d8e3a4d09cd6744e
|
refs/heads/master
| 2020-08-10T21:38:13.173583
| 2020-06-13T19:20:14
| 2020-06-13T19:20:14
| 214,425,223
| 2
| 4
| null | 2019-10-31T19:58:36
| 2019-10-11T12:01:51
|
HTML
|
UTF-8
|
R
| false
| false
| 716
|
r
|
task_05.R
|
calculation <- function(column, calculation_function) {
  # Numeric columns: apply the supplied summary function (do.call also
  # accepts the function given by name). Non-numeric columns: tabulate
  # value frequencies instead.
  if (!is.numeric(column)) {
    return(table(column))
  }
  do.call(what = calculation_function, args = list(column))
}
# Subset `data_frame` to the given rows and columns, split the subset by the
# `splitter` column, and apply calculation() to every column within each
# group. Returns list(subset, per-group per-column results).
# NOTE(review): requires dplyr/magrittr to be attached; `splitter` refers to
# a column *within the selected subset* (name or position).
data_processing <- function(data_frame, row_selection, column_selection, splitter, calculation_function) {
  data_frame %>%
    slice(row_selection) %>%
    select(column_selection) ->
    subset
  # drop = T removes factor levels absent from the subset, so empty groups
  # produce no list entries
  subset %>%
    split(as.factor(subset[,splitter]), drop = T) %>%
    lapply(function(category) {
      lapply(category, calculation, calculation_function)
    }) ->
    calculations
  return(list(subset, calculations))
}
# Demo on built-in data sets: per-species SD for iris columns 3-5, and
# per-group mean for the first 20 rows of PlantGrowth.
print(data_processing(iris, 1:100, 3:5, 'Species', sd))
print(data_processing(PlantGrowth, 1:20, c(1, 2), 'group', mean))
|
2de831b61ba15796cac9124f47343a7654c56b7c
|
f252c7af74b27070ec28fc6120a72273fce2c39b
|
/Log/2015August.R
|
a6518ea65184ee98d50325ee437c45bc0f29e808
|
[] |
no_license
|
sadapple/Research
|
bf142ff98a30c28713281aed6a870e19045cb11b
|
a07b5894c0c6be7e5d980ef79730fd8442046a15
|
refs/heads/master
| 2016-09-05T11:36:56.473431
| 2016-02-01T02:22:41
| 2016-02-01T02:22:41
| 28,351,519
| 0
| 0
| null | 2016-01-29T21:25:15
| 2014-12-22T18:05:05
|
R
|
UTF-8
|
R
| false
| false
| 282
|
r
|
2015August.R
|
## install packages for R 3.2.2 on Windows 10
# graphics = FALSE selects the CRAN mirror via a text menu; without choosing
# a mirror first, the install.packages() calls below could not resolve one
# on this setup.
chooseCRANmirror(FALSE) # this is crucial!!!!
install.packages("dplyr")
install.packages("quadprog")
install.packages("reshape2")
install.packages("ggplot2")
install.packages("lars")
install.packages("glmnet")
install.packages("tree")
|
8a041c2eec1646d9ec8864c631ce952a87d5575c
|
c3c9324afc6873b0de45a3cffcdfcdb884163288
|
/language/nlp/grams_fiddle/nlp.r
|
3f6dfc059386c2998cf5269c3f004710ac5eb1ec
|
[] |
no_license
|
lefft/boosh
|
273c44b04f65f64937fc319d5f4542a0b79daf4a
|
e4c0665ab1db9c7b47ce4085bf070fed9d8f37ea
|
refs/heads/master
| 2020-03-28T00:13:11.754076
| 2018-01-04T21:16:31
| 2018-01-04T21:16:31
| 64,482,444
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,183
|
r
|
nlp.r
|
### setup ---------------------------------------------------------------------
# load dependencies
# NOTE(review): lefftpack is a personal helper package; quiet_attach()
# presumably attaches each listed package without startup messages — confirm
# it is installed before running.
lefftpack::quiet_attach(c(
  "dplyr","magrittr","knitr","tidyr","tidytext","gutenbergr",
  "ggplot2","igraph","ggraph","grid"
))
# load functions (stopze(), bigram_plot(), ... used below are defined here)
source("nlp-foncs.r")
# fig width
opts_chunk$set(fig.width=12)
# get stopwords [this matters quite a bit, as does where u remove them]
# "the" is deliberately kept out of the stop list
stops <- stopze(stop_list="una_stops"); stops <- stops[stops!="the"]
### unabomber manifesto -------------------------------------------------------
# location of text (for quicker loading can grab local "kaczynski1995.txt")
link <- "http://lefft.xyz/stuff/posts/btc/input/kaczynski1995.txt"
# get the manifesto as-is -- `una_raw`
una_raw <- readLines(link)
# and get it as a single-column df (for dplyr compatibility)
una_raw_df <- data.frame(text=una_raw, stringsAsFactors=FALSE)
# now tokenize it and put it into a df for easier handling
una_words <- data.frame(
# tokenize by " " for words, and split compounds by "-"
word=unlist(strsplit(una_raw, split=" |-")),
stringsAsFactors=FALSE
)
# clean up the data a bit
# (assumes `una_words`, a data frame with a `word` column, was built by the
# tokenization step above; all regex cleanup happens before lowercasing)
una_words <- una_words %>%
# index each word
mutate(index = seq(along=word)) %>%
# reorder cols
select(index, word) %>%
# delete quote marks (that i put in manually)
mutate(word = gsub("LQUOTE|RQUOTE", "", word)) %>%
# remove all other punctuation
mutate(word = gsub("[[:punct:]]", "", word)) %>%
# get rid of tabs, which are somehow still in there...
mutate(word = gsub("\t", "", word)) %>%
# filter out chapter headings
filter(!grepl("^\\d\\.$", word)) %>%
# filter out numeric stuff
filter(!grepl("^\\d+$", word)) %>%
# remove anything that became empty as a result
filter(word!="") %>%
# lowercase everything
mutate(word = tolower(word))
# peek at the cleaned tokens
kable(head(una_words))
# 20th is 'society', first open class word
sort(table(una_words$word), decreasing=TRUE)[1:20]
# get list of unique words, assign grammatical categories later
# (category/features columns are placeholders, to be filled in by hand)
una_unique_words <- data.frame(
word = unique(una_words$word),
category = rep(NA, times=length(unique(una_words$word))),
features = rep("f1|f2|...|fn", times=length(unique(una_words$word)))
)
# [NOTE: will have to duplicate some if theyre ambiguous]
head(una_unique_words)
# get frequencies
una_freq <- una_words %>% group_by(word) %>% summarize(count = n()) %>%
arrange(desc(count))
# side-by-side: top words with vs. without stopwords
# (`stops` is a stopword vector defined elsewhere in this project)
kable(cbind(head(una_freq), head(una_freq[!una_freq$word %in% stops, ])))
# create bigrams by lagging the text column
# (each word is paired with its successor via dplyr::lead)
una_bg <- una_words %>%
rename(word1 = word) %>%
mutate(word2 = lead(word1)) %>%
# toss the final element, since it's not a bigram
filter(!is.na(word2)) %>%
# also get a column with the full bigrams
mutate(bigram = paste(word1, word2, sep=" ")) %>%
# arrange them nicely
select(index, bigram, word1, word2)
kable(head(una_bg))
# see the most common bigrams after removing stops
# and compare that to the default output from the tt:: book
bg_counts <- una_bg %>%
# remove stops later, as needed
# filter(!word1 %in% stops) %>%
# filter(!word2 %in% stops) %>%
group_by(bigram) %>%
summarize(count = n()) %>%
separate(col=bigram, into=c("w1","w2"), sep=" ", remove=TRUE) %>%
arrange(desc(count)) %>% data.frame()
kable(head(bg_counts, 3))
kable(head(bg_counts %>% filter(!w1 %in% stops, !w2 %in% stops), 3))
# the plot w stopwords
bigram_plot(bg_counts, top_n=50, remove_stops=FALSE)
# and plot w/o stopwords
# (`bigram_plot` and `stopze` are project helpers defined elsewhere)
bigram_plot(bg_counts, top_n=50, remove_stops=TRUE, stops=stopze("una"))
# same as mine above except idk what's going on under hood here...
# unnest_tokens(una_raw_df, word, text, token="ngrams", n=2)
### the bible (kjv) -----------------------------------------------------------
# to acquire data, dl and then load from local copy:
# bible <- gutenberg_download(10); write.csv(bible, "gutenberg-kjv-bib.csv")
# NOTE(review): absolute, machine-specific path -- will break on other machines
bible <- read.csv("/Users/timothyleffel/Google Drive/sandboxxxe/boosh_repo/oneoffs/gutenberg-kjv-bib.csv", stringsAsFactors=FALSE)
# tokenize to one word per row, stripping digits/punctuation/spaces
bible_words <- bible %>% select(text) %>%
unnest_tokens(word, text, token="words") %>%
mutate(word = gsub("\\d|[[:punct:]]| ", "", word)) %>%
filter(word != "")
# could set tolower=F in unnest, can loookit him/Him etc.
# mutate(word = ifelse(word %in% c("His","Him","He")))
# word frequencies
bible_freq <-
bible_words %>% group_by(word) %>% summarize(count=n()) %>%
arrange(desc(count))
# illustration that order of operations matters v much!
# if we remove digits + punct first, then non-adjacent stuff gets counted
# as a bigram!!!
# so: tokenize into bigrams FIRST, then clean each bigram and keep only
# well-formed "word word" pairs
bible_bg <- bible %>%
unnest_tokens(bigram, text, token="ngrams", n=2) %>%
mutate(bigram = gsub("\\d|[[:punct:]]", "", bigram)) %>%
filter(grepl("^[a-zA-Z]+ [a-zA-Z]+$", bigram)) %>%
select(-gutenberg_id) %>% group_by(bigram) %>% summarize(count=n()) %>%
separate(bigram, into=c("w1","w2"), sep=" ", remove=TRUE)
# make a plot, incl stopwords
bigram_plot(bible_bg, top_n=50, remove_stops=FALSE)
# the plot w/o stopwords
bigram_plot(bible_bg, top_n=50, remove_stops=TRUE, stops=stopze("bible"))
### tf-idf --------------------------------------------------------------------
# just some example stuff for tf-idf
# (`boosh_text_ex` and `tf_idf` are project helpers defined elsewhere)
boosh <- boosh_text_ex()
# overall term counts across the toy documents
table(unname(unlist(sapply(boosh, function(x) strsplit(x, split=" ")))))
# tf-idf of "this" in every document at once...
sapply(boosh, function(x) tf_idf(doc=x, docs=boosh, term="this"))
# ...and for two individual documents
tf_idf(doc=boosh[1], docs=boosh, term="this")
tf_idf(doc=boosh[3], docs=boosh, term="this")
### pos tagging ---------------------------------------------------------------
pos <- tidytext::parts_of_speech %>%
# just get the alphabetically first tag for each word (so not to duplicate)
# NOTE(review): max() on a character vector returns the alphabetically LAST
# tag, not the first -- the comment above and the code disagree
group_by(word) %>% summarize(pos = max(pos))
# total token counts per part-of-speech tag; "a" is overridden to "INDEF"
una_freq %>% left_join(pos, by="word") %>%
mutate(pos = ifelse(word=="a", "INDEF", pos)) %>%
group_by(pos) %>% summarize(total_count = sum(count)) %>%
ggplot(aes(x=pos, y=total_count)) + geom_bar(stat="identity") +
coord_flip()
### notes ---------------------------------------------------------------------
# note that this one counts there being one more bg for 'industrial society'...
# tt_count_2grams(una_raw_df, stops=stops) %>% arrange(desc(n)) %>% head(n=5)
|
e076e1caa2de5b61614b60bfb7f0e26519132453
|
cc9b594ac83d5a138bb56fea0d5c8e1af898260b
|
/binomial/man/bin_variable.Rd
|
2a141081e45c8dbc4167136b79a66036cb98dd17
|
[] |
no_license
|
stat133-sp19/hw-stat133-D1792
|
fcc078dff198279c59ebd8e6e24f849e421ba92a
|
0c2b061ed7a5cf8470afe97c87ecfa2f18dd898c
|
refs/heads/master
| 2020-05-05T05:25:13.176813
| 2019-05-02T05:49:35
| 2019-05-02T05:49:35
| 179,751,079
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 477
|
rd
|
bin_variable.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/binomial.R
\name{bin_variable}
\alias{bin_variable}
\title{bin_variable}
\usage{
bin_variable(trials, prob)
}
\arguments{
\item{trials}{number of trials}
\item{prob}{probability of success}
}
\value{
a list object of class binvar
}
\description{
make a list of class binvar with number of trials and probability of success
}
\examples{
bin_variable(trials = 10, prob = 0.3)
}
|
25c13e4f24e2581c62ac272407878f965e519319
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/geophys/examples/get.heat2.Rd.R
|
9a10deab07090a89c82ee41b9cecfa17f9b3263f
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 546
|
r
|
get.heat2.Rd.R
|
library(geophys)
### Name: get.heat2
### Title: Heat Equation Solution
### Aliases: get.heat2 get.heat
### Keywords: misc
### ** Examples
# physical constants for the example
# NOTE(review): presumably k is a thermal diffusivity, dt a time step in
# seconds, and x depths in metres -- confirm against ?get.heat2
k = 1*10^(-6)
dt = 3600
# dz is defined but never used below
dz = 20*10^(-2)
T0=25
T1 = 1200
x = seq(from=0, to=80, by=0.5)/100
# initial evaluation only used to set up the (empty) plotting frame
i = 1
Tx = get.heat2(x, T1-T0, k, i*dt)
# type='n': draw the frame only; the profiles are added in the loop below
plot(Tx, max(x)-x, type='n', xlim=c(700, 1200) , axes=FALSE, xlab="Temp", ylab="Depth, cm")
axis(3)
# depth axis labelled in cm, increasing downwards
axis(2, at=pretty(x), labels=100*(max(x)-pretty(x)))
# one dashed temperature profile per elapsed time step
for(j in 1:5)
{
Tx = get.heat2(x, T1-T0, k, j*dt)
# shift by T0 (get.heat2 was called with the difference T1-T0)
Tx = Tx+T0
lines(Tx, max(x)-x, lty=2, col=j)
}
|
dcd9e661e8a676ee1d623c4beaece08b69c94a53
|
08bfe20e94ba9e8f8bddc9cb0ddf9129afaac918
|
/Demonstrations/UserExperimentDefinition.R
|
344ca20518e5d53273885d243bfb5efb376c6932
|
[] |
no_license
|
BayesExeter/ExeterUQ
|
e0c1095bc4f818a784a93c5605158a1bbb2c90a1
|
70a0222266324d442a87d3feb84a5dd5000a7e4a
|
refs/heads/master
| 2021-07-03T16:48:14.995097
| 2019-11-06T12:20:28
| 2019-11-06T12:20:28
| 196,030,557
| 4
| 0
| null | 2019-11-06T12:20:29
| 2019-07-09T14:57:42
|
R
|
UTF-8
|
R
| false
| false
| 535
|
r
|
UserExperimentDefinition.R
|
# Input file to construct a GP emulator and perform HM --------------------
# (global settings presumably read by the history-matching driver scripts --
# confirm which script sources this file)
experiment_name = "lmdzAWave"
observation_name = "lmdzLES"
# Define the Wave number of iterative refocussing
WAVEN = 1
# Define the value of a threshold
cutoff = 3
# Define a parameter that determines your implausibility measure
tau = 0
# Define the number of points in input space at which you are planning to
# compute implausibility
sample_size = 10000
# Define your variance of model error
Disc = c(0)
# Define a vector of metrics
metric = c("SCMdata")
|
3501a4eee892229dbf1fc96c44805f9adb30884d
|
af11fe3ff3fec9f631df5d1bd10cd6b8dae32c89
|
/simple network.R
|
beb7dd8ff30024de63fca95ddd0dc8a1b211f164
|
[] |
no_license
|
y1220/R-practice
|
fc483bef6831fe37c7b22d5c10babaf53ae31772
|
b2fc05202b04e39c5b33b2d0bfa08b947fa0d605
|
refs/heads/main
| 2023-08-17T13:20:45.450404
| 2021-10-10T22:26:57
| 2021-10-10T22:26:57
| 397,967,259
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 434
|
r
|
simple network.R
|
# Interactive network graph example
# Libraries
library(igraph)
library(networkD3)

# Edge list for the graph: one row per directed edge (from -> to).
# FIX: the original used the deprecated tibble/dplyr `data_frame()`, which is
# not available here because neither package is loaded -- base `data.frame()`
# builds the same two-column edge list.  The object is named `links` rather
# than `data` to avoid masking the base function utils::data().
links <- data.frame(
  from = c("A", "A", "B", "D", "C", "D", "E", "B", "C", "D", "K", "A", "M"),
  to   = c("B", "E", "F", "A", "C", "A", "B", "Z", "A", "C", "A", "B", "K"),
  stringsAsFactors = FALSE
)

# Plot: simpleNetwork() renders a force-directed graph from a 2-column edge list
p <- simpleNetwork(links, height="100px", width="100px")
p

# save the widget
# library(htmlwidgets)
# saveWidget(p, file=paste0( getwd(), "/HtmlWidget/networkInteractive1.html"))
|
7a81f7fb058434cd3b52e2663bcc7c1c89920924
|
546df55367a9812ec69c3ae2ef00eab564fda5c4
|
/Translate.R
|
2cdc39a97467f1d4691d812c68ed93c0f3f53825
|
[] |
no_license
|
QZeegers/KPN
|
fa03986dd339da52ab3f81eab697218013f9afa4
|
899181dd41a03fba1b670c27a602b6a1b815e5e9
|
refs/heads/main
| 2023-02-17T07:46:59.814809
| 2021-01-20T21:32:49
| 2021-01-20T21:32:49
| 331,274,653
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,968
|
r
|
Translate.R
|
library(readr)
# read the raw KPN contacts export
# NOTE(review): path is relative to the user's Downloads folder -- adjust locally
dataset1_kpn_contacts <- read_csv("Downloads/Case seminar 3/dataset1_kpn_contacts.csv")
View(dataset1_kpn_contacts)
library(tidyverse)
dataset1_kpn_contacts<- as.data.frame(dataset1_kpn_contacts)
#Columns
# translate the Dutch column names to English
dataset1_kpn_contacts<- dataset1_kpn_contacts %>%
rename(y_Purchase = y_aankooporder,
y_order_kpneen_fixed = y_order_kpneen_vast,
y_order_kpneen_mobile = y_order_kpneen_mobiel,
Number_Employees_Factor = Klasse_Aantal_Medewerkers,
Number_Locations_Parent = Klasse_Aantal_Locaties_Concern,
Legal_Form = Klasse_Rechtsvorm,
Number_Employees = Aantal_Medewerkers,
Behavioral_Description = Bedrijfsgedragstype,
Industry_Segment = Omschrijving_Segment,
Industry_Segment_Parent = Omschrijving_Segment_Concern,
Work_Type = Werk_Type,
Think_or_Do = Werk_Denken_Doen,
Revenue_Factor = Klasse_Omzet
)
#Factor
# colsd: outcome/id columns; colsc: categorical descriptor columns --
# both sets are converted to factors below
colsd<- c("y_Purchase", "y_order_kpneen_fixed", "y_order_kpneen_mobile", "customer_id", "y_positive_campaign_result", "y_positive_response_tm",
"y_click", "y_click_mopinion", "y_churn")
colsc<-c("Number_Employees_Factor",
"Number_Locations_Parent", "Legal_Form", "Behavioral_Description", "Industry_Segment",
"Industry_Segment_Parent", "Work_Type", "Think_or_Do", "Revenue_Factor", "campaign_code",
"campaign_name","campaign_channel", "campaign_status", "campaign_train",
"train_channel_first", "train_channel_last", "Treatment_Code", "FK", "XSellTarget")
dataset1_kpn_contacts[colsd] <- lapply(dataset1_kpn_contacts[colsd], factor)
dataset1_kpn_contacts[colsc] <- lapply(dataset1_kpn_contacts[colsc], factor)
#Levels
# NOTE(review): attaching plyr after dplyr masks several dplyr verbs
# (rename, summarise, ...); plyr is only needed here for mapvalues()
library(plyr)
# translate the Dutch factor levels to English, one column at a time
dataset1_kpn_contacts$Behavioral_Description<-mapvalues(dataset1_kpn_contacts$Behavioral_Description,
from = c("Dagelijkse Kantoorwerkers", "Faciliterende beheerders",
"Mobiele handelaren","Onbekend","Regionale uitvoerders","Verstrekkende aanbieders","Verzorgende specialisten"),
to = c("Daily Office Workers", "Facilitating Administrators",
"Mobile Traders", "Unknown", "Regional Executives", "Suppliers",
"Caring Specialists")) # "creerende arbeiders" = manual workers, "financiele regelaars" = financial fixers
dataset1_kpn_contacts$Work_Type<- mapvalues(dataset1_kpn_contacts$Work_Type,
from = c( "Advies","Agrarisch","Hand","Horeca","Ontwerp","Op pad","Overig","Uitvoering","Winkel"),
to = c("Consultancy", "Agricultural", "Manual", "HotelRestauranCafe", "Design", "On the Road",
"Other", "Executing/performing", "Store"))
dataset1_kpn_contacts$Think_or_Do<- mapvalues(dataset1_kpn_contacts$Think_or_Do,
from =c("Denken","Doen"), to = c("Think", "Do"))
dataset1_kpn_contacts$Number_Employees_Factor<- mapvalues(dataset1_kpn_contacts$Number_Employees_Factor,
from = c("0 medewerkers","1 medewerker","1.000 of meer medewerkers", "10 t/m 19 medewerkers" ,
"100 t/m 199 medewerkers","2 t/m 4 medewerkers","20 t/m 49 medewerkers","200 t/m 499 medewerkers",
"5 t/m 9 medewerkers","50 t/m 99 medewerkers","500 t/m 749 medewerkers","750 t/m 999 medewerkers",
"Onbekend"),
to = c("0", "1", "1000+", "10-19", "100-199", "2-4", "20-49", "200-499", "5-9", "50-99", "500-749",
"750-999", "Unknown"))
# re-level as an ordered set of size classes
# NOTE(review): this assumes Number_Employees already holds these class labels;
# if it holds raw employee counts, every value becomes NA -- verify on the data
dataset1_kpn_contacts$Number_Employees<- factor(dataset1_kpn_contacts$Number_Employees, levels = c("Unknown","0", "1","2-4","5-9","10-19", "20-49","50-99","100-199", "200-499",
"500-749", "750-999", "1000+")) #remove Unknown
dataset1_kpn_contacts$Number_Locations_Parent<- mapvalues(dataset1_kpn_contacts$Number_Locations_Parent,
from = c("1 (alle instellingen concern op 1 locatie)","1 (zelfstandige instelling)","11 t/m 25 locaties",
"2 locaties","26 of meer locaties","3 t/m 5 locaties",
"6 t/m 10 locaties"),
to = c("1 Parent/Concern", "1 Independent Institution","11-25", "2", "26+", "3-5",
"6-10"))
|
50de763ec1cee547be81b9c9941e501181f735a5
|
137222a21c6213ed0105be73269fa3fea832c60b
|
/Classification/PerfForClassification.R
|
793ae96b17e5cc2117a2a9ac5198ccd12c54d87d
|
[] |
no_license
|
mdmeschi72/MachineLearningWithR
|
f60f9361e69385adbbbe45403735821befaa7eed
|
41cfe8cce1edde4092b7424559e32084e77a7bb0
|
refs/heads/master
| 2020-03-19T16:52:04.492609
| 2018-07-04T11:19:24
| 2018-07-04T11:19:24
| 136,733,847
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,320
|
r
|
PerfForClassification.R
|
## classification applications
## classification with kNN
## the class package provides knn(); ROCR provides ROC/AUC utilities
## NOTE(review): this script assumes `train` and `test` data frames with a
## dd.test_diabet response column already exist in the workspace
library(class)
library(ROCR)
# k-nearest-neighbours prediction with k = 5
knn_predict <- knn(train = train, test = test, cl = train$dd.test_diabet, k=5)
### classification with logistic regression
model <- glm(dd.test_diabet~., family = binomial(link='logit'), data=train)
glm_predict <- predict(model, newdata=test[,1:8],type='response')
# threshold posterior probabilities at 0.5
# NOTE(review): building the ROC from thresholded 0/1 labels (rather than the
# raw probabilities) produces a degenerate, single-point ROC curve
fixed_glm_predict <- ifelse(glm_predict > 0.5, 1, 0)
glm_pr <- prediction(fixed_glm_predict, test$dd.test_diabet)
### classification with classification tree ##
# NOTE(review): ctree() comes from the party/partykit package, which is not
# loaded in this script -- add library(party) (or partykit) for this to run
model <- ctree(dd.test_diabet ~., data=train)
cftree_predict <- predict(model, newdata= test[,1:8], type="response")
fixed_cftree_predict <- ifelse(cftree_predict > 0.5, 1, 0)
cftree_pr <- prediction(fixed_cftree_predict, test$dd.test_diabet)
## comparing results #####
# create confusion matrix
table(knn_predict, test$dd.test_diabet)
table(fixed_cftree_predict, test$dd.test_diabet)
# draw ROC curve
glm_prf <- performance(glm_pr, measure = "tpr", x.measure = "fpr")
plot(glm_prf)
# area under the curve for the logistic model
auc <- performance(glm_pr, measure = "auc")
auc <- auc@y.values[[1]]
auc
cftree_prf <- performance(cftree_pr, measure = "tpr", x.measure = "fpr")
plot(cftree_prf)
# area under the curve for the tree model
auc <- performance(cftree_pr, measure = "auc")
auc <- auc@y.values[[1]]
auc
|
826029510c1535b86c5cae21b6d145cd21e7bd32
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/Luminescence/tests/test_calc_WodaFuchs2008.R
|
190d4c0f9b72fff6cc4c63a97659ea77d33a59c9
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 541
|
r
|
test_calc_WodaFuchs2008.R
|
context("test_calc_WodaFuchs2008")
test_that("Test general functionality", {
testthat::skip_on_cran()
##load example data
## read example data set
data(ExampleData.DeValues, envir = environment())
##test arguments
# default arguments: must return an RLum.Results object
expect_is(calc_WodaFuchs2008(data = ExampleData.DeValues$CA1), "RLum.Results")
##test arguments
# plotting disabled: return class must be unchanged
expect_is(calc_WodaFuchs2008(data = ExampleData.DeValues$CA1, plot = FALSE), "RLum.Results")
##test arguments
# explicit histogram break count: return class must be unchanged
expect_is(calc_WodaFuchs2008(data = ExampleData.DeValues$CA1, breaks = 20), "RLum.Results")
})
|
134209086e01e9be5a288a25c86674847cb84283
|
2a7dcccef588ad76dd66df3b74f5e495bdda174d
|
/R/fit_mpm.R
|
9d42ad9b6ade140c238778e1df800c2372e973a1
|
[] |
no_license
|
cran/foieGras
|
6181dc2dbfd10c58cde8e57c598a54538470f117
|
d2779676b4c929b47c0f2c684bfe3cbff711fe4f
|
refs/heads/master
| 2021-07-07T22:06:09.514290
| 2021-04-26T21:10:07
| 2021-04-26T21:10:07
| 236,599,322
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,967
|
r
|
fit_mpm.R
|
##' @title fit a a Move Persistence Model (mpm)
##' @description fit a random walk with time-varying move persistence to
##' temporally regular or irregular location data
##' @param x a `fG_ssm` fit object or a data frame of observations (see details)
##' @param what if a `fG_ssm` fit object is supplied then \code{what} determines
##' whether fitted or predicted (default) values are mapped; ignored if
##' \code{x} is a data frame
##' @param model mpm model to fit; either \code{mpm} with unpooled random walk
##' variance parameters (\code{sigma_(g,i)}) or \code{jmpm} with a single,
##' pooled random variance parameter (\code{sigma_g})
##' @param coords column numbers of the location coordinates (default = 3:4)
##' @param control list of control settings for the outer optimizer (see \code{mpm_control} for details)
##' @param inner.control list of control parameters for the inner optimization
##' @param verbose `r lifecycle::badge("deprecated")` use ssm_control(verbose = 1) instead, see \code{ssm_control} for details
##' @param optim `r lifecycle::badge("deprecated")` use ssm_control(optim = "optim") instead, see \code{ssm_control} for details
##' @param optMeth `r lifecycle::badge("deprecated")` use ssm_control(method = "L-BFGS-B") instead, see \code{ssm_control} for details
##'
##' @return a list with components
##' \item{\code{fitted}}{a dataframe of fitted locations}
##' \item{\code{par}}{model parameter summary}
##' \item{\code{data}}{input dataframe}
##' \item{\code{tmb}}{the tmb object}
##' \item{\code{opt}}{the object returned by the optimizer}
##'
##' @examples
##' ## fit jmpm to two southern elephant seal tracks
##' xs <- fit_ssm(sese2, spdf=FALSE, model = "rw", time.step=72,
##' control = ssm_control(se = FALSE, verbose = 0))
##'
##' fmpm <- fit_mpm(xs, model = "jmpm")
##'
##' @importFrom TMB MakeADFun sdreport newtonOption
##' @importFrom dplyr "%>%" mutate select
##' @importFrom purrr map
##' @importFrom lifecycle deprecate_warn
##' @export
fit_mpm <- function(x,
what = "predicted",
model = c("jmpm", "mpm"),
coords = 3:4,
control = mpm_control(),
inner.control = NULL,
optim = NULL,
optMeth = NULL,
verbose = NULL
) {
# resolve the model choice; errors on anything other than "jmpm"/"mpm"
model <- match.arg(model)
## warnings for deprecated arguments
# (each deprecated argument, if supplied, is folded into `control`)
if(!is.null(verbose)) {
deprecate_warn("0.7-5", "fit_ssm(verbose)",
details = "use `control = ssm_control(verbose)` instead")
control$verbose <- verbose
}
if(!is.null(optim)) {
deprecate_warn("0.7-5", "fit_ssm(optim)",
details = "use `control = ssm_control(optim)` instead")
if(optim %in% c("nlminb", "optim")) control$optim <- optim
else stop("invalid optimiser specified, see ?ssm_control for options")
}
if(!is.null(optMeth)) {
deprecate_warn("0.7-5", "fit_ssm(optMeth)",
details = "use `control = ssm_control(method)` instead")
if(optMeth %in% c("L-BFGS-B", "BFGS", "Nelder-Mead", "CG", "SANN", "Brent"))
control$method <- optMeth
else stop("invalid optimisation method specified, see ?ssm_control for options")
}
# announce progress unless silenced via control$verbose
if(control$verbose == 1)
cat(paste0("fitting ", model, "...\n"))
# accept either an SSM fit (extract its fitted/predicted locations) or a
# plain data frame; either way keep only id, date and the two coord columns
if(inherits(x, "fG_ssm")) {
x <- grab(x, what = what, as_sf = FALSE) %>%
select(id, date, coords[1], coords[2])
} else {
x <- x %>% select(id, date, coords[1], coords[2])
}
if(all(c("x","y") %in% names(x))) {
# rescale x,y in km for better optimisation
# (divides each coordinate by its column maximum, giving a unit-ish scale)
xm <- max(x$x)
ym <- max(x$y)
x <- x %>% mutate(x = x/xm, y = y/ym)
} else {
# standardise coord names to x,y
names(x)[3:4] <- c("x","y")
}
# fit either one model per individual (mpm) or one joint model (jmpm)
switch(model,
mpm = {
# fit each individual's track separately; try() keeps one failed fit
# from aborting the others
fit <- split(x, x$id) %>%
map(~ try(mpmf(.x,
model = model,
control = control,
inner.control = inner.control
), silent = TRUE)
)
# NOTE(review): a successful mpmf() fit appears to be a list of length 8
# (a try-error is shorter); a length > 8 would make this sapply() return
# NULL for that element -- confirm against mpmf()'s return value
fit <- tibble(id = names(fit), mpm = fit) %>%
mutate(converged = sapply(.$mpm, function(x)
if(length(x) == 8) {
x$opt$convergence == 0
} else if(length(x) < 8) {
FALSE
})) %>%
mutate(model = model)
},
jmpm = {
# single fit pooled across all individuals
fit <- try(mpmf(
x = x,
model = model,
control = control,
inner.control = inner.control
), silent = TRUE)
fit <- tibble(mpm = list(fit)) %>%
mutate(converged = ifelse(length(.$mpm[[1]]) == 8,
.$mpm[[1]]$opt$convergence == 0,
FALSE)) %>%
mutate(model = model)
})
# prepend the fG_mpm class so downstream S3 methods dispatch correctly
class(fit) <- append("fG_mpm", class(fit))
return(fit)
}
|
06f5f7174cfa75b4bd1d6f6e6b19e17a20190a80
|
65b2d8f88199970ca6b83e658760dfb66c5d951d
|
/myTest1.R
|
490be22fe49d64ee492ef863969f743a34e52649
|
[] |
no_license
|
ClayCampaigne/stat243_Project
|
8776a1572701f9fb261b3ab1b81186fbd11fddcb
|
b3483b486a39b66c00f3dac96495265335f7dea9
|
refs/heads/master
| 2021-12-04T01:24:46.989193
| 2014-12-11T10:15:11
| 2014-12-11T10:15:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,714
|
r
|
myTest1.R
|
####################### Project: Test Code ########################
##### Description:
##### 1. Test function for each auxiliary function
##### 2. Three test functions for the select() function
##### ( testStepwise(), testSim1() & testSim2() )
###################################################################
# Run the full suite: every auxiliary-function smoke test followed by the
# three select() tests, with a blank line printed between consecutive test
# reports (behaviorally identical to calling each function in sequence).
test <- function() {
  suite <- list(
    testInitial, testSingleEval, testEval, testUpdate, testCrossOver,
    testMutation, testStepwise, testSim1, testSim2
  )
  for (k in seq_along(suite)) {
    suite[[k]]()
    # separator between reports; none after the final one
    if (k < length(suite)) cat("\n")
  }
}
# Smoke test for popInitialize(): builds a fresh population and prints one
# chromosome (1 = variable included, 0 = excluded).
testInitial <- function() {
  generation <- popInitialize(popSize = 100, zeroToOneRatio = 1, geneLength = 10)
  cat("1. The popInitialize() function returns an initial generation of models with 1 indicating an included variable and 0 indicating an excluded variable.\n")
  cat("Here is what one model inside the generation looks like :\n")
  print(generation[1, ])
  cat("The zero-to-one ratio in the model is approximately 1 to 1 as specified in default.\n")
}
# run at source time
testInitial()
# Smoke test for singleEval(): evaluates one randomly drawn model on mtcars
# (mpg as response, the remaining 10 columns as candidate predictors) and
# prints the resulting criterion value (BIC).
# NOTE(review): `sample()` is unseeded, so the printed BIC varies run to run.
testSingleEval <- function(){
X <- mtcars[,2:11]
y <- mtcars[,1]
singleGene <- sample(c(0,1),dim(X)[2],replace=T)
criValue <- singleEval(singleGene,X,y,"lm","BIC",NULL,"gaussian")
cat("2. The singleEval() function returns the value of the evaluation criterion for one specified model.\n")
cat("For example, the BIC here is:\n")
cat(criValue)
cat("\n")
}
# Smoke test for evalFunction(): scores a whole generation of models and
# prints the first column of the returned rank/probability summary.
testEval <- function(){
X <- mtcars[,2:11]
y <- mtcars[,1]
currentGenePool <- popInitialize(popSize = 100, geneLength = dim(X)[2],zeroToOneRatio = 1)
criterion <- evalFunction(X,y,currentGenePool = currentGenePool,popSize = 100)
cat("3. The evalFunction() returns the AIC rank and sampling probability of each of the models in the generation, for example:\n")
print(criterion[,1])
cat("\n")
}
# run at source time
testEval()
# Smoke test for updateSamp(): resamples a generation under uniform weights
# and prints one chromosome from the updated population.
# FIX: removed the unused local `y <- mtcars[,1]` from the original --
# updateSamp() only needs the gene pool, the population size and the weights.
testUpdate <- function(){
  X <- mtcars[, 2:11]
  # uniform weights: every model equally likely to be resampled
  weights <- rep(1, 100)
  currentGenePool <- popInitialize(popSize = 100, geneLength = dim(X)[2], zeroToOneRatio = 1)
  newGenePool <- updateSamp(currentGenePool, popSize = 100, weights = weights)
  cat("4. The updateSamp function updates the population according to the specified weights.\n")
  cat("Here is what one model in the new generation looks like:\n")
  print(newGenePool[1, ])
}
# run at source time
testUpdate()
# Smoke test for crossover(): crosses an all-ones chromosome with an
# all-zeros chromosome (seeded for reproducibility) and prints the parent
# genes and the two resulting children.
# (v1/v2 keep their names because rbind(v1, v2) prints them as row labels.)
testCrossOver <- function() {
  set.seed(1)
  v1 <- rep(1, 10)
  v2 <- rep(0, 10)
  n_genes <- length(v1)
  offspring <- crossover(v1, v2, n_genes, 1)
  cat("5. The crossover() function returns two models generated by cross over:\n")
  cat("The genes before crossover:\n")
  print(rbind(v1, v2))
  cat("The genes after crossover:\n")
  print(offspring)
  cat("\n")
}
# run at source time
testCrossOver()
# Smoke test for mutation(): mutates a random pair of chromosomes and prints
# the genes before and after.
# FIXES vs the original: `replace = T` now spells out TRUE, and the unused
# local `geneLength` (computed but never passed to mutation()) is removed.
# NOTE(review): `sample()` is unseeded, so output varies run to run.
# (v1/v2 keep their names because rbind(v1, v2) prints them as row labels.)
testMutation <- function(){
  v1 <- sample(c(0, 1), 10, replace = TRUE)
  v2 <- sample(c(1, 0), 10, replace = TRUE)
  child <- mutation(v1, v2, 1)
  cat("6. The mutation() function returns two models generated from mutation:\n")
  cat("The genes before mutation:\n")
  print(rbind(v1, v2))
  cat("The genes after mutation:\n")
  print(child)
  cat("\n")
}
# Smoke test for best(): builds a fresh generation over the mtcars predictors
# and asks best() for the top model under AIC using lm fits.
testBest <- function() {
  predictors <- mtcars[, 2:11]
  response <- mtcars[, 1]
  seed_pool <- popInitialize(popSize = 100, geneLength = ncol(predictors), zeroToOneRatio = 1)
  best(predictors, response, pool = seed_pool, popSize = 100, type = "lm", criterion = "AIC")
}
# End-to-end check: run the GA-based select() on mtcars and compare its chosen
# model's AIC against the model chosen by stepwise regression (step()).
# NOTE(review): `select` here is this project's GA model-selection function;
# if dplyr or MASS is attached, name masking may interfere.  Also note the
# `=` top-level assignment, inconsistent with the `<-` used elsewhere.
testStepwise = function(){
##### Implement our function on the mtcars dataset ######
##### Using stepwise regression on the same dataset and compare the results #####
X <- mtcars[,2:11]
y <- mtcars[,1]
cat("Testing select() function on mtcars dataset ... \n")
cat("Our function running ...")
# seed fixed so the GA run is reproducible
set.seed(2)
result <- select(X, y, popSize = 100, max_iterations = 500, crossRate = 0.95, mRate = 0.0001)
cat("Now we implement stepwise regression on the dataset.")
# reference: bidirectional stepwise search starting from the full model
fullModel <- lm( mpg ~ cyl+disp+hp+drat+wt+qsec+vs+am+gear+carb, data = mtcars)
stepResult <- step(fullModel, direction = "both", trace = FALSE)
cat("The stepwise regression has picked the following model:")
print(summary(stepResult))
cat("The AIC value for this model is:",AIC(stepResult),"\n")
cat("\n")
cat("Our function has chosen the following model:")
print(summary(result))
cat("The AIC value for our model is:",unlist(AIC(result)),"\n")
cat("\n")
# "success" = AIC within 10 of the stepwise benchmark
if((abs(AIC(result)-AIC(stepResult))) < 10)
cat("The model our function chose is close to the one that stepwise regression chose. Test succeeded.\n")
else
cat("The model our function chose is not close to the one that stepwise regression chose. Test failed.\n")
}
# run at source time
testStepwise()
# Simulation check 1: y is generated from the first five mtcars columns
# (known signal) with the remaining six acting as noise; select() should
# recover the five signal variables.
testSim1 <- function(){
##### Simulate outcome variable based on 5 predicting variables #####
##### Throw in 6 more "noise" variables and use our function to select the predictor variables #####
X <- mtcars[,1:11]
n <- dim(mtcars)[1]
set.seed(1)
# NOTE(review): error is an n x 1 matrix here, so y becomes a matrix too
# (testSim2 below uses a plain numeric vector) -- harmless but inconsistent
error <- matrix(rnorm(n),nrow = n)
y <- 1*X[,1] + 2*X[,2] + 3*X[,3] + 4*X[,4] + 5*X[,5] + error
cat("Testing our function on the simulated dataset ...\n")
cat("Function is running ...\n")
set.seed(1)
result <- select (X, y, popSize = 200, max_iteration = 200, criterion = "BIC", zeroToOneRatio = 1, crossRate = 0.95, mRate = 0.001)
cat("Our function has chosen the following model:")
print(summary(result))
cat("The BIC value for our model is:",unlist(BIC(result)),"\n")
cat("The true model has the mpg, cyl, disp, hp and drat as the independent variables.\n")
cat("Using the current seed, our function has picked out all of the 5 relevant variables\nbut included 1 additional irrelevant variable. The performance of our function is decent. Test succeeded.")
}
# run at source time
testSim1()
# Simulation check 2: same five-variable signal, with extra columns of
# uniform-integer noise appended to stress select() with more candidates.
# NOTE(review): the banner below says 36 noise variables, but the code
# appends a 20-column matrix (20*n draws reshaped to n rows) -- confirm.
testSim2 <- function(){
##### Simulate outcome variable based on 5 predicting variables #####
##### Throw in 36 more "noise" variables and use our function to select the predictor variables #####
set.seed(2)
X1 <- mtcars[,1:11]
n <- dim(mtcars)[1]
X2 <- as.data.frame(matrix(sample(0:100, 20*n,replace = T),nrow = n))
X <- cbind(X1,X2)
error <- rnorm(n)
y <- 1*X[,1] + 2*X[,2] + 3*X[,3] + 4*X[,4] + 5*X[,5] + error
cat("Testing our function on the simulated dataset ...\n")
cat("Function is running ...\n")
set.seed(1)
result <- select (X, y, popSize = 200, max_iteration = 200, criterion = "BIC", zeroToOneRatio = 1, crossRate = 0.95, mRate = 0.001)
cat("Our function has chosen the following model:")
print(summary(result))
cat("The BIC value for our model is:",unlist(BIC(result)),"\n")
cat("The true model has the mpg, cyl, disp, hp and drat as the independent variables.\n")
cat("Using the current seed, our function has picked out all of the 5 relevant variables but included 1 additional irrelevant variable. The performance of our function is decent. Test succeeded.")
}
# run at source time
testSim2()
|
098fe5d5dfb8583d82b545e2cd9366edfc983e27
|
2f94e0f8911055053c30f9ab88e2f66691ca719d
|
/R/diagnostic_maf.R
|
be47f0044ac1f78d48b9bc0a9854ece12ae1fe6d
|
[] |
no_license
|
anne-laureferchaud/stackr-1
|
4e4dd971dd83a8b238a858d6e1026ab2c6e271ed
|
55837d3005d4e4f13ce5706284e8137c98e8501a
|
refs/heads/master
| 2020-06-17T10:01:15.282997
| 2016-11-28T16:49:44
| 2016-11-28T16:49:44
| 75,013,926
| 0
| 0
| null | 2016-11-28T21:02:47
| 2016-11-28T21:02:46
| null |
UTF-8
|
R
| false
| false
| 2,259
|
r
|
diagnostic_maf.R
|
# Minor Allele Frequency Diagnostic
#' @title MAF diagnostic
#' @description Minor Allele Frequency diagnostic to help choose a filter
#' threshold.
#' @param data A data frame object, or the path of a tab-separated (".tsv")
#' file, of class sumstats or tidy VCF.
#' @param group.rank The number of quantile groups used to bin the MAF
#' values (integer).
#' @param filename Name of the file written to the working directory (optional).
#' @return A data frame with one row per rank: mean local MAF, mean global
#' MAF and the number of loci in each rank (returned visibly, after a short
#' status message is printed).
#' @rdname diagnostic_maf
#' @export
#' @import dplyr
#' @import readr
#' @details Highly recommended to look at the distribution of MAF
#' \link{plot_density_distribution_maf}.
#' @seealso \link{filter_maf}
diagnostic_maf <- function(data, group.rank, filename) {
  # NULL placeholders so R CMD check does not flag the NSE column names
  LOCUS <- NULL
  POP_ID <- NULL
  FREQ_ALT <- NULL
  RANK <- NULL
  GLOBAL_MAF <- NULL
  MAF_P <- NULL
  MAF_L <- NULL
  # Accept either a file path or an in-memory data frame.
  # FIX: the original tested `is.vector(data) == "TRUE"`, comparing a logical
  # against a character string (works only via implicit coercion);
  # is.character() states the intent directly.  The no-op `data <- data`
  # branch is dropped.
  if (is.character(data)) {
    data <- read_tsv(data, col_names = TRUE)
    message("Using the file in your directory")
  } else {
    message("Using the file from your global environment")
  }
  # Local MAF: per population take the minimum alternate-allele frequency per
  # locus, average those across populations per locus, then bin the loci into
  # `group.rank` quantile groups (FIX: `T` spelled out as TRUE throughout).
  test.local <- data %>%
    select(LOCUS, POP_ID, FREQ_ALT) %>%
    group_by(LOCUS, POP_ID) %>%
    summarise(
      MAF_P = min(FREQ_ALT, na.rm = TRUE)
    ) %>%
    group_by(LOCUS) %>%
    summarise(MAF_L = mean(MAF_P, na.rm = TRUE)) %>%
    group_by(RANK = ntile(MAF_L, group.rank)) %>%
    summarise(
      LOCAL_MAF = mean(MAF_L, na.rm = TRUE),
      n = length(LOCUS)
    ) %>%
    select(-n)
  # Global MAF: same binning applied to the per-locus mean of GLOBAL_MAF;
  # here the per-rank locus count `n` is kept for the output table.
  test.global <- data %>%
    select(LOCUS, POP_ID, GLOBAL_MAF) %>%
    group_by(LOCUS) %>%
    summarise(GLOBAL_MAF = mean(GLOBAL_MAF, na.rm = TRUE)) %>%
    group_by(RANK = ntile(GLOBAL_MAF, group.rank)) %>%
    summarise(
      GLOBAL_MAF = mean(GLOBAL_MAF, na.rm = TRUE),
      n = length(LOCUS)
    ) %>%
    select(GLOBAL_MAF, n)
  # One row per rank: local summary columns followed by the global ones.
  maf.diagnostic <- bind_cols(test.local, test.global)
  # Optionally persist the table.
  # FIX: `!missing(filename)` replaces the original string comparison
  # `missing(filename) == "FALSE"`.
  if (!missing(filename)) {
    message("Saving the table in your working directory...")
    write_tsv(maf.diagnostic, filename, append = FALSE, col_names = TRUE)
    saving <- paste("Saving was selected, the filename:", filename, sep = " ")
  } else {
    saving <- "Saving was not selected..."
  }
  # Print the save status + working directory, then return the table.
  invisible(cat(sprintf(
    "%s\n
Working directory:
%s",
    saving, getwd()
  )))
  return(maf.diagnostic)
}
|
8f0454b07347f2bf19f3c7c33e053518b8ef10ba
|
f2b82a6acbd9939aa9174c51f76f4e82952a3665
|
/server.R
|
cf63f0b78c69092b99b4b589bb81a6881bb31a8d
|
[] |
no_license
|
kowdan99/UN-FAO-ANALYSIS
|
4077b1877892ac0c87732c1a2ecc226ed87b36c8
|
efb5851f4ac19ac2d9d36be835c2f1a362aa48b3
|
refs/heads/master
| 2020-05-18T21:04:51.026486
| 2019-03-14T21:41:20
| 2019-03-14T21:41:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,728
|
r
|
server.R
|
library("shiny")
library("dplyr")
library("leaflet")
library("stringr")
library("rbokeh")
library("tidyr")
library("ggplot2")
library("ggmap")
library("plotly")
library("shinyWidgets")
# LOAD ALL DATA SETS HERE AT THE BEGINNING
# Load all data sets once at start-up.
# FIX: the original read FAOSTAT_foodsecurity.csv and country_region.csv a
# second time with identical arguments; the redundant reads are removed.
countries <- read.csv("data/countries_long_lat.csv", stringsAsFactors = FALSE)
full_pop_data <- read.csv("data/FAOSTAT_population.csv", stringsAsFactors = FALSE)
full_land_data <- read.csv("data/FAOSTAT_landuse.csv", stringsAsFactors = FALSE)
# World GDP + ISO codes, fetched from the plotly example-data repository
df <- read.csv("https://raw.githubusercontent.com/plotly/datasets/master/2014_world_gdp_with_codes.csv", stringsAsFactors = FALSE)
food_data <- read.csv("data/FAOSTAT_foodsecurity.csv", stringsAsFactors = FALSE)
country_region <- read.csv("data/country_region.csv", stringsAsFactors = FALSE)

# Clean region data: keep only country name + region; rename "name" to
# "Area" to match the column used by the FAO tables.
country_region <- country_region %>%
  select(name, region)
names(country_region) <- c("Area", "region")

# Keep only the columns the app uses.
clean_food_data <- food_data %>%
  select(Area.Code, Area, Item.Code, Item, Year.Code, Year, Unit, Value)

# Vector of food-security indicator names for the dropdown; filtered to a
# single country/year so each indicator appears exactly once.
# FIX: the original listed `Item.Code == 21034` twice in this filter; the
# duplicate term (a no-op) is removed.
# NOTE(review): 210011 has six digits while every other code has five --
# confirm it is not a typo against the FAOSTAT item-code list.
grouped_by_item <- clean_food_data %>%
  group_by(Item) %>%
  filter(Area == "Afghanistan") %>%
  filter(Item.Code == 21010 |
    Item.Code == 21034 |
    Item.Code == 210011 |
    Item.Code == 21033 |
    Item.Code == 21047 |
    Item.Code == 21048 |
    Item.Code == 21042) %>% # Select indicators
  filter(str_detect(Year.Code, "2000"))
item_names <- pull(select(grouped_by_item, Item))
# Shiny server: one tab per analysis (choropleth, macroindicators,
# food security, energy use). Relies on the globals loaded at the top of
# this file (full_land_data, df, clean_food_data, country_region, ...).
my_server <- function(input, output) {
  #--------------------------------------------CHOROPLETH PAGE-------------------------------------------------------------------------------------------------
  # World choropleth of the selected land-use share (input$itemn) in the
  # selected year (input$yearn).
  output$choropleth <- renderPlotly({
    # Keep only the 2000-2016 agricultural/forest-share rows and drop
    # FAOSTAT bookkeeping columns.
    land_data <- full_land_data %>%
      select(
        -Domain.Code, -Domain, -Area.Code, -Element.Code, -Item.Code,
        -Year.Code, -Unit, -Flag, -Flag.Description
      ) %>%
      filter(Year >= 2000, Year <= 2016, Element != "Share in Forest land")
    colnames(land_data)[colnames(land_data) == "Area"] <- "Country"
    countries <- read.csv("data/countries_long_lat.csv", stringsAsFactors = FALSE) %>%
      select(-Alpha.2.code, -Alpha.3.code, -Numeric.code, -Icon)
    tot_land_data <- left_join(land_data, countries)
    share_agricultural_land <- tot_land_data %>% filter(Element == "Share in Agricultural land")
    # INPUT---------------------------------------
    agri_element_map <- share_agricultural_land %>%
      filter(Item == input$itemn, Year == input$yearn) %>%
      select(-Latitude..average., -Longitude..average.)
    # Rename the plotly GDP table's country names to match FAOSTAT spelling
    # so the left_join below finds the ISO codes. (Modifying `df` here
    # creates a local copy; the global `df` is untouched.)
    colnames(df)[1] <- "Country"
    df[15, 1] <- "Bahamas"
    df[46, 1] <- "Congo"
    df[47, 1] <- "Democratic Republic of the Congo"
    df[74, 1] <- "Gambia"
    df[134, 1] <- "Micronesia (Federated States of)"
    df[165, 1] <- "Russian Federation"
    df[177, 1] <- "Serbia and Montenegro"
    df[190, 1] <- "Sudan (former)"
    df[195, 1] <- "Syrian Arab Republic"
    df[198, 1] <- "United Republic of Tanzania"
    df[212, 1] <- "United States of America"
    df[216, 1] <- "Venezuela (Bolivarian Republic of)"
    df[217, 1] <- "Viet Nam"
    df[218, 1] <- "United States Virgin Islands"
    df[25, 1] <- "Bolivia (Plurinational State of)"
    df[30, 1] <- "Brunei Darussalam"
    df[50, 1] <- "Côte d'Ivoire"
    df[55, 1] <- "Czechia"
    df[109, 1] <- "Democratic People's Republic of Korea"
    df[95, 1] <- "Iran (Islamic Republic of)"
    df[108, 1] <- "Republic of Korea"
    df[113, 1] <- "Lao People's Democratic Republic"
    df[123, 1] <- "North Macedonia"
    df[135, 1] <- "Republic of Moldova"
    # NOTE(review): single-bracket `df[-c(...)]` indexes COLUMNS of a data
    # frame, and all of these indices exceed ncol(df), so as written this is
    # a no-op. It looks like rows were meant to be dropped, i.e.
    # `df[-c(...), ]` -- confirm; the later na.omit() masks the difference.
    df <- df[-c(33, 53, 7, 67, 74, 78, 80, 84, 90, 110, 122, 136, 138, 169, 181, 187, 192, 196, 207)]
    chorodata <- left_join(agri_element_map, df) %>% na.omit()
    l <- list(color = toRGB("grey"), width = 0.5)
    # specify map projection/options
    g <- list(
      showframe = FALSE,
      showcoastlines = FALSE,
      projection = list(type = "Mercator")
    )
    choropleth <- plot_geo(chorodata) %>%
      add_trace(
        z = ~Value, color = ~Value, colors = "Greens",
        text = ~Country, locations = ~CODE, marker = list(line = l)
      ) %>%
      colorbar(title = paste0("% of ", input$itemn), ticksuffix = "%") %>%
      layout(
        title = paste0("Percentage of ", input$itemn, " Worldwide in ", input$yearn),
        geo = g
      )
    choropleth
  })
  #--------------------------------------------END CHOROPLETH PAGE--------------------------------------------------------------------------------------------
  #--------------------------------------------MACROINDICATOR PAGE--------------------------------------------------------------------------------------------
  # Read once per session; used only by the bar plot below.
  gdp_data <- read.csv("data/FAOSTAT_macroindicators.csv", stringsAsFactors = FALSE)
  # Stacked bars of economic indicators for the selected country
  # (input$country_y), 2000-2016.
  output$bar_ploty <- renderPlot({
    gdp_d <- gdp_data %>%
      select(Area.Code, Area, Element, Item, Year, Unit, Value) %>%
      filter(Year >= 2000, Year <= 2016,
             Item %in% c("Gross Domestic Product", "Gross Output (Agriculture)", "Gross Output (Agriculture, Forestry and Fishing)", "Gross National Income"))
    countriess <- gdp_d %>%
      filter(Area == input$country_y)
    j <- ggplot() + geom_bar(aes(y = Value, x = Year, fill = Item), data = countriess, stat = "identity")+
      labs(
        title = paste0("Graph of ", input$country_y, "'s Economic Outputs, Products and Income "),
        x = "Year",
        y = "Value in Millions of Dollars"
      )
    j
  })
  #--------------------------------------------END MACROINDICATOR PAGE----------------------------------------------------------------------------------------
  #--------------------------------------------FOOD-SECURITY PAGE---------------------------------------------------------------------------------------------
  # Try to make something of this mess
  # One row per country for the selected indicator/year, with region attached
  # for colouring the scatter plot.
  item_by_country <- reactive({
    item_by_country <- clean_food_data %>%
      # spread(key = Item, value = Value) %>%
      group_by(Area) %>%
      filter(Item == input$indicator) %>%
      filter(str_detect(Year, input$year_food)) %>%
      ungroup()
    item_by_country$Value <- as.double(item_by_country$Value)
    item_by_country <- item_by_country %>%
      left_join(country_region)
    item_by_country
  })
  # Create the dropdown for years
  # Rebuilt whenever the indicator changes, since indicators cover
  # different year ranges.
  output$year_selector <- renderUI({
    # Create the reactive values for the year dropdown
    years_available <- reactive({
      years_available <- clean_food_data %>%
        filter(Item == input$indicator, Area == "Afghanistan") %>%
        select(Year)
      year_vector <- pull(years_available)
      year_vector
    })
    # Create the dropdown itself
    selectInput(
      inputId = "year_food",
      label = "Year",
      choices = years_available(),
      selected = years_available()[1]
    )
  })
  # Create the plot
  output$plot_food <- renderRbokeh({
    p <- figure() %>%
      ly_points(
        Area, Value,
        data = item_by_country(),
        color = region,
        hover = list(Area, Value)
      ) %>%
      y_axis(label = input$indicator) %>%
      x_axis(visible = FALSE)
    p
  })
  #--------------------------------------------END FOOD-SECURITY PAGE-----------------------------------------------------------------------------------------
  #--------------------------------------------ENERGYUSE PAGE-------------------------------------------------------------------------------------------------
  energy <- read.csv("data/FAOSTAT_data_3-12-2019.csv", stringsAsFactors = F)
  # Treat missing values as zero usage; drop the electricity ("million kWh")
  # rows so all remaining units are summable.
  energy[is.na(energy)] <- 0
  energy <- energy %>%
    filter(Unit != "million kWh")
  # Rows matching the user's year range, element, country and item.
  filtered <- reactive({
    data <- energy %>%
      filter(Year >= input$year[1], Year <= input$year[2], Element == input$element, Country == input$country, Item == input$item)
    data
  })
  # Yearly totals of the filtered rows.
  filtered_one <- reactive({
    data_a <- filtered() %>%
      group_by(Year) %>%
      summarise(Value = sum(Value), Unit = unique(Unit))
    data_a
  })
  output$trend <- renderPlot({
    p <- ggplot(data = filtered_one(), mapping = aes_string(x = "Year", y = "Value")) +
      geom_point() +
      geom_line() +
      scale_x_continuous(breaks = c(2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012))
    p
  })
}
#--------------------------------------------END ENERGYUSE PAGE---------------------------------------------------------------------------------------------
|
a7adfd6da42c7530f10aee40c4aaa7d0c29817a5
|
2fa2f15ebbf7adfa114f2a189020ae9c8af2ff17
|
/fun_joint_recnplan.R
|
e1c6529340364a8c9c1e96f7cd879d9801c63d22
|
[] |
no_license
|
xliu12/Sample-size-planning-for-mediation
|
5b19ebe347759b7707b8cc925a8afd5dc9cc0311
|
bac3e552fcdbf3e8a1fe5ea2052ff82fb37a6ee8
|
refs/heads/main
| 2023-07-24T17:54:09.045388
| 2021-09-09T15:13:45
| 2021-09-09T15:13:45
| 361,416,591
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,352
|
r
|
fun_joint_recnplan.R
|
# Recommended planned sample size by the assurance criterion: step the
# candidate planned n up or down (by `nstep`) from `Nplan.start` until the
# attained assurance -- the proportion of the pilot-induced power
# distribution meeting `power.desired` -- crosses `assurance.desired`.
# Returns the smallest candidate n found to meet the target.
joint.recnplan.assurance <- function(Npilot, Nplan.start, nstep,
                                     power.desired = 0.8,
                                     assurance.desired,
                                     a_true, b_true, cp_true, alpha,
                                     nrawdata, nemp) {
  # Provides joint.powerdist() used below.
  source("fun_joint_1pilot.R")

  # Assurance attained at a candidate planned sample size. Note that
  # joint.powerdist() is simulation-based, so repeated calls vary.
  attained_assurance <- function(nplan) {
    pd <- joint.powerdist(nrawdata, nemp,
                          a_true, b_true, cp_true, alpha,
                          Npilot, nplan)
    mean(pd >= power.desired)
  }

  if (attained_assurance(Nplan.start) > assurance.desired) {
    # Starting point already exceeds the target: walk downward until we fall
    # at or below it, then step back up one increment.
    Nplan <- Nplan.start - nstep
    repeat {
      cur <- attained_assurance(Nplan)
      print(c(Nplan, cur))
      if (cur <= assurance.desired) break
      Nplan <- Nplan - nstep
    }
    recnplan <- Nplan + nstep
  } else {
    # Starting point falls short: walk upward until the target is reached.
    Nplan <- Nplan.start + nstep
    repeat {
      cur <- attained_assurance(Nplan)
      print(c(Nplan, cur))
      if (cur >= assurance.desired) break
      Nplan <- Nplan + nstep
    }
    recnplan <- Nplan
  }
  recnplan
}
##############################################################################
# Recommended planned sample size by the mean-power criterion: step the
# candidate planned n up or down (by `nstep`) from `Nplan.start` until the
# mean of the pilot-induced power distribution crosses `meanpower.desired`.
# Returns the smallest candidate n found to meet the target.
joint.recnplan.mean <- function(Npilot, Nplan.start, nstep,
                                power.desired = 0.8,
                                meanpower.desired = 0.8,
                                a_true, b_true, cp_true, alpha,
                                nrawdata, nemp) {
  # Provides joint.powerdist() used below.
  source("fun_joint_1pilot.R")

  # Mean power at a candidate planned sample size. joint.powerdist() is
  # simulation-based, so repeated calls vary.
  mean_power <- function(nplan) {
    pd <- joint.powerdist(nrawdata, nemp,
                          a_true, b_true, cp_true, alpha,
                          Npilot, nplan)
    mean(pd)
  }

  if (mean_power(Nplan.start) > meanpower.desired) {
    # Starting point already exceeds the target: walk downward until we fall
    # at or below it, then step back up one increment.
    Nplan <- Nplan.start - nstep
    repeat {
      cur <- mean_power(Nplan)
      print(c(Nplan, cur))
      if (cur <= meanpower.desired) break
      Nplan <- Nplan - nstep
    }
    recnplan <- Nplan + nstep
  } else {
    # Starting point falls short: walk upward until the target is reached.
    Nplan <- Nplan.start + nstep
    repeat {
      cur <- mean_power(Nplan)
      print(c(Nplan, cur))
      if (cur >= meanpower.desired) break
      Nplan <- Nplan + nstep
    }
    recnplan <- Nplan
  }
  recnplan
}
##############################################################################
# Closed-form sample size for jointly detecting the a-path and b-path of a
# mediation model at significance level `alpha` with power `power.desired`:
# the larger of the two per-path normal-approximation sample sizes.
n.sigasigb <- function(a_true, b_true, cp_true, alpha, power.desired) {
  # Combined standard-normal quantile for a two-sided test at `alpha` plus
  # the target power.
  crit <- qnorm(1 - alpha / 2) + qnorm(power.desired)
  # Sample size needed for the a-path coefficient.
  n_for_a <- ((1 - a_true^2) / a_true^2) * crit^2
  # Sample size needed for the b-path coefficient (adjusting for the direct
  # effect cp and the a-path).
  n_for_b <- ((1 - b_true^2 - cp_true^2 - 2 * a_true * b_true * cp_true) /
                (b_true^2 * (1 - a_true^2))) * crit^2
  max(n_for_a, n_for_b)
}
|
d46b87db751bd84767f625c36a9b24c4e9c4c96f
|
7695d9fa40a26df410954287b48400dc5377abc7
|
/R/figureS8.R
|
7894bcfacc573b16feb798ebc72a35c389a4bf1d
|
[] |
no_license
|
andrewholding/ZMIZ1
|
e95049b8590bebfec1851f9f144ffaa4a4d83b43
|
898ed2ffbd18339663ebdd0d3555a10d6263ae06
|
refs/heads/master
| 2023-08-18T01:04:23.432511
| 2023-08-03T17:14:48
| 2023-08-03T17:14:48
| 177,729,716
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,924
|
r
|
figureS8.R
|
#' Figure S8 (MCF7 panel)
#'
#' Generates the MCF7 part of figure S8: GSEA plots of the siZMIZ1 vs siCTRL
#' differential expression (6h oestrogen treatment) against cell-cycle gene
#' sets intersected with ER-response signatures. P-value will vary between
#' runs as no random seed is set.
#' @keywords RNAseq ZMIZ1
#'
#' @export
#' @import DESeq2
#' @import vulcan
#' @importFrom graphics barplot
#' @importFrom stats relevel
#' @examples figureS8mcf7()
figureS8mcf7 <- function() {
    result_list <- list()
    # Differential expression: one DESeq2 run per cell line / time point
    # (here only MCF7 at 6h). Uses the package-level annotationTable and
    # rawcounts objects.
    for (cells in c("MCF7")) {
        for (time in c("6h")) {
            resname <- paste0(cells, "_", time)
            message(resname)
            # Samples for this cell line, duration and treatment.
            subsamples <- annotationTable$Sample[annotationTable$Cells ==
                cells & annotationTable$Treatment_Duration ==
                time & annotationTable$Treatment == "Oestrogen"]
            subraw <- rawcounts[, subsamples]
            subannot <- annotationTable[annotationTable$Sample %in%
                subsamples, c("Cells", "Condition", "Treatment")]
            rownames(subannot) <- annotationTable$Sample[annotationTable$Sample %in%
                subsamples]
            # Keep annotation rows in the same order as the count columns.
            subannot <- subannot[subsamples, ]
            dds <- DESeqDataSetFromMatrix(countData = subraw,
                colData = subannot, design = ~Condition)
            # Drop genes with no reads; make siCTRL the reference level so
            # the contrast below is siZMIZ1 relative to siCTRL.
            dds <- dds[rowSums(counts(dds)) > 1, ]
            dds$Condition <- relevel(dds$Condition, ref = "siCTRL")
            dea <- DESeq(dds, parallel = TRUE)
            res <- results(dea, contrast = c("Condition", "siZMIZ1",
                "siCTRL"))
            # Attach Entrez id, symbol and gene name to the DE table.
            resannot <- cbind(rownames(res), eg2sym(rownames(res)))
            annotations <- annotategene(rownames(res))
            resannot <- cbind(as.matrix(resannot), as.matrix(annotations),
                as.matrix(res))
            colnames(resannot)[1:3] <- c("ENTREZID", "SYMBOL",
                "NAME")
            resannot <- as.data.frame(resannot)
            # cbind on matrices coerced everything to character; restore the
            # numeric statistics columns.
            resannot$log2FoldChange <- as.numeric(as.character(resannot$log2FoldChange))
            resannot$stat <- as.numeric(as.character(resannot$stat))
            resannot$pvalue <- as.numeric(as.character(resannot$pvalue))
            resannot$padj <- as.numeric(as.character(resannot$padj))
            resannot <- resannot[order(resannot$pvalue), ]
            result_list[[resname]] <- resannot
            rm(dea, resannot, res)
        }
    }
    # Ranked gene list (log2 fold change, named by Entrez id) for GSEA.
    geneList <- result_list[["MCF7_6h"]]$log2FoldChange
    names(geneList) <- rownames(result_list[["MCF7_6h"]])
    # ER-response signatures and cell-cycle gene sets from msigdb.
    williams <- msigdb[["c2_cgp;_;WILLIAMS_ESR1_TARGETS_UP"]]
    stein <- msigdb[["c2_cgp;_;STEIN_ESR1_TARGETS"]]
    bhat <- msigdb[["c2_cgp;_;BHAT_ESR1_TARGETS_NOT_VIA_AKT1_UP"]]
    go_cc <- msigdb[["c5_bp;_;GO_CELL_CYCLE"]]
    kegg_cell_cycle <- msigdb[["c2_cp;_;KEGG_CELL_CYCLE"]]
    react_cc <- msigdb[["c2_cpreactome;_;REACTOME_CELL_CYCLE"]]
    # Intersect each cell-cycle set with the union of ER-response signatures.
    overlapped <- react_cc[react_cc %in% c(bhat, williams, stein)]
    overlapped_react <- ZMIZ1:::eg2sym(react_cc[react_cc %in% c(bhat, williams, stein)])
    overlapped_go <- ZMIZ1:::eg2sym(go_cc[go_cc %in% c(bhat, williams, stein)])
    overlapped_kegg <- ZMIZ1:::eg2sym(kegg_cell_cycle[kegg_cell_cycle %in% c(bhat, williams, stein)])
    # One GSEA plot per overlapped set (Reactome, GO, KEGG).
    obj <- gsea(sort(geneList, decreasing = TRUE), set = names(overlapped_react),
        method = "pareto")
    plot_gsea(obj, bottomYtitle = "siZMIZ/siCTRL at 6h", title = "Reactome Cell Cycle/ER Response overlap")
    obj <- gsea(sort(geneList, decreasing = TRUE), set = names(overlapped_go),
        method = "pareto")
    plot_gsea(obj, bottomYtitle = "siZMIZ/siCTRL at 6h", title = "Go Cell Cycle/ER Response overlap")
    obj <- gsea(sort(geneList, decreasing = TRUE), set = names(overlapped_kegg),
        method = "pareto")
    plot_gsea(obj, bottomYtitle = "siZMIZ/siCTRL at 6h", title = "Kegg Cell Cycle/ER Response overlap")
}
#' Figure S8 (T47D panel)
#'
#' Generates the T47D part of figure S8: GSEA plots of the siZMIZ1 vs siCTRL
#' differential expression (6h oestrogen treatment) against cell-cycle gene
#' sets intersected with ER-response signatures. P-value will vary between
#' runs as no random seed is set.
#' @keywords RNAseq ZMIZ1
#'
#' @export
#' @import DESeq2
#' @import vulcan
#' @importFrom graphics barplot
#' @importFrom stats relevel
#' @examples figureS8t47d()
figureS8t47d <- function() {
    result_list <- list()
    # Differential expression: one DESeq2 run per cell line / time point
    # (here only T47D at 6h). Uses the package-level annotationTable and
    # rawcounts objects.
    for (cells in c("T47D")) {
        for (time in c("6h")) {
            resname <- paste0(cells, "_", time)
            message(resname)
            # Samples for this cell line, duration and treatment.
            subsamples <- annotationTable$Sample[annotationTable$Cells ==
                cells & annotationTable$Treatment_Duration ==
                time & annotationTable$Treatment == "Oestrogen"]
            subraw <- rawcounts[, subsamples]
            subannot <- annotationTable[annotationTable$Sample %in%
                subsamples, c("Cells", "Condition", "Treatment")]
            rownames(subannot) <- annotationTable$Sample[annotationTable$Sample %in%
                subsamples]
            # Keep annotation rows in the same order as the count columns.
            subannot <- subannot[subsamples, ]
            dds <- DESeqDataSetFromMatrix(countData = subraw,
                colData = subannot, design = ~Condition)
            # Drop genes with no reads; make siCTRL the reference level so
            # the contrast below is siZMIZ1 relative to siCTRL.
            dds <- dds[rowSums(counts(dds)) > 1, ]
            dds$Condition <- relevel(dds$Condition, ref = "siCTRL")
            dea <- DESeq(dds, parallel = TRUE)
            res <- results(dea, contrast = c("Condition", "siZMIZ1",
                "siCTRL"))
            # Attach Entrez id, symbol and gene name to the DE table.
            resannot <- cbind(rownames(res), eg2sym(rownames(res)))
            annotations <- annotategene(rownames(res))
            resannot <- cbind(as.matrix(resannot), as.matrix(annotations),
                as.matrix(res))
            colnames(resannot)[1:3] <- c("ENTREZID", "SYMBOL",
                "NAME")
            resannot <- as.data.frame(resannot)
            # cbind on matrices coerced everything to character; restore the
            # numeric statistics columns.
            resannot$log2FoldChange <- as.numeric(as.character(resannot$log2FoldChange))
            resannot$stat <- as.numeric(as.character(resannot$stat))
            resannot$pvalue <- as.numeric(as.character(resannot$pvalue))
            resannot$padj <- as.numeric(as.character(resannot$padj))
            resannot <- resannot[order(resannot$pvalue), ]
            result_list[[resname]] <- resannot
            rm(dea, resannot, res)
        }
    }
    # Ranked gene list (log2 fold change, named by Entrez id) for GSEA.
    geneList <- result_list[["T47D_6h"]]$log2FoldChange
    names(geneList) <- rownames(result_list[["T47D_6h"]])
    # ER-response signatures and cell-cycle gene sets from msigdb.
    williams <- msigdb[["c2_cgp;_;WILLIAMS_ESR1_TARGETS_UP"]]
    stein <- msigdb[["c2_cgp;_;STEIN_ESR1_TARGETS"]]
    bhat <- msigdb[["c2_cgp;_;BHAT_ESR1_TARGETS_NOT_VIA_AKT1_UP"]]
    go_cc <- msigdb[["c5_bp;_;GO_CELL_CYCLE"]]
    kegg_cell_cycle <- msigdb[["c2_cp;_;KEGG_CELL_CYCLE"]]
    react_cc <- msigdb[["c2_cpreactome;_;REACTOME_CELL_CYCLE"]]
    # Intersect each cell-cycle set with the union of ER-response signatures.
    overlapped <- react_cc[react_cc %in% c(bhat, williams, stein)]
    overlapped_react <- ZMIZ1:::eg2sym(react_cc[react_cc %in% c(bhat, williams, stein)])
    overlapped_go <- ZMIZ1:::eg2sym(go_cc[go_cc %in% c(bhat, williams, stein)])
    overlapped_kegg <- ZMIZ1:::eg2sym(kegg_cell_cycle[kegg_cell_cycle %in% c(bhat, williams, stein)])
    # One GSEA plot per overlapped set (Reactome, GO, KEGG).
    obj <- gsea(sort(geneList, decreasing = TRUE), set = names(overlapped_react),
        method = "pareto")
    plot_gsea(obj, bottomYtitle = "siZMIZ/siCTRL at 6h", title = "Reactome Cell Cycle/ER Response overlap")
    obj <- gsea(sort(geneList, decreasing = TRUE), set = names(overlapped_go),
        method = "pareto")
    plot_gsea(obj, bottomYtitle = "siZMIZ/siCTRL at 6h", title = "Go Cell Cycle/ER Response overlap")
    obj <- gsea(sort(geneList, decreasing = TRUE), set = names(overlapped_kegg),
        method = "pareto")
    plot_gsea(obj, bottomYtitle = "siZMIZ/siCTRL at 6h", title = "Kegg Cell Cycle/ER Response overlap")
}
|
43909c85d2a543e1f1232ddb7b885bf075c95276
|
941b25f4eb01051ed507f8715cf8514444a31116
|
/man/gmte_plot.Rd
|
3f185fffccd6caab0a155492f90be5e9a4398de0
|
[] |
no_license
|
lukepilling/twistR
|
e46bc934c573b928818db6745ad7eedb6e63ae3b
|
023f1329cbc6c5d67a299ba21b67e47b62599989
|
refs/heads/main
| 2023-04-25T21:22:40.158066
| 2023-04-25T15:38:18
| 2023-04-25T15:38:18
| 402,818,137
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,065
|
rd
|
gmte_plot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gmte_plot.R
\name{gmte_plot}
\alias{gmte_plot}
\title{Forest plot individual and combined TWIST/GMTE estimates}
\usage{
gmte_plot(
x,
plot_title = "",
plot_cat = TRUE,
cols = c("#f46036", "#2e294e", "#1b998b"),
pchs = c(15, 16, 23)
)
}
\arguments{
\item{x}{An object of class \code{twistR_GMTE} e.g., the output from \code{gmte_continuous}}
\item{plot_title}{A string to print as the plot title}
\item{plot_cat}{Logical. Plot the CAT estimates? (Default=TRUE)}
\item{cols}{Three colours to indicate the three model types (GMTE0, individual estimates, combined estimates)}
\item{pchs}{Three point types to indicate the three model types (GMTE0, individual estimates, combined estimates)}
}
\description{
Plot the individual and combined estimates from a Triangulation WIthin A STudy (TWIST) analysis. The \code{gmte_plot} function takes an object of class \code{twistR_GMTE}, containing effect estimates from the individual tests (such as RGMTE) and the results when combinations are performed (such as RGMTE+MR), and creates a forest plot, highlighting the individual and combined estimates, and indicating with a "*" when the combined estimate may be valid.
}
\examples{
# Example using a continuous outcome (LDL), binary treatment (statins), and binary genotype (SLCO1B1*5 homozygotes) variables
Y="ldl"
T="statin"
G="slco1b1_5_hmz"
Z="age+PC1+PC2+PC3+PC4+PC5+PC6+PC7+PC8+PC9+PC10"
results=gmte_continuous(Y,T,G,Z,D)
gmte_plot(results, plot_title = "SLCO1B1*5 effect on LDL during statin treatment")
# If desired, remove CAT estimates for "cleaner" plot, as these are often orders of magnititude larger than the other estimates
gmte_plot(results, plot_title = "SLCO1B1*5 effect on LDL during statin treatment", plot_cat=FALSE)
}
\references{
Bowden, J., et al., The Triangulation WIthin A STudy (TWIST) framework for causal inference within Pharmacogenetic research. PLoS Genetics. https://doi.org/10.1371/journal.pgen.1009783
}
\author{
Jack Bowden; Luke Pilling.
}
|
56666b0bedcf94ea07e4c38f873b8f90e726487a
|
322d9c291a98bb460bfeedadd6cdebca78bbca04
|
/inst/doc/EVT.R
|
dafdcb86f5c928aa87ccd9b98b9bcf7a25cc0cb9
|
[] |
no_license
|
alexbhatt/RecordLinkage
|
79b10e22f24940662a38800dc4645bce6748c9dc
|
616a49b14b7b48118cdaf06908e06151054cbcea
|
refs/heads/master
| 2020-12-30T06:53:25.709832
| 2019-08-23T14:41:04
| 2019-08-23T14:41:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,561
|
r
|
EVT.R
|
### R code from vignette source 'EVT.rnw'
### (Tangled from the vignette by Stangle; edit the vignette, not this file.)
###################################################
### code chunk number 1: EVT.rnw:3-4
###################################################
## Narrow console width so printed output fits the vignette page.
options(width=50)
###################################################
### code chunk number 2: EVT.rnw:28-29
###################################################
library(RecordLinkage)
###################################################
### code chunk number 3: EVT.rnw:32-37
###################################################
## Deduplicate RLdata500 with blocking on single attributes and string
## comparison on the first four columns, then compute EM-based weights.
data(RLdata500)
rpairs=compare.dedup(RLdata500,identity=identity.RLdata500,
  blockfld=list(1,3,5,6,7),strcmp=1:4)
rpairs=emWeights(rpairs)
###################################################
### code chunk number 4: EVT.rnw:54-55
###################################################
## Interactive threshold selection is not run during vignette building.
## Not run: getParetoThreshold(rpairs)
###################################################
### code chunk number 5: EVT.rnw:59-60
###################################################
## Mean residual life plot of the weight distribution.
plotMRL(rpairs)
###################################################
### code chunk number 6: EVT.rnw:68-73
###################################################
## Same plot with the chosen fitting interval (1.2, 12.8) highlighted.
plotMRL(rpairs)
abline(v=c(1.2,12.8),col="red",lty="dashed")
l=mrl(rpairs$Wdata)
range=l$x>1.2 & l$x < 12.8
points(l$x[range], l$y[range],col="red",type="l")
###################################################
### code chunk number 7: EVT.rnw:83-86
###################################################
## Fit the Pareto threshold on that interval and classify the record pairs.
threshold=getParetoThreshold(rpairs,interval=c(1.2,12.8))
result=emClassify(rpairs,threshold)
summary(result)
|
5640c521eec35a9f0d044609a6642ec3862ed44c
|
c4522a72b9543374d9f6b74bd387a071490348d8
|
/man/gbsdat.Rd
|
8f7a406fe50eb303445e5abebbbe77c0300706ba
|
[] |
no_license
|
cran/SCCS
|
8aab25b4cf8b2e547369a71d3b3508e25147667c
|
aa0e7c0a549f67ba7017712f13cdbe5e529c852b
|
refs/heads/master
| 2022-07-07T05:02:30.814331
| 2022-07-05T13:20:09
| 2022-07-05T13:20:09
| 133,012,639
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,030
|
rd
|
gbsdat.Rd
|
\name{gbsdat}
\docType{data}
\alias{gbsdat}
\title{Data on influenza vaccine and GBS}
\description{
The data comprise days (day 1 is 1st October 2010) at seasonal influenza vaccination and at onset of Guillain-Barre Syndrome (GBS) in Italy, gathered in the 2010-2011 influenza season. There are 174 cases from Galeotti (2013). Times have been jittered.
}
\usage{gbsdat}
\format{A data frame containing 174 rows and 6 columns. The column names are 'case' (individual identifier), 'sta' (the first day of the observation period), 'end' (the last day of the observation period), 'gbs' (day of GBS onset), 'flu' (day of influenza vaccination), 'sage' (age in years on day 1, 1st October 2010).}
%\source{}
\references{Galeotti, F., M. Massari, R. D`Alessandro, E. Beghi, A. Chio, G. Logroscino, G. Filippini, M. D. Benedetti, M. Pugliatti, C. Santiccio, and R. Raschetti (2013). Risk of Guillain-Barre syndrome after 2010-2011 influenza vaccination. European Journal of Epidemiology 28 (5), 433-444.
}
\keyword{datasets}
|
a85287c6acbda40f20d84bfc2b1f7f62bfd62a92
|
88147e2bddc2add4f51b507dbf1eed86de849495
|
/man/rvinenllkderiv.Rd
|
823c4ce7b056bf2603cd2327e5ff86901c9f975d
|
[] |
no_license
|
hoanguc3m/CopulaModel
|
39906379ed88d56f579851d45f157733c42bf926
|
1522b9a6476c5550736f652f902c61b3ac5e8fd3
|
refs/heads/master
| 2020-03-27T14:29:49.345584
| 2019-11-03T22:18:14
| 2019-11-03T22:18:14
| 146,665,628
| 2
| 0
| null | 2018-08-29T22:25:56
| 2018-08-29T22:25:56
| null |
UTF-8
|
R
| false
| false
| 3,195
|
rd
|
rvinenllkderiv.Rd
|
\name{rvinenllkderiv}
\Rdversion{1.1}
\alias{rvinenllkderiv}
\alias{rvinenllkder1.trunc}
\alias{dvinenllkder1.trunc}
\alias{rvinenllkder2.trunc}
\title{
Negative log-likelihood and gradient for regular vine models
}
\description{
Negative log-likelihood and gradient for regular vine models
}
\usage{
rvinenllkder1.trunc(parvec,udat,A,logdcopdernames,pconddernames,LB=0,UB=10)
dvinenllkder1.trunc(parvec,udat,logdcopdernames,pconddernames,LB=0,UB=10)
rvinenllkder2.trunc(parvec,udat,A,logdcopdernames,pconddernames,LB=0,UB=10)
}
\arguments{
\item{parvec}{parameter vector for the model}
\item{udat}{nxd matrix of uniform scores for rvinenllk.trunc}
\item{A}{dxd vine array with 1:d on diagonal}
\item{logdcopdernames}{string vector with names of log copula pdfs and
derivatives, length ntrunc, ntrunc=truncation level}
\item{pconddernames}{string vector with names of copula conditional cdfs and
derivatives, length ntrunc, ntrunc=truncation level}
\item{LB}{lower bound of components of parvec}
\item{UB}{upper bound of components of parvec; scalar or same length as parvec}
}
\details{
dvinenllkder1.trunc() was written before rvinenllkder1.trunc() because
the algorithm looks simpler for a D-vine versus the general R-vine.
rvinenllkder1.trunc() can be tested with a D-vine and matched to the
output of rvinenllkder1.trunc().
}
\value{
negative log-likelihood value with gradient as an attribute;
suitable for use with nlm.
}
\seealso{
\code{\link{rvinenllk}}
}
\examples{
d=5
A=vnum2array(d,3)
nsim=20
np=matrix(0,d,d)
# example 1
qcondnames=rep("qcondfrk",4)
pcondnames=rep("pcondfrk",4)
logdcopdernames=rep("logdfrk.deriv",4)
pconddernames=rep("pcondfrk.deriv",4)
parvec=c(3.6,3.6,3.6,3.6, 1.5,1.5,1.5, 1.4,1.4, 0.3)
set.seed(123)
np[1,2:d]=1; np[2,3:d]=1; np[3,4:d]=1; np[4,5]=1
udat=rvinesimvec(nsim,A,parvec,np,qcondnames,pcondnames,iprint=FALSE)
mle=nlm(rvinenllkder1.trunc,p=parvec,udat=udat,A=A,
logdcopdernames=logdcopdernames,pconddernames=pconddernames,
hessian=TRUE,iterlim=30,print.level=1,LB=-10,UB=30,check.analyticals=FALSE)
mle2=nlm(rvinenllkder1.trunc,p=parvec[1:7],udat=udat,A=A,
logdcopdernames=logdcopdernames[1:2],pconddernames=pconddernames[1:2],
hessian=TRUE,iterlim=30,print.level=1,LB=-10,UB=30,check.analyticals=FALSE)
# example 2
qcondnames=c("qcondbb1",rep("qcondfrk",3))
pcondnames=c("pcondbb1",rep("pcondfrk",3))
logdcopdernames=c("logdbb1.deriv",rep("logdfrk.deriv",3))
pconddernames=c("pcondbb1.deriv",rep("pcondfrk.deriv",3))
parvec=c(0.5,1.6,0.5,1.6,0.5,1.6,0.5,1.6, 1.5,1.5,1.5, 1.4,1.4, 0.3)
np[1,2:d]=2; np[2,3:d]=1; np[3,4:d]=1; np[4,5]=1
set.seed(123)
udat=rvinesimvec(nsim,A,parvec,np,qcondnames,pcondnames,iprint=FALSE)
lb=c(rep(c(0,1),4),rep(-10,6))
ub=c(rep(c(6,6),4),rep(30,6))
mle=nlm(rvinenllkder2.trunc,p=parvec,udat=udat,A=A,
logdcopdernames=logdcopdernames,pconddernames=pconddernames,
hessian=TRUE,iterlim=30,print.level=1,LB=lb,UB=ub,check.analyticals=FALSE)
mle2=nlm(rvinenllkder2.trunc,p=parvec[1:11],udat=udat,A=A,
logdcopdernames=logdcopdernames[1:2],pconddernames=pconddernames[1:2],
hessian=TRUE,iterlim=30,print.level=1,LB=lb[1:11],UB=ub[1:11],check.analyticals=FALSE)
}
\keyword{maximum likelihood}
\keyword{vine}
|
c619382750d91945b2c3d3ed2f617dd7ae5df144
|
853eee25f84fa341794305514193f9051e05ef10
|
/R/SummaryMethods.R
|
272c0ca37ea138d8e2512d8f75c62601a9b3acdf
|
[] |
no_license
|
luciu5/antitrust
|
f9a5d5703cc59dea063c700547303398f15c4cba
|
f504c3c41f730ce93338ead93d2ffc2562422e8e
|
refs/heads/master
| 2023-03-20T20:34:20.676729
| 2023-03-08T03:48:19
| 2023-03-08T03:48:19
| 202,005,177
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 21,711
|
r
|
SummaryMethods.R
|
#' @title Summary Methods
#' @description Summary methods for the \code{Bertrand}, \code{Auction2ndCap}, \code{Cournot}, and \code{Auction2ndLogit} classes.
#' Summarizes the effect of the merger, including price and revenue changes.
#' @name summary-methods
#' @docType methods
#'
#' @aliases summary,ANY-method
#' summary,AIDS-method
#' summary,Bertrand-method
#' summary,Auction2ndLogit-method
#' summary,Cournot-method
#' summary,Auction2ndCap-method
#' summary,VertBargBertLogit-method
#'
#' @param object an instance of class \code{Bertrand}, \code{Auction2ndCap}, \code{Cournot}, or \code{Auction2ndLogit}
#' @param revenue When TRUE, returns revenues, when FALSE returns quantities. Default is TRUE.
#' @param shares When TRUE, returns shares, when FALSE returns quantities (when possible). Default is TRUE.
#' @param levels When TRUE, returns changes in levels rather than percents and quantities rather than shares, when FALSE, returns
#' changes as a percent and shares rather than quantities. Default is FALSE.
#' @param parameters When TRUE, displays all demand parameters. Default is FALSE.
#' @param market When TRUE, displays aggregate information about the effect of a tariff.
#' When FALSE displays product-specific (or in the case of Cournot, plant-specific) effects.
#' Default is FALSE.
#' @param insideOnly When TRUE, rescales shares on inside goods to sum to 1. Default is FALSE.
#' @param digits Number of significant digits to report. Default is 2.
#' @param exAnte If \sQuote{exAnte} equals TRUE then the
#' \emph{ex ante} expected result for each firm is produced, while FALSE produces the
#' expected result conditional on each firm winning the auction. Default is FALSE.
#' @param ... Allows other objects to be passed to a \code{CV} method.
#'
#' @keywords methods
#' @include OwnershipMethods.R
NULL
#'@rdname summary-methods
#'@export
setMethod(
  f= "summary",
  signature= "Bertrand",
  # Prints (and invisibly returns) a table of pre/post-merger prices, output
  # and their changes; with market=TRUE, aggregate market-level statistics
  # (HHI change, harm/benefit) are reported instead.
  definition=function(object,revenue=TRUE,shares=TRUE,levels=FALSE,parameters=FALSE,market=FALSE,insideOnly = TRUE,digits=2,...){
    # Remember the console width; it is temporarily changed for printing and
    # restored afterwards.
    curWidth <- getOption("width")
    pricePre <- object@pricePre
    pricePost <- object@pricePost
    # AIDS-type objects store the price change directly; other demand systems
    # compute it from the pre/post equilibria.
    if(any(grepl("aids",class(object),ignore.case=TRUE))){
      priceDelta <- object@priceDelta
    }
    else{ priceDelta <- calcPriceDelta(object,levels=levels)}
    # Proportional changes are reported in percent.
    if(!levels) priceDelta <- priceDelta *100
    # Output columns: quantities/revenues when prices are available and
    # shares were not requested; otherwise (revenue or quantity) shares.
    if(!shares && !all(is.na(object@prices))){
      outPre <- calcQuantities(object,preMerger=TRUE)
      outPost <- calcQuantities(object,preMerger=FALSE)
      if(revenue){
        outPre <- pricePre*outPre
        outPost <- pricePost*outPost
      }
      sumlabels=paste("quantity",c("Pre","Post"),sep="")
    }
    else{
      if(!shares){warning("'shares' equals FALSE but 'calcQuantities' not defined. Reporting shares instead of quantities")}
      outPre <- calcShares(object,preMerger=TRUE,revenue=revenue) * 100
      outPost <- calcShares(object,preMerger=FALSE,revenue=revenue) * 100
      # Optionally rescale so the inside-good shares sum to 100.
      if(insideOnly){
        outPre <- outPre/sum(outPre)* 100
        outPost <- outPost/sum(outPost,na.rm=TRUE)* 100
      }
      sumlabels=paste("shares",c("Pre","Post"),sep="")
    }
    # Marginal-cost changes are in percent except for 2nd-score auction
    # classes, where they are kept in levels.
    mcDelta <- object@mcDelta
    if(any(!grepl("auction2nd",class(object),ignore.case=TRUE))){
      mcDelta <- mcDelta * 100
    }
    if(levels){outDelta <- outPost - outPre}
    else{outDelta <- (outPost/outPre - 1) * 100}
    # Flag products whose ownership changed with the merger ("*").
    isParty <- as.numeric(rowSums( abs(object@ownerPost - object@ownerPre))>0)
    isParty <- factor(isParty,levels=0:1,labels=c(" ","*"))
    results <- data.frame(pricePre=pricePre,pricePost=pricePost,
                          priceDelta=priceDelta,outputPre=outPre,
                          outputPost=outPost,outputDelta=outDelta)
    # Only show the marginal-cost column when some efficiency was specified.
    if(sum(abs(mcDelta))>0) results <- cbind(results,mcDelta=mcDelta)
    rownames(results) <- paste(isParty,object@labels)
    sharesPost <- calcShares(object,FALSE,revenue)
    if(market){
      # Aggregate summary: CMCR and CV may be undefined for some demand
      # systems, so failures are silently replaced by NA.
      thiscmcr <- thiscv <- NA_real_
      try(thiscmcr <- cmcr(object,levels=levels), silent=TRUE)
      try(thiscv <- CV(object),silent = TRUE)
      thispsdelta <- NA_real_
      try(thispsdelta <- sum(calcProducerSurplus(object,preMerger=FALSE) - calcProducerSurplus(object,preMerger=TRUE),na.rm=TRUE),silent=TRUE)
      isparty <- isParty == "*"
      # Output-weighted averages of the product-level effects plus welfare
      # aggregates.
      results <- with(results,
                      data.frame(
                        'HHI Change' = as.integer(HHI(outputPre/sum(outputPre),owner=object@ownerPost) - HHI(outputPre/sum(outputPre),owner=object@ownerPre)),
                        'Industry Price Change (%)' = sum(priceDelta * outputPost/sum(outputPost, na.rm = TRUE),na.rm=TRUE),
                        'Merging Party Price Change (%)'= sum(priceDelta[isparty] * outputPost[isparty], na.rm=TRUE) / sum(outputPost[isparty], na.rm=TRUE),
                        'Compensating Marginal Cost Reduction (%)' = sum(thiscmcr * outputPost[isparty]) / sum(outputPost[isparty], na.rm=TRUE),
                        'Consumer Harm ($)' = thiscv,
                        'Producer Benefit ($)' = thispsdelta,
                        'Difference ($)'= thiscv - thispsdelta,
                        check.names=FALSE
                      ))
      # When changes are in levels, relabel percent columns accordingly.
      if(levels){colnames(results) <- gsub("%","$/unit",colnames(results))}
    }
    colnames(results)[colnames(results) %in% c("outputPre","outputPost")] <- sumlabels
    cat("\nMerger simulation results under '",class(object),"' demand:\n\n",sep="")
    options("width"=ifelse(market,25,100)) # this width ensures that everything gets printed on the same line
    print(round(results,digits),digits=digits, row.names=ifelse(market, FALSE, TRUE))
    options("width"=curWidth) #restore to current width
    if(!market){
      results <- cbind(isParty, results)
      rownames(results) <- object@labels
      cat("\n\tNotes: '*' indicates merging parties' products.\n ")
      if(levels){cat("\tDeltas are level changes.\n")}
      else{cat("\tDeltas are percent changes.\n")}
      if(revenue){cat("\tOutput is based on revenues.\n")}
      else{cat("\tOutput is based on units sold.\n")}
    }
    cat("\n\n")
    if(parameters){
      # Optionally report the estimated demand parameters (slopes may be a
      # list for some demand systems) and intercepts when present.
      cat("\nDemand Parameter Estimates:\n\n")
      if(is.list(object@slopes)){
        print(lapply(object@slopes,round,digits=digits))
      }
      else{
        print(round(object@slopes,digits))
      }
      cat("\n\n")
      if(.hasSlot(object,"intercepts")){
        cat("\nIntercepts:\n\n")
        print(round(object@intercepts,digits))
        cat("\n\n")
      }
      if(.hasSlot(object,"constraint") && object@constraint){cat("\nNote: (non-singleton) nesting parameters are constrained to be equal")}
      cat("\n\n")
    }
    return(invisible(results))
  })
#'@rdname summary-methods
#'@export
setMethod(
f= "summary",
signature= "VertBargBertLogit",
definition=function(object,revenue=TRUE,
levels=FALSE,parameters=FALSE,
market=FALSE,insideOnly = TRUE,
digits=2,...){
# Summarize a vertical bargaining + downstream Bertrand-logit merger
# simulation: pre-/post-merger prices and shares for the upstream ('up')
# and downstream ('down') components and, when market=TRUE, aggregate
# welfare statistics. Prints the table and returns it invisibly.
# Save the console width so it can be restored after printing.
curWidth <- getOption("width")
up <- object@up
down <- object@down
priceUpPre <- up@pricePre
priceUpPost <- up@pricePost
priceDownPre <- down@pricePre
priceDownPost <- down@pricePost
# Price changes are reported in percent unless 'levels' (level changes) is TRUE.
priceDelta <- calcPriceDelta(object,levels=levels,market=market)
if(!levels) priceDelta <- lapply(priceDelta, function(x){x*100})
#if(!shares && !all(is.na(object@prices))){
# outPre <- calcQuantities(object,preMerger=TRUE)
# outPost <- calcQuantities(object,preMerger=FALSE)
# if(revenue){
# outPre <- pricePre*outPre
# outPost <- pricePost*outPost
# }
# sumlabels=paste("quantity",c("Pre","Post"),sep="")
#}
#else{
# if(!shares){warning("'shares' equals FALSE but 'calcQuantities' not defined.
#Reporting shares instead of quantities")}
# Shares in percent; optionally renormalized over the inside goods only.
outPre <- calcShares(object,preMerger=TRUE,revenue=revenue) * 100
outPost <- calcShares(object,preMerger=FALSE,revenue=revenue) * 100
if(insideOnly){
outPre <- outPre/sum(outPre)* 100
outPost <- outPost/sum(outPost,na.rm=TRUE)* 100
}
sumlabels=paste("shares",c("Pre","Post"),sep="")
#}
mcDeltaUp <- up@mcDelta * 100
mcDeltaDown <- down@mcDelta * 100
if(levels){outDelta <- outPost - outPre}
else{outDelta <- (outPost/outPre - 1) * 100}
# Identify merging-party products: horizontal merger downstream, horizontal
# merger upstream, or a vertical merger between the two levels.
isPartyHorzDown <- down@ownerPost %in% down@ownerPre &
down@ownerPost != down@ownerPre
if(any(isPartyHorzDown)){
isPartyHorzDown <- down@ownerPost %in% down@ownerPost[isPartyHorzDown]
}
isPartyHorzUp <- up@ownerPost %in% up@ownerPre &
up@ownerPost != up@ownerPre
if(any(isPartyHorzUp)){
isPartyHorzUp <- up@ownerPost %in% up@ownerPost[isPartyHorzUp]
}
isPartyVert <- unique(down@ownerPost[down@ownerPost == up@ownerPost &
down@ownerPre != up@ownerPre])
isPartyVert <- (down@ownerPost %in% isPartyVert) | (up@ownerPost %in% isPartyVert)
# ' '/'*' factor used to flag merging-party products in the printed table.
isParty <- factor(isPartyHorzDown | isPartyHorzUp |isPartyVert,levels=c(FALSE,TRUE),labels=c(" ","*"))
results <- data.frame(priceUpPre=priceUpPre,
priceUpPost=priceUpPost,
priceUpDelta=priceDelta$up,
priceDownPre=priceDownPre,
priceDownPost=priceDownPost,
priceDownDelta=priceDelta$down,
outputPre=outPre,
outputPost=outPost,outputDelta=outDelta)
# Only include marginal-cost-change columns when a change was supplied.
if(sum(abs(mcDeltaUp))>0) results <- cbind(results,mcDeltaUp=mcDeltaUp)
if(sum(abs(mcDeltaDown))>0) results <- cbind(results,mcDeltaDown=mcDeltaDown)
rownames(results) <- paste(isParty,down@labels)
sharesPost <- calcShares(object,FALSE,revenue)
if(market){
# Market-level summary: aggregate welfare statistics replace the
# per-product table. CV/producer-surplus calls may fail for some model
# configurations, hence the silent try()s with NA fallbacks.
thiscmcr <- thiscv <- NA_real_
#try(thiscmcr <- cmcr(object), silent=TRUE)
try(thiscv <- CV(object),silent = TRUE)
try(thispsPre <- calcProducerSurplus(object,TRUE),silent=TRUE)
try(thispsPost <- calcProducerSurplus(object,FALSE),silent=TRUE)
thispsdeltaUp <- sum(thispsPost$up - thispsPre$up,na.rm=TRUE)
thispsdeltaDown <- sum(thispsPost$down - thispsPre$down,na.rm=TRUE)
if(object@chain_level == "wholesaler") thispsdeltaDown <- -thispsdeltaUp
isparty <- isParty == "*"
hhiUp <- as.integer(HHI(outPre/sum(outPre),owner=up@ownerPost) - HHI(outPre/sum(outPre),owner=up@ownerPre))
hhiDown <- as.integer(HHI(outPre/sum(outPre),owner=down@ownerPost) - HHI(outPre/sum(outPre),owner=down@ownerPre))
# NOTE(review): 'isParty' is the ' '/'*' factor, so subscripting with it
# uses the factor's integer codes (1/2) rather than the merging-party
# positions; the logical 'isparty' computed just above looks like the
# intended subscript -- confirm before relying on 'partylabel'.
partylabel <- unique(down@ownerPost[isParty])
downOwnerPost <- down@ownerPost
downOwnerPost[isparty]=partylabel[1]
hhiVert <- as.integer(HHI(outPre/sum(outPre),owner=downOwnerPost) - HHI(outPre/sum(outPre),owner=down@ownerPre))
hhidelta <- ifelse(any(isPartyVert),hhiVert,max(hhiUp,hhiDown))
results <- with(results,
data.frame(
'HHI Change' = hhidelta,
'Up Price Change (%)' = priceDelta$up,
'Down Price Change (%)' = priceDelta$down,
#'Merging Party Price Change (%)'= sum(priceDelta[isparty] * outputPost[isparty], na.rm=TRUE) / sum(outputPost[isparty]),
#'Compensating Marginal Cost Reduction (%)' = sum(thiscmcr * outputPost[isparty]) / sum(outputPost[isparty]),
'Consumer Harm ($)' = thiscv,
'Up Producer Benefit ($)' = thispsdeltaUp,
'Down Producer Benefit ($)' = thispsdeltaDown,
'Difference ($)'= thiscv - thispsdeltaUp - thispsdeltaDown,
check.names=FALSE
))
if(levels){colnames(results) <- gsub("%","$/unit",colnames(results))}
}
colnames(results)[colnames(results) %in% c("outputPre","outputPost")] <- sumlabels
cat("\nMerger simulation results under '",class(object),"' demand:\n\n",sep="")
options("width"=ifelse(market,25,100)) # this width ensures that everything gets printed on the same line
print(round(results,digits),digits=digits, row.names=ifelse(market, FALSE, TRUE))
options("width"=curWidth) #restore to current width
if(!market){
results <- cbind(isParty, results)
rownames(results) <- down@labels
cat("\n\tNotes: '*' indicates merging parties' products.\n ")
if(levels){cat("\tDeltas are level changes.\n")}
else{cat("\tDeltas are percent changes.\n")}
if(revenue){cat("\tOutput is based on revenues.\n")}
else{cat("\tOutput is based on units sold.\n")}
}
cat("\n\n")
if(parameters){
print(getParms(object), digits=digits)
if(.hasSlot(object,"constraint") && object@constraint){cat("\nNote: (non-singleton) nesting parameters are constrained to be equal")}
cat("\n\n")
}
return(invisible(results))
})
#'@rdname summary-methods
#'@export
setMethod(
f= "summary",
signature= "Auction2ndCap",
definition=function(object,exAnte=FALSE,parameters=FALSE,market=TRUE,digits=2){
# Summarize a 2nd-price auction (capacity-constrained) merger simulation.
# exAnte=TRUE reports ex-ante prices/shares; market=TRUE replaces the
# per-firm table with aggregate statistics. Returns the table invisibly.
curWidth <- getOption("width")
pricePre <- calcPrices(object,preMerger=TRUE,exAnte=exAnte)
pricePost <- calcPrices(object,preMerger=FALSE,exAnte=exAnte)
priceDelta <- (pricePost/pricePre - 1) * 100
outPre <- calcShares(object,TRUE,exAnte=exAnte) * 100
outPost <- calcShares(object,FALSE,exAnte=exAnte) * 100
mcDelta <- object@mcDelta
outDelta <- (outPost/outPre - 1) * 100
# Build a ' '/'*' factor flagging firms whose owner changes with the merger.
isParty <- object@ownerPost != object@ownerPre
isParty <- c(object@ownerPre[isParty],object@ownerPost[isParty])
isParty <- factor(ifelse(object@ownerPre %in% isParty,1,0),levels=0:1,labels=c(" ","*"))
results <- data.frame(pricePre=pricePre,pricePost=pricePost,
priceDelta=priceDelta,sharesPre=outPre,
sharesPost=outPost,sharesDelta=outDelta)
if(sum(abs(mcDelta))>0) results <- cbind(results,mcDelta=mcDelta)
rownames(results) <- paste(isParty,object@labels)
if( market){
# NOTE(review): in this branch 'isParty' is still the ' '/'*' factor, so
# subscripts such as 'priceDelta[isParty]' below use the factor's integer
# codes (1/2) rather than selecting the merging parties; other methods in
# this file first convert with 'isParty == "*"'. Also 'thiscmcr' remains
# NA_real_ because the cmcr() call is commented out, so the CMCR row
# prints as NA. Confirm both before relying on these rows.
thiscmcr <- thiscv <- NA_real_
#try(thiscmcr <- cmcr(object), silent=TRUE)
try(thiscv <- CV(object),silent = TRUE)
try(thispsPre <- calcProducerSurplus(object,TRUE),silent=TRUE)
try(thispsPost <- calcProducerSurplus(object,FALSE),silent=TRUE)
thispsdelta <- sum(thispsPost - thispsPre,na.rm=TRUE)
results <- with(results,
data.frame(
'HHI Change' = as.integer(HHI(outPre/sum(outPre),owner=object@ownerPost) - HHI(outPre/sum(outPre),owner=object@ownerPre)),
'Industry Price Change (%)' = sum(priceDelta * outPost/sum(outPost, na.rm = TRUE),na.rm=TRUE),
'Merging Party Price Change (%)'= sum(priceDelta[isParty] * outPost[isParty], na.rm=TRUE) / sum(outPost[isParty], na.rm=TRUE),
'Compensating Marginal Cost Reduction (%)' = sum(thiscmcr * outPost[isParty]) / sum(outPost[isParty], na.rm=TRUE),
'Consumer Harm ($)' = thiscv,
'Producer Benefit ($)' = thispsdelta,
'Difference ($)'= thiscv - thispsdelta,
check.names=FALSE
))
}
cat("\nMerger simulation results under '",class(object),"':\n\n",sep="")
options("width"=100) # this width ensures that everything gets printed on the same line
print(round(results,digits),digits=digits)
options("width"=curWidth) #restore to current width
if(!market){
# Firm-level output: notes, buyer reserves, and expected price/cost changes.
cat("\n\tNotes: '*' indicates merging parties. Deltas are percent changes.\n")
if(exAnte){cat("\tEx Ante shares and prices are reported.\n")}
else{cat("\tShares and prices conditional on a firm winning are reported.\n")}
results <- cbind(isParty, results)
cat("\n\nPre-Merger Buyer Reserve:",round(object@reservePre,digits),sep="\t")
cat("\nPost-Merger Buyer Reserve:",round(object@reservePost,digits),sep="\t")
cat("\n\n% Change In Expected Price:",round((calcExpectedPrice(object,FALSE)-calcExpectedPrice(object,TRUE))/calcExpectedPrice(object,TRUE)*100,digits),sep="\t")
cat("\n")
cat("% Change In Buyer's Expected Cost:",round((calcBuyerExpectedCost(object,FALSE)-calcBuyerExpectedCost(object,TRUE))/calcBuyerExpectedCost(object,TRUE)*100,digits),sep="\t")
cat("\n\n")
rownames(results) <- object@labels
}
if(parameters){
cat("\nSupplier Cost Distribution Parameters:\n\n")
print(round(object@sellerCostParms,digits))
cat("\nBuyer Valuation:\n\n")
print(round(object@buyerValuation,digits))
cat("\n\n")
}
return(invisible(results))
})
#'@rdname summary-methods
#'@export
setMethod(
f= "summary",
signature= "Cournot",
definition=function(object,market=FALSE,revenue=FALSE,shares=FALSE,levels=FALSE,parameters=FALSE,digits=2,...){
# Summarize a Cournot merger simulation at plant/product level (or market
# level when market=TRUE). Output may be quantities, revenues, or shares.
# Prints the table plus optional CMCR/CV statistics; returns it invisibly.
if(market){nplants <- 1}
else{ nplants <- nrow(object@quantities) }
# Save console width and scientific-notation penalty; both are restored
# after printing.
curWidth <- getOption("width")
curSci <- getOption("scipen")
pricePre <- object@pricePre
pricePost <- object@pricePost
priceDelta <- calcPriceDelta(object,levels=levels)
if(!levels) priceDelta <- priceDelta *100
if(!shares){
outPre <- object@quantityPre
outPost <- object@quantityPost
sumlabels=paste("quantity",c("Pre","Post"),sep="")
if(revenue){
outPre <- t(pricePre*t(outPre))
outPost <- t(pricePost*t(outPost))
sumlabels=paste("revenue",c("Pre","Post"),sep="")
}
}
else{
# NOTE(review): this branch runs only when shares == TRUE, so the
# '!shares' warning below is unreachable dead code.
if(!shares){warning("'shares' equals FALSE but 'calcQuantities' not defined. Reporting shares instead of quantities")}
outPre <- calcShares(object,preMerger=TRUE,revenue=revenue) * 100
outPost <- calcShares(object,preMerger=FALSE,revenue=revenue) * 100
sumlabels=paste("shares",c("Pre","Post"),sep="")
}
if(market){
# Aggregate across plants; labels[[2]] holds product names.
outPre <- colSums(outPre,na.rm=TRUE)
outPost <- colSums(outPost,na.rm=TRUE)
ids <- data.frame(plant = 1 ,product= object@labels[[2]])
}
else{
ids <- expand.grid(plant=object@labels[[1]], product=object@labels[[2]])
}
out <- data.frame(product=ids$product,
plant=ids$plant,outPre=as.vector(outPre),
outPost = as.vector(outPost))
if(market) {out$plant <- NULL}
else{
# '*' marks plants whose ownership row changes with the merger.
out$isParty <- as.numeric(rowSums( abs(object@ownerPost - object@ownerPre))>0)
out$isParty <- factor(out$isParty,levels=0:1,labels=c(" ","*"))
}
mcDelta <- object@mcDelta * 100
if(levels){out$outDelta <- out$outPost - out$outPre}
else{out$outDelta <- (out$outPost/out$outPre - 1) * 100}
# Prices vary by product; repeat each across that product's plants.
out$pricePre <- rep(pricePre,each=nplants)
out$pricePost <- rep(pricePost,each=nplants)
out$priceDelta <- rep(priceDelta, each=nplants)
if(market){
results <- out[,c("product","pricePre","pricePost","priceDelta","outPre","outPost","outDelta" )]
}
else{
results <- out[, c("isParty","product","plant", "pricePre","pricePost","priceDelta","outPre","outPost","outDelta" )]
}
colnames(results)[colnames(results) %in% c("outPre","outPost")] <- sumlabels
if(!market && sum(abs(mcDelta))>0) results <- cbind(results,mcDelta=mcDelta)
sharesPost <- calcShares(object,FALSE,revenue)
cat("\nMerger simulation results under '",class(object),"' demand:\n\n",sep="")
options("width"=100) # this width ensures that everything gets printed on the same line
options("scipen"=999) # this width ensures that everything gets printed on the same line
print(format(results,digits=digits),row.names = FALSE)
options("width"=curWidth) #restore to current width
options("scipen"=curSci) #restore to current scientific notation
cat("\n\tNotes: '*' indicates merging parties' products.\n ")
if(levels){cat("\tDeltas are level changes.\n")}
else{cat("\tDeltas are percent changes.\n")}
if(revenue){cat("\tOutput is based on revenues.\n")}
else{cat("\tOutput is based on units sold.\n")}
##Only compute cmcr if cmcr method doesn't yield an error
thisCMCR <- tryCatch(cmcr(object),error=function(e) FALSE)
if(!is.logical(thisCMCR)){
cat("\n\nCMCR:\n\n")
cat(format(cmcr(object),digits=digits), fill=TRUE,labels=object@labels[[2]])
}
##Only compute upp if prices are supplied
#thisUPP <- tryCatch(upp(object),error=function(e) FALSE)
#if(!is.logical(thisUPP)){
# cat("\nShare-Weighted Pricing Pressure:",format(sum(thisUPP*sharesPost[isParty=="*"],na.rm=TRUE)/sum(sharesPost[isParty=="*"],na.rm=TRUE),digits),sep="\t")}
##Only compute CV if prices are supplied
thisCV <- tryCatch(CV(object,...),error=function(e) FALSE)
if(!is.logical(thisCV)){
cat("\n\nCompensating Variation (CV):\n\n")
cat(format(thisCV,digits=digits),fill=TRUE, labels=object@labels[[2]])}
cat("\n\n")
if(parameters){
cat("\nDemand Parameter Estimates:\n\n")
print(format(object@slopes,digits=digits))
cat("\n\n")
if(.hasSlot(object,"intercepts")){
cat("\nIntercepts:\n\n")
print(format(object@intercepts,digits=digits))
cat("\n\n")
}
}
return(invisible(results))
})
#'@rdname summary-methods
#'@export
setMethod(
  f = "summary",
  signature = "Auction2ndLogit",
  definition = function(object, levels = TRUE, revenue = FALSE, ...) {
    # Second-score logit auctions default to level (dollar) deltas and
    # unit-based output; everything else is handled by the parent method.
    callNextMethod(object, levels = levels, revenue = revenue, ...)
  }
)
|
e8b27c8cff267e781a21383414e3a6c0ed02441f
|
847fefdc9709126653a3de3387c8eb6fe7dfcc17
|
/Time Series in R/ARIMA Models in R/2.Fitting ARMA Model.R
|
b13f40cac75906e13dc6ef2262fcceda807e7a43
|
[] |
no_license
|
dylannguyen1/Exercises
|
4c9bb3383b6cbd2bfae591bbc4f118324b0aec14
|
5ae64fef6a5041f0c66725b6f45689896895ade9
|
refs/heads/master
| 2021-03-31T08:21:46.743234
| 2020-05-22T23:59:12
| 2020-05-22T23:59:12
| 248,091,541
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,108
|
r
|
2.Fitting ARMA Model.R
|
# Time-series exercises: simulating and fitting AR, MA, and ARMA models with
# stats::arima.sim() and astsa::sarima().
#
## AR(Autoregressive) and MA(Moving Average) model
#
x<- arima.sim(list(order = c(1,0,0),ar = -0.7),n = 200) # AR
# FIX: an MA(1) must be specified via the 'ma' component; passing 'ar' with
# order = c(0,0,1) makes arima.sim() stop with an inconsistent-order error.
y<- arima.sim(list(order = c(0,0,1),ma = -0.7),n = 200) # MA
par(mfrow=c(1,2))
plot(x, main ="AR(1)")
plot(y, main = "MA(1)")
library(astsa)
## Simulate AR(2) with mean 50
x<- arima.sim(list(order = c(2,0,0),ar = c(1.5,-0.75)),n = 200) +50
## Estimation for time series using ASTSA
x_fit <-sarima(x, p =2, d=0, q=0)
x_fit$ttable ## show t table
## Simulate MA(1) with mean 0
# FIX: same as above -- the MA coefficient belongs in 'ma', not 'ar'.
y<- arima.sim(list(order = c(0,0,1),ma = -0.7),n = 200)
y_fit <-sarima(y, p=0, d=0, q= 1)
y_fit$ttable
#
## Fitting an AR(1) model
#
# Generate 100 observations from the AR(1) model
x <- arima.sim(model = list(order = c(1, 0, 0), ar = .9), n = 100)
# Plot the generated data
plot(x)
# Plot the sample P/ACF pair
plot(acf2(x))
# Fit an AR(1) to the data and examine the t-table
x_fit <- sarima(x,p=1,d=0,q=0)
x_fit$ttable
#
## Fitting an AR(2) model
#
x <- arima.sim(model = list(order = c(2, 0, 0), ar = c(1.5, -.75)), n = 200)
plot(x)
# astsa is preloaded
# Plot x
plot(x)
# Plot the sample P/ACF of x
plot(acf2(x))
# Fit an AR(2) to the data and examine the t-table
x_fitted <- sarima(x,p=2,d=0,q=0)
x_fitted$ttable
#
## Fitting an MA(1) model
#
x <- arima.sim(model = list(order = c(0, 0, 1), ma = -.8), n = 100)
# Plot x
plot(x)
# Plot the sample P/ACF of x
plot(acf2(x))
# Fit an MA(1) to the data and examine the t-table
x_fit <- sarima(x,p=0,d=0,q=1)
x_fit$ttable
#
## ARMA model
#
x_arma <- arima.sim(list(order = c(1,0,1),
ar =0.9,
ma = -0.4),
n = 200)
plot(x_arma, main = "ARMA(1,1)")
x_arma_fit <- sarima(x_arma, p =1, d= 0, q=1)
x_arma_fit$ttable
#
##Fitting ARMA(2,1) model
#
# astsa is preloaded
# Plot x
plot(x)
# Plot the sample P/ACF of x
plot(acf2(x))
# Fit an ARMA(2,1) to the data and examine the t-table
x_arma_fit <- sarima(x,p =2,d=0,q=1)
dl_varve <- diff(log(varve))
acf(dl_varve)
pacf(dl_varve)
##
### Model choice
##
gnpgr <-diff(log(gnp))
x_ar<-sarima(gnpgr, p =1,d=0,q=0) ## fitting AR(1)
x_ar$AIC; x_ar$BIC
x_ma_2 <- sarima(gnpgr, p=0,d=0,q=2) ## fitting MA(2)
x_ma_2$AIC; x_ma_2$BIC
#
## Compare AIC and BIC between MA and ARMA
#
# Fit an MA(1) to dl_varve.
ma_1_fit <- sarima(dl_varve,p=0,d=0,q=1)
ma_1_fit$AIC; ma_1_fit$BIC
# Fit an MA(2) to dl_varve. Improvement?
ma_2_fit <- sarima(dl_varve,p=0,d=0,q=2)
ma_2_fit$AIC; ma_2_fit$BIC
# Fit an ARMA(1,1) to dl_varve. Improvement?
arma_fit<-sarima(dl_varve,p=1,d=0,q=1)
arma_fit$AIC; arma_fit$BIC
#
##Residual analysis to see MA or ARMA fit better
#
# Fit an MA(1) to dl_varve. Examine the residuals
sarima(dl_varve,0,0,1)
# Fit an ARMA(1,1) to dl_varve. Examine the residuals
sarima(dl_varve,1,0,1)
#
## ARMA get in, using ARMA in oil data
#
data(oil); attach(oil)
# Calculate approximate oil returns
oil_returns <- diff(log(oil))
# Plot oil_returns. Notice the outliers.
plot(oil_returns)
# Plot the P/ACF pair for oil_returns
plot(acf2(oil_returns))
# Assuming both P/ACF are tailing, fit a model to oil_returns
sarima(oil_returns,1,0,1)
|
d1f3754d51eafbc8fb070f5008d5a5239427b7f6
|
f6512598805a4b1e9885495fe76f5525f959392b
|
/Random_Forest/Randomforest.R
|
b0fed561df37a86b264c22bf5ba8ff2e1998aa4a
|
[] |
no_license
|
roshankr/DataScience
|
0ef1098438b157c78cd58ccf9cdd47703946f311
|
958366202db9c241bdaf4be81f47f2ed5d8362ae
|
refs/heads/master
| 2021-01-10T04:26:20.469942
| 2015-09-25T21:03:40
| 2015-09-25T21:03:40
| 43,172,720
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,358
|
r
|
Randomforest.R
|
library(randomForest)
library(randomForestSRC)
library(caret)
library(datasets)
#Implementation of Random Forest algorithm using bagging and
#random subspace methods. Another key concept is Out-of-bag error (OOB)
# look at the dataset
data(iris)
#assignment <-write.csv(iris,"iris.csv")
# visually look at the dataset
#qplot(Petal.Length,Petal.Width,colour=Species,data=iris)
#After loading the library, we will divide the population in two sets:
#Training and validation.
# NOTE(review): there is no set.seed() call, so the 50/50 stratified split
# below (and the fitted forest) differs between runs -- confirm whether
# reproducibility matters here.
train <- createDataPartition(y=iris$Species,p=0.5,list=FALSE)
training <- iris[train,]
Validation <- iris[-train,]
#You can use the following code to generate a random forest model on the training dataset.
#modfit <- train(Species~.,method="rf",data=training)
# Fit on the four feature columns (column 5 is Species, the response);
# 'prox' relies on R's partial matching of randomForest's 'proximity' arg.
modfit <- randomForest(training[,-5], training[,5], ntree=10000,prox=TRUE)
# In-sample confusion matrix (training accuracy).
pred <- predict(modfit,training)
print(table(pred,training$Species))
#Having built such an accurate model, we will like to make sure that we are not over fitting the model on the training data. This is done by validating the same model on an independent data set. We use the following code to do the same :
pred.val<-predict(modfit,newdata=Validation)
print(table(pred.val,Validation$Species))
# predict a single val
#ValFinal = Validation[42,]
#pred.val<-predict(modfit,newdata=ValFinal)
#print(table(pred.val,ValFinal$Species))
|
f85f3a345068596a9b34475fd0f8063a24c936b1
|
fa782fbc6cef7e575fcd137a41eecbd6886812d1
|
/R/rowWeightedMeans.R
|
abdde52f56222d35ae7defbf793140dc470cfeea
|
[] |
no_license
|
kaushikg06/matrixStats
|
6f5ae3032cfeb30ade1d88d6b53e5effae9770bd
|
87d3a6eeefcd37571214af7f1700ca98be7b9703
|
refs/heads/master
| 2021-01-21T06:47:28.960648
| 2016-10-09T15:35:30
| 2016-10-09T15:35:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,122
|
r
|
rowWeightedMeans.R
|
###########################################################################/**
# @RdocFunction rowWeightedMeans
# @alias colWeightedMeans
#
# @title "Calculates the weighted means for each row (column) in a matrix"
#
# \description{
# @get "title".
# }
#
# \usage{
# @usage rowWeightedMeans
# @usage colWeightedMeans
# }
#
# \arguments{
# \item{x}{A @numeric NxK @matrix.}
# \item{w}{A @numeric @vector of length K (N).}
# \item{rows, cols}{A @vector indicating subset of rows (and/or columns)
# to operate over. If @NULL, no subsetting is done.}
# \item{na.rm}{If @TRUE, missing values are excluded from the calculation,
# otherwise not.}
# \item{...}{Not used.}
# }
#
# \value{
# Returns a @numeric @vector of length N (K).
# }
#
# \details{
# The implementations of these methods are optimized for both speed
# and memory.
# If no weights are given, the corresponding
# \code{rowMeans()}/\code{colMeans()} is used.
# }
#
# @examples "../incl/rowWeightedMeans.Rex"
#
# @author "HB"
#
# \seealso{
# See \code{rowMeans()} and \code{colMeans()} in @see "base::colSums"
# for non-weighted means.
# See also @see "stats::weighted.mean".
# }
#
# @keyword array
# @keyword iteration
# @keyword robust
# @keyword univar
#*/###########################################################################
rowWeightedMeans <- function(x, w=NULL, rows=NULL, cols=NULL, na.rm=FALSE, ...) {
  # Weighted mean of each row of 'x' using one weight per column ('w').
  # With w=NULL this reduces to rowMeans(). 'anyMissing' and 't_tx_OP_y'
  # are matrixStats-internal helpers (not base R).
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Validate arguments
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Argument 'w':
  hasWeights <- !is.null(w);
  if (hasWeights) {
    n <- ncol(x);
    if (length(w) != n) {
      stop("The length of argument 'w' is does not match the number of column in 'x': ", length(w), " != ", n);
    }
    if (!is.numeric(w)) {
      stop("Argument 'w' is not numeric: ", mode(w));
    }
    if (any(!is.na(w) & w < 0)) {
      stop("Argument 'w' has negative weights.");
    }
  }
  # Apply subset on x
  if (!is.null(rows) && !is.null(cols)) x <- x[rows,cols,drop=FALSE]
  else if (!is.null(rows)) x <- x[rows,,drop=FALSE]
  else if (!is.null(cols)) x <- x[,cols,drop=FALSE]
  # Apply subset on w
  if (!is.null(w) && !is.null(cols)) w <- w[cols]
  if (hasWeights) {
    # Allocate results
    m <- nrow(x);
    if (m == 0L)
      return(double(0L));
    # Drop entries with zero weight? ...but keep NAs
    idxs <- which(is.na(w) | w != 0);
    nw <- length(idxs);
    if (nw == 0L) {
      # Every weight is exactly zero: the weighted mean is undefined (NaN).
      return(rep(NaN, times=m));
    } else if (nw < n) {
      w <- w[idxs];
      x <- x[,idxs,drop=FALSE];
    }
    idxs <- NULL; # Not needed anymore
    # Has missing values?
    if (na.rm) {
      # Really?
      na.rm <- anyMissing(x);
    }
    if (na.rm) {
      # Indices of missing values
      nas <- which(is.na(x));
      # Weight matrix
      W <- matrix(w, nrow=nrow(x), ncol=ncol(x), byrow=TRUE);
      w <- NULL; # Not needed anymore
      W[nas] <- NA;
      wS <- rowSums(W, na.rm=TRUE);
      # Standarized weights summing to one w/out missing values
      W[nas] <- 0;
      # 'wS' has one element per row; column-major recycling divides each
      # row of 'W' by that row's weight sum.
      W <- W / wS;
      x[nas] <- 0;
      nas <- NULL; # Not needed anymore
      x <- W * x;
      W <- NULL; # Not needed anymore
    } else {
      wS <- sum(w);
      # Standardize weights summing to one.
      w <- w / wS;
      # Weighted values
      ## SLOW: for (rr in 1:m) x[rr,] <- w * x[rr,,drop=TRUE];
      ## FAST:
      x <- t_tx_OP_y(x, w, OP="*", na.rm=FALSE)
      w <- NULL; # Not needed anymore
    }
    # Here we know there are no missing value in the new 'x'
    res <- rowSums(x, na.rm=FALSE);
  } else {
    res <- rowMeans(x, na.rm=na.rm);
  }
  res;
} # rowWeightedMeans()
colWeightedMeans <- function(x, w=NULL, rows=NULL, cols=NULL, na.rm=FALSE, ...) {
  # Weighted mean of each column of 'x' using one weight per row ('w').
  # With w=NULL this reduces to colMeans(). Mirrors rowWeightedMeans().
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Validate arguments
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Argument 'w':
  hasWeights <- !is.null(w);
  if (hasWeights) {
    n <- nrow(x);
    if (length(w) != n) {
      stop("The length of argument 'w' is does not match the number of rows in 'x': ", length(w), " != ", n);
    }
    if (!is.numeric(w)) {
      stop("Argument 'w' is not numeric: ", mode(w));
    }
    if (any(!is.na(w) & w < 0)) {
      stop("Argument 'w' has negative weights.");
    }
  }
  # Apply subset on x
  if (!is.null(rows) && !is.null(cols)) x <- x[rows,cols,drop=FALSE]
  else if (!is.null(rows)) x <- x[rows,,drop=FALSE]
  else if (!is.null(cols)) x <- x[,cols,drop=FALSE]
  # Apply subset on w
  if (!is.null(w) && !is.null(rows)) w <- w[rows]
  if (hasWeights) {
    # Allocate results
    m <- ncol(x);
    if (m == 0L)
      return(double(0L));
    # Drop entries with zero weight? ...but keep NAs
    idxs <- which(is.na(w) | w != 0);
    nw <- length(idxs);
    if (nw == 0L) {
      # Every weight is exactly zero: the weighted mean is undefined (NaN).
      return(rep(NaN, times=m));
    } else if (nw < n) {
      w <- w[idxs];
      x <- x[idxs,,drop=FALSE];
    }
    idxs <- NULL; # Not needed anymore
    # Has missing values?
    if (na.rm) {
      # Really?
      na.rm <- anyMissing(x);
    }
    if (na.rm) {
      # Indices of missing values
      nas <- which(is.na(x));
      # Weight matrix
      W <- matrix(w, nrow=nrow(x), ncol=ncol(x), byrow=FALSE);
      w <- NULL; # Not needed anymore
      W[nas] <- NA;
      wS <- colSums(W, na.rm=TRUE);
      # Standarized weights summing to one w/out missing values
      W[nas] <- 0;
      # Divide each column's weights by that column's sum. Unlike the row
      # version, plain 'W / wS' recycling would pair 'wS' with rows here,
      # hence the explicit per-column loop.
      for (cc in 1:m) {
        W[,cc] <- W[,cc,drop=TRUE] / wS[cc];
      }
      x[nas] <- 0;
      nas <- NULL; # Not needed anymore
      x <- W * x;
      W <- NULL; # Not needed anymore
    } else {
      wS <- sum(w);
      # Standardize weights summing to one.
      w <- w / wS;
      # Weighted values; 'w' (length nrow) recycles down each column.
      x <- w*x;
      ## SLIGHTLY SLOWER: x <- x_OP_y(x, w, OP="*");
      w <- NULL; # Not needed anymore
    }
    # Here we know there are no missing value in the new 'x'
    res <- colSums(x, na.rm=FALSE);
  } else {
    res <- colMeans(x, na.rm=na.rm);
  }
  res;
} # colWeightedMeans()
##############################################################################
# HISTORY:
# 2015-05-31 [DJ]
# o Supported subsetted computation.
# 2014-12-19 [HB]
# o CLEANUP: Made col- and rowWeightedMeans() plain R functions.
# 2013-11-29
# o BUG FIX: (col|row)WeightedMeans() with all zero weights gave an
# invalid result.
# 2013-11-23
# o MEMORY: Now (col|row)WeightedMeans() clean out allocated objects sooner.
# 2010-02-03
# o BUG FIX: (col|row)WeightedMeans(..., na.rm=TRUE) would incorrectly treat
# missing values as zeros. Thanks Pierre Neuvial for reporting this.
# 2008-02-01
# o Added special implementation for column version.
# o Added Rdoc comments.
# o Created.
##############################################################################
|
203af99b84232ca34c898c5dd63f99e934c53bae
|
cf9109f6e0a2c494bdc2f1f027ee2cd60cefc30f
|
/Spreading HAI data.R
|
03cf44d542ada8bfe6e8ac5707095f2ae8923a5b
|
[] |
no_license
|
ACC1029/MATH-571-HAI-Medicare
|
da717f9f2cae53038f87df44c620adcabd3c6172
|
eeb00d846af5c0501490f49615a1bb59787687c5
|
refs/heads/master
| 2021-04-28T01:38:14.108330
| 2018-04-29T04:49:06
| 2018-04-29T04:49:18
| 122,284,276
| 1
| 1
| null | 2018-04-28T20:26:47
| 2018-02-21T02:38:31
|
R
|
UTF-8
|
R
| false
| false
| 2,921
|
r
|
Spreading HAI data.R
|
#----------Testing example----------#
# NOTE(review): tidyr's gather()/spread()/unite() used throughout are
# superseded by pivot_longer()/pivot_wider(); behavior here is unchanged.
df <- data.frame(month=rep(1:3,2),
student=rep(c("Amy", "Bob"), each=3),
A=c(9, 7, 6, 8, 6, 9),
B=c(6, 7, 8, 5, 6, 7))
df %>%
gather(variable, value, -(month:student))
df %>%
gather(variable, value, -(month:student)) %>%
unite(temp, student, variable)
df %>%
gather(variable, value, -(month:student)) %>%
unite(temp, student, variable) %>%
spread(temp, value)
#-------------------------------------#
# These are the steps I took to spread the data out so that we'd get, per provider, a row for each
# HAI type, which means 6 rows per provider. There's probably a more efficient way of doing this.
# I only made new data frames at each step so that I'd be able to view them better. Only the last is
# what we need.
# Step 1: Make new identifier for provider-HAI type that we can use to get a row for each type per provider
hai_reduced %>%
filter(provider_id == 10005) %>%
unite(provider_hai, provider_id, Measure)
# Step 2: Gather
hai_reduced_gather <- hai_reduced %>%
filter(provider_id == 10005) %>%
unite(provider_hai, provider_id, Measure) %>%
select(provider_hai, Type, score, compared_to_national) %>%
gather(variable, value, -(provider_hai:Type))
# Step 3: Unite
hai_reduced_unite <- hai_reduced %>%
filter(provider_id == 10005) %>%
unite(provider_hai, provider_id, Measure) %>%
select(provider_hai, Type, score, compared_to_national) %>%
gather(variable, value, -(provider_hai:Type)) %>%
unite(temp, Type, variable)
# Step 4: Spread
hai_reduced_spread <- hai_reduced %>%
# filter(provider_id == 10005) %>%
unite(provider_hai, provider_id, Measure) %>%
select(provider_hai, Type, score, compared_to_national) %>%
gather(variable, value, -(provider_hai:Type)) %>%
unite(temp, Type, variable) %>%
spread(temp, value)
#Step 5: Break out provider-type again and drop provider-type column
# NOTE(review): the [c(TRUE,FALSE)] subscript below keeps every other '_'
# position across ALL rows at once, so it assumes every provider_hai value
# contains exactly two underscores -- verify against all Measure values.
hai_reduced_spread <- hai_reduced_spread %>%
mutate("provider_id"= as.integer(str_sub(provider_hai, 1, unlist(lapply(strsplit(provider_hai, ''),
function(provider_hai) which(provider_hai == '_')))[c(TRUE,FALSE)]-1)),
"Measure" = str_sub(provider_hai, unlist(lapply(strsplit(provider_hai, ''),
function(provider_hai) which(provider_hai == '_')))[c(TRUE,FALSE)]+1,
str_length(provider_hai)),
CI_LOWER_score = as.double(CI_LOWER_score),
CI_UPPER_score = as.double(CI_UPPER_score),
DOPC_DAYS_score = as.integer(DOPC_DAYS_score),
ELIGCASES_score = as.double(ELIGCASES_score),
NUMERATOR_score = as.integer(NUMERATOR_score),
SIR_score = as.double(SIR_score)) %>%
select(provider_id, Measure, everything(), -provider_hai) %>%
arrange(provider_id)
hai_reduced_spread
|
022413a4c90a500f9a4060a8814633fd2d24762d
|
e5d5a20e0986b6a23e9e2ab4ca1752a4e66a9f9c
|
/plot1.R
|
a331896139616b62c037fa322e93f08f13afbfd2
|
[] |
no_license
|
ppats15/ExData_Plotting1
|
33f70e7525b5717fd3eb6c6a61b71f42fc8aee8b
|
ac43c26bdeafa9fda0ca654179ec1bbe393b4576
|
refs/heads/master
| 2021-01-14T14:16:18.450351
| 2015-10-10T03:45:03
| 2015-10-10T03:45:03
| 43,987,441
| 0
| 0
| null | 2015-10-10T01:10:14
| 2015-10-10T01:10:13
| null |
UTF-8
|
R
| false
| false
| 1,042
|
r
|
plot1.R
|
##################################################################################################
# Project 1: Exploratory Data Analysis - Plotting (Plot1)
#
#
##################################################################################################
#### Load all necessary libraries
##################################################################################################
# Step 0: Necessary evil. Setup environment, download data files etc.
##################################################################################################
## Setup environment. Using local directory to make code portable different OS
setwd("./")
#### Source common functions
# loadHPCData.R (project file, same directory) must define loadHPCData().
source("loadHPCData.R")
hpcData <- loadHPCData()
# Open a 480x480 PNG device; everything plotted until dev.off() goes to it.
png("plot1.png", width = 480, height = 480, units = "px", bg = "transparent")
message("Global Active Power Histogram")
hist(hpcData$Global_active_power,
col="red",main="Global Active Power",
xlab = "Global Active Power (kilowatts)", ylab="Frequency")
# Close the device to flush plot1.png to disk.
dev.off()
print("Plotting 1st Histogram")
fe72bb0dc3581ac4288a5ca9423fc06658f26fc4
|
944afac4093d8718922cfd87502484872e54d0c2
|
/predictVolume.R
|
2e2327e0450a56b7582fba5a6d092f808860fef8
|
[] |
no_license
|
cejecj/Predict-Sales-Volume
|
36f8e444990841440f321362107356e8aa7f6c23
|
c926fcae6eb39496f35dc0fee502395823caeb7f
|
refs/heads/master
| 2021-01-09T19:26:20.125222
| 2020-10-17T18:16:01
| 2020-10-17T18:16:01
| 242,430,197
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,424
|
r
|
predictVolume.R
|
# import libraries
library(caret)
library(ggplot2)
library(corrplot)
library(dplyr)
# load dataset
existingProducts <- read.csv('existingproductattributes2017.csv')
newProducts <- read.csv('newproductattributes2017.csv')
# explore data and convert data types
str(existingProducts)
str(newProducts)
existingProducts$Volume <- as.numeric(existingProducts$Volume)
newProducts$Volume <- as.numeric(newProducts$Volume)
ggplot(existingProducts, aes(x = ProductType, fill = ProductNum)) +
theme_bw() +
geom_bar() +
labs(x = 'Product type',
y = 'Number',
title = 'Products Reviewed')
# convert to dummy vars (one-hot encode every factor column)
existingProducts_new <- dummyVars(' ~ .', data = existingProducts)
readyData <- data.frame(predict(existingProducts_new, newdata = existingProducts))
newProducts_new <- dummyVars(' ~ .', data = newProducts)
readyData_new <- data.frame(predict(newProducts_new, newdata = newProducts))
# review new dataframe
str(readyData)
summary(readyData)
readyData$BestSellersRank <- NULL
# correlation matrix
corrplot(cor(readyData))
# feature engineering
# NOTE(review): this call is identical to the corrplot() above -- presumably
# a placeholder where feature engineering was planned; confirm intent.
corrplot(cor(readyData))
# regression model -- 70/30 random split, seeded for reproducibility
set.seed(123)
trainSize <- round(nrow(readyData) * .7)
testSize <- nrow(readyData) - trainSize
trainingIndices <- sample(seq_len(nrow(readyData)), size = trainSize)
trainingSet <- readyData[trainingIndices,]
testingSet <- readyData[-trainingIndices,]
model <- lm(Volume ~ ., trainingSet)
summary(model)
# fit control: 10-fold cross-validation repeated 3 times, shared by all models
fitControl <- trainControl(method = 'repeatedcv', number = 10, repeats = 3)
# random forest model
rfGrid <- expand.grid(mtry = c(3))
system.time(rfFit <- train(Volume ~ ., data = trainingSet, method = 'rf', trControl = fitControl, tuneGrid = rfGrid))
rfPrediction <- predict(rfFit, newdata = testingSet)
rfFit
# gradient boosted
system.time(gbmFit <- train(Volume ~ ., data = trainingSet, method = 'gbm', trControl = fitControl, verbose = FALSE))
gbmPrediction <- predict(gbmFit, newdata = testingSet)
gbmFit
# support vector machines
system.time(svmFit <- train(Volume ~ ., data = trainingSet, method = 'svmLinear2',
trControl = fitControl, tuneLength = 10))
svmPrediction <- predict(svmFit, newdata = testingSet)
svmFit
# make final predictions with the random forest model on the new products
rfPredictionsNew <- predict(rfFit, newdata = readyData_new)
rfPredictionsNew
newProducts$Volume <- rfPredictionsNew
newProducts %>% View()
|
a1796ef034de8dd9baf92660a9640b7e043de82b
|
8c6689aab5e3312b92067b816108ad8e5fec986e
|
/R/EXP3/Function.R
|
2a22cf2341c7d746f194c5d263ec206a54183890
|
[] |
no_license
|
ProfessionalJK/ITPracticals
|
0eed690d0a0f4a8c2bcff8e7b9397af2500c48b6
|
127a585e69264267fb2d34b323189d6ca16a16dd
|
refs/heads/master
| 2022-12-11T04:41:27.771098
| 2020-08-26T13:58:52
| 2020-08-26T13:58:52
| 290,464,421
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 64
|
r
|
Function.R
|
# Compute the area of a rectangle given its length and width.
area <- function(l, w) {
  l * w
}
print(area(3, 5))
|
8cf2414191f43f33a93878a0a6e56fa3bb015b20
|
e8f0e923548e587ac05f267b1cf4e08f151438e9
|
/R/angular.r
|
3d22bf0626d16da1f800f86ca6c8b89cfcaafd34
|
[] |
no_license
|
minghao2016/aomisc
|
7059a9e00c6d4ed615255be6b7aac9e63dc64f5f
|
9b6b75ed23dec4042b1bb115aaae8d695593f477
|
refs/heads/master
| 2023-01-01T13:19:13.646065
| 2020-10-20T12:58:38
| 2020-10-20T12:58:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 109
|
r
|
angular.r
|
# Arcsine square-root (angular) transformation of a percentage,
# returned in degrees.  Commonly used to stabilize the variance of
# proportion data before analysis.
angularTransform <- function(percentage)
{
  theta <- asin(sqrt(percentage / 100))
  theta * (180 / pi)
}
|
dce7e399bd545a37332534300b908ba840034242
|
f55a52069c7cda3a263f26a2fd5f47a6ab7830da
|
/tests/testthat/upload.twitter.com/1.1/media/metadata/create.json-e72d3c-POST.R
|
3676eb536ab41d4970b27bedc646205e8e5335a1
|
[
"Apache-2.0"
] |
permissive
|
jonkeane/phototweetr
|
ddb64daff03118b92a2f5fd825a0ec9529676ae8
|
9828470bc2c8ce633aa66ce47a6483eb5ed2913b
|
refs/heads/main
| 2023-01-13T18:10:50.560772
| 2023-01-07T17:30:22
| 2023-01-07T17:30:22
| 244,178,804
| 0
| 0
|
NOASSERTION
| 2021-06-13T16:45:03
| 2020-03-01T15:59:25
|
R
|
UTF-8
|
R
| false
| false
| 4,462
|
r
|
create.json-e72d3c-POST.R
|
structure(list(url = "https://upload.twitter.com/1.1/media/metadata/create.json",
status_code = 200L, headers = structure(list(`cache-control` = "no-cache, no-store, must-revalidate, pre-check=0, post-check=0",
`content-length` = "0", `content-security-policy` = "default-src 'self'; connect-src 'self'; font-src 'self' https://*.twimg.com https://twitter.com https://ton.twitter.com data:; frame-src 'self' https://*.twimg.com https://twitter.com https://ton.twitter.com; img-src 'self' https://*.twimg.com https://twitter.com https://ton.twitter.com data:; media-src 'self' https://*.twimg.com https://twitter.com https://ton.twitter.com; object-src 'none'; script-src 'self' https://*.twimg.com https://twitter.com https://ton.twitter.com; style-src 'self' https://*.twimg.com https://twitter.com https://ton.twitter.com; report-uri https://twitter.com/i/csp_report?a=OBZG6ZTJNRSWE2LSMQ%3D%3D%3D%3D%3D%3D&ro=false;",
`content-type` = "text/html;charset=utf-8", date = "Sun, 13 Jun 2021 15:55:11 GMT",
expires = "Tue, 31 Mar 1981 05:00:00 GMT", `last-modified` = "Sun, 13 Jun 2021 15:55:11 GMT",
pragma = "no-cache", server = "tsa_b", status = "200 OK",
`strict-transport-security` = "max-age=631138519", `timing-allow-origin` = "https://twitter.com, https://mobile.twitter.com",
vary = "Origin", `x-access-level` = "read-write", `x-connection-hash` = "335b724d638fd53bf7959ad0450860e3",
`x-frame-options` = "SAMEORIGIN", `x-rate-limit-limit` = "2000",
`x-rate-limit-remaining` = "1997", `x-rate-limit-reset` = "1623602338",
`x-response-time` = "17", `x-transaction` = "00ad205e0054dc86",
`x-tsa-request-body-time` = "1", `x-twitter-response-tags` = "BouncerCompliant",
`x-xss-protection` = "1; mode=block"), class = c("insensitive",
"list")), all_headers = list(list(status = 200L, version = "HTTP/2",
headers = structure(list(`cache-control` = "no-cache, no-store, must-revalidate, pre-check=0, post-check=0",
`content-length` = "0", `content-security-policy` = "default-src 'self'; connect-src 'self'; font-src 'self' https://*.twimg.com https://twitter.com https://ton.twitter.com data:; frame-src 'self' https://*.twimg.com https://twitter.com https://ton.twitter.com; img-src 'self' https://*.twimg.com https://twitter.com https://ton.twitter.com data:; media-src 'self' https://*.twimg.com https://twitter.com https://ton.twitter.com; object-src 'none'; script-src 'self' https://*.twimg.com https://twitter.com https://ton.twitter.com; style-src 'self' https://*.twimg.com https://twitter.com https://ton.twitter.com; report-uri https://twitter.com/i/csp_report?a=OBZG6ZTJNRSWE2LSMQ%3D%3D%3D%3D%3D%3D&ro=false;",
`content-type` = "text/html;charset=utf-8", date = "Sun, 13 Jun 2021 15:55:11 GMT",
expires = "Tue, 31 Mar 1981 05:00:00 GMT", `last-modified` = "Sun, 13 Jun 2021 15:55:11 GMT",
pragma = "no-cache", server = "tsa_b", status = "200 OK",
`strict-transport-security` = "max-age=631138519",
`timing-allow-origin` = "https://twitter.com, https://mobile.twitter.com",
vary = "Origin", `x-access-level` = "read-write",
`x-connection-hash` = "335b724d638fd53bf7959ad0450860e3",
`x-frame-options` = "SAMEORIGIN", `x-rate-limit-limit` = "2000",
`x-rate-limit-remaining` = "1997", `x-rate-limit-reset` = "1623602338",
`x-response-time` = "17", `x-transaction` = "00ad205e0054dc86",
`x-tsa-request-body-time` = "1", `x-twitter-response-tags` = "BouncerCompliant",
`x-xss-protection` = "1; mode=block"), class = c("insensitive",
"list")))), cookies = structure(list(domain = c(".twitter.com",
"upload.twitter.com", ".twitter.com"), flag = c(TRUE, FALSE,
TRUE), path = c("/", "/", "/"), secure = c(TRUE, FALSE, TRUE
), expiration = structure(c(1686671483, Inf, 1686671483), class = c("POSIXct",
"POSIXt")), name = c("personalization_id", "lang", "guest_id"
), value = c("REDACTED", "REDACTED", "REDACTED")), row.names = c(NA,
-3L), class = "data.frame"), content = charToRaw(""), date = structure(1623599711, class = c("POSIXct",
"POSIXt"), tzone = "GMT"), times = c(redirect = 0, namelookup = 0.000044,
connect = 0.000046, pretransfer = 0.000164, starttransfer = 0.000184,
total = 0.057233)), class = "response")
|
7e4574c2150936625576c6bfde3f0be0580e699e
|
4adbd5b00d14eba6b814f0d52bddbec57d623fed
|
/scripts/figures/BW_coloc_spider.R
|
b6b4bb1695966c30e2abdc4ad7014ce08667ea30
|
[
"MIT"
] |
permissive
|
PerinatalLab/metaGWAS
|
809f6ebaec7a6321f3fc6b4ed11a8f732c71a34d
|
494ef021b0d17566389f6f3716d1e09f15e50383
|
refs/heads/master
| 2023-04-08T08:50:31.042826
| 2022-11-10T21:14:33
| 2022-11-10T21:14:33
| 267,803,075
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,548
|
r
|
BW_coloc_spider.R
|
library(scales)
library("dplyr")
library("knitr")
library("tidyr")
library(cowplot)
library(ggrepel)
library("data.table")
library('showtext')
library(tidyverse)
library(fmsb)
colorBlindBlack8= c("#000000", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
font_add("arial", "arial.ttf", bold= 'arial_bold.ttf')
showtext_opts(dpi = 300)
showtext_auto(enable = TRUE)
flist= snakemake@input #list.files('/mnt/hdd/common/pol/metaGWAS/colocalization/GAraw/', 'pph_BW_', full.names=T)
# Read one colocalization posterior-probability ("pph_<name>.txt") file
# and reshape it for merging across traits.
#
# x: path to a pph file; the trait name is recovered from the 9th path
#    component (assumes a fixed directory depth -- TODO confirm).
# Returns a data frame with columns <name>, <name>_sloc and locus, where
# <name>_sloc is PP.H3 + PP.H4.
funk= function(x){
d= fread(x)
# Keep only loci for which the five posteriors were actually computed.
d= filter(d, PP.H1.abf + PP.H2.abf + PP.H3.abf + PP.H4.abf + PP.H0.abf> 0)
# Trait name from the file name, e.g. 'pph_BW_maternal_effect.txt'.
fname= gsub('.txt', '', gsub('pph_', '', unlist(strsplit(x, '/'))[9]))
d= separate(d, locus, into= c('chrom', 'locus'), sep= '_')
# Combined posterior mass of H3 (two distinct causal variants) and
# H4 (one shared causal variant).
d$sloc= d$PP.H4.abf + d$PP.H3.abf
d= select(d, PP.H4.abf, sloc, locus)
names(d)= c(fname, paste0(fname, '_sloc'), 'locus')
return(d)
}
d= lapply(flist, funk)
d= reduce(d, full_join, by = "locus")
d= arrange(d, BW_maternal_effect)
# Spider plot maternal
x= as.data.frame(matrix(d$BW_maternal_effect, ncol= nrow(d)))
x=rbind(x, as.data.frame(matrix(d$BW_maternal_effect_sloc, ncol= nrow(d))))
names(x)= d$locus
rownames(x)= c('BW maternal effect', 'BW maternal effect ')
x= rbind(rep(1,nrow(d)) , rep(0,nrow(d)) , x)
png(snakemake@output[[1]], width= 60, height= 60, res= 300, units= 'mm')
par(mar=c(0,0,0,0))
radarchart(x, axistype= 0,
#custom polygon
pcol= c(colorBlindBlack8[4], colorBlindBlack8[2]) , pfcol= c(alpha(colorBlindBlack8[4], 0.4), alpha(colorBlindBlack8[2], 0.4)) , plwd=1, pty= 32, plty= 1,
#custom the grid
cglcol="grey", cglty=1, axislabcol="#525252", caxislabels= seq(0, 1, 0.25), caxisoffset= 0.1, cglwd=0.8, calcex= 0.4,
#custom labels
vlcex= 0.43
)
dev.off()
# Spider plot fetal
x= as.data.frame(matrix(d$BW_fetal_effect, ncol= nrow(d)))
x=rbind(x, as.data.frame(matrix(d$BW_fetal_effect_sloc, ncol= nrow(d))))
names(x)= d$locus
rownames(x)= c('BW fetal effect', 'BW fetal effect ')
x= rbind(rep(1,nrow(d)) , rep(0,nrow(d)) , x)
png(snakemake@output[[2]], width= 60, height= 60, res= 300, units= 'mm')
par(mar=c(0,0,0,0))
radarchart(x, axistype= 0,
#custom polygon
pcol= c(colorBlindBlack8[4], colorBlindBlack8[2]) , pfcol= c(alpha(colorBlindBlack8[4], 0.4), alpha(colorBlindBlack8[2], 0.4)) , plwd=1, pty= 32, plty= 1,
#custom the grid
cglcol="grey", cglty=1, axislabcol="#525252", caxislabels= seq(0, 1, 0.25), caxisoffset= 0.1, cglwd=0.8, calcex= 0.4,
#custom labels
vlcex= 0.43
)
dev.off()
|
0b3eaa6fcd92f2a41b3ccd879cd28b99f56b7049
|
4eb66a194563cb6a6c9147f4de120e5cb13611be
|
/R/TrialLevelMA.R
|
8a38ebfdd31dbc51756be3679296b74ba238572f
|
[] |
no_license
|
cran/Surrogate
|
8971061189573d24cb402e553f99b24fb7ba8834
|
463b16b365810c637073a7b6e9f3948913007354
|
refs/heads/master
| 2023-07-13T20:56:33.106817
| 2023-06-22T05:20:02
| 2023-06-22T05:20:02
| 17,919,925
| 1
| 1
| null | 2023-08-22T08:14:40
| 2014-03-19T20:50:31
|
R
|
UTF-8
|
R
| false
| false
| 1,875
|
r
|
TrialLevelMA.R
|
# Estimate trial-level surrogacy from per-trial treatment effects.
#
# Alpha.Vector: treatment effects on the surrogate endpoint, one per trial.
# Beta.Vector:  treatment effects on the true endpoint, one per trial.
# N.Vector:     trial sizes; used as regression weights when Weighted=TRUE.
# Weighted:     should the stage-2 regression be weighted by trial size?
# Alpha:        significance level for the confidence intervals.
#
# Returns an object of class "TrialLevelMA" holding R2 trial and R trial
# (each with standard error and CI) plus the stage-2 model summary.
TrialLevelMA <- function(Alpha.Vector, Beta.Vector, N.Vector, Weighted=TRUE,
Alpha=.05){
# stage 2: regress true-endpoint effects on surrogate-endpoint effects
if (Weighted==FALSE){
Results.Stage.2 <- lm(Beta.Vector ~ Alpha.Vector)
}
if (Weighted==TRUE){
Results.Stage.2 <- lm(Beta.Vector ~ Alpha.Vector, weights=N.Vector)
}
# Trial R2: delta-method standard error; CI clipped to [0, 1]
Trial.R2.value <- as.numeric(summary(Results.Stage.2)[c("r.squared")])
Trial.R2.sd <- sqrt((4*Trial.R2.value*(1-Trial.R2.value)^2)/(length(N.Vector)-3))
Trial.R2.lb <- max(0, Trial.R2.value + qnorm(Alpha/2) *(Trial.R2.sd))
Trial.R2.ub <- min(1, Trial.R2.value + qnorm(1-Alpha/2)*(Trial.R2.sd))
Trial.R2 <- data.frame(cbind(Trial.R2.value, Trial.R2.sd, Trial.R2.lb, Trial.R2.ub), stringsAsFactors = TRUE)
colnames(Trial.R2) <- c("R2 Trial", "Standard Error", "CI lower limit", "CI upper limit")
rownames(Trial.R2) <- c(" ")
# Trial R: CI via Fisher z-transform, clipped to [0, 1]
Trial.R.value <- sqrt(as.numeric(summary(Results.Stage.2)[c("r.squared")]))
Z <- .5*log((1+Trial.R.value)/(1-Trial.R.value))
Trial.R.lb <- max(0, (exp(2*(Z-(qnorm(1-Alpha/2)*sqrt(1/(length(N.Vector)-3)))))-1)/(exp(2*(Z-(qnorm(1-Alpha/2)*sqrt(1/(length(N.Vector)-3)))))+1))
Trial.R.ub <- min(1, (exp(2*(Z+(qnorm(1-Alpha/2)*sqrt(1/(length(N.Vector)-3)))))-1)/(exp(2*(Z+(qnorm(1-Alpha/2)*sqrt(1/(length(N.Vector)-3)))))+1))
Trial.R.sd <- sqrt((1-Trial.R.value**2)/(length(N.Vector)-2))
Trial.R <- data.frame(cbind(Trial.R.value, Trial.R.sd , Trial.R.lb, Trial.R.ub), stringsAsFactors = TRUE)
colnames(Trial.R) <- c("R Trial", "Standard Error", "CI lower limit", "CI upper limit")
row.names(Trial.R) <- c(" ")
# Bundle inputs, estimates, and the fitted model into the result object.
fit <-
list(Alpha.Vector=Alpha.Vector, Beta.Vector=Beta.Vector, N.Vector=N.Vector, Trial.R2=Trial.R2,
Trial.R=Trial.R,
Model.2.Fit=summary(Results.Stage.2), Call=match.call())
class(fit) <- "TrialLevelMA"
fit
}
|
34ce67310c163d75759b49451ad573aeb919589d
|
3eb676d9d01c996c0c65cbcb6276cfcedc08e203
|
/FourierMotzkin/FourierMotzkin.R
|
9db37f19f3a52f57bd5902256dee437a4860fbe8
|
[] |
no_license
|
tonytony99/Machine-Learning
|
639be7d8d37f4dbe12346ca1ee7eaa0f181e02c1
|
bf1cea0c5224232ef664c3b0bc35164fdf6a3fec
|
refs/heads/master
| 2021-01-01T18:24:45.764425
| 2017-12-22T21:27:11
| 2017-12-22T21:27:11
| 98,330,222
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,450
|
r
|
FourierMotzkin.R
|
# Tolerance for floating point comparison
epsilon <- 1e-6

# Check whether a candidate point satisfies a system of linear inequalities.
#
# inequalities: an m x (n+1) matrix; row i encodes
#   a[i,1]*x1 + ... + a[i,n]*xn <= b[i], with b[i] in the last column.
# solution: a length-n numeric vector (x1, ..., xn).
# Returns TRUE when every inequality holds to within epsilon, FALSE otherwise.
#
# EXAMPLE
#   ineq = matrix(data = c(3,2,4,5,1,6), nrow = 2, byrow = TRUE)
#   holds(ineq, c(2,1))   # FALSE
#   holds(ineq, c(-8,1))  # TRUE
holds = function(inequalities, solution) {
  n <- ncol(inequalities) - 1
  # Left-hand sides of all m inequalities at once via matrix multiply.
  lhs <- as.vector(inequalities[, seq_len(n), drop = FALSE] %*% solution)
  rhs <- inequalities[, n + 1]
  # TRUE only if no inequality is violated beyond the tolerance.
  all(lhs <= rhs + epsilon)
}
# Solve a one-variable system of linear inequalities.
#
# inequalities: an m x 2 matrix; row i encodes a[i] * x <= b[i].
# Returns a value of x satisfying every row (to within epsilon), or NULL
# when the system is infeasible.
#
# EXAMPLE
#   ineq = matrix(data = c(2,3,-4,-5,1,6), ncol = 2, byrow = TRUE)
#   satisfy(ineq)  # 1.5
satisfy = function(inequalities) {
  a <- inequalities[, 1]
  b <- inequalities[, 2]
  # A row 0 * x <= b with b < 0 can never hold: infeasible.
  if (any(a == 0 & b < 0)) {
    return(NULL)
  }
  has_upper <- any(a > 0)   # positive coefficients bound x from above
  has_lower <- any(a < 0)   # negative coefficients bound x from below
  upper <- if (has_upper) min(b[a > 0] / a[a > 0]) else NULL
  lower <- if (has_lower) max(b[a < 0] / a[a < 0]) else NULL
  # Only upper bounds: the tightest one is a witness.
  if (has_upper && !has_lower) {
    return(upper)
  }
  # Only lower bounds: the tightest one is a witness.
  if (!has_upper && has_lower) {
    return(lower)
  }
  if (has_upper && has_lower) {
    # Feasible iff the greatest lower bound does not exceed the least
    # upper bound (with epsilon slack); return the upper bound then.
    if (lower <= upper + epsilon) {
      return(upper)
    }
    return(NULL)
  }
  # Only 0 * x <= b rows (all with b >= 0) remain: the original falls
  # through here without picking a value, i.e. returns NULL.
  NULL
}
# Given a matrix, inequalities, of the form
#   a1,1 a1,2 ... a1,n b1
#   ...
#   am,1 am,2 ... am,n bm
# return a vector (x1, ..., xn) such that
#   ai,1*x1 + ... + ai,n*xn <= bi   for every row i,
# or NULL when the system is infeasible.
#
# Works by Fourier-Motzkin elimination: variables are eliminated one at a
# time, then values are recovered by back-substitution with satisfy().
#
# EXAMPLE
#   E = c(c(1,2,-4,6),c(-5,3,2,3),c(3,-2,4,8),c(5,-9,1,8),c(-4,-9,1,8))
#   E = matrix(data = E, nrow = 4, ncol = 4, byrow=TRUE)
#   solution = fourierMotzkin(E)
#   holds(E, solution) # TRUE
fourierMotzkin = function(inequalities) {
  Slist = list() # systems of inequalities of the form x <= ...
  Tlist = list() # systems of inequalities of the form x >= ...
  Elist = list() # systems of inequalities a1 ... an <= b
  Elist[[1]] = inequalities
  print(inequalities)
  V = ncol(inequalities) - 1
  n = ncol(inequalities) - 1
  # Eliminate variables x1 ... x(n-1) in turn.
  for (v in 1:(n-1)) {
    # counters
    ge = le = numInequalities = 0
    tRows = sRows = newInequalities = NULL
    # For each inequality
    for (i in seq_len(nrow(inequalities))) {
      # BUG FIX: the original read the undefined variable 'EMatrix' for
      # the positive-coefficient test and divided by 'inequalities[i]',
      # which only matched column 1 by column-major indexing coincidence.
      coef = inequalities[i, 1]
      # Coefficient 0: the row does not involve xv; keep it as-is.
      if (coef == 0) {
        inequality = inequalities[i, 2:ncol(inequalities)]
        newInequalities = c(newInequalities, inequality)
        numInequalities = numInequalities + 1
      }
      # Coefficient > 0: rearrange into an upper bound on xv.
      if (coef > 0) {
        # only variable coefficients are subtracted
        inequality = -inequalities[i, 2:(ncol(inequalities)-1)] / coef
        inequality = c(inequality, inequalities[i, ncol(inequalities)] / coef)
        sRows = c(sRows, inequality)
        le = le + 1
      }
      # Coefficient < 0: rearrange into a lower bound on xv.
      if (coef < 0) {
        # Only variable coefficients are subtracted
        inequality = -inequalities[i, 2:(ncol(inequalities)-1)] / coef
        inequality = c(inequality, inequalities[i, ncol(inequalities)] / coef)
        tRows = c(tRows, inequality)
        ge = ge + 1
      }
    }
    TMatrix = matrix(data = tRows, nrow = ge, ncol = V-v+1, byrow=TRUE)
    SMatrix = matrix(data = sRows, nrow = le, ncol = V-v+1, byrow=TRUE)
    Tlist[[v]] = TMatrix
    Slist[[v]] = SMatrix
    # Combine every lower bound with every upper bound: T <= xv <= S
    # implies 0 <= S - T.  (seq_len avoids the 1:0 trap of the original,
    # which crashed when either side was empty.)
    for (t in seq_len(nrow(TMatrix))) {
      for (s in seq_len(nrow(SMatrix))) {
        # Because T <= x <= S, it's the case that 0 <= S - T
        inequality = TMatrix[t,1:(ncol(TMatrix)-1)] - SMatrix[s,1:(ncol(TMatrix)-1)]
        inequality = c(inequality, SMatrix[s,ncol(SMatrix)] - TMatrix[t,ncol(TMatrix)])
        newInequalities = c(newInequalities, inequality)
        numInequalities = numInequalities + 1
      }
    }
    newInequalities = matrix(data = newInequalities, nrow = numInequalities, ncol = V-v+1, byrow=TRUE)
    inequalities = newInequalities
    Elist[[v+1]] = inequalities
    print(inequalities)
  }
  # By this point we have eliminated every variable except xn.
  # Create a vector of length V to hold the values for x1 ... xn.
  solution = rep(0, V)
  # Find a value for xn.
  x = satisfy(Elist[[V]])
  # BUG FIX: check for infeasibility BEFORE storing the value; the
  # original did 'solution[V] = x' first, which errors when x is NULL
  # (assigning NULL into an atomic vector has replacement length zero).
  if (is.null(x)) { return(NULL) }
  solution[V] = x
  # Now go back through previous systems of inequalities, each time
  # substituting the values found so far to reduce the system to one
  # variable solvable by satisfy().
  numVariables = 2
  for (i in (V-1):1) {
    newLHS = Elist[[i]][,1]
    newRHS = Elist[[i]][,V-i+2]
    currentValue = 0
    for (j in numVariables:1) {
      newRHS = newRHS - solution[V-currentValue] * Elist[[i]][,j]
      currentValue = currentValue + 1
    }
    Elist[[i]] = matrix(c(newLHS, newRHS), ncol = 2)
    x = satisfy(Elist[[i]])
    # BUG FIX: same NULL-before-assignment reordering as above.
    if (is.null(x)) { return(NULL) }
    solution[i] = x
    numVariables = numVariables + 1
  }
  print("Solution : ")
  print(solution)
  return(solution)
}
|
96c8f80212f9ee0b0144582c2e59778db2ccf286
|
d81a933a93e226722463ca4de4ac03ffe584eb79
|
/LAB 5/LAB 5 1.R
|
189131c3de634c0c55522cc6461ed550dde5fe96
|
[] |
no_license
|
rilika/rilika_DSR
|
93e223494f46b623ef46d076a71e9919692bd1fe
|
b635e59c1de5619e01bd1ca5b8d514e113ea86f7
|
refs/heads/master
| 2023-02-01T02:26:51.790862
| 2020-12-10T12:23:33
| 2020-12-10T12:23:33
| 297,918,467
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 257
|
r
|
LAB 5 1.R
|
# Small demo data set: employee IDs, salaries, and marital status
# (M = married, U = unmarried).
EMPID<-c(10,20,30,40,50,60,70,80,90,100)
SALARY<-c(22000,43000,26500,21000,34600,32000,65000,32200,30000,15000)
MARITAL_STATUS<-c('M','M','U','M','U','U','U','M','U','U')
df<-data.frame(EMPID,SALARY,MARITAL_STATUS)
# Print the frame, then compare salary distributions by marital status.
df
boxplot(SALARY~MARITAL_STATUS,data=df)
|
d3c18101aa6e06b092e4e556cb797c4880b437db
|
ab79177ad95b0e89d70210a3478b91f98cdb6b30
|
/man/fmri_mem_dataset.Rd
|
6891822ff9e62455601038d03434f90be7826a81
|
[] |
no_license
|
bbuchsbaum/fmrireg
|
93e69866fe8afb655596aa23c6f9e3ca4004a81c
|
2dd004018b3b7997e70759fc1652c8d51e0398d7
|
refs/heads/master
| 2023-05-10T17:01:56.484913
| 2023-05-09T14:38:24
| 2023-05-09T14:38:24
| 18,412,463
| 6
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,811
|
rd
|
fmri_mem_dataset.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fmri_dataset.R
\name{fmri_mem_dataset}
\alias{fmri_mem_dataset}
\title{Create an fMRI Memory Dataset Object}
\usage{
fmri_mem_dataset(
scans,
mask,
TR,
run_length = sapply(scans, function(x) dim(x)[4]),
event_table = data.frame(),
base_path = ".",
censor = NULL
)
}
\arguments{
\item{scans}{A list of objects of class \code{\linkS4class{NeuroVec}}.}
\item{mask}{A binary mask of class \code{\linkS4class{NeuroVol}} indicating the set of voxels to include in analyses.}
\item{TR}{Repetition time (TR) of the fMRI acquisition.}
\item{run_length}{A numeric vector specifying the length of each run in the dataset. Default is the length of the scans.}
\item{event_table}{An optional data frame containing event information. Default is an empty data frame.}
\item{base_path}{An optional base path for the dataset. Default is "." (current directory).}
\item{censor}{An optional numeric vector specifying which time points to censor. Default is NULL.}
}
\value{
An fMRI memory dataset object of class c("fmri_mem_dataset", "volumetric_dataset", "fmri_dataset", "list").
}
\description{
This function creates an fMRI memory dataset object, which is a list containing information about the scans, mask, TR, number of runs, event table, base path, sampling frame, and censor.
}
\examples{
# Create a NeuroVec object
d <- c(10, 10, 10, 10)
nvec <- neuroim2::NeuroVec(array(rnorm(prod(d)), d), space=neuroim2::NeuroSpace(d))
# Create a NeuroVol mask
mask <- neuroim2::NeuroVol(array(rnorm(10*10*10), d[1:3]), space=neuroim2::NeuroSpace(d[1:3]))
mask[mask < .5] <- 0
# Create an fmri_mem_dataset
dset <- fmri_mem_dataset(list(nvec), mask, TR=2)
# Create an iterator with 100 chunks
iter <- data_chunks(dset, nchunks=100)
}
|
60c854e9147dd383fb4ef2276de818301b3ac65f
|
2dda9ad40309327d13c53996a59a51802f14abe5
|
/ilg1/varscan/control_gene_region/kaks_analysis.R
|
dd9010c3e286dde1c2f36900f331a7b9b0b1ff4d
|
[] |
no_license
|
Kazuki526/gdc_il
|
42e2d043b86c2f9fd58ab6983dfb789f215afa03
|
61665b25da95d66b6e8ef4ddbd7072221bf91f61
|
refs/heads/master
| 2021-01-11T23:36:36.893751
| 2019-01-07T04:45:28
| 2019-01-07T04:45:28
| 78,609,531
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,255
|
r
|
kaks_analysis.R
|
loadNamespace('cowplot')
control_genes = read_tsv("/Volumes/areca42TB/GRCh38_singlefasta/control_genes.tsv") %>>%
filter(gene_symbol != "OR8U1") %>>% #GRCh37とGRCh38でゲノム上での向きが逆転しているから
mutate(focal="yes")
all_maf_for_cumulative_cont = read_tsv("all_patient/all_maf_for_cumulative_control.tsv.gz")
################################################################################################################
################################################################################################################
################################################################################################################
################################################################################################################
#可能性1:control geneにoncogeneが含まれていた。
#可能性2:essential geneに傷が入った状態でガンになる際passenger mutationがこれらの遺伝子で起こると
# ガンですら生存が危うくなるため発症が遅くなる。
#可能性2を確かめるためhuman - mouse でKaKsが低い遺伝子のみで発症年齢をみてみよう!
mean_kaks = 0.193
# human - mouse の平均KaKsは0.192551524673489
enstXucsc = read_tsv("/Volumes/areca42TB/primates/ensembl2ucsc_Xref.tsv.gz",comment = "#")
primate_kaks = read_tsv("/Volumes/areca42TB/primates/ucid_primatesKAKS.tsv.gz")
control_genes_kaks = left_join(control_genes %>>%dplyr::select(gene_symbol,Transcript_ID),
enstXucsc,by = c("gene_symbol" = "HGNC_symbol")) %>>%
filter(!is.na(UCSC_ID)) %>>%
separate(UCSC_ID,into = c("ucsc_id","ucsc_id_num"),sep = "\\.") %>>%
left_join(primate_kaks%>>%
separate(ucid, into = c("ucsc_id","ucid_num"), sep = "\\.") %>>%
dplyr::select(ucsc_id,ucid_num,Mouse)) %>>%
filter(!is.na(Mouse)) %>>%
separate(Mouse,into = c("kaks","cds_length"),sep = ":") %>>%
mutate(kaks = parse_double(kaks), cds_length = parse_double(cds_length)) %>>%
group_by(gene_symbol) %>>%
summarise(cds_length = max(cds_length),kaks =kaks[which.max(cds_length)]) %>>%ungroup() %>>%
filter(cds_length>100)
.plot = control_genes_kaks %>>%
ggplot()+
geom_histogram(aes(x=kaks),binwidth = 0.01)+
geom_vline(xintercept = mean_kaks,color="blue")+
geom_hline(yintercept = 0)+
theme_bw()
ggsave("age_plot/fig/further_research/control_gene_kaks.pdf",.plot,height = 5,width = 10)
# low_kaks_regression_table = all_maf_for_cumulative_cont %>>%
# left_join(control_genes_kaks %>>%dplyr::select(gene_symbol,kaks,cds_length)) %>>%
# filter(!is.na(kaks),kaks < mean_kaks,cds_length >100) %>>%
# make_regression_tabel_cont()
# high_kaks_regression_table = all_maf_for_cumulative_cont %>>%
# left_join(control_genes_kaks %>>%dplyr::select(gene_symbol,kaks,cds_length)) %>>%
# filter(!is.na(kaks),kaks > mean_kaks,cds_length >100) %>>%
# make_regression_tabelcont()
#
# .plot_low = low_kaks_regression_table %>>%
# regression_tbl_plot(.maf_max = 10,.bl_ln = NULL,.red_ln = NULL,.expand = 0.15)
# .plot_high = high_kaks_regression_table %>>%
# regression_tbl_plot(.maf_max = 10,.bl_ln = NULL,.red_ln = NULL,.expand = 0.15)
#
# .plot = cowplot::plot_grid(.plot_low +ggtitle("low kaks control gene (271 gene)")+
# theme(axis.text = element_text(size = 10),axis.title = element_text(size = 15)),
# .plot_high +ggtitle("high kaks control gene (354 gene)")+
# theme(axis.text = element_text(size = 10),axis.title = element_text(size = 15)),
# ncol = 1, labels = "auto",label_size = 20)
# .plot
# ggsave("age_plot/fig/further_research/control_hl_kaks_regression.pdf",height = 10,width = 10)
############################################################################################################
maf_focal=all_maf_for_cumulative_cont%>>%
filter(MAF<0.0005,mutype=="missense")
# Slope of onset age (in years) regressed on each patient's count of rare
# missense variants, restricted to the genes in .gene_list.
#
# Relies on the globals maf_focal (rare missense variants, MAF < 0.05%)
# and patient_list; patients with no qualifying variant count as 0.
# Returns the named 'missense_num' coefficient from lm().
lm_reg_coef_cont=function(.gene_list){
.missense_count = maf_focal%>>%inner_join(.gene_list,by="gene_symbol")%>>%
group_by(patient_id) %>>%
summarise(missense_num=sum(MAC))%>>%
right_join(patient_list,by="patient_id")%>>%
mutate(missense_num=ifelse(is.na(missense_num),0,missense_num))
# age appears to be stored in days; /365.25 puts the slope in years per
# variant -- TODO confirm against how patient_list$age is built.
lm(age/365.25 ~ missense_num, data = .missense_count)$coefficients["missense_num"]
}
# Compare the age~missense regression slope between the .gene_num genes
# with the lowest Ka/Ks values ('top') and the .gene_num genes with the
# highest Ka/Ks values ('low').
# Returns a one-row tibble with columns top and low.
toplow_lm_r=function(.kaks_list,.gene_num=100){
.top=.kaks_list %>>%
mutate(rank=min_rank(kaks))%>>%
filter(rank<=.gene_num)%>>%
lm_reg_coef_cont()
.low=.kaks_list %>>%
mutate(rank=min_rank(desc(kaks)))%>>%
filter(rank<=.gene_num)%>>%
lm_reg_coef_cont()
tibble::tibble(top=as.numeric(.top),low=as.numeric(.low))
}
#100
observe100=toplow_lm_r(control_genes_kaks)
permute_lm100 = tibble::tibble(times=1:10000) %>>%
mutate(toplow = purrr::pmap(., function(times){
if(times %%1000 ==0){print(paste0("now ",times," times"))}
toplow_lm_r(control_genes_kaks%>>%
mutate(kaks=sample(kaks,length(kaks))))
}))%>>%unnest()
permute_lm100%>>%mutate(toplow=top/low)%>>%
filter(toplow > observe100$top/observe100$low)
#200
observe200=toplow_lm_r(control_genes_kaks,200)
permute_lm200 = tibble::tibble(times=1:10000) %>>%
mutate(toplow = purrr::pmap(., function(times){
if(times %%1000 ==0){print(paste0("now ",times," times"))}
toplow_lm_r(control_genes_kaks%>>%
mutate(kaks=sample(kaks,length(kaks))),200)
}))%>>%unnest()
permute_lm200%>>%mutate(toplow=top-low)%>>%
filter(toplow < observe200$top-observe200$low)
permute_lm200 %>>%ggplot()+
geom_histogram(aes(x=top/low))
###############################################################################################################
# Sliding-window regression coefficients along the Ka/Ks rank of the
# control genes: windows of .w_size genes, stepped by 10 ranks, plotted
# as points in rank order.
# Uses the globals control_genes_kaks and (via lm_reg_coef_cont)
# maf_focal and patient_list.  Returns a ggplot object.
plot_lm_sliding_kaks_rank = function(.w_size){
gene_num=length(control_genes_kaks$gene_symbol)
# One row per window: its rank range and the regression coefficient
# computed on the genes inside that range.
.R=tibble(group=c(0:(( gene_num-.w_size+10)%/%10))) %>>%
mutate(min_rank=group*10 +1,
max_rank=ifelse(group*10 +.w_size>gene_num,gene_num,group*10 +.w_size)) %>>%
mutate(reg_coef = purrr::pmap_dbl(.,function(min_rank,max_rank,...){
lm_reg_coef_cont(control_genes_kaks%>>%
mutate(rank=row_number(kaks))%>>%
filter(rank>=min_rank,rank<=max_rank))
})) %>>%
mutate(group_=paste0(min_rank,"-",max_rank)) %>>%
ggplot()+
geom_point(aes(x=reorder(group_,group),y=reg_coef))+
theme(axis.text.x = element_blank(),axis.ticks.x = element_blank(),
axis.title.x = element_blank())
}
.plot = plot_lm_sliding_kaks_rank(200)+
labs(x= "Rank of Normal genes KaKs",y= "Regression Coeffisient")+
theme_bw()+
theme(axis.text.x = element_text(angle = 90))
ggsave("age_plot/fig/further_research/control_kaks_slidingwindow.pdf",width = 7,height = 4)
plot_lm_sliding_kaks = function(.w_size){
tibble(min_kaks=c(0:(1/.w_size))) %>>%
mutate(max_kaks = min_kaks +.w_size) %>>%
mutate(reg_coef=)
}
# Mean and variance of the per-patient count column n across all patients
# in the global patient_list, treating patients absent from .num_tbl as
# having a count of 0.  Returns a one-row tibble (mean, variance).
meanvar_by_patient=function(.num_tbl){
patient_list%>>%left_join(.num_tbl,by="patient_id")%>>%
mutate(n=ifelse(is.na(n),0,n))%>>%
{tibble(mean=mean(.$n),variance=var(.$n))}
}
maf_focal %>>%count(gene_symbol,patient_id)%>>%
nest(-gene_symbol)%>>%
mutate(data_ = purrr::map(data,~meanvar_by_patient(.))) %>>%
dplyr::select(-data)%>>%unnest() %>>%
mutate(value=mean/variance)%>>%
ggplot()+
geom_point(aes(x=mean,y=variance))
geom_histogram(aes(x=value))
left_join(control_genes_kaks)%>>%mutate(rank=min_rank(kaks))%>>%View()
gather(meanvar,value,-gene_symbol) %>>%
ggplot()+
geom_histogram(aes(x=value))+
facet_grid( meanvar ~ . )
.mutation=tibble(group=c(0:(( gene_num-.w_size+10)%/%10))) %>>%
mutate(min_rank=group*10 +1,
max_rank=ifelse(group*10 +.w_size>gene_num,gene_num,group*10 +.w_size)) %>>%
mutate(mut_num = purrr::pmap_dbl(.,function(min_rank,max_rank,...){
maf_focal %>>% inner_join(control_genes_kaks%>>%mutate(rank=min_rank(kaks))%>>%
filter(rank >= min_rank, rank <= max_rank))
})) %>>%
mutate(group_=paste0(min_rank,"-",max_rank)) %>>%
ggplot()+
geom_line(aes(x=reorder(group_,group),y=mut_num))+
theme(axis.text.x = element_text(angle = 90))
cowplot::plot_grid(.R,.mutation,.ncol=1,rel_heights = c(1,1.2))
|
cf6b57b0c2d7ca5bf27b5d15e3d9f45abae5ccdc
|
be80867918c3a03ef99220c91f6cabcb71ff9c5a
|
/man/run.whr.Rd
|
6de44a1b59b1153aa507baf1581a321778c82b2d
|
[] |
no_license
|
CLPS0900/CLPS0900R
|
fa0bfbb32876642828e719e9e63d771368ed92d6
|
395ea6453ff98e4b8121510a0703318fef557580
|
refs/heads/master
| 2020-04-17T14:59:09.432772
| 2020-03-09T23:04:51
| 2020-03-09T23:04:51
| 166,680,501
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 333
|
rd
|
run.whr.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/run.whr.r
\name{run.whr}
\alias{run.whr}
\title{manipulate one-way anova and related methods using WHR_DATA}
\usage{
run.whr()
}
\value{
none
}
\description{
manipulate one-way anova and related methods using WHR_DATA
}
\examples{
\dontrun{
run.whr()
}
}
|
fe58a9b8cec503125e0b811de9621776ca39f2c6
|
8fd809b6ca419341e1e40a99acb319148469c7d7
|
/plot2.R
|
f0257ba2e9000ee5d2a258dd9608435c2f8384be
|
[] |
no_license
|
ivanlis/course04week04assignment
|
40c7d7eacf93e79382cd9e0d38ce94ff572a5764
|
189ffc3d149231e757f2f27583ef70b11a00ebe1
|
refs/heads/master
| 2020-03-16T12:09:33.531578
| 2018-05-09T22:05:24
| 2018-05-09T22:05:24
| 132,660,875
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 996
|
r
|
plot2.R
|
# plot2.R: plot total PM2.5 emissions per year for Baltimore City
# (fips == "24510") and save the figure as plot2.png.
# Unpack the raw data files on first run, then read the emissions table.
if (!file.exists("./data"))
dir.create("./data")
if (!file.exists("./data/summarySCC_PM25.rds") ||
!file.exists("./data/Source_Classification_Code.rds"))
unzip("exdata_data_NEI_data.zip", exdir = "./data")
nei <- readRDS("./data/summarySCC_PM25.rds")
#scc <- readRDS("./data/Source_Classification_Code.rds")  # not needed here
library(dplyr)
# First, only select the rows related to Baltimore,
# then, compute emission totals by year.
neiTotalBaltimore <- nei %>% filter(fips == "24510") %>%
group_by(year) %>% summarize(totalEmissions = sum(Emissions))
# Plot the computed summary (in thousands of tons) to a PNG device.
png(file = "plot2.png", width = 512, height = 512)
plot(neiTotalBaltimore$year, neiTotalBaltimore$totalEmissions / 1e3,
type = "l", xlab = "year",
ylab = "total emissions, thousand tons",
xaxp = c(range(neiTotalBaltimore$year), 3),
yaxp = c(range(neiTotalBaltimore$totalEmissions / 1e3), 3))
title(main = "Total PM2.5 Emissions, Baltimore City")
dev.off()
|
bdc1fd98523fbee9ace3cc13e32d301a6372a24e
|
05915847084946cb6540392262fe5560692413aa
|
/man/sampledat.Rd
|
2a7013b1748b1cad13fd6d72ccd959225f60dc67
|
[] |
no_license
|
akeyel/flm_NE_WNV
|
eac0b003cefd89b1e16d2970869b9ac3efc13cd2
|
22954a0d298ac510f8be0bd7e46c4b6b3a6b86a5
|
refs/heads/master
| 2021-01-03T00:14:53.886054
| 2020-06-09T21:45:17
| 2020-06-09T21:45:17
| 239,830,178
| 0
| 0
| null | 2020-03-25T15:59:01
| 2020-02-11T18:06:11
|
R
|
UTF-8
|
R
| false
| true
| 726
|
rd
|
sampledat.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/flm.R
\docType{data}
\name{sampledat}
\alias{sampledat}
\title{sampledat}
\format{
An object of class \code{spec_tbl_df} (inherits from \code{tbl_df}, \code{tbl}, \code{data.frame}) with 1674 rows and 5 columns.
}
\usage{
sampledat
}
\description{
simulated data on annual numbers of human cases of neuro-invasive and non-neuro-invasive
West Nile Virus in Nebraska counties. It is predictions of a model that was trained on
actual numbers of cases as recorded in CDC's Arbonet database. It excludes Arthur County,
because no cases have been recorded there to date, and we had to exclude it from our
modeling to get it to work.
}
\keyword{datasets}
|
091d4471ac0fd93d95b09ba43975c01f84075e7b
|
37a70a2a8c84f353d45cd678f059cbe5446d5346
|
/day4/hw0831.R
|
b12fda71b666abf076a9f957d42ff735b0019acb
|
[] |
no_license
|
jee0nnii/DataITGirlsR
|
a27f7ce1c3f90765366f120ff85cd7f2cee60e8c
|
cc6e7b3f2d30c690a41e4ca5a165d32de47d3c3f
|
refs/heads/master
| 2021-03-19T16:49:01.412022
| 2017-11-06T15:21:56
| 2017-11-06T15:21:56
| 109,706,695
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,376
|
r
|
hw0831.R
|
# 과제1 - 색깔이 있는 물고기 종류별 boxplot 그리기 (p19 참고)
# 과제2 - 이쁘게 산점도 그리기(correlation plot) (p22~23 참고)
# * Hint : Google Search (패키지를 이용하면 됩니다!)
setwd("C:/Users/joo/Documents/dataitgirls4")
library(readxl)
library(RColorBrewer)
list.files()
fishcatch <- read_xls("fishcatch_revise.xls",sheet = 2) #2번째 시트에 있는 데이터 불러옴
head(fishcatch)
fc <- subset(fishcatch, select =c(-obs,-sex)) #필요한 컬럼만 쓸 겅
head(fc)
attach(fc)
####이거아님####
# spe01 <- subset(fc, subset = (Species == '1'))
# spe02 <- subset(fc, subset = (Species == '2'))
# spe03 <- subset(fc, subset = (Species == '3'))
# spe04 <- subset(fc, subset = (Species == '4'))
# spe05 <- subset(fc, subset = (Species == '5'))
# spe06 <- subset(fc, subset = (Species == '6'))
# spe07 <- subset(fc, subset = (Species == '7'))
# wplt <- boxplot(fc$Weight~fc$Species,fc, col = brewer.pal(7,"Set1"))
# l1plt <- boxplot(fc$Length1~fc$Species,fc, col = brewer.pal(7,"Set2"))
# l2plt <- boxplot(fc$Length2~fc$Species,fc, col = brewer.pal(7,"Set3"))
# l3plt <- boxplot(fc$Length3~fc$Species,fc, col = brewer.pal(7,"Reds"))
# hplt <- boxplot(fc$Height~fc$Species,fc, col = brewer.pal(7,"Blues"))
# w2plt <- boxplot(fc$Width~fc$Species,fc, col = brewer.pal(7,"Greens"))
# install.packages('ggplot2')
# library(ggplot2)
# ggplot(fc)+geom_boxplot(aes(x=Species,y=Weight,group =1))
####no.1####
# par function 사용
# mfrow : number of multiple figures(use row-wise)
# 1 , 2 , 3
# 4 , 5 , 6
# mfcol : number of multiple figures(use column-wise)
# 1 , 4
# 2 , 5
# 3 , 6
par(mfrow = c(2,3), # make frame by 2 row, 3column
mar = c(4,3,3,1), #내부 마진
oma = c(0.5,0.5,3,0.5)) #외부 마진
#아래, 왼쪽, 위쪽, 오른쪽
#물고기 종류별 각 특징에 대한 boxplot
boxplot(Weight~Species,data = fc, col = brewer.pal(7,"Set1"), main = "Weight")
boxplot(Length1~Species,fc, col = brewer.pal(7,"Set2"), main = "Length1")
boxplot(Length2~Species,fc, col = brewer.pal(7,"Set3"), main = "Length2")
boxplot(Length3~Species,fc, col = brewer.pal(7,"Reds"), main = "Length3")
boxplot(Height~Species,fc, col = brewer.pal(7,"Blues"), main = "Height")
boxplot(Width~Species,fc, col = brewer.pal(7,"Greens"), main = "Width")
mtext("*Boxplot HW*", outer =TRUE, cex =1.5 , col ="black")
#par reset 하는 방법 1
par(mfrow=c(1,1),oma = c(0,0,0,0))
#par reset 하는 방법 2 : not working.....
op <- par(no.readonly = TRUE)
par(op)
colnames(fc)
detach(fc)
str(fc) # class 3가지 형식을 가짐 / as.data.frame으로 데이터프레임형식으로 바꿔주
fc <- as.data.frame(fc)
par(mfrow = c(2,3))
for (i in 2:7){
boxplot(fc[,i] ~ fc$Species, ylab = colnames(fc)[i]
,xlab="species",col = brewer.pal(7,"Set3"), main =paste0(colnames(fc)[i],"Box plot"))
}
####no.1 ggplot####
#발표자 풀이.1
dat <- read_xls("fishcatch_revise.xls",sheet = 2)
dat$Species <- as.factor(dat$Species)
#물고기 종류를 나누기 위해num을 factor로 바꿔줌
str(dat) #바뀜
library(ggplot2)
#install.packages("ggplot2")
plot1<-ggplot(dat, aes(x=Species, y=Weight, fill=Species))
+theme_bw() # 배경화면 흰색으로 변경
+geom_boxplot()
+ggtitle("Weight")
+theme(axis.title.x=element_blank()
,axis.title.y=element_blank()
,legend.position = "none"
, plot.title = element_text(hjust = 0.5))
plot2<-ggplot(dat, aes(x=Species, y=Length1, fill=Species))+theme_bw()+geom_boxplot()+ggtitle("Length1")+theme(axis.title.x=element_blank(),axis.title.y=element_blank(),legend.position = "none", plot.title = element_text(hjust = 0.5))
plot3<-ggplot(dat, aes(x=Species, y=Length2, fill=Species))+theme_bw()+geom_boxplot()+ggtitle("Length2")+theme(axis.title.x=element_blank(),axis.title.y=element_blank(),legend.position = "none", plot.title = element_text(hjust = 0.5))
plot4<-ggplot(dat, aes(x=Species, y=Length3, fill=Species))+theme_bw()+geom_boxplot()+ggtitle("Length3")+theme(axis.title.x=element_blank(),axis.title.y=element_blank(),legend.position = "none", plot.title = element_text(hjust = 0.5))
plot5<-ggplot(dat, aes(x=Species, y=Height, fill=Species))+theme_bw()+geom_boxplot()+ggtitle("Height")+theme(axis.title.x=element_blank(),axis.title.y=element_blank(),legend.position = "none", plot.title = element_text(hjust = 0.5))
plot6<-ggplot(dat, aes(x=Species, y=Width, fill=Species))+theme_bw()+geom_boxplot()+ggtitle("Width")+theme(axis.title.x=element_blank(),axis.title.y=element_blank(),legend.position = "none", plot.title = element_text(hjust = 0.5))
library(gridExtra)
install.packages("gridExtra")
grid.arrange(plot1,plot2,plot3,plot4,plot5,plot6,nrow=2)
####no.2####
# 상관분석(Correlation Analysis)은 두 변수간에 어떤 선형적 관계를 갖고 있는 지를 분석하는 방법이다.
# 두변수는 서로 독립적인 관계로부터 서로 상관된 관계일 수 있으며,
# 이때 두 변수간의 관계의 강도를 상관관계(Correlation, Correlation coefficient)라 한다.
?cor
install.packages("PerformanceAnalytics")
library(PerformanceAnalytics)
head(fc)
mulchart <- fc[,c(2,3,4,5,6,7)] # => fc[,c(2:7)]
mulchart #결측치가 있음 근데 아래를 실행해도 수행이 됨 뭐임??/....
chart.Correlation(mulchart,histogram = TRUE, pch=19)
mulchart2 <- na.omit(mulchart)
mulchart2
chart.Correlation(mulchart2,histogram = TRUE, pch=19)
#save plot
png(filename = "correlationmatrix.png") #make empty file
chart.Correlation(mulchart2,histogram = TRUE, pch=19) #run
dev.off() #check the previous file
# cor(mulchart)
####no.3####
# 결측치를 제외하고 상관분석 시행 : complete.obs
# http://rfriend.tistory.com/126
# http://rstudio-pubs-static.s3.amazonaws.com/27134_f8052fbae4fe4402824ebb9fe080d876.html
install.packages("corrplot")
library(corrplot)
?corrplot
fccor<-cor(mulchart,use = "complete.obs")
fccor
#colorRampPalette 색깔 부여해주는 함수
col4 <- colorRampPalette(c("#7F0000", "red", "#FF7F00", "yellow", "#7FFF7F",
"cyan", "#007FFF", "blue", "#00007F"))
corrplot(fccor,type="upper", order = "hclust", addrect = 2, col = col4(10))
#no2 에서 미리 결측치를 처리해서 use 옵션을 줄 필요가 없음
fccor2 <-cor(mulchart2)
corrplot(fccor2,type="upper",order="AOE",tl.col = "black", tl.srt = 45)
|
c3a7bf2e1ccb6b3423317236bdfbe6f21195ff64
|
cb4cd286ae1e5e78bce36934140254f5802d43ba
|
/Statistical_Analysis/func.R
|
6eb60c343682d06db15f87e8e0e0994bb1b8ea1f
|
[] |
no_license
|
HuangHam/DT-RLWM
|
066fe7644da932a5f332cf82c1ba5a5aea8b73df
|
44a4125bdd37fee83cf11ff70e452b72c437002f
|
refs/heads/master
| 2022-07-09T20:01:39.265523
| 2020-05-15T22:22:37
| 2020-05-15T22:22:37
| 264,278,049
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,313
|
r
|
func.R
|
#functions for DT-RLWM analysis
library(plotrix)
# Standard error of `v`; thin wrapper over plotrix::std.error(),
# forwarding any additional arguments (e.g. na.rm) unchanged.
se <- function(v, ...) std.error(v, ...)
# Proportion of non-missing entries in `v` (NaN for zero-length input).
nonNA <- function(v) {
  mean(!is.na(v))
}
# Squash `beta` from the real line into (-1, 1) via a rescaled logistic;
# algebraically equal to tanh(beta / 2). Vectorized over `beta`.
sigTrans <- function(beta) {
  logistic <- 1 / (1 + exp(-beta))
  2 * logistic - 1
}
# Column-wise group means.
#
# @param df a data frame.
# @param ... grouping specification, forwarded to dplyr::group_by_()
#   (superseded standard-evaluation verb; accepts quoted names / strings).
# @return a grouped summary with the NA-stripped mean of every
#   non-grouping column.
shapeMean <- function(df, ...){
  #group_vars = enquos(...)
  df %>%
    group_by_(...) %>%
    summarise_all(mean,na.rm = T)
}
# Column-wise group mean, standard error and standard deviation.
#
# @param df a data frame.
# @param ... grouping specification, forwarded to dplyr::group_by_()
#   (superseded standard-evaluation verb; accepts quoted names / strings).
# @return a grouped summary with `_m`, `_se` and `_sd` suffixed columns
#   for every non-grouping column. Uses the file-local se() helper.
shapeBoth <- function(df, ...){
  #group_vars = enquos(...)
  df %>%
    group_by_(...) %>%
    summarise_all(list(m = mean, se = se, sd = sd), na.rm = T)
}
# Grouped boxplot of y_var against x_var, optionally filled by `condition`.
#
# @param df data frame to plot.
# @param x_var,y_var bare (unquoted) column names for the axes.
# @param condition optional bare column name mapped to the fill aesthetic.
# @param title,xlabel,ylabel plot title and axis labels.
# @return a ggplot object.
# aes_() + substitute() capture the unevaluated column expressions so
# callers can pass bare names (pre-tidy-eval idiom; aes_() is deprecated
# in current ggplot2 in favour of {{ }} embracing).
boxplt <- function(df, x_var, y_var,condition = NULL, title, xlabel, ylabel) {
  ggplot(df, aes_(x=substitute(x_var), y=substitute(y_var), fill=substitute(condition))) +
    #geom_point(aes(color=as.factor(subject)), alpha=0.2)+
    geom_boxplot(outlier.colour="black", outlier.shape=16,
                 outlier.size=2, notch=FALSE)+
    ggtitle(title) +
    xlab(xlabel) +
    ylab(ylabel)
}
# Dodged bar plot of y_var against x_var, filled by `condition`.
# Bar heights are taken directly from the data (stat = "identity").
#
# @param df data frame to plot.
# @param x_var,y_var bare (unquoted) column names for the axes.
# @param condition bare column name mapped to the fill aesthetic.
# @param title,xlabel,ylabel plot title and axis labels.
# @return a ggplot object.
barplt1 <- function(df, x_var, y_var,condition, title, xlabel, ylabel) {
  ggplot(df, aes_(x=substitute(x_var), y=substitute(y_var), fill = substitute(condition))) +
    geom_bar(stat="identity", color="black",
             position=position_dodge()) +
    ggtitle(title) +
    xlab(xlabel) +
    ylab(ylabel)
}
# Dodged bar plot like barplt1(), but with a fixed legend/colour scheme
# for the "dt condition" factor: level "0" in blue, level "B" in yellow.
# Levels other than "0"/"B" will have no colour assigned.
#
# @param df data frame to plot.
# @param x_var,y_var bare (unquoted) column names for the axes.
# @param condition bare column name mapped to the fill aesthetic
#   (expected to be the dt condition with levels "0" and "B").
# @param title,xlabel,ylabel plot title and axis labels.
# @return a ggplot object.
barplt <- function(df, x_var, y_var,condition, title, xlabel, ylabel) {
  ggplot(df, aes_(x=substitute(x_var), y=substitute(y_var), fill = substitute(condition))) +
    geom_bar(stat="identity", color="black",
             position=position_dodge())+
    labs(fill = "dt condition")+
    scale_fill_manual("dt condition", values = c("0" = "blue", "B" = "yellow"))+
    ggtitle(title) +
    xlab(xlabel) +
    ylab(ylabel)
}
# Line-plus-point plot of y_var against x_var, one line per level of
# `condition` (mapped to both colour and group).
#
# @param df data frame to plot.
# @param x_var,y_var bare (unquoted) column names for the axes.
# @param condition bare column name defining the line grouping/colour.
# @param title,xlabel,ylabel plot title and axis labels.
# @return a ggplot object.
lineplt <-function(df, x_var, y_var,condition, title, xlabel, ylabel) {
  ggplot(df, aes_(x=substitute(x_var), y=substitute(y_var), color = substitute(condition), group=substitute(condition))) +
    geom_line() +
    geom_point()+
    ggtitle(title) +
    xlab(xlabel) +
    ylab(ylabel)
}
# Violin plot of y_var against x_var, filled by `condition`.
# Violin areas are scaled by observation count (scale = "count").
#
# @param df data frame to plot.
# @param x_var,y_var bare (unquoted) column names for the axes.
# @param condition bare column name mapped to the fill aesthetic.
# @param title,xlabel,ylabel plot title and axis labels.
# @return a ggplot object.
violinplt <- function(df, x_var, y_var,condition, title, xlabel, ylabel) {
  ggplot(df, aes_(x=substitute(x_var), y=substitute(y_var), fill = substitute(condition))) +
    geom_violin(scale = "count", color="black",
                position=position_dodge()) +
    ggtitle(title) +
    xlab(xlabel) +
    ylab(ylabel)
}
# Replace blank strings (empty or whitespace-only) in `x` with NA.
blank2na <- function(x) {
  stripped <- gsub("\\s+", "", x)  # collapse whitespace so " " also counts as blank
  is_blank <- stripped == ""
  x[is_blank] <- NA
  x
}
# Add a "next trial" accuracy column: for every row, Cor1 holds the Cor
# value of the row with the same subject and block but order + 1 (the
# following trial); NaN when no such successor row exists.
#
# @param Data data frame with columns subject, block, order and Cor.
# @return `Data` with an added numeric Cor1 column.
#
# Rewritten without attach()/detach(): attach() copied the data frame onto
# the search path (masking base::order and leaking state if the loop
# errored), and the old `1:nrow(data)` loop misbehaved on zero-row input.
Cort1 <- function(Data){
  data <- Data
  for (i in seq_len(nrow(data))) {
    nxt <- which(data$subject == data$subject[i] &
                 data$block == data$block[i] &
                 data$order == data$order[i] + 1)
    if (length(nxt) == 0) {
      data$Cor1[i] <- NaN
    } else {
      # assumes (subject, block, order) identifies a unique row, as the
      # original implementation did
      data$Cor1[i] <- data$Cor[nxt]
    }
  }
  data
}
# Column-wise one-sample t-tests against mu = 0.
#
# For each column of `coeffs`, runs a one-sample t-test, prints the result
# and flags columns whose p-value is <= 0.05.
#
# @param coeffs numeric matrix (or data frame) of coefficients; one test
#   per column. Column names are used in the printed output.
# @param twoTail if TRUE a two-sided test is run (statistic, df and p are
#   printed); otherwise a one-sided ('less') test (p only).
# @return invisibly NULL; called for its printed output.
ttest <- function(coeffs, twoTail) {
  for (i in seq_len(ncol(coeffs))) {
    name <- colnames(coeffs)[i]
    if (twoTail) {
      res <- t.test(coeffs[, i], mu = 0, paired = FALSE)
      p <- res$p.value
      print(paste0('In exp3, two-tail pvalue of ', name, ' is ',
                   res$statistic, ", ", res$parameter, ", ", p))
    } else {
      res <- t.test(coeffs[, i], mu = 0, alternative = 'less', paired = FALSE)
      # Bug fix: `p` was never assigned in this branch, so the significance
      # check below read an undefined (or stale) variable and errored.
      p <- res$p.value
      print(paste0('In exp3, one-tail pvalue of ', name, ' is ', p))
    }
    if (p <= 0.05) print('It is significant')  # message typo 'sigificant' fixed
  }
  invisible(NULL)
}
|
774738e40c17b2b35982824a19ca6d0e971156c3
|
81498a41a2c4c01b6005aa45847770a72cd2f84c
|
/R/ontTermAgg.R
|
79e08e1f62474cafaeac01b1c0a2048e8582a76e
|
[] |
no_license
|
KrishnaTO/OntologyTermAggregator
|
8ccee7e553dcef94e7782524a2190a3868761819
|
4c71ac664e720d3dae159a0d9958ab1370e3c95d
|
refs/heads/main
| 2023-01-10T19:00:24.360047
| 2020-11-14T03:29:47
| 2020-11-14T03:29:47
| 312,690,746
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,216
|
r
|
ontTermAgg.R
|
#' Get all results of a term
#'
#' Higher-order function that queries the BioOntology (BioPortal) search
#' endpoint for \code{searchTerm}, fetches the full entity for every
#' matching ontology class, and aggregates the results: flat columns are
#' appended to \code{combined_output.csv}, list-valued columns are saved
#' to \code{combined_output.complex.RData}.
#'
#' @param searchTerm The term you want the combined data for.
#' @param yourAPIKey Your BioOntology API key.
#' @param format Which format to request the results in; default is
#'   "json", alternatively "xml". (Currently not used downstream; kept
#'   for backward compatibility.)
#'
#' @export
ontTermAgg <- function(searchTerm, yourAPIKey, format = "json"){
  # Concatenate two named lists key-by-key over the union of their names.
  # Retrieved function cat_lists: https://stackoverflow.com/a/57171812
  cat_lists <- function(list1, list2) {
    keys <- unique(c(names(list1), names(list2)))
    map2(list1[keys], list2[keys], c) %>%
      set_names(keys)
  }
  # Query the BioPortal search endpoint (up to 1000 hits, obsolete terms
  # excluded).
  res.search <- postForm('http://data.bioontology.org/search',
                         q = searchTerm,
                         also_search_obsolete=FALSE ,
                         include="all",  # bug fix: was misspelled 'inlcude', so the option never reached the API
                         pagesize=1000,
                         apikey = yourAPIKey
  )
  # Keep only proper ontology classes and drop columns we never aggregate.
  res.json <- fromJSON(res.search)$collection %>%
    filter(ontologyType == "ONTOLOGY") %>%
    select(-obsolete, -ontologyType, -provisional, -`@context`, -cui, -semanticType)
  res.json$links$`@context` <- NULL
  # Cached fetch for the default demo term; otherwise hit every self link.
  # NOTE(review): the cached .RData is assumed to define `rdfs` -- confirm.
  if(searchTerm == "Neurofibroma" & file.exists("../data/Searchtoclasses_json.RData")){
    load("../data/Searchtoclasses_json.RData")
  } else{
    rdfs <- lapply(res.json$links$self, getFullEntity)
  }
  combined_output <- reduce(rdfs, cat_lists)
  combined_output.parents <- reduce(combined_output$parents, cat_lists)
  combined_output$parents <- NULL
  cols <- c("subClassOf", "definition", "synonym", "cui", "semanticType", "label")
  combined_output[cols] <- lapply(combined_output[cols], listUnique)
  # bug fix: 'listunique' (lower-case u) is undefined; the helper is listUnique.
  combined_output.parents[cols] <- lapply(combined_output.parents[cols], listUnique)
  complex_cols <- c("properties", "links", "@context")
  combined_output.complex <- combined_output[complex_cols]
  combined_output[complex_cols] <- NULL
  combined_output <- lapply(combined_output, unique)
  combined_output$id <- unique(combined_output$id)
  combined_output$prefLabel <- unique(combined_output$prefLabel)
  # Persist: flat columns appended to CSV, list-valued columns to RData.
  lapply(combined_output, function(x) write.table(data.frame(x), "combined_output.csv", append= T, sep=',' ))
  saveRDS(combined_output.complex, file = "combined_output.complex.RData")
}
|
1904bd2c7285e1924670b1a2a1ccae6ab2f12f37
|
f18e1210ca120c9116e356a8549e89e04219dc75
|
/tests/testthat/test_filters.R
|
41539b13f70d10051b306cba59c502eea9a33971
|
[
"BSD-2-Clause"
] |
permissive
|
EMSL-Computing/ftmsRanalysis
|
46c73a727d7c5d5a5320bf97a07e9dac72abd281
|
dd3bc3afbf6d1250d1f86e22b936dcc154f4101d
|
refs/heads/master
| 2023-07-21T18:13:26.355313
| 2023-02-09T17:03:09
| 2023-02-09T17:03:09
| 122,233,846
| 14
| 10
|
NOASSERTION
| 2023-07-11T16:34:15
| 2018-02-20T17:52:18
|
R
|
UTF-8
|
R
| false
| false
| 9,461
|
r
|
test_filters.R
|
## Basic functionality tests for peakData objects
library(ftmsRanalysis)
context("filtering on peakData objects")
test_that("mass filters work correctly on peakData", {
data("examplePeakData")
# test with a random mass column name and make sure it is unique
tmp_masscol = sample(LETTERS, 10)
tmp_masscol = make.unique(c(colnames(examplePeakData$e_meta), tmp_masscol))[ncol(examplePeakData$e_meta) + 1]
examplePeakData$e_meta[[tmp_masscol]] <- examplePeakData$e_meta[[getMassColName(examplePeakData)]]
attributes(examplePeakData)$cnames$mass_cname = tmp_masscol
filtData <- mass_filter(examplePeakData)
expect_true(inherits(filtData, "massFilt"))
expect_true(inherits(filtData, "data.frame"))
expect_true(ncol(filtData) == 2)
peakObj2 <- applyFilt(filtData, examplePeakData, min_mass = 200, max_mass = 900)
expect_true(inherits(peakObj2, "peakData"))
new.masses <- as.numeric(peakObj2$e_meta[, getMassColName(peakObj2)])
expect_true(all(new.masses >= 200))
expect_true(all(new.masses <= 900))
expect_true(nrow(examplePeakData$e_data) >= nrow(peakObj2$e_data))
expect_true(!is.null(attr(peakObj2, "filter")))
expect_true(!is.null(attr(peakObj2, "filter")$massFilt))
## TODO do we want to test more things about the attribute here?
# summary method
filtSumm <- summary(filtData)
expect_true(inherits(filtSumm, "summaryDefault"))
expect_true(is.numeric(filtSumm))
filtSumm2 <- summary(filtData, min_mass=200, max_mass=900)
expect_true(inherits(filtSumm2, "summaryDefault"))
expect_true(is.numeric(filtSumm2))
filtSumm3 <- summary(filtData, min_mass=200)
expect_true(inherits(filtSumm3, "summaryDefault"))
expect_true(is.numeric(filtSumm3))
filtSumm4 <- summary(filtData, max_mass=900)
expect_true(inherits(filtSumm4, "summaryDefault"))
expect_true(is.numeric(filtSumm4))
# test some things that should fail
expect_error(tmp <- applyFilt(filtData, peakObj2, min_mass = 500, max_mass = 600))
expect_error(tmp <- applyFilt(filtData, examplePeakData, min_mass = "hello", max_mass = 600))
})
test_that("molecule filters work correctly on peakData", {
data("examplePeakData")
filtData <- molecule_filter(examplePeakData)
expect_true(inherits(filtData, "moleculeFilt"))
expect_true(inherits(filtData, "data.frame"))
expect_true(ncol(filtData) == 2)
expect_true(all(c(getEDataColName(examplePeakData), "Num_Observations") %in% colnames(filtData)))
peakObj2 <- applyFilt(filtData, examplePeakData, min_num = 2)
expect_true(inherits(peakObj2, "peakData"))
retainedPeaks <- as.vector(dplyr::filter(filtData, Num_Observations >=2)[, getEDataColName(peakObj2)])
expect_true(all(retainedPeaks %in% peakObj2$e_data[, getEDataColName(peakObj2)]))
expect_true(nrow(examplePeakData$e_data) >= nrow(peakObj2$e_data))
expect_true(!is.null(attr(peakObj2, "filter")))
expect_true(!is.null(attr(peakObj2, "filter")$moleculeFilt))
## TODO do we want to test more things about the attribute here?
# summary method
filtSumm <- summary(filtData)
expect_true(inherits(filtSumm, "summaryDefault"))
filtSumm2 <- summary(filtData, min_num=2)
expect_true(inherits(filtSumm2, "summaryDefault"))
# test some things that should fail
expect_error(tmp <- applyFilt(filtData, examplePeakData, min_num=-1))
expect_error(tmp <- applyFilt(filtData, examplePeakData, min_num="hello"))
})
test_that("formula filters work correctly on peakData", {
data("examplePeakData")
filtData <- formula_filter(examplePeakData)
expect_true(inherits(filtData, "formulaFilt"))
expect_true(inherits(filtData, "data.frame"))
expect_true(ncol(filtData) == 2)
expect_true(all(c(getEDataColName(examplePeakData), "Formula_Assigned") %in% colnames(filtData)))
## Remove peaks WITHOUT formulas
peakObj2 <- applyFilt(filtData, examplePeakData, remove = 'NoFormula')
expect_true(inherits(peakObj2, "peakData"))
retainedPeaks <- as.vector(dplyr::filter(filtData, Formula_Assigned)[, getEDataColName(peakObj2)])
expect_true(all(retainedPeaks %in% peakObj2$e_data[, getEDataColName(peakObj2)]))
expect_true(nrow(examplePeakData$e_data) >= nrow(peakObj2$e_data))
expect_true(!is.null(attr(peakObj2, "filter")))
expect_true(!is.null(attr(peakObj2, "filter")$formulaFilt))
## TODO do we want to test more things about the attribute here?
# summary method
filtSumm <- summary(filtData)
expect_true(inherits(filtSumm, "summaryDefault"))
filtSumm2 <- summary(filtData, remove="NoFormula")
expect_true(inherits(filtSumm2, "summaryDefault"))
filtSumm3 <- summary(filtData, remove="Formula")
expect_true(inherits(filtSumm3, "summaryDefault"))
## Remove peaks WITH formulas
peakObj3 <- applyFilt(filtData, examplePeakData, remove = 'Formula')
expect_true(inherits(peakObj3, "peakData"))
retainedPeaks <- as.vector(dplyr::filter(filtData, !Formula_Assigned)[, getEDataColName(peakObj3)])
expect_true(all(retainedPeaks %in% peakObj3$e_data[, getEDataColName(peakObj3)]))
expect_true(nrow(examplePeakData$e_data) >= nrow(peakObj3$e_data))
expect_true(!is.null(attr(peakObj3, "filter")))
expect_true(!is.null(attr(peakObj3, "filter")$formulaFilt))
## TODO do we want to test more things about the attribute here?
# test some things that should fail
expect_error(tmp <- applyFilt(filtData, examplePeakData, remove="invalid"))
})
test_that("emeta filters work correctly on peakData", {
data("examplePeakData")
# select 3 random numeric columns and 1 categorical (fixed as MolForm) to test.
# I excluded C13 since currently it only has 1 level.
cols <- sapply(examplePeakData$e_meta[,which(!(colnames(examplePeakData$e_meta) %in% c(getEDataColName(examplePeakData), "C13")))], is.numeric) %>%
which() %>%
sample(3) %>%
names() %>% c("MolForm")
filterlist <- lapply(cols, function(colname){
emeta_filter(examplePeakData, colname)
})
expect_true(all(sapply(filterlist, inherits, what = c("emetaFilt", "data.frame"))))
expect_true(all(sapply(filterlist, ncol) == 2))
expect_true(all(sapply(1:4, function(i){
class(filterlist[[i]]$emeta_value) == class(examplePeakData$e_meta[,cols[i]])
})),
info = "type mismatch between peakData column and filter column")
### test numeric ###
# max/min for each of the three numeric filters
maxlist <- sapply(cols[1:3], function(col){
max(examplePeakData$e_meta[col], na.rm = TRUE)
})
minlist <- sapply(cols[1:3], function(col){
min(examplePeakData$e_meta[col], na.rm = TRUE)
})
for(i in 1:3){
filtered_obj <- applyFilt(filterlist[[i]], examplePeakData, minlist[[i]], maxlist[[i]])
# max/min values in the filter should not affect e_meta
expect_true(all(filtered_obj$e_meta == examplePeakData$e_meta, na.rm = TRUE))
# numeric range that is a subset of the range of the column
newmax = runif(1, median(examplePeakData$e_meta[,cols[i]], na.rm = TRUE), maxlist[[i]])
newmin = runif(1, minlist[[i]], median(examplePeakData$e_meta[,cols[i]], na.rm = TRUE))
filtered_obj <- applyFilt(filterlist[[i]], examplePeakData, newmin, newmax)
expect_equal(nrow(filtered_obj$e_meta),
nrow(filterlist[[i]] %>% dplyr::filter(emeta_value <= newmax, emeta_value >= newmin)))
filtSumm <- summary(filterlist[[i]])
expect_true(inherits(filtSumm, "summaryDefault"))
filtSumm2 <- summary(filterlist[[i]], min_val = newmin, max_val=newmax)
expect_true(inherits(filtSumm2, "summaryDefault"))
filtSumm3 <- summary(filterlist[[i]], min_val = newmin)
expect_true(inherits(filtSumm3, "summaryDefault"))
filtSumm4 <- summary(filterlist[[i]], max_val=newmax)
expect_true(inherits(filtSumm4, "summaryDefault"))
expect_true(!is.null(attr(filtered_obj, "filter")))
expect_true(!is.null(attr(filtered_obj, "filter")[[paste0("emetaFilt_", cols[i])]]))
# test some things that should fail
expect_error(tmp <- applyFilt(filterlist[[i]], examplePeakData, min_val = maxlist[[i]], max_val = minlist[[i]]))
expect_error(tmp <- applyFilt(filterlist[[i]], examplePeakData, min_val = minlist[[i]] - 100, max_val = minlist[[i]] - 0.1^10))
}
### test categorical ###
cats = filterlist[[4]]$emeta_value %>% unique() %>% setdiff(NA)
filtered_obj <- applyFilt(filterlist[[4]], examplePeakData, cats = cats, na.rm = FALSE)
# returns same object if all NON-NA levels specified and na.rm = FALSE
expect_true(all(filtered_obj$e_meta == examplePeakData$e_meta, na.rm = TRUE))
# subset of categories
cats <- sample(cats, ceiling(length(cats)/2))
filtered_obj <- applyFilt(filterlist[[4]], examplePeakData, cats = cats, na.rm = FALSE)
expect_equal(nrow(filtered_obj$e_meta),
nrow(filterlist[[4]] %>% dplyr::filter(emeta_value %in% c(cats, NA))))
filtSumm <- summary(filterlist[[4]])
expect_true(inherits(filtSumm, "summaryDefault"))
filtSumm2 <- summary(filterlist[[4]], cats=cats)
expect_true(inherits(filtSumm2, "summaryDefault"))
expect_true(!is.null(attr(filtered_obj, "filter")))
expect_true(!is.null(attr(filtered_obj, "filter")$emetaFilt_MolForm))
# test some things that should fail
expect_error(tmp <- applyFilt(filterlist[[4]], examplePeakData, cats = "_31234___RIDICULOUS-^^^--***#$#% CHAR ARG"))
})
|
d3bc0cefa1c1467f49f12e077766acb56b1841c2
|
e75a40843a8738b84bd529a549c45776d09e70d9
|
/samples/client/petstore/R-httr2/test_petstore.R
|
7aa5a16c639ae2c0564ea16aad5aaa49dd353ba4
|
[
"Apache-2.0"
] |
permissive
|
OpenAPITools/openapi-generator
|
3478dbf8e8319977269e2e84e0bf9960233146e3
|
8c2de11ac2f268836ac9bf0906b8bb6b4013c92d
|
refs/heads/master
| 2023-09-02T11:26:28.189499
| 2023-09-02T02:21:04
| 2023-09-02T02:21:04
| 133,134,007
| 17,729
| 6,577
|
Apache-2.0
| 2023-09-14T19:45:32
| 2018-05-12T09:57:56
|
Java
|
UTF-8
|
R
| false
| false
| 5,007
|
r
|
test_petstore.R
|
install.packages("petstore_1.0.0.tar.gz",repos=NULL, type="source")
library(petstore)
library(jsonlite)
t <- Tag$new()
t$id <- 123
#t$additional_properties <- c("abc" = 849)
print(t$toJSON())
print(t$toJSONString())
print("done tag")
t <- OneOfPrimitiveTypeTest$new()
#t$fromJSONString("[1,2,3]")
var_pet <- Pet$new("name_example", list("photoUrls_example"), 56, Category$new(56, "name_example"), list(Tag$new(56, "name_example")), "available") # Pet | Pet object that needs to be added to the store
print(var_pet)
###
####Add a new pet to the store
###api_instance <- PetApi$new()
#### Configure OAuth2 access token for authorization: petstore_auth
###api_instance$api_client$access_token <- 'TODO_YOUR_ACCESS_TOKEN';
###result <- tryCatch(
### # to save the result into a file, simply add the optional `data_file` parameter, e.g.
### # api_instance$AddPet(var_pet, data_file = "result.txt"),
### api_instance$add_pet(var_pet),
### ApiException = function(ex) ex
### )
###
###var_pet_id <- 56 # integer | ID of pet to return
###
###pet_response <- api_instance$get_pet_by_id(var_pet_id, data_file = "get_pet_by_id.json")
###response <- read_json("get_pet_by_id.json")
###dput(response)
###
#### test streaming
###api_instance$get_pet_by_id_streaming(var_pet_id, stream_callback = function(x) { print(x) })
##Find pet by ID (streaming)
#api_instance <- PetApi$new()
## Configure API key authorization: api_key
#api_instance$api_client$api_keys['api_key'] <- 'TODO_YOUR_API_KEY';
#result <- tryCatch(
# # to save the result into a file, simply add the optional `data_file` parameter, e.g.
# # api_instance$GetPetByIdStreaming(var_pet_id, data_file = "result.txt"),
# api_instance$GetPetByIdStreaming(var_pet_id, stream_callback = function(x) { print(x) }),
# ApiException = function(ex) ex
# )
# In case of error, print the error object
#if (!is.null(result$ApiException)) {
# cat(result$ApiException$toString())
#} #else {
# # deserialized response object
# response.object <- result$content
# # response headers
# response.headers <- result$response$headers
# # response status code
# response.status.code <- result$response$status_code
#}
##errorMsg <- "{\"code\":1,\"type\":\"error\",\"message\":\"Pet not found\"}"
###errorMsg <- '{"code": 404, "message": "Not found"}'
##a <- ModelApiResponse$new()$fromJSONString(errorMsg)
##dput(a)
##
##var_pet_id <- 1231256 # integer | ID of pet to return
##
###Find pet by ID
##api_instance <- PetApi$new()
### Configure API key authorization: api_key
##api_instance$api_client$api_keys['api_key'] <- 'TODO_YOUR_API_KEY';
##result <- tryCatch(
## api_instance$GetPetById(var_pet_id),
## ApiException = function(ex) ex
## )
### In case of error, print the error object
##if(!is.null(result$ApiException)) {
## cat(result$ApiException$toString())
##} else {
## # deserialized response object
## response.object <- result$content
## # response headers
## response.headers <- result$response$headers
## # response status code
## response.status.code <- result$response$status_code
##}
#
#json2 <-
#'{"name": "pet", "photoUrls" : ["http://a.com", "http://b.com"]}'
#
#jsonlite::minify(json2)
#
#pet_api <- PetApi$new()
#pet_id <- 123321
#pet <- Pet$new("name_test",
# photoUrls = list("photo_test", "second test"),
# category = Category$new(id = 450, name = "test_cat"),
# id = pet_id,
# tags = list(
# Tag$new(id = 123, name = "tag_test"), Tag$new(id = 456, name = "unknown")
# ),
# status = "available"
#)
#
##jsonlite::minify(pet$toJSONString())
##cat(pet$toJSONString())
#toString(pet$toString())
#
##json <-
##'[
## {"Name" : "Mario", "Age" : 32, "Occupation" : "Plumber"},
## {"Name" : "Peach", "Age" : 21, "Occupation" : "Princess"},
## {},
## {"Name" : "Bowser", "Occupation" : "Koopa"}
##]'
##
##
###Pet$public_methods
###Pet$public_methods$fromJSON(json)
###Pet$public_methods$toJson()
###Pet$public_methods$validateJSON(json2)
###Pet$public_methods$validateJson(json)
###Pet$my_static_method <- function(x) { x + 2}
###Pet$public_methods$my_static_method(1)
##
# basque_pig_json <-
# '{"className2": "BasquePig", "color": "red"}'
##
## danish_pig_json <-
## '{"className2": "DanishPig", "size": 7}'
##
## wrong_json <-
## '[
## {"Name" : "Tom", "Age" : 32, "Occupation" : "Consultant"},
## {},
## {"Name" : "Ada", "Occupation" : "Engineer"}
## ]'
##
## print("==========")
# pig <- Pig$new()
# basque_pig <- pig$fromJSON(basque_pig_json)
## #print(basque_pig$actual_instance$color)
## #expect_equal(basque_pig$actual_type, "BasquePig")
## pig$fromJSON(danish_pig_json)
## #pig$fromJSON(wrong_json)
## pig$toJSON()
##
## #d <- DanishPig$new()
## #dp <- d$validateJSON(danish_pig_json)
##
##
#
## test nested oneOf
#nested_oneof <- NestedOneOf$new()
#nested_oneof$nested_pig <- pig
#nested_oneof$size <- 15
#
#cat(nested_oneof$toJSONString())
#
|
fafb2497abcbe8b46c5f3bab92a8fe4e16286222
|
9bd311ddca0e60ee4007a806c19e5e764c2c8558
|
/man/circ.dens.Rd
|
eeac827b144d7e1e85ec0ec7b78bd05911095074
|
[] |
no_license
|
raim/segmenTools
|
8b34fc62c4e10d2ffa4423cc972367b9def2c689
|
7d356916b09a0cc019baf152de5dbf778130c0a1
|
refs/heads/master
| 2023-08-18T15:50:03.200348
| 2023-08-09T14:27:16
| 2023-08-09T14:27:16
| 77,826,031
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,961
|
rd
|
circ.dens.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/osciTools.R
\name{circ.dens}
\alias{circ.dens}
\title{circular density}
\usage{
circ.dens(
x,
bw = 18,
n = 512,
freq = FALSE,
units = "degrees",
high.mem = FALSE,
na.rm = TRUE,
...
)
}
\arguments{
\item{x}{phases in degrees and of class numeric}
\item{bw}{the smoothing bandwidth parameter}
\item{n}{the number of equally spaced points at which density
will be estimated}
\item{freq}{if TRUE densities \code{y} will be scaled by the total
number of measurement \code{N} as \code{N * y/sum(y)}; the resulting
density function will integrate to \code{N}}
\item{units}{phase angle units, 'degrees' or 'radians'}
\item{high.mem}{use \code{\link[stats:density]{density}} to calculate
kernel densities, based on copying data from -360 to 0 and 360 to
720 degrees}
\item{na.rm}{remove NA values before calculation}
\item{...}{further arguments to
\code{\link[circular:density.circular]{density.circular}}
or \code{\link[stats:density]{density}}}
}
\description{
calculates kernel density estimates for circular data (`phase angles').
It is a convenience wrapper around the
\code{\link[circular:density.circular]{density.circular}}
function from package \code{circular} that (a) coerces results to numeric
types to avoid automatic handling of the S3 class circular data
(eg. polar plots), (b) allows to scale densities by the absolute number
(argument \code{freq}), and (c) can alternatively invoke
\code{\link[stats:density]{density}} from the \code{stats} package
(see option \code{high.mem}) for long phase vectors (`genome-wide')
due to high memory usage of
\code{\link[circular:density.circular]{density.circular}}.
NOTE: results are equivalent only for the scaled version freq=TRUE;
TODO: find out why densities differ by a factor of ~100 between the 2
}
\seealso{
\code{\link[circular:density.circular]{density.circular}},
\code{\link[stats:density]{density}}
}
|
ebae08ba82a30098f10a7fb931aabb5b28872651
|
afd66cd57fcd3e064d2c0a9c64c5d3f1bb5fbf3e
|
/MergeDEMs.R
|
dc4ab15310928cb731465ca9e04b96f3cb1a404f
|
[
"MIT"
] |
permissive
|
bo2we/R-geos
|
dca3cf612aa4066ee52ccf579408eae131b71888
|
747d462ada4567457cdfbaaa0febb8fbf81f4327
|
refs/heads/master
| 2022-08-31T14:17:50.169579
| 2020-05-29T08:08:33
| 2020-05-29T08:08:33
| 267,775,163
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,146
|
r
|
MergeDEMs.R
|
# This script is used to mosaic tif files in a folder
# Bowei Chen @ 2018, contact: rs.cbw@foxmail.com
# Install-on-demand loading of the two required packages.
if (!require("gdalUtils")) { install.packages("gdalUtils"); require("gdalUtils") }
if (!require("raster")) { install.packages("raster"); require("raster") }
# uses all tiffs in the current folder
f <- list.files(path = "TIFF_DEM", pattern = ".tif$", full.names = TRUE)
# Quick visual sanity check of a single tile.
# NOTE(review): assumes at least 101 tiles exist in TIFF_DEM -- confirm.
plot(raster(f[101]))
# build a virtual raster file (vrt)
gdalbuildvrt(gdalfile = f, output.vrt = "dem.vrt")
# returns the raster as Raster*Object
dem <- gdal_translate(src_dataset = "dem.vrt",
                      dst_dataset = "dem.tif",
                      output_Raster = TRUE,
                      options = c("BIGTIFF=YES", "COMPRESSION=LZW"))
# assign coordinates (UTM zone 51N on WGS84)
utmsysz51n <- "+proj=utm +zone=51 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0"
crs(dem) <- utmsysz51n
# check if the coordinates are correct
dem
# display in grey colour scale
plot(dem, col = grey.colors(255))
# display in colour scale
plot(dem, col = bpy.colors(255))
# write the geotiff
writeRaster(dem, filename='0720_DEM.tif', format="GTiff", overwrite=TRUE)
# NOTE(review): this plots 'QJY_DEM.tif', not the '0720_DEM.tif' written
# above -- confirm which output file is intended.
plot(raster("QJY_DEM.tif"))
|
df082fe512f7cbb05f0755d6c442b48d1724b52e
|
0129942d834860967aaa7cc2eb02a7d1d2190dad
|
/slides/forests/rsrc/fig-cart_forest_fimp_1.R
|
ea154ccf12616647aeb2a916f802b78231704487
|
[
"CC-BY-4.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
11813147ak/lecture_i2ml
|
eac7bdd87e2d8987c3fdc2dc4b08624cb8db2fdc
|
01ed75190311484800f1cdf6a9551716fbfdc71c
|
refs/heads/master
| 2022-12-24T23:49:41.865456
| 2020-10-01T06:54:10
| 2020-10-01T06:54:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 989
|
r
|
fig-cart_forest_fimp_1.R
|
# Produces figure/cart_forest_fimp_1.pdf: a random-forest variable
# importance plot (randomForest::varImpPlot) for a Species model on the
# built-in iris data, used in the "forests" slide deck.
library(knitr)
library(mlbench)
library(mlr)
library(OpenML)
library(ggplot2)
library(viridis)
library(gridExtra)
library(ggrepel)
library(repr)
library(data.table)
library(BBmisc)
library(party)
library(rpart)
library(rpart.plot)
library(randomForest)
library(rattle)
library(smoof)
library(kableExtra)
library(kknn)
library(e1071)
library(rattle)
library(plyr)
library(kernlab)
# Shared slide options: compact numeric printing and truncated str() output.
options(digits = 3, width = 65, str = strOptions(strict.width = "cut", vec.len = 3))
# Default discrete colour scale for the slide figures (viridis, capped at .9).
scale_c_d <- scale_colour_discrete <- scale_color_discrete <-
  function(...) {
    viridis::scale_color_viridis(..., end = .9, discrete = TRUE, drop = TRUE)
  }
set.seed(600000)
pdf("../figure/cart_forest_fimp_1.pdf", width = 8, height = 2.6)
library(tidyr)
library(kernlab)
# Fit the forest with importance tracking and draw the importance dotchart.
model = randomForest(Species ~ ., data = iris, importance = TRUE)
randomForest::varImpPlot(model, main = "")
# NOTE(review): varImpPlot() draws base graphics, while ggsave() saves the
# last *ggplot*; this call likely does not capture the plot above -- the
# pdf()/dev.off() pair already writes the file. Confirm before removing.
ggsave("../figure/cart_forest_fimp_1.pdf", width = 8, height = 2.6)
dev.off()
|
4b63e60dad8dda693b6c8ffef9abab957c5433d2
|
58691c7e1dd0c241de7ec2898ea66b5d2e5f5f4a
|
/R/mfd.R
|
5dbf1acbf1f83906b2399d30f4646cdd5f22f6f1
|
[] |
no_license
|
PSegaert/mrfDepth
|
b7fefd1bc3e897c9b095fac0e0f4c2cf9b3ad45a
|
a1118ddeef1997c72aedbc65dc83b48deda807e3
|
refs/heads/master
| 2021-01-18T22:23:13.435472
| 2018-10-12T08:55:47
| 2018-10-12T08:55:47
| 87,052,317
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,512
|
r
|
mfd.R
|
# mfd: multivariate functional depth.
#
# Integrates a cross-sectional multivariate depth over time to obtain a
# single depth value per curve.
#
# Arguments (as used below):
#   x            - (t x n x p) array: t time points, n observed curves,
#                  p variables per curve. Defines the reference sample.
#   z            - optional (t x m x p) array of curves whose depth is
#                  computed relative to x; defaults to x itself.
#   type         - cross-sectional depth: one of "hdepth", "projdepth",
#                  "sprojdepth", "dprojdepth", "sdepth" (the latter only
#                  for p <= 2).
#   alpha        - 0 for equal time weights; a number in (0, 1] to weight
#                  each time point by the volume of its alpha-depth region;
#                  or a (1 x t) matrix of user-supplied weights.
#   time         - optional strictly increasing vector of t time points
#                  (defaults to 1:t); step lengths scale the weights.
#   diagnostic   - if TRUE, also return per-time-point depths and local
#                  outlier flags.
#   depthOptions - list of options forwarded to the depth routine.
#
# Returns a list (class c("mrfDepth", "mfd")) with MFDdepthX, MFDdepthZ,
# the time weights, optional diagnostics, and indices of time points where
# a computation failed (their weights are zeroed).
mfd <- function(x,
                z = NULL,
                type = "hdepth",
                alpha = 0,
                time = NULL,
                diagnostic = FALSE,
                depthOptions = NULL) {
  ######
  # Check input.
  if (missing(x)) {
    stop("Input argument x is required.")
  }
  #Check x
  if (!is.array(x)) {
    stop("x must be a three dimensional array.")
  }
  if (length(dim(x)) != 3) {
    stop("x must be a three dimensional array.")
  }
  if (sum(is.nan(x)) != 0) {
    stop("x contains missing cases.")
  }
  # dimensions: t1 time points, n1 curves, p1 variables
  t1 <- dim(x)[1]
  n1 <- dim(x)[2]
  p1 <- dim(x)[3]
  #Check z
  if (is.null(z)) {
    z <- x
  }
  if (!is.array(z)) {
    stop("z must be a three dimensional array.")
  }
  if (length(dim(z)) != 3) {
    stop("z must be a three dimensional array.")
  }
  if (sum(is.nan(z)) != 0) {
    stop("z contains missing cases.")
  }
  t2 <- dim(z)[1]
  n2 <- dim(z)[2]
  p2 <- dim(z)[3]
  #Check dimension match between x and z
  if (p1 != p2) {
    stop("The p dimension of x and z must match.")
  }
  if (t1 != t2) {
    stop("The t dimension of x and z must match.")
  }
  #Check type
  Indtype <- match(type, c("hdepth", "projdepth",
                           "sprojdepth", "dprojdepth", "sdepth"))[1]
  if (is.na(Indtype)) {
    stop("type should be one of hdepth, projdepth , sprojdepth, dprojdepth or sdepth.")
  }
  if (Indtype == 5 && p1 > 2) {
    stop("sdepth depth only implemented for p<=2.")
  }
  #Check alpha: either a scalar in [0,1] or a (1 x t) row matrix of weights
  if (!is.numeric(alpha)) {
    stop("alpha must be numeric")
  }
  if (is.vector(alpha)) {
    if (alpha < 0) {
      stop("alpha should be part of [0,1]")
    }
    if (alpha > 1) {
      stop("alpha should be part of [0,1]")
    }
  }
  else if (is.matrix(alpha)) {
    NRowAlpha <- dim(alpha)[1]
    NColAlpha <- dim(alpha)[2]
    if (NRowAlpha != 1 || NColAlpha != t1) {
      stop("alpha must be a (1xt)-row matrix.")
    }
  }
  else{
    stop("alpha must be either a number or a (1xt)-row matrix.")
  }
  #Check time
  if (is.null(time)) {
    time <- 1:t1
  }
  if (!is.numeric(time) || !is.vector(time)) {
    stop("time should be a numeric vector.")
  }
  if (length(time) != t1) {
    stop("time should contain t elements")
  }
  if (length(time) != 1) {
    # central differences of the end-padded time grid: per-point step
    # lengths used later to scale the integration weights
    dTime <- diff(c(time[1], time, time[t1]), lag = 2)
    if (min(dTime) <= 0) {
      stop("time should be strictly increasing.")
    }
  }
  else{
    dTime <- 1
  }
  #check diagnostic
  if (!is.logical(diagnostic)) {
    stop("diagnostic should be a logical")
  }
  #check depthOptions
  if (is.null(depthOptions)) {
    depthOptions <- list()
  }
  if (!is.list(depthOptions)) {
    stop("depthOptions must be a list")
  }
  # per-time-point weights (default 1; replaced below by the user matrix or
  # by alpha-region volumes) and containers for per-time results
  weights <- rep(1, t1)
  depthsTimeX <- matrix(NA, nrow = n1, ncol = t1)
  depthsTimeZ <- matrix(0.0, nrow = n2, ncol = t2)
  locOutlX <- matrix(NA, nrow = n1, ncol = t1)
  # NOTE(review): locOutlZ is allocated with n1 rows but is filled from
  # flags computed on z (n2 observations) -- confirm n1 == n2 is assumed.
  locOutlZ <- matrix(NA, nrow = n1, ncol = t1)
  if (is.matrix(alpha)) {
    weights <- alpha
  }
  warningFlagFit <- warningFlagBag <- warningFlagIso <- warningFlagAlpha <- 0
  warningIndFit <- warningIndBag <- warningIndIso <- warningIndAlpha <- c()
  # suppress warnings from the per-time-point depth routines; the previous
  # setting is restored right after the loop
  Original <- options(warn = -1)
  for (j in 1:t1) {
    exactfit <- 0
    #R has standard dimension dropping, we need to be careful
    if (p1 == 1) {
      xTimePoint <- matrix(x[j,,1])
      zTimePoint <- matrix(z[j,,1])
    }
    else{
      xTimePoint <- x[j,,,drop = TRUE]
      zTimePoint <- z[j,,,drop = TRUE]
    }
    #Find cross-sectional depth
    if (type == "hdepth") {
      temp <- hdepth(x = xTimePoint, z = zTimePoint, options = depthOptions)
      if (!is.list(temp)) {
        temp <- list()
      }
      if (!is.null(temp$depthZ)) {
        depthsTimeX[,j] <- temp$depthX
        depthsTimeZ[,j] <- temp$depthZ
      }
      else{
        exactfit <- 1
      }
      #If requested find local halfspace outliers
      if (diagnostic & p1 == 2 & exactfit == FALSE) {
        temp <- compBagplot(x = xTimePoint, type = type)
        if (sum(is.nan(temp$flag)) == 0) {
          locOutlX[,j] <- temp$flag
        }
        else{
          warningFlagBag <- 1
          warningIndBag <- c(warningIndBag, j)
        }
      }
    }
    else if (type == "projdepth") {
      temp <- projdepth(x = xTimePoint, z = zTimePoint, options = depthOptions)
      if (!is.null(temp$depthZ)) {
        depthsTimeX[,j] <- temp$depthX
        depthsTimeZ[,j] <- temp$depthZ
        locOutlX[,j] <- as.numeric(!temp$flagX)
        locOutlZ[,j] <- as.numeric(!temp$flagZ)
      }
      else{
        exactfit <- 1
      }
    }
    else if (type == "sprojdepth") {
      temp <- sprojdepth(x = xTimePoint, z = zTimePoint, options = depthOptions)
      if (!is.null(temp$depthZ)) {
        depthsTimeX[,j] <- temp$depthX
        depthsTimeZ[,j] <- temp$depthZ
        locOutlX[,j] <- as.numeric(!temp$flagX)
        locOutlZ[,j] <- as.numeric(!temp$flagZ)
      }
      else{
        exactfit <- 1
      }
    }
    else if (type == "dprojdepth") {
      temp <- dprojdepth(x = xTimePoint, z = zTimePoint, options = depthOptions)
      if (!is.null(temp$depthZ)) {
        depthsTimeX[,j] <- temp$depthX
        depthsTimeZ[,j] <- temp$depthZ
        locOutlX[,j] <- as.numeric(!temp$flagX)
        locOutlZ[,j] <- as.numeric(!temp$flagZ)
      }
      else{
        exactfit <- 1
      }
    }
    else{
      # sdepth: only depths for z are returned by this routine
      temp <- sdepth(x = xTimePoint, z = zTimePoint)
      if (!is.null(temp$depth)) {
        depthsTimeZ[,j] <- temp$depth
      }
      else{
        exactfit <- 1
      }
    }
    #Check if exact fit needs handling later on
    if (exactfit) {
      weights[j] <- 0
      warningFlagFit <- 1
      warningIndFit <- c(warningIndFit, j)
    }
    #Calculate the area of depth region at time T
    #Do not calculate when alpha is row-matrix
    if (!is.matrix(alpha)) {
      #Only for non-constant weights, no point in calculating for exact fits
      if (alpha != 0 && exactfit == 0) {
        temp <- depthContour(x = xTimePoint, alpha, type = type)
        Vert <- temp[[1]]$vertices
        if (sum(is.nan(Vert)) == 0) {
          # duplicated vertices indicate a degenerate contour
          if (nrow(Vert) != nrow(unique(Vert))) {
            warningFlagIso <- 1
            warningIndIso <- c(warningIndIso, j)
          }
          else{
            if (p1 == 1) {
              # 1D "volume" is just the contour's length
              temp <- max(Vert) - min(Vert)
            } else {
              temp <- try(convhulln(matrix(Vert, ncol = p1), "FA")$vol,
                          silent = TRUE)
            }
            if (!is.numeric(temp)) {
              warningFlagAlpha <- 1
              warningIndAlpha <- c(warningIndAlpha, j)
            }
            else{
              weights[j] <- temp
            }
          }
        }
        else{
          weights[j] <- 0
          warningFlagIso <- 1
          warningIndIso <- c(warningIndIso, j)
        }
      }
    }
  }
  options(warn = Original$warn)
  # integrate over time: scale by time-step lengths, normalise to sum 1,
  # then form the weighted average of the cross-sectional depths.
  # NOTE(review): when alpha is a (1 x t) matrix, `weights` is still a row
  # matrix here -- verify conformability with the %*% products below.
  weights <- weights * dTime
  weights <- weights / sum(weights)
  depthsX <- depthsTimeX %*% weights
  depthsZ <- depthsTimeZ %*% weights
  #Assemble the results
  Result <- list(MFDdepthX = depthsX,
                 MFDdepthZ = depthsZ,
                 weights = weights)
  if (diagnostic) {
    Result$crossdepthX <- depthsTimeX
    Result$crossdepthZ <- depthsTimeZ
    Result$locOutlX <- locOutlX
    Result$locOutlZ <- locOutlZ
  }
  Result$depthType <- type
  class(Result) <- c("mrfDepth", "mfd")
  #Handle all warnings
  if (warningFlagFit == 1) {
    warning(paste("Exact fits were detected for certain time points.",
                  "Their weights will be set to zero.",
                  "Check the returned results"),
            call. = FALSE)
    Result$IndFlagExactFit <- warningIndFit
  }
  if (warningFlagBag == 1) {
    warning(paste("The bagplot could not be computed at all time points.",
                  "Their weights will be set to zero.",
                  "Check the returned results"),
            call. = FALSE)
    Result$IndFlagBag <- warningIndBag
  }
  if (warningFlagIso == 1) {
    warning(paste("The isohdepth contours could not be computed at all",
                  "time points. Their weights will be set to zero.",
                  "Check the returned results"),
            call. = FALSE)
    Result$IndFlagIso <- warningIndIso
  }
  if (warningFlagAlpha == 1) {
    warning(paste("The specified alpha is too large at all time points.",
                  "Their weights will be set to zero.",
                  "Check the returned results"),
            call. = FALSE)
    Result$IndFlagAlpha <- warningIndAlpha
  }
  return(Result)
}
|
18d59fb177af501a5b767864989f31b70acf6d17
|
ba29c87ff287c34a1946e2023a0107b6327c7e0b
|
/plot_man.r
|
2c2574c80aa37bc45853c682057cc11d41446449
|
[] |
no_license
|
apsteinmetz/rays
|
c167eb378dfec041c86db787424f394bc8162461
|
ee99803824eab2a1148379067110345c220e891a
|
refs/heads/master
| 2023-06-23T07:17:04.748249
| 2023-06-12T17:22:37
| 2023-06-12T17:22:37
| 168,065,794
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 880
|
r
|
plot_man.r
|
# plot manhattan
# Renders a 2D hillshaded map and a 3D rayshader view of a Manhattan
# elevation raster. Requires `man_raster` (and, below, `hillshade_img`)
# to already exist in the workspace -- neither is created here.
library(tidyverse)
library(rayshader)
# NOTE(review): `skyline` is computed but never used below -- presumably a
# leftover from an earlier iteration; confirm before removing.
skyline <- as.array(man_raster)
# Extract raster values (buffered mean, NAs removed) into a matrix whose
# rows/cols follow the raster's cols/rows, as rayshader expects.
elev_matrix <- matrix(
  raster::extract(
    man_raster,
    raster::extent(man_raster),
    buffer = 1000,
    fun = mean,
    na.rm = TRUE
  ),
  nrow = ncol(man_raster),
  ncol = nrow(man_raster)
)
# vertical exaggeration factor used by the shading/3D calls below
zscale = 30
#ambmat <- ambient_shade(elev_matrix, zscale = zscale,
#                        multicore = TRUE)
# ray-traced shadow layer with the sun at 45 degrees altitude
raymat <- ray_shade(elev_matrix, sunaltitude = 45,zscale = zscale,
                    lambert = TRUE,
                    multicore = TRUE)
# show one view
elev_matrix %>%
  #  add_overlay(array_img, alphalayer = 1.0) %>%
  add_shadow(raymat, max_darken = 0.9) %>%
  #  add_shadow(ambmat, max_darken = 0.5) %>%
  plot_map()
#Plot in 3D
rgl::clear3d()
# NOTE(review): `hillshade_img` is not defined in this script -- it must
# come from the workspace; confirm it matches elev_matrix's dimensions.
hillshade_img %>%
  add_shadow(raymat,0.3) %>%
  #  add_shadow(ambmat,0) %>%
  plot_3d(elev_matrix,zscale=zscale,zoom = .5)
|
2c0965f961441cded264d2176df5d3291d56a81b
|
880a3d1e88a31a36cc76669ed4b7e70db5c02e54
|
/Required Packages.R
|
f4548459340ba2309f777686e615817e019c39ca
|
[] |
no_license
|
Suggestions-Only/Logistic-Regression--Grad-School-Admit
|
a22cae23769277d7ff8c475ff2bc759d3fde9279
|
89f5e50874d92a449aca6af801f1bb317377d596
|
refs/heads/master
| 2022-07-25T12:11:57.628440
| 2020-05-14T05:08:54
| 2020-05-14T05:08:54
| 263,820,279
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 88
|
r
|
Required Packages.R
|
# Install every package required by the analysis in a single call.
install.packages(c("mlbench", "caret", "ModelMetrics"))
|
ad29fc1f955d6de5ee93edfc4a972e10f3c9ad9e
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/morgenstemning/examples/morgenstemning.Rd.R
|
3cb9c312ca1c7b378917c9de0daf9622254e5fd0
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 242
|
r
|
morgenstemning.Rd.R
|
# Auto-extracted example runner for the 'morgenstemning' package
# (generated from morgenstemning.Rd); draws a 12-slice colour wheel with
# the package's colour-blind-safe palette.
library(morgenstemning)
### Name: morgenstemning
### Title: Create a colorblind-safe vector of 'n' contiguous colors.
### Aliases: morgenstemning
### ** Examples
require(graphics)
# A color wheel
pie(rep(1,12), col=morgenstemning(12))
|
2028ee18778ee46b5f95182a44721c83f8713dd6
|
ff05af7e6997878635a935fd4b7bbc89f2b4c971
|
/plot3.R
|
4def2b52f3919fbb1abf29b84e95ff9ff271b64c
|
[] |
no_license
|
LStepanek/ExData_Plotting1
|
6954c81454bbfa333bbb66b1ca9db17733b1abfb
|
a8489d65dccd05f0e83b30247741dda33a44bb7a
|
refs/heads/master
| 2020-05-25T14:18:18.447739
| 2014-05-12T01:10:39
| 2014-05-12T01:10:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,029
|
r
|
plot3.R
|
# plot3.R -- energy sub-metering for 2007-02-01/02 (Exploratory Data
# Analysis, course project 1).
# NOTE(review): setwd() to an absolute user path is fragile; it is kept so
# the relative paths below keep working for the original author.
setwd("C:/Users/Lubomír Štěpánek/Documents/Data Science Specialization/Exploratory Data Analysis/Week 1")
# Fix: na.strings = "?" -- the raw file marks missing values with "?",
# and without it the measurement columns are not read as numeric.
data<-read.table("household_power_consumption.txt",header=TRUE,sep=";",na.strings="?")
str(data)
# keep only the two target days and append a 1..n minute-index column
# (the previous standalone subset expression whose result was discarded
# has been removed)
data2<-data[which(data$Date=="1/2/2007"|data$Date=="2/2/2007"),]
data2<-cbind(data2,seq_len(nrow(data2)))
# draw the three sub-metering series as step lines into a 480x480 PNG
png("plot3.png",units="px",width=480,height=480)
plot(data2[,10],data2$Sub_metering_1,type="s",xaxt="n",xlab="",ylab="energy sub metering")
points(data2$Sub_metering_2~data2[,10],col="red",type="s")
points(data2$Sub_metering_3~data2[,10],col="blue",type="s")
# custom x-axis: day labels at the start of each day
axis(1,at=c(1,1440,2880),labels=c("Thu","Fri","Sat"))
legend("topright",c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),fill=c("black","red","blue"))
dev.off()
|
4fee5fff845d06ea282e7d1bf587bdd5b5ab5a0e
|
c3f02c0f6a03e32bc752cbd2922b023ad01d6d9b
|
/iscience_functions.R
|
b1eefebb69c41b4df69b14b1e366bcc5746fed50
|
[] |
no_license
|
twkim-0510/SARS-CoV-2_viral_competition
|
231c58f02ec33fd5aea567a4bec91296d1ae8bcd
|
d027d54d1537b7ad3e0a47654992a426390091ef
|
refs/heads/main
| 2023-04-19T03:25:47.509470
| 2022-10-20T12:15:08
| 2022-10-20T12:15:08
| 552,268,403
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,421
|
r
|
iscience_functions.R
|
################################################################################
# Functions used for analysis in the research paper of Kim et al., (2022)
# Relative infectivity of the SARS-CoV-2 Omicron variant in human alveolar cells Biorxiv
################################################################################
############################################################################################################################################
## Figure 1E: average coverage of 10X and SMART-seq3
############################################################################################################################################
# Draw the transcript gene model with variant positions overlaid for one
# VOC (the Figure 1E coverage track).
#
# site_short_vocs: data frame of variant sites; must contain POS, REF, ALT
#                  and one column per VOC (NA where the VOC lacks the site).
# voc:             name of the VOC column to plot (also used as the track's
#                  y value).
# index:           index into the global colour palette `colorset_final`.
#
# Depends on globals `gtf` (transcript annotation) and `colorset_final`;
# both must exist in the calling environment.
gene_draw <- function(site_short_vocs, voc, index){
  # keep only sites present in this VOC; POS2 marks the shifted end for
  # insertions/deletions (ALT vs REF length difference)
  test <- site_short_vocs %>% dplyr::filter(!is.na(.data[[voc]])) %>% dplyr::mutate(POS2 = POS + nchar(ALT) - nchar(REF))
  color_values <- colorset_final
  # progress logging, left from interactive use
  print(voc)
  print(index)
  ggplot() + geom_gene_arrow(gtf %>% dplyr::filter(type == "transcript") %>% dplyr::mutate(seqnames = voc),
                             arrowhead_height = unit(1, "cm"), arrowhead_width = unit(0.13, "cm"), arrow_body_height = unit(1,"cm"),
                             mapping = aes(xmin = start, xmax = end, y = seqnames)) +
    geom_segment(data = test, aes(x = POS, xend = POS2, y = 0.5, yend = 1.5, color = "blue")) + scale_color_manual(values = color_values[index]) +
    theme_void() + theme(legend.position = "none")
}
############################################################################################################################################
## Figure 1F-G: Cosine similarity between two methods
############################################################################################################################################
extract_value <- function(line, name){ line %>% dplyr::pull(name)}
# Cosine similarity between the NMF-derived VOC ratios and the average-VAF
# "assumed" ratios for one cell.
#
# data_frame: a per-cell record (presumably one row -- confirm with the
#             caller) containing columns GR_ratio/Alpha_ratio/Delta_ratio/
#             Omicron_ratio, gr_assume/alpha_assume/delta_assume/
#             omicron_assume, and `cell`.
# Relies on `cosine()` being available in the session (e.g. from the lsa
# package -- not loaded here; confirm).
#
# Returns a one-row data frame with the similarity computed with NAs left
# in place (`na_original`) and with NAs replaced by zero (`na2zero`).
cosine_sim_func <- function(data_frame){
  nmf_colnames <- c("GR_ratio", "Alpha_ratio", "Delta_ratio", "Omicron_ratio")
  avg_vaf_colnames <- c("gr_assume", "alpha_assume", "delta_assume", "omicron_assume")
  # gather the two ratio vectors in matching VOC order
  nmf <- lapply(nmf_colnames, function(x,dat = data_frame){ extract_value(dat, x)}) %>% unlist()
  avg_vaf <- lapply(avg_vaf_colnames, function(x,dat = data_frame){ extract_value(dat, x)}) %>% unlist()
  na <- cosine(nmf, avg_vaf)
  # zero out missing assumed ratios and recompute the similarity
  avg_vaf[is.na(avg_vaf)] <- 0
  nazero <- cosine(nmf, avg_vaf)
  return(data.frame(cell = data_frame[["cell"]], na2zero= nazero, na_original = na))
}
|
8b7243b6098f20c5e7ddd2512628ff320739b4e7
|
3212f464f29865080db1d74d37acc4d0e4968bc7
|
/Yuqiao Xu/PP_4A Yuqiao Xu.R
|
b39e8f859cac5c1defb169352296dc0f7c03128e
|
[] |
no_license
|
davinia1991/Yuqiao-Xu-Quant-Assignment
|
4f4e2ad9382586471ba38700c16864aca25caf00
|
3e4adb50ba0e1af262e9d55940b8d623ab8723ac
|
refs/heads/master
| 2020-03-29T17:06:00.727847
| 2018-11-12T22:04:47
| 2018-11-12T22:04:47
| 150,143,974
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 121
|
r
|
PP_4A Yuqiao Xu.R
|
library("lpSolveAPI")
x <- read.lp("PP_4A Yuqiao Xu.lp")
x
solve(x)
get.objective (x)
get.constraints(x)
get.variables(x)
|
ee7ba224f9cd725c5c75d89a9541145778a7fb76
|
ceed12ffef504f28fcee30ec31a9f00f922f39a6
|
/r/roundingAndChartLimitFunctions.R
|
e7904a26590b5557ba29fe74ac417f8d485404fd
|
[] |
no_license
|
andrw-jns/qipp
|
91beb9a53b8d9c62afc038bac5748f884cd4fcc3
|
a2e327046a9475368de1563ff266548f1ac72c19
|
refs/heads/master
| 2021-01-15T19:17:44.089413
| 2018-01-11T11:06:49
| 2018-01-11T11:06:49
| 99,814,416
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,534
|
r
|
roundingAndChartLimitFunctions.R
|
cat("roundingAndChartLimitFunctions.R")
cat("Rounding functions")
cat("roundTo(x, roundTo, roundingDirection = \"nearest\")
rounds a number up/down/to the nearest multiple of roundTo")
# Round `x` to a multiple of `roundTo`. roundingDirection is "nearest"
# (default), "down" or "up"; "down"/"up" only adjust when the nearest
# multiple landed on the wrong side of `x`.
roundTo <- function(x, roundTo, roundingDirection = "nearest") {
  multiple <- round(as.double(x) / roundTo) * roundTo
  if (!is.na(roundingDirection)) {
    direction <- tolower(roundingDirection)
    if (direction == "down" && multiple > x) {
      multiple <- multiple - roundTo
    } else if (direction == "up" && multiple < x) {
      multiple <- multiple + roundTo
    }
  }
  multiple
}
cat("roundToChartLimits(x, roundTo, textBuffer = FALSE)
rounds the min and max of a given series for plotting purposes")
# Round the range of `x` outward to multiples of `roundTo`, for use as
# chart axis limits. With textBuffer = TRUE the limits are padded by one
# extra step (the lower limit only when it is negative) to leave room for
# data labels.
# NOTE: the `roundTo` argument shadows the roundTo() function; R still
# dispatches the call to the function because the local value is not
# callable.
roundToChartLimits <- function(x, roundTo, textBuffer = FALSE) {
  lower <- roundTo(min(x, na.rm = TRUE), roundTo, "down")
  upper <- roundTo(max(x, na.rm = TRUE), roundTo, "up")
  if (textBuffer) {
    if (lower < 0) {
      lower <- lower - roundTo
    }
    upper <- upper + roundTo
  }
  c(lower, upper)
}
cat("\nFinancial Year functions for plotting")
cat("FYearIntToChar(x)
Returns character from integer financial year (e.g. 201415 becomes 2014/15)")
# Format an integer financial year such as 201415 as "2014-15".
# (Vectorised: works element-wise on a vector of year integers.)
FYearIntToChar <- function(x) {
  paste(substring(x, 1, 4), substring(x, 5, 6), sep = "-")
}
cat("FYearDateToInt(x)
Returns integer financial year from date (e.g. 2015-01-01 becomes 201415)")
# Convert a Date to an integer financial year (April start), e.g.
# 2015-01-01 -> 201415. Relies on year()/month() (lubridate) being loaded.
FYearDateToInt <- function(x) {
  yr <- year(x)
  fy <- ifelse(month(x) < 4,
               paste0(yr - 1, substring(yr, 3, 4)),
               paste0(yr, substring(yr + 1, 3, 4)))
  as.integer(fy)
}
cat("FYearDateToChar(x)
Returns integer financial year from date (e.g. 2015-01-01 becomes 2014/15)")
# Convert a Date to a character financial year (April start), e.g.
# 2015-01-01 -> "2014/15". Relies on year()/month() (lubridate) being
# loaded.
FYearDateToChar <- function(x) {
  yr <- year(x)
  ifelse(month(x) < 4,
         paste0(yr - 1, "/", substring(yr, 3, 4)),
         paste0(yr, "/", substring(yr + 1, 3, 4)))
}
cat("\nPlotting functions")
cat("chartBestTick(max, mostTicks = 8)
returns the best tick value for a series with a given max and maximum number of ticks")
# Pick a "nice" tick spacing (1, 2, 5 or 10 times a power of ten) so that
# a series reaching `max` needs at most `mostTicks` ticks.
chartBestTick <- function(max, mostTicks = 8) {
  smallest <- max / mostTicks
  base10 <- 10 ^ round(log10(smallest))
  ratio <- smallest / base10
  if (ratio > 5) {
    10 * base10
  } else if (ratio > 2) {
    5 * base10
  } else if (ratio > 1) {
    2 * base10
  } else {
    base10
  }
}
cat("chartLimits(minimum, maximum, mostTicks = 8)
returns a sensible minimum and maximum for chart limits")
# Compute sensible chart limits: the data range [minimum, maximum] rounded
# outward to the tick spacing chosen by chartBestTick().
#
# Fix: removed the dead locals `range`, `exponent` and `magnitude` -- they
# were computed but never used (and `range` shadowed base::range).
#
# Returns a named numeric vector c(Min = ..., Max = ...).
chartLimits <- function(minimum, maximum, mostTicks = 8) {
  tick <- chartBestTick(maximum, mostTicks)
  c(Min = roundTo(minimum, tick, "down"),
    Max = roundTo(maximum, tick, "up"))
}
cat("chartBreaks(minimum, maximum, mostTicks = 8)
returns a sensible minimum, and maximum, and breaks for charts.")
# Generate axis break positions: chartLimits() rounded limits stepped by
# the best tick for the series.
#
# Fix: forward `mostTicks` to chartLimits() -- previously the limits were
# always computed with the default of 8 ticks even when the caller asked
# for a different density, so the breaks could fail to land exactly on the
# Max limit.
chartBreaks <- function(minimum, maximum, mostTicks = 8) {
  limits <- chartLimits(minimum, maximum, mostTicks)
  tick <- chartBestTick(maximum, mostTicks)
  seq(limits["Min"], limits["Max"], tick)
}
|
1112022e58bda023c216543988450e96d83520cf
|
a13d6aa828fbb33b8d8d32ad250e99ae81d5a76c
|
/R/NWalgorithm.R
|
cd2fafd261a872f7cc70d2599bc196c798365120
|
[] |
no_license
|
ygu427/seqAlign
|
e5bacaa1d3c2ced556bbea05d0f4dc9b930671f5
|
3cdc671a4af39922b967118335da03932fbc9127
|
refs/heads/master
| 2021-01-10T01:16:26.506621
| 2015-12-16T23:50:48
| 2015-12-16T23:50:48
| 48,104,915
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,211
|
r
|
NWalgorithm.R
|
### NWalgorithm.R
###
### Implement the dynamic programming algorithm
### Needleman-Wunsch algorithm for global alignment
### Pairwise protein sequence global alignment
### User input: two GeneBank identifier for proteins
### or input sequences directly (check box)
### or read FASTA file (if no input for identifieer)
### gap-opening and gap-extension penalties
### score matrix
###
### Result: Global Optimal Paths (list for multiple paths)
### the best score
### the F Score Matrix
### Result would be written into a text file
###
### Required packages:
### "biomaRt" -- for sequence query
### "tcltk" -- for user input interface
### "seqinr" -- for read FASTA file
###
### Written by Yu Gu
### 11-24-2015
###############################################
## ##
## Global Alignment Algorithm ##
## ##
###############################################
# Needleman-Wunsch global pairwise alignment by dynamic programming.
#
# seq1, seq2:   the two sequences as single character strings.
# subMatrix:    substitution score matrix indexed by residue characters.
# gapOpening:   penalty for opening a gap (default 8).
# gapExtension: penalty per gap-extension step (default 8).
#
# Returns list(path = 2 x L character matrix of the aligned symbols
# (gaps shown as '_'/'-', '*' at both ends), fMatrix = the filled score
# matrix with sequence symbols as dimnames).
NWalgorithm <- function(seq1,seq2,subMatrix,gapOpening = 8, gapExtension = 8){
  ###
  #
  # Convert the sequences to upper case.
  # Doing this to make sure all chars in the string is upper case
  #
  ###
  # NOTE(review): toupper() does not modify in place and both results are
  # discarded here, so lower-case input is NOT actually upper-cased --
  # should be seq1 <- toupper(seq1) etc.
  toupper(seq1)
  toupper(seq2)
  ###
  #
  # Create vectors and matrices to store temp and final results
  # alignX & alignY store sequences as character vector
  # fMatrix: stores the scores
  # track: stores the postion from which getting to the best score
  #        index on fMatrix
  # gapEx & gapEy: store number of gap extension
  #
  ###
  lenX <- nchar(seq1)
  lenY <- nchar(seq2)
  alignX <- vector()
  alignY <- vector()
  for (i in 1:lenX) {
    alignX[i] <- substr(seq1,i,i)
  }
  for (j in 1:lenY) {
    alignY[j] <- substr(seq2,j,j)
  }
  fMatrix <- matrix(0,lenY+1,lenX+1)
  # NOTE(review): the boundary penalties are hardcoded as -8 per step and
  # ignore the gapOpening/gapExtension parameters -- with non-default
  # penalties the first row/column are inconsistent with the fill rules
  # below. Confirm and replace 8 with the appropriate parameter.
  fMatrix[1,1:(lenX+1)]<-seq(from=0,by=-8,length.out=(lenX+1))
  fMatrix[1:(lenY+1),1]<-seq(from=0,by=-8,length.out=(lenY+1))
  track <- list()
  gapEx <- matrix(0,lenY,lenX)
  gapEy <- matrix(0,lenY,lenX)
  ###
  #
  # Initialization:
  # initial coordinate (1,1), but for fMatrix, it's F(2,2)
  # bestScore = 0 indicates starting here
  #
  ###
  bestScore <- 0
  ###
  #
  # Filling out the F Matrix
  # Note the current cell is F(j+1,i+1)
  # scoreI: F(j,i+1) -> F(j+1,i+1) i.e. gap in X (column)
  # scoreJ: F(j+1,i) -> F(j+1,i+1) i.e. gap in Y (row)
  # if F(j,i)<0, set F(j,i)=0
  #
  ###
  for (j in 1:lenY) {
    for (i in 1:lenX){
      # get the symbols from both sequences
      X <- alignX[i]
      Y <- alignY[j]
      # score from diagonal
      # not for the first row and fist column
      s <- subMatrix[X,Y]
      score <- fMatrix[j,i]+s
      bestScore <- score
      pair <- as.character(c(j,i))
      # score from upper cell, which causes gap in Xi
      # gapX stores the number of gap extension on X
      # compared with current best score from diagonal:
      #   if better, replace the current one with the new score
      #   if same, keep both of the current one and the new one
      #   if worse, discard the new score
      if (j==1) {
        gapX <- 0
      } else {
        gapX <- gapEx[j-1,i]
      }
      if (gapX==0) {
        scoreI <- fMatrix[j,i+1] - gapOpening
      } else {
        scoreI <- fMatrix[j,i+1] - gapX*gapExtension
      }
      if (scoreI > bestScore) {
        bestScore <- scoreI
        pair <- as.character(c(j,i+1))
        gapEx[j,i] <- gapX+1
        map <- matrix(pair,ncol=2,byrow=TRUE)
        colnames(map)<-c("J","I")
        gap<-"X"
        map<-cbind(map,gap)
        track[[(j-1)*lenX+i]]<-map
      } else if (scoreI==bestScore) {
        pair <-c(pair,c(j,i+1))
        gapEx[j,i] <- gapX+1
        map <- matrix(pair,ncol=2,byrow=TRUE)
        colnames(map)<-c("J","I")
        gap<-c(NA,"X")
        map<-cbind(map,gap)
        track[[(j-1)*lenX+i]]<-map
      } else {
        map <- matrix(pair,ncol=2,byrow=TRUE)
        colnames(map)<-c("J","I")
        gap<-NA
        gapEx[j,i]<-0
        map<-cbind(map,gap)
        track[[(j-1)*lenX+i]]<-map
      } # end of checking upper cell
      # score from left cell, which causes gap in Yj
      # gapY store the number of gap extension on Y
      # compared with the current best score (diagonal, upper, or both)
      # similar process as the one for upper cell
      if (i==1) {
        gapY <- 0
      } else {
        gapY <- gapEy[j,i-1]
      }
      if (gapY==0) {
        scoreJ <- fMatrix[j+1,i] - gapOpening
      } else {
        scoreJ <- fMatrix[j+1,i] - gapY*gapExtension
      }
      if (scoreJ > bestScore) {
        bestScore <- scoreJ
        pair <- c(j+1,i)
        gapEy[j,i] <- gapY+1
        map <- matrix(pair,ncol=2,byrow=TRUE)
        colnames(map)<-c("J","I")
        gap<-"Y"
        map<-cbind(map,gap)
        track[[(j-1)*lenX+i]]<-map
      } else if (scoreJ==bestScore) {
        pair <-c(pair,c(j+1,i))
        gapEy[j,i] <- gapY+1
        map <- matrix(pair,ncol=2,byrow=TRUE)
        colnames(map)<-c("J","I")
        gap<-c(gap,"Y")
        map<-cbind(map,gap)
        track[[(j-1)*lenX+i]]<-map
      } else {
        map <- matrix(pair,ncol=2,byrow=TRUE)
        colnames(map)<-c("J","I")
        gap<-gap
        gapEy[j,i]<-0
        map<-cbind(map,gap)
        track[[(j-1)*lenX+i]]<-map
      } # end of checking left cell
      fMatrix[j+1,i+1]<-bestScore
      #track
    } # end of i
  } # end of j
  ###
  #
  # Track back
  # done <logic value>: 0 indicates track back not done yet;
  #                     1 indicates track back done
  # Starting track back from the lower-right cell
  # retrieve gap info. from track list
  # fill out the path matrix
  #
  ###
  fScore <- fMatrix[j+1,i+1]
  #start.matrix <- which(fMatrix==fScore,arr.ind=TRUE)
  #nStart <- nrow(start.matrix)
  # NOTE(review): path.list is never used -- leftover from the commented
  # multi-start loop below.
  path.list <- list()
  #for (init in 1:nStart) {
  # Initial Status
  done <- 0
  x.index <- lenX+1
  y.index <- lenY+1
  path.index <- matrix(c(y.index,x.index),ncol=2)
  count <- as.integer((y.index-1-1)*lenX + x.index-1)
  # find the local optimal path
  while(!done) {
    # follow the first recorded predecessor of the current cell
    trBack <- track[[count]][1,]
    x.index <-as.integer(trBack[2])
    y.index <-as.integer(trBack[1])
    path.index <- rbind(path.index,c(y.index,x.index))
    count <- (y.index-1-1)*lenX + x.index-1
    # once a border is reached, walk straight along it to the origin
    if (x.index==1 & y.index!=1) {
      while (y.index!=1){
        path.index <- rbind(path.index,c(y.index-1,x.index))
        y.index <- y.index-1
      }
    } else if (x.index!=1 & y.index==1){
      while (x.index!=1){
        path.index <- rbind(path.index,c(y.index,x.index-1))
        x.index <- x.index-1
      }
    }
    if (y.index==1 & x.index==1) done=1
  } # end of while
  # reverse the path in place so it runs from the origin to the corner
  l<-nrow(path.index)
  for (k in 1:floor(l/2)) {
    temp <- path.index[k,]
    path.index[k,] <- path.index[(l-k+1),]
    path.index[(l-k+1),] <- temp
  }
  # translate the index path into aligned symbol pairs
  path <- matrix(NA,ncol=2)
  colnames(path)<-c("X.align","Y.align")
  for (m in 1:l) {
    symX.index <-path.index[m,2]
    symY.index <-path.index[m,1]
    if (symX.index==1 & symY.index==1) {
      X <- '*'
      Y <- '*'
      path <- rbind(path,c(X,Y))
      next
    } else if(symX.index == 1) {
      X <- '_'
      Y <- alignY[symY.index-1]
      path <- rbind(path,c(X,Y))
      next
    } else if(symY.index == 1) {
      X <- alignX[symX.index-1]
      Y <- '_'
      path <- rbind(path,c(X,Y))
      next
    }
    count <- (symY.index-1-1)*lenX + symX.index-1
    gapD <- track[[count]][1,3]
    if (is.na(gapD)) {
      X <- alignX[symX.index-1]
      Y <- alignY[symY.index-1]
      path <- rbind(path,c(X,Y))
    } else if (gapD == 'X') {
      X <- '_'
      Y <- alignY[symY.index-1]
      path <- rbind(path,c(X,Y))
    } else {
      X <- alignX[symX.index-1]
      # NOTE(review): a gap in Y is rendered as '-' here while gaps in X
      # use '_' -- confirm whether the inconsistency is intentional.
      Y <- '-'
      path <- rbind(path,c(X,Y))
    }
  }
  # drop the all-NA first row and close the path with '*' markers
  path <- path[-1,]
  path <- rbind(path,c('*','*'))
  # } # end of for
  rownames(fMatrix) <- c(" ", alignY)
  colnames(fMatrix) <- c(" ", alignX)
  return (list(path = t(path), fMatrix = fMatrix))
}
|
a0653aef00a3610de55fcb63e52a81c967367d18
|
b3fcb957bb3e00a1a8f01564ce518769d60c73dc
|
/man/buildcorrsigned.Rd
|
783d997678dd45507a6cf100e8b85fe1f9a930f4
|
[] |
no_license
|
hyenaproject/vullioud2018
|
a5587ccf67fc39b0d166efd2131d4ba247025d36
|
f5e9cb4b42fa6b03568d9c7999c8168c7064e2b8
|
refs/heads/master
| 2021-08-14T15:44:19.467145
| 2021-08-02T11:15:36
| 2021-08-02T11:15:36
| 133,958,243
| 2
| 2
| null | 2021-08-02T11:15:37
| 2018-05-18T13:30:26
|
R
|
UTF-8
|
R
| false
| true
| 462
|
rd
|
buildcorrsigned.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/models.R
\name{buildcorrsigned}
\alias{buildcorrsigned}
\title{Create the correlation matrix (used internally)}
\usage{
buildcorrsigned(id1, id2, type)
}
\arguments{
\item{id1}{focal id}
\item{id2}{other id}
\item{type}{type of interaction}
}
\value{
a list with pairsID, correlation Matrix, and decomposed matrices
}
\description{
Create the correlation matrix (used internally)
}
|
14eef433f785d8c6b8002c20693f6d1acc1d1c06
|
83c96349935fd5de4da4c447c036a5920937ba82
|
/packagecasnit/R/euclidean.R
|
f83b0849a71d696710f3a427538cfd28fec93fbc
|
[] |
no_license
|
nitinsverige/lthree
|
38cab2784d2039c32e7c7e5e3d5af38c2eb05874
|
517a241f20f28832f98ad6672d6b5c2308a7ab75
|
refs/heads/master
| 2020-03-28T20:43:46.473893
| 2018-09-17T09:09:07
| 2018-09-17T09:09:07
| 149,071,460
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 555
|
r
|
euclidean.R
|
#' Greatest common divisor of two numbers
#'
#' Computes the greatest common divisor (GCD) of two scalar numeric inputs
#' using Euclid's remainder algorithm. This replaces the previous
#' brute-force scan over every candidate divisor (O(min(x1, x2)) and an
#' error when either input was 0) with an O(log(min(x1, x2))) loop.
#'
#' @param x1 The first input (a single numeric value)
#' @param x2 The second input (a single numeric value)
#'
#' @return The greatest common divisor of \code{x1} and \code{x2}
#' @export
#'
#' @examples
#' euclidiean(45,135)
#' \dontrun{
#' euclidiean(45,45)
#' }
euclidiean <- function(x1,x2){
  # a "scalar" here means: numeric and of length exactly one
  is.scalar <- function(femto) is.numeric(femto) && length(femto) == 1L
  if(is.scalar(x1)==FALSE || is.scalar(x2)==FALSE){
    stop("The input is not correct")}
  # Euclid's algorithm: gcd(a, b) == gcd(b, a mod b) until b reaches 0.
  # abs() also makes the result well-defined for negative inputs, and
  # gcd(a, 0) correctly returns a.
  a <- abs(x1)
  b <- abs(x2)
  while (b != 0) {
    remainder <- a %% b
    a <- b
    b <- remainder
  }
  a
}
|
98bf73db6b6e103782ca75cd16785e9dd94426d5
|
3be300600d37093644591b80814ced43af84d76f
|
/man/getUniprotProteinNames.Rd
|
5488e348fb728b4aacf3c3bd83424015acf51bea
|
[] |
no_license
|
pcm32/proteinSetReport
|
66550176706e98425109ed56cb6eb06039ec3505
|
2089f9110c6bea986607ed2f7b011cb380827c13
|
refs/heads/master
| 2020-12-14T07:32:14.248761
| 2015-09-23T12:54:48
| 2015-09-23T12:54:48
| 26,854,428
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 818
|
rd
|
getUniprotProteinNames.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/proteinSetReport.R
\name{getUniprotProteinNames}
\alias{getUniprotProteinNames}
\title{Get UniProt Protein Names}
\usage{
getUniprotProteinNames(ensemblIDs, species = "mmusculus_gene_ensembl")
}
\arguments{
\item{ensemblIDs}{The identifiers to search names for. These should belong to the
data set specified by \code{species}}
\item{species}{The name of the ENSEMBL Biomart data set to use}
}
\value{
A data.table with rows \code{ensembl_gene_id} and \code{uniprot_genename}.
}
\description{
Retrieves the UniProt Gene name for the provided ENSEMBL identifiers. The ENSEMBL
Biomart data set is chosen through the species parameter. Multiple names are collapsed
into a comma separated list, so that ENSEMBL gene ids are unique.
}
|
7ee597d4babcf580411098a404a3831a3d5bc363
|
cf41e386480cf7bdf780fc6f4cba3ff99a7a7194
|
/Course-Project1/Plot4.R
|
3c4cdc75ea85e651cab7e16a256dc66998d63f1b
|
[] |
no_license
|
jonneyliu/Exploratory-Data-Analysis
|
1e32c9b26e6adabe87caa7895eae8223cc80f60f
|
2ee9b5717e140d93cd5b0c2e4d1360e9949510e7
|
refs/heads/master
| 2020-12-24T15:23:04.800023
| 2015-04-23T09:17:49
| 2015-04-23T09:17:49
| 33,600,648
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,291
|
r
|
Plot4.R
|
##Exploratory Data Analysis Course Project 1
####Plot 4 -- 2x2 panel of power measurements for 2007-02-01/02
#Read full data; "?" marks missing values in the raw file
dt_full <- read.csv("./exploratory analysis/household_power_consumption.txt",sep = ';',header = TRUE,na.strings="?")
#take subset with below conditions (the two target days)
dt <- dt_full[dt_full$Date=="1/2/2007"|dt_full$Date=="2/2/2007",]
#convert date format using ?strptime example in new column
dt$DateTime <- strptime(paste(dt$Date, dt$Time), "%d/%m/%Y %H:%M:%S")
##mfrow allows for 2 by 2 graphs, mar adjusts the graph margins
par(mfrow=c(2, 2),oma=c(0,0,0,0),mar=c(2,4,3,1))
#top-left: global active power over time
plot(dt$DateTime, dt$Global_active_power, type="l", ylab="Global active power",xlab="")
#top-right: voltage over time
#Fix: this panel plots Voltage but was labelled "Global active power"
plot(dt$DateTime, dt$Voltage, type="l", ylab="Voltage",xlab="datetime")
#bottom-left: the three energy sub-metering series with a legend
plot(dt$DateTime,as.numeric(dt$Sub_metering_1),type ="l",ylab="Energy sub metering",xlab="")
lines(dt$DateTime,as.numeric(dt$Sub_metering_2),col="Red")
lines(dt$DateTime,as.numeric(dt$Sub_metering_3),col="Blue")
#add legend
legend("topright",legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), lty = c(1, 1, 1), col=c("black", "red", "blue"),cex=0.5)
#bottom-right: global reactive power over time
#Fix: axis label typo "Global_reactiv_power" -> "Global_reactive_power"
plot(dt$DateTime, dt$Global_reactive_power, type="l", ylab="Global_reactive_power",xlab="datetime")
#copy the screen device to a 480x480 PNG
dev.copy(png, file="./exploratory analysis/Course project1/plot4.png", height=480, width=480)
dev.off()
|
96a5d88712121d34b9be0a67c23665f54371f58d
|
d2f7fd6a08ae2bb6fe21f2b4681f85ea6378361d
|
/Use Likert Scale/app.R
|
32b9d0dc844d0d5bc56867cac456e5e32657c8dc
|
[] |
no_license
|
dorayin12/R-Shiny_Annual-Survey
|
46d3def1c4906cf529dac0170cc35fe209832cd8
|
44292798c53842a3882eeba21492d58d39df3461
|
refs/heads/master
| 2020-03-21T20:43:55.053932
| 2018-06-28T14:20:13
| 2018-06-28T14:20:13
| 139,024,388
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,661
|
r
|
app.R
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
#author: Wenjuan Sang
############################################################################################################
#cat(file=stderr(), "Starting app", "\n")
library(shiny)
#setwd('/srv/shiny-server/uitssur/')
# DataClean_likert.R must define the survey data objects referenced by
# name below ("agree", "help", "satisfy") and the plotting helper testfun().
source('DataClean_likert.R')
# Define UI for application that draws a plot
ui <- fluidPage(
  # Application title
  titlePanel("UITS Survey"),
  # Sidebar with selectors
  sidebarLayout(
    sidebarPanel(
      # qset selects which prepared data object is plotted (see server)
      radioButtons("qset",
                   "Question type:",
                   c("Cyber Security"="agree", "General Assessment"="help", "Technology Services"="satisfy"),
                   selected="agree"),
      selectInput("dispara",
                  "Display Parameter",
                  choices = c("Mean Score"="mnscore",
                              "Satisfaction Rate (%)"="satrate",
                              "Highly Satisfied Rate (%)"="hisatrate",
                              "Usage Rate (%)"="userate",
                              "Number of Respondents"="nresp"), selected="mnscore"),
      checkboxGroupInput("campus",
                         "Campus:",
                         c("IUB"="IUB", "IUK"="IUK", "IUPUC"="IUPUC", "IUPUI"="IUPUI", "IUS"="IUS"),
                         selected = c("IUB"="IUB", "IUK"="IUK", "IUPUC"="IUPUC", "IUPUI"="IUPUI", "IUS"="IUS")),
      checkboxGroupInput("status",
                         "Status:",
                         c("Faculty"="Faculty", "Staff"="Staff", "Graduate"="Graduate",
                           "Undergraduate"="Undergraduate", "Student*"="Student"),
                         selected = c("Faculty"="Faculty", "Staff"="Staff", "Graduate"="Graduate",
                                      "Undergraduate"="Undergraduate", "Student"="Student")),
      p("*Please select Student for IUK, IUPUC, and IUS cases."),
      width = 2
    ),
    # Show a plot
    mainPanel(
      tabsetPanel(
        tabPanel("Yearly Data", plotOutput("distPlot1")),
        # second tab is a placeholder, not yet implemented
        tabPanel("Multi-Year Data",NULL)
      ),
      width=10
    )
  )
)
#server
server <- function(input, output) {
  #qset
  # look up the prepared data object whose name matches the selected
  # question type ("agree"/"help"/"satisfy", defined in DataClean_likert.R)
  scale <- reactive({
    get(input$qset)
  })
  # debug output of the selected data (no matching UI element renders it)
  output$dat <- renderPrint({
    scale()
  })
  # NOTE(review): the if/else here is passed positionally as renderPlot's
  # height argument (taller canvas for the many-service "satisfy" set) --
  # confirm this is the intended argument binding.
  observe({output$distPlot1 <- renderPlot({
    testfun(scale(), input$qset, c(input$campus), c(input$status), input$dispara)},
    width=1500,
    if(input$qset=="satisfy"){height=2500}
    else{height=600})
  })
}
#run
shinyApp(ui = ui, server = server)
|
46725e364af739bb95afc562040fbe4f624aa82b
|
c9fd1e0b9810374d5a6747bdad47412b309102c6
|
/man/ToxicoSet.Rd
|
714d338104f99cb8806d99c9b3225c7eefecb933
|
[] |
no_license
|
bbyun28/ToxicoGx
|
ad6de798e180fb54a4aef508826c01dc84c3d448
|
22f857f0036eb0b6fc9bd6838bad324cc33065c4
|
refs/heads/master
| 2022-07-11T19:50:14.958048
| 2020-05-04T20:08:14
| 2020-05-04T20:08:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,455
|
rd
|
ToxicoSet.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ToxicoSet.R
\name{ToxicoSet}
\alias{ToxicoSet}
\title{ToxicoSet constructor}
\usage{
ToxicoSet(
name,
molecularProfiles = list(),
cell = data.frame(),
drug = data.frame(),
sensitivityInfo = data.frame(),
sensitivityRaw = array(dim = c(0, 0, 0)),
sensitivityProfiles = matrix(),
sensitivityN = matrix(nrow = 0, ncol = 0),
perturbationN = array(NA, dim = c(0, 0, 0)),
curationDrug = data.frame(),
curationCell = data.frame(),
curationTissue = data.frame(),
datasetType = c("sensitivity", "perturbation", "both"),
verify = TRUE
)
}
\arguments{
\item{name}{A \code{character} string detailing the name of the dataset}
\item{molecularProfiles}{A \code{list} of ExpressionSet objects containing
molecular profiles}
\item{cell}{A \code{data.frame} containing the annotations for all the cell
lines profiled in the data set, across all data types}
\item{drug}{A \code{data.frame} containing the annotations for all the drugs
profiled in the data set, across all data types}
\item{sensitivityInfo}{A \code{data.frame} containing the information for the
sensitivity experiments}
\item{sensitivityRaw}{A 3 Dimensional \code{array} containing the raw drug
dose–response data for the sensitivity experiments}
\item{sensitivityProfiles}{\code{data.frame} containing drug sensitivity profile
statistics such as IC50 and AUC}
\item{sensitivityN, perturbationN}{A \code{data.frame} summarizing the
available sensitivity/perturbation data}
\item{curationCell, curationDrug, curationTissue}{A \code{data.frame} mapping
the names for cells, drugs, and tissues used in the data set to universal
identifiers used between different ToxicoSet objects}
\item{datasetType}{A \code{character} string of "sensitivity",
"perturbation", or both detailing what type of data can be found in the
ToxicoSet, for proper processing of the data}
\item{verify}{\code{boolean} Should the function verify the ToxicoSet and
print out any errors it finds after construction?}
}
\value{
An object of class \code{ToxicoSet}
}
\description{
A constructor that simplifies the process of creating ToxicoSets, as well
as creates empty objects for data not provided to the constructor. Only
objects returned by this constructor are expected to work with the ToxicoSet
methods. For a much more detailed instruction on creating ToxicoSets, please
see the "CreatingToxicoSet" vignette.
}
|
8a99990bdea6d336c27a6e82f4082ca4a97cf12c
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/rcompanion/examples/wilcoxonPairedR.Rd.R
|
ab3341e8fd976d9ddd33d8a9759e531c6950072b
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 295
|
r
|
wilcoxonPairedR.Rd.R
|
# Auto-extracted example for rcompanion::wilcoxonPairedR (from the package's
# Rd examples).
library(rcompanion)
### Name: wilcoxonPairedR
### Title: r effect size for Wilcoxon two-sample paired signed-rank test
### Aliases: wilcoxonPairedR
### ** Examples
# Pooh: paired Likert ratings at two time points (data set shipped with
# rcompanion -- presumably loaded from that package).
data(Pooh)
# Paired Wilcoxon signed-rank test of the Likert scores across Time.
wilcox.test(Likert ~ Time, data=Pooh, paired=TRUE, exact=FALSE)
# Matched-pairs rank-biserial r effect size for the same comparison.
wilcoxonPairedR(x = Pooh$Likert, g = Pooh$Time)
|
9db0386897ec998a55c5f6c275e0fbe3f49d7676
|
edf83ee07b5466ebfa5876be024d104e66e56973
|
/ui.R
|
63f7e93fe9aaa112ad28a4ce6ac148638742565f
|
[] |
no_license
|
jeeachoi/EBSeq_MultiPattern
|
dcd68e3c9efd845d1ec6dc7a7e569d5eafff44b9
|
92b97c3531ce10aff55a0d8b9b6fd24da08fc834
|
refs/heads/master
| 2020-07-29T14:22:20.633273
| 2016-12-16T18:57:01
| 2016-12-16T18:57:01
| 73,663,475
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,708
|
r
|
ui.R
|
library(shiny)
library(shinyFiles)
#library(gdata)
# Allow file uploads up to 500 MB.
options(shiny.maxRequestSize=500*1024^2)
# Define the UI: upload a condition vector, pick an output folder and file
# names, then submit to enumerate all EBSeq multi-condition patterns.
shinyUI(pageWithSidebar(
  # Application title
  headerPanel("All patterns for EBSeq multiple condition analysis"),
  # Sidebar with the input controls
  sidebarPanel(width=8,height=10,
    # Condition (grouping) vector uploaded by the user.
    fileInput("ConditionVector", label = "Condition vector \n file name (support .csv, .txt, .tab)"),
    column(4,
      # Output directory chooser (shinyFiles).
      tags$div(tags$b("Please select a folder for output :")),
      shinyDirButton('Outdir', label ='Select Output Folder', title = 'Please select a folder'),
      tags$br(),
      tags$br(),
      # File name for the plot of all possible patterns.
      textInput("plot",
                label = "Export file name - Plot of all possible patterns for given conditions",
                value = "PatternPlot"),
      # File name for the text export of all possible patterns.
      textInput("txtfile",
                label = "Export file name - Text file of all possible patterns for given conditions",
                value = "PatternTxt")
    ),
    br(),
    actionButton("Submit","Submit for processing")
  ),
  # Main panel: status line plus a table of the enumerated patterns.
  mainPanel(
    h4(textOutput("print0")),
    #tableOutput("values")
    dataTableOutput("tab")
  )
))
|
bb976a402df20b885e02c0eb583bccc73ca123df
|
53fea4d7928676ee05ca2d3242b0bef6c5ec5c09
|
/SimplePerceptron/R/Perceptron.R
|
73f8b4901d5ee8fcb7f02f25fac7ab228d778576
|
[] |
no_license
|
sm2k2010/SimplePerceptron
|
9dacad8196f0f552ef962919cad7a09653834c4a
|
77b27263ddefbdb247be4defaf6f5ec57f1a9bd9
|
refs/heads/master
| 2021-08-28T16:33:13.595820
| 2017-12-12T19:18:40
| 2017-12-12T19:18:40
| 114,029,689
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,289
|
r
|
Perceptron.R
|
library("devtools")
library(roxygen2)
setwd("parent_directory")
create("SimplePerceptron")
# Train a simple (Rosenblatt) perceptron on labelled data.
#
# Args:
#   x:          data frame (or matrix-like) of numeric features, one row per
#               training example.
#   y:          vector of class labels coded as -1 / +1, one per row of x.
#   learn:      learning rate (eta) scaling each weight update.
#   iterations: number of full passes (epochs) over the training data.
#
# Side effects: prints the final weight vector (bias first, then one weight
# per feature) and draws a line plot of the misclassification count per epoch.
#
# Returns: numeric vector of length `iterations` giving the number of
# misclassified examples in each epoch.
simple_perceptron <- function(x, y, learn, iterations)
{
  # Weight vector: one bias term (position 1) plus one weight per feature.
  weight_vector <- rep(0, dim(x)[2] + 1)
  # err[i] counts the misclassifications made during epoch i.
  err <- rep(0, iterations)
  for (i in seq_len(iterations))
  {
    # One pass through the training data set.
    for (j in seq_along(y))
    {
      # Linear activation: w . x(j) + bias.
      z <- sum(weight_vector[2:length(weight_vector)] *
                 as.numeric(x[j, ])) + weight_vector[1]
      if(z < 0)
      {
        pred_y <- -1
      }
      else
      {
        pred_y <- 1
      }
      # Perceptron update rule: (y - pred_y) is zero when the prediction is
      # correct, so the weights only move on mistakes.
      weightdiff <- learn * (y[j] - pred_y) * c(1, as.numeric(x[j, ]))
      weight_vector <- weight_vector + weightdiff
      # Updating error count for this epoch.
      if ((y[j] - pred_y) != 0.0)
      {
        err[i] <- err[i] + 1
      }
    }
  }
  print(weight_vector)
  # BUG FIX: the x-axis was hard-coded to 1:10, which errors whenever
  # `iterations` != 10; use the actual number of epochs.
  plot(seq_len(iterations), err, type="l", lwd=2, col="blue",
       xlab="epoch #", ylab="errors")
  # BUG FIX: the title hard-coded "eta = 1"; report the actual learning rate.
  title(paste("Errors vs epoch - learning rate eta =", learn))
  return(err)
}
|
f3630866dcedf968187b134741a3a93a355b381f
|
e82c2b367e34c7e3e9877b24213ed716910dedb4
|
/string_functions.R
|
ce754bffc918dfc353978f189f174c99363510a6
|
[] |
no_license
|
pcsinu/Acadgild-Assignment-4.3
|
d2b63801df314e8a05d0b224695347912c93cd95
|
277f672a5719396d96c2cdad6248cf25e61e1a74
|
refs/heads/master
| 2020-05-28T04:12:49.247026
| 2019-05-27T16:44:51
| 2019-05-27T16:44:51
| 188,875,987
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 514
|
r
|
string_functions.R
|
# Explore US state names (row names of the built-in USArrests data set):
# report which names contain a lower-/upper-case "w", print each name's
# length, and save a histogram of the name lengths to citynames.png.
library(stringr)
state_names <- rownames(USArrests)
print(state_names)
# Names containing a lower-case "w" and an upper-case "W", respectively
# (base grepl gives the same logical mask as stringr::str_detect here).
lower_w_states <- state_names[grepl("w", state_names)]
upper_w_states <- state_names[grepl("W", state_names)]
# Name with w
print(lower_w_states)
# Name with W
print(upper_w_states)
# Character count of every state name.
name_lengths <- nchar(state_names)
print(name_lengths)
# Draw the histogram of name lengths into a PNG file.
png(file = "citynames.png")
hist(name_lengths, xlab = "Number of Charectors", col = "yellow", border = "blue")
# Close the device so the file is written.
dev.off()
|
6a29c46d13662496d28a915589089d3d2ca5fc3b
|
078778377c7d923f21ce0836165e7e6b03a91aa0
|
/man/check_mc_arguments.Rd
|
56da4b908e0ae26c1c94dace7545005385e3b024
|
[
"MIT"
] |
permissive
|
frietchr/SQualtrics
|
1e4a1430dfd837e167aa58dcba62839ecf51382c
|
ddfb75058abaaa3b020d85bf962c25331a6eb970
|
refs/heads/main
| 2023-02-07T19:09:57.915427
| 2020-12-21T00:52:51
| 2020-12-21T00:52:51
| 308,368,088
| 0
| 0
| null | 2020-12-10T02:17:10
| 2020-10-29T15:19:18
|
R
|
UTF-8
|
R
| false
| true
| 1,186
|
rd
|
check_mc_arguments.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qualtrics_function.R
\name{check_mc_arguments}
\alias{check_mc_arguments}
\title{Check Qualtrics Multiple Choice Arguments}
\usage{
check_mc_arguments(questions, answer_scale)
}
\arguments{
\item{questions}{Required. The questions parameter is the stem, or the questions, that you would like to present in the matrix. This should be a vector, list, or column in a dataset that contains character strings.}
\item{answer_scale}{Required. The answer_scale parameter is the options that you want to give your participants for each of the questions presented in the matrix. This should be a vector or a list of length one that contains all the answer choices you would like.}
}
\value{
The function will return any error messages relevant to making a multiple choice question that can be imported into Qualtrics.
}
\description{
Checks that the supplied questions and answer choices are valid inputs for building a Qualtrics multiple choice question, returning any relevant error messages.
}
\examples{
\dontrun{
stem <- c("I am sad", "I am mad", "I am happy")
options <- c("Yes", "No")
check_mc_arguments(stem, options)
question <- c("I am sad", "I am mad", "I am happy")
options <- rep(list(c("Yes", "No")), 2)
}
}
|
1400a64b57aa25c9b620b14740b76fe2baade26d
|
79c7597505de4b8ead82ae0c8cd7447790f9de32
|
/R/oh.campaign.create.R
|
b7c1e3e6a1b8379518cd4c2680d261971b61e229
|
[] |
no_license
|
jeroen/Ohmage
|
e313734e9502a6cdf8ba223c09d41c496b8955ac
|
5d46cd4ec39ff5fa39ab7ebb171720ce2e2fd4f4
|
refs/heads/master
| 2021-05-28T16:30:21.448739
| 2014-11-18T18:27:36
| 2014-11-18T18:27:36
| 2,479,978
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 637
|
r
|
oh.campaign.create.R
|
#' Create a new campaign
#'
#' Uploads a campaign XML definition to the server via the
#' \code{/campaign/create} API call.
#'
#' @param xml campaign.xml
#' @param running_state if campaign is running or not
#' @param privacy_state private or shared
#' @param class_urn_list classes to add
#' @param description a description
#' @param ... other stuff passed to the server
#' @return (invisibly) the server response returned by \code{oh.call}
#' @export
oh.campaign.create <- function(xml, running_state = 'running', privacy_state='shared', class_urn_list='', description="My campaign.", ...){
  # Issue the API request through the package's low-level helper
  # (oh.call is defined elsewhere in this package).
  xhr <- oh.call("/campaign/create", style="httppost", xml=xml,
                 running_state=running_state, privacy_state=privacy_state,
                 class_urn_list=class_urn_list, description=description, ...)
  message("Campaign created!")
  # BUG FIX: the server response was previously computed and then silently
  # discarded (the function returned message()'s invisible NULL). Return it
  # invisibly so callers can inspect it without changing interactive output.
  invisible(xhr)
}
|
142c239874a856f6d5d17b6240d4e70ab51cbac9
|
9e5de09ab170bfd301f96129736ce7101de6fac2
|
/data/continuous tensor/NIPSdata.R
|
2c3871c481970d35dc68b7694e6b55a91aa44caa
|
[] |
no_license
|
Miaoyanwang/signTensor
|
1a48b1262456a1cff8caf5d89fa4bc6fe3b15dd9
|
24db67e8af4611688dab9548dd5772f14b6d1eb5
|
refs/heads/main
| 2023-04-16T11:39:05.244577
| 2021-01-18T07:44:02
| 2021-01-18T07:44:02
| 330,591,173
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,498
|
r
|
NIPSdata.R
|
####################################################
# data preprocessing ###############################
# Build a papers x words x years count tensor from the NIPS corpus dump
# (.map label files and a sparse .tns entry file read from the home
# directory), then extract a smaller sub-tensor for analysis.
papers = read.table("~/mode-1-papers.map")
papers = papers$V1
authors = read.table("~/mode-2-authors.map")
authors = authors$V1
words = read.table("~/mode-3-words-2.map")
words = words$V1
years = read.table("~/mode-4-years.map")
years = years$V1
length(papers) #2482
length(authors) #2862
length(words) #14036
length(years) #17
# Sparse tensor entries; presumably one row per
# (paper, author, word, year, count) -- TODO confirm column order.
tns = read.table("~/nips.tns")
str(tns)
# Authors and papers are corresponding each other (one variable is enough)
# Merge authors because multiple authors make words counted multiple times.
ntns = aggregate(tns[2],tns[-2],unique)
# V1: papers, V2: words, V3: years, V4: counts,
NIPS = ntns[1:4]
names(NIPS) = c("papers","words","years","counts")
range(NIPS$papers)
range(NIPS$words)
range(NIPS$years)
# reshape dataframe to tensor (papers x words x years, filled with counts)
library(reshape2)
tnsNIPS = acast(NIPS,papers~words~years,value.var = "counts")
save(tnsNIPS,NIPS,papers,words,years,file = "NIPS.RData")
#################################################################
### getting smaller size tensor (2482,14036,17) =>(500,510,17)
# want to reduce the size
############################
# reducing # of words to 500
par(mfrow = c(2,1))
# Total occurrences of each word across the corpus.
num_w = aggregate(NIPS[4],NIPS[2],sum)
hist(num_w$counts,breaks = 100,xlab = "# of words",main = "words (full)")
range(num_w$counts) #6~23921
# Keep the 500 most frequent words (indices sorted back into word order).
words[order(num_w$counts,decreasing = T)[1:500]]
rwindex = sort(order(num_w$counts,decreasing = T)[1:500])
rwords = words[rwindex]
hist(num_w[rwindex,]$counts,breaks = 100,xlab = "# of words",main = "words (reduced)")
range(num_w[rwindex,]$counts) #1373~23921
#############################
# reducing # of papers to 510
num_w_in_p = aggregate(NIPS[4],NIPS[1],sum)
hist(num_w_in_p$counts,breaks = 100,xlab = "# of words in a paper",main = "paper (full)",
xlim = range(num_w_in_p$counts))
# Number of distinct papers per year.
# NOTE(review): NIPS$year relies on partial matching of the "years" column.
p_y = NULL
for(i in 1:17){
p_y = c(p_y,length(unique(NIPS[NIPS$year==i,1])))
}
# randomly sample 30 papers for each year
# NOTE(review): sample() is not seeded, so the selected subset is not
# reproducible across runs -- consider set.seed() before sampling.
a = cumsum(p_y)
rpindex = NULL
s = 0
for(i in a){
s = s+1
rpindex = c(rpindex,sort(sample((c(0,a)[s]+1):c(0,a)[s+1],30)))
}
length(rpindex)
rpapers = papers[rpindex]
hist(num_w_in_p[rpindex,]$counts,breaks = 100,xlab = "# of words in a paper",
main = "paper (reduced)",xlim = range(num_w_in_p$counts))
## Reducing tensor to the selected papers and words (all years kept).
rtnsNIPS = tnsNIPS[rpindex,rwindex,]
dimnames(rtnsNIPS) = NULL
rwords
rpapers
ryears = years
save(rtnsNIPS,rwords,rpapers,ryears,file = "rNIPS.RData")
|
ffc9db3081a24769ae0a67afdddf921b1064a8b4
|
d5342032c561e7085b40116c53c0bc56a95c7394
|
/violentcrimes.R
|
51139366e81702cab254e8c39d398a9a2076d1b9
|
[] |
no_license
|
gerwindekruijf/BayesianNetworks
|
872f0ac7e18970c174d0175c225fd36a057e066c
|
cd22299d931e978b12db1b6ada995ede6311594a
|
refs/heads/master
| 2023-01-23T03:20:23.257780
| 2020-12-09T13:18:59
| 2020-12-09T13:18:59
| 296,850,802
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,326
|
r
|
violentcrimes.R
|
# Olivier Brahma s1061745
# Gerwin de Kruijf s1063465
# Dirren van Vlijmen s1009852
# Read the data: UCI "Communities and Crime" data set; "?" marks missing
# values and the file ships without a header row.
communities <- read.csv("http://archive.ics.uci.edu/ml/machine-learning-databases/communities/communities.data",
na.strings = "?", header=FALSE)
# All the vars which have to be kept, these are explained in the README
myvars <- c("V6", "V8", "V9", "V10", "V11", "V14", "V26", "V36",
"V37", "V38", "V91", "V96", "V111", "V112", "V113",
"V114", "V128")
# Remove all other vars from data
df_sub <- communities[myvars]
# Change column names (same order as myvars above)
colnames(df_sub) <- c("population", "racePctB", "racePctW", "racePctA", "racePctH",
"agePct16t24", "perCapInc",
"pctNotHSGrad", "pctBSorMore", "pctUnemployed", "medRent",
"numStreet", "pctPoliceW", "pctPoliceB",
"pctPoliceH", "pctPoliceA", "violentCrimes"
)
# Keep the records which don't have missing values (training set)
df_final <- df_sub[complete.cases(df_sub),]
# Keep only the records which have missing values (used later for prediction)
df_na <- df_sub[!complete.cases(df_sub),]
# Import dagitty
library(dagitty)
# Directed Acyclic Graph: hand-specified causal assumptions over the selected
# community variables. The bb/pos entries are only plotting coordinates
# (presumably exported from dagitty.net) and carry no causal meaning.
g <- dagitty('dag {
bb="-6.781,-7.29,8.351,9.149"
agePct16t24 [pos="2.670,-4.006"]
medRent [pos="-3.560,-3.600"]
numStreet [pos="-1.121,2.015"]
pctBSorMore [pos="-1.733,-6.248"]
pctNotHSGrad [pos="2.638,-6.531"]
pctPoliceA [pos="7.327,-3.335"]
pctPoliceB [pos="7.422,2.810"]
pctPoliceH [pos="7.084,1.062"]
pctPoliceW [pos="7.158,-1.781"]
pctUnemployed [pos="-0.498,-0.121"]
perCapInc [pos="-4.585,1.327"]
population [pos="-2.958,7.295"]
racePctA [pos="5.965,-3.388"]
racePctB [pos="5.869,3.004"]
racePctH [pos="6.049,1.133"]
racePctW [pos="5.922,-1.039"]
violentCrimes [pos="3.494,7.436"]
agePct16t24 -> medRent
agePct16t24 -> pctUnemployed
agePct16t24 -> perCapInc
agePct16t24 -> violentCrimes
medRent -> numStreet
medRent -> pctUnemployed
numStreet -> violentCrimes
pctBSorMore -> medRent
pctBSorMore -> pctPoliceB
pctBSorMore -> pctPoliceH
pctBSorMore -> pctPoliceW
pctBSorMore -> pctUnemployed
pctBSorMore -> perCapInc
pctNotHSGrad -> medRent
pctNotHSGrad -> numStreet
pctNotHSGrad -> pctPoliceB
pctNotHSGrad -> pctPoliceH
pctNotHSGrad -> pctPoliceW
pctNotHSGrad -> pctUnemployed
pctNotHSGrad -> perCapInc
pctPoliceA -> numStreet
pctPoliceA -> violentCrimes
pctPoliceB -> pctUnemployed
pctPoliceB -> perCapInc
pctPoliceH -> medRent
pctPoliceW -> perCapInc
pctUnemployed -> numStreet
pctUnemployed -> violentCrimes
perCapInc -> medRent
perCapInc -> numStreet
perCapInc -> pctUnemployed
perCapInc -> violentCrimes
population -> medRent
population -> numStreet
population -> pctPoliceB
population -> pctPoliceH
population -> pctPoliceW
population -> perCapInc
population -> racePctA
population -> racePctB
population -> racePctH
population -> racePctW
racePctA -> medRent
racePctA -> numStreet
racePctA -> pctBSorMore
racePctA -> pctNotHSGrad
racePctA -> pctPoliceA
racePctA -> pctUnemployed
racePctA -> perCapInc
racePctA -> violentCrimes
racePctB -> medRent
racePctB -> pctBSorMore
racePctB -> pctNotHSGrad
racePctB -> pctPoliceB
racePctB -> pctUnemployed
racePctB -> perCapInc
racePctB -> violentCrimes
racePctH -> medRent
racePctH -> pctBSorMore
racePctH -> pctNotHSGrad
racePctH -> pctPoliceH
racePctH -> perCapInc
racePctH -> violentCrimes
racePctW -> pctBSorMore
racePctW -> pctNotHSGrad
racePctW -> pctPoliceW
racePctW -> pctUnemployed
racePctW -> perCapInc
racePctW -> violentCrimes
}
')
# Plot the DAG
plot(g)
# Testable implications of the DAG against the data (kept for reference):
# test_results <- localTests(g, df_final)
# above_p_value <- test_results[test_results$p.value < 0.05,]
# above_p_value <- above_p_value[,1:2]
# print(above_p_value)
# Import bnlearn
library(bnlearn)
# Fit the DAG to the data (convert the dagitty graph to a bnlearn network,
# then estimate its parameters on the complete cases).
net <- model2network(toString(g,"bnlearn"))
fit <- bn.fit(net,as.data.frame(df_final))
# Acquire predicted probabilities for the incomplete records, first using
# only the four main predictors, then using all twelve non-police variables.
predictions <- predict(fit, node="violentCrimes",
data=subset(df_na, select =
c("numStreet","pctUnemployed", "racePctB", "racePctW")),
method = "bayes-lw")
predictions_forall <- predict(fit, node="violentCrimes",
data = subset(df_na, select =
c("population", "racePctB", "racePctW", "racePctA", "racePctH",
"agePct16t24", "perCapInc", "pctNotHSGrad", "pctBSorMore",
"pctUnemployed", "medRent", "numStreet")),
method = "bayes-lw")
# Calculate RMSE for the predictions; RMSE_4 is only for the 4 main factors
# NOTE(review): df_na rows have at least one missing value somewhere; if
# violentCrimes itself is ever NA there, these RMSEs become NA -- confirm.
RMSE_4 <- sqrt(sum((df_na$violentCrimes - predictions)**(2)) / nrow(df_na))
RMSE <- sqrt(sum((df_na$violentCrimes - predictions_forall)**(2)) / nrow(df_na))
# Predict values for all the nodes without a direct edge to crime rate,
# sweeping each variable over a 0..0.99 grid while the rest stay free.
seq_0_1 = seq(from = 0, to = 0.99, by = 0.01)
population_pred <- predict(fit,node="violentCrimes", data=data.frame(population = as.double(seq_0_1)), method = "bayes-lw")
perCapInc_pred <- predict(fit,node="violentCrimes", data=data.frame(perCapInc = as.double(seq_0_1)), method = "bayes-lw")
pctNotHSGrad_pred <- predict(fit,node="violentCrimes", data=data.frame(pctNotHSGrad = as.double(seq_0_1)), method = "bayes-lw")
pctBsorMore_pred <- predict(fit,node="violentCrimes", data=data.frame(pctBSorMore = as.double(seq_0_1)), method = "bayes-lw")
medRent_pred <- predict(fit,node="violentCrimes", data=data.frame(medRent = as.double(seq_0_1)), method = "bayes-lw")
pctPoliceB_pred <- predict(fit,node="violentCrimes", data=data.frame(pctPoliceB = as.double(seq_0_1)), method = "bayes-lw")
pctPoliceH_pred <- predict(fit,node="violentCrimes", data=data.frame(pctPoliceH = as.double(seq_0_1)), method = "bayes-lw")
pctPoliceW_pred <- predict(fit,node="violentCrimes", data=data.frame(pctPoliceW = as.double(seq_0_1)), method = "bayes-lw")
# The 4 variables with "significant" indirect effect on crime rate
# plot(population_pred)
# plot(pctNotHSGrad_pred)
# plot(pctPoliceB_pred)
# plot(pctPoliceW_pred)
# Structure learning on the same data for comparison with the hand-built DAG.
pc_graph <- pc.stable(df_final, undirected = FALSE)
plot(pc_graph)
mmhc_graph <- mmhc(df_final)
plot(mmhc_graph)
|
eeae1c227a41cf8a46b7add64916090989350a76
|
00d006eb2a619cad00be4ac330fb7d3f144e5c4b
|
/R/mcmh_mc.R
|
7abea1c4ca78786b52aaadcc86d3e1eee124e93e
|
[] |
no_license
|
mlthom/CogIRT
|
da38be78583681e9584f5f9275a5f04d882cb014
|
ed60801a9753029d27fe32338827b66dd54ffba9
|
refs/heads/master
| 2022-06-23T21:26:38.894571
| 2022-05-30T22:56:48
| 2022-05-30T22:56:48
| 203,634,310
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,095
|
r
|
mcmh_mc.R
|
#-------------------------------------------------------------------------------
#' MCMH Parameter Estimates for Multiple Chains
#'
#' This function calculates MCMH parameter estimates for multiple chains. See
#' documentation for mcmh_sc.R for more information.
#'
#' @param chains Number of chains in the MCMH sampler (scalar).
#' @param y Matrix of item responses (K by IJ).
#' @param obj_fun A function that calculates predictions and log-likelihood
#' values for the selected model (character).
#' @param est_omega Determines whether omega is estimated (logical).
#' @param est_nu Determines whether nu is estimated (logical).
#' @param est_zeta Determines whether zeta is estimated (logical).
#' @param lambda Matrix of item structure parameters (IJ by JM).
#' @param kappa Matrix of item guessing parameters (K by IJ).
#' @param gamma Matrix of experimental structure parameters (JM by MN).
#' @param omega0 Starting values for omega.
#' @param nu0 Starting values for nu.
#' @param zeta0 Starting values for zeta.
#' @param omega_mu Vector of means prior for omega (1 by MN).
#' @param omega_sigma2 Covariance matrix prior for omega (MN by MN).
#' @param zeta_mu Vector of means prior for zeta (1 by JM).
#' @param zeta_sigma2 Covariance matrix prior for zeta (JM by JM).
#' @param nu_mu Prior mean for nu (scalar).
#' @param nu_sigma2 Prior variance for nu (scalar).
#' @param burn Number of iterations at the beginning of an MCMC run to discard
#' (scalar).
#' @param thin Determines every nth observation retained (scalar).
#' @param min_tune Determines when tunning begins (scalar).
#' @param tune_int MCMH tuning interval (scalar).
#' @param max_tune Determines when tunning ends (scalar).
#' @param niter Number of iterations of the MCMH sampler.
#'
#' @return List with elements omega_draws (draws from every saved iteration of
#' the MCMH sampler), omegaEAP (expected a posteriori estimates for omega),
#' omegaPSD (posterior standard deviation estimates for omega), omega_psrf
#' (potential scale reduction factor for omega), nuEAP (expected a posteriori
#' estimates for nu), nuPSD (posterior standard deviation estimates for nu),
#' nu_psrf (potential scale reduction factor for nu), zetaEAP (expected a
#' posteriori estimates for zeta), zetaPSD (posterior standard deviation
#' estimates for zeta), zeta_psrf (potential scale reduction factor for zeta).
#'
#' @examples
#'mcmh_mc(chains = 3, y = sdirt$y, obj_fun = dich_response_model, est_omega = TRUE,
#' est_nu = TRUE, est_zeta = TRUE, lambda = sdirt$lambda, kappa = sdirt$kappa,
#' gamma = sdirt$gamma, omega0 = array(data = 0, dim = dim(sdirt$omega)),
#' nu0 = array(data = 0, dim = c(ncol(sdirt$nu), 1)),
#' zeta0 = array(data = 0, dim = dim(sdirt$zeta)),
#' omega_mu = sdirt$omega_mu, omega_sigma2 = sdirt$omega_sigma2,
#' nu_mu = matrix(sdirt$nu_mu), nu_sigma2 = matrix(sdirt$nu_sigma2),
#' zeta_mu = sdirt$zeta_mu, zeta_sigma2 = sdirt$zeta_sigma2,
#' burn = 0, thin = 10, min_tune = 50, tune_int = 50, max_tune = 1000,
#' niter = 2000)
#'
#' @export mcmh_mc
#-------------------------------------------------------------------------------
mcmh_mc <- function(
  chains=NULL, y = y, obj_fun = NULL, est_omega = TRUE, est_nu = TRUE, est_zeta = TRUE,
  lambda = NULL, kappa = NULL, gamma = NULL, omega0 = NULL, nu0 = NULL,
  zeta0 = NULL, omega_mu = NULL, omega_sigma2 = NULL, nu_mu = NULL,
  nu_sigma2 = NULL, zeta_mu = NULL, zeta_sigma2 = NULL, burn = NULL,
  thin = NULL, min_tune = NULL, tune_int = NULL, max_tune = NULL, niter = NULL
) {
  # BUG FIX: the original guarded all three dependency errors behind a single
  # requireNamespace("abind") test, so a missing "parallel" or "coda" was
  # never detected and the second and third stop() calls were unreachable.
  # Check each required package independently.
  if (!requireNamespace("abind", quietly = TRUE)) {
    stop("Package \"abind\" needed for the mcmh_mc function to work. Please install.",
         call. = FALSE)
  }
  if (!requireNamespace("parallel", quietly = TRUE)) {
    stop("Package \"parallel\" needed for the mcmh_mc function to work. Please install.",
         call. = FALSE)
  }
  if (!requireNamespace("coda", quietly = TRUE)) {
    stop("Package \"coda\" needed for the mcmh_mc function to work. Please install.",
         call. = FALSE)
  }
  # Run `chains` independent single-chain samplers (mcmh_sc) with identical
  # settings; each list element holds one chain's draws.
  # NOTE(review): mc.cores is hard-coded to 2 regardless of `chains` --
  # confirm whether it should scale with the requested number of chains.
  draws <- parallel::mclapply(
    mc.cores = 2, X = seq_len(chains), FUN = mcmh_sc, y = y,
    obj_fun = obj_fun, est_omega = est_omega, est_nu = est_nu,
    est_zeta = est_zeta, lambda = lambda, kappa = kappa, gamma = gamma,
    omega0 = omega0, nu0 = nu0, zeta0 = zeta0, omega_mu = omega_mu,
    omega_sigma2 = omega_sigma2, nu_mu = nu_mu, nu_sigma2 = nu_sigma2,
    zeta_mu = zeta_mu, zeta_sigma2 = zeta_sigma2, burn = burn, thin = thin,
    min_tune = min_tune, tune_int = tune_int, max_tune = max_tune, niter = niter
  )
  names(draws) <- paste0("chain", seq_len(chains))
  # Stack one draw element (e.g. "omega_draws") from every chain along the
  # first (iteration) dimension.
  combine_chains <- function(element) {
    abind::abind(lapply(draws, function(x) x[[element]]), along = 1)
  }
  # Gelman-Rubin potential scale reduction factors for one draw element,
  # computed slice-by-slice across chains and returned as an n_row x n_col
  # matrix filled by row (same layout as the original code produced).
  compute_psrf <- function(element, n_row, n_col) {
    per_chain <- lapply(draws, function(x) x[[element]])
    matrix(
      data = unlist(
        lapply(seq_len(n_row), function(i) {
          coda::gelman.diag(
            x = coda::mcmc.list(
              lapply(seq_len(chains), function(ch) {
                coda::mcmc(data = per_chain[[ch]][, , i])
              })
            ),
            multivariate = FALSE
          )[["psrf"]][, 1]
        })
      ),
      nrow = n_row,
      ncol = n_col,
      byrow = TRUE
    )
  }
  if (est_omega) {
    omegadraws <- combine_chains("omega_draws")
    # Posterior mean (EAP) and standard deviation (PSD) per parameter.
    omegaeap <- t(apply(X = omegadraws, MARGIN = c(2, 3), FUN = mean))
    omegapsd <- t(apply(X = omegadraws, MARGIN = c(2, 3), FUN = sd))
    # The PSRF convergence diagnostic needs at least two chains.
    if (chains > 1) {
      omega_psrf <- compute_psrf("omega_draws", nrow(omega0), ncol(omega0))
    } else {
      omega_psrf <- NULL
    }
  } else {
    omegaeap <- NULL
    omegapsd <- NULL
    omega_psrf <- NULL
  }
  if (est_nu) {
    nudraws <- combine_chains("nu_draws")
    nueap <- t(apply(X = nudraws, MARGIN = c(2, 3), FUN = mean))
    nupsd <- t(apply(X = nudraws, MARGIN = c(2, 3), FUN = sd))
    if (chains > 1) {
      # nu estimates form a single column, matching the original layout.
      nu_psrf <- compute_psrf("nu_draws", nrow(nu0), 1)
    } else {
      nu_psrf <- NULL
    }
  } else {
    nueap <- NULL
    nupsd <- NULL
    nu_psrf <- NULL
  }
  if (est_zeta) {
    zetadraws <- combine_chains("zeta_draws")
    zetaeap <- t(apply(X = zetadraws, MARGIN = c(2, 3), FUN = mean))
    zetapsd <- t(apply(X = zetadraws, MARGIN = c(2, 3), FUN = sd))
    if (chains > 1) {
      zeta_psrf <- compute_psrf("zeta_draws", nrow(zeta0), ncol(zeta0))
    } else {
      zeta_psrf <- NULL
    }
  } else {
    zetaeap <- NULL
    zetapsd <- NULL
    zeta_psrf <- NULL
  }
  return(list(
    "mcmhdraws" = draws, "omegaEAP" = omegaeap, "omegaPSD" = omegapsd,
    "omega_psrf" = omega_psrf, "nuEAP" = nueap, "nuPSD" = nupsd,
    "nu_psrf" = nu_psrf, "zetaEAP" = zetaeap, "zetaPSD" = zetapsd,
    "zeta_psrf" = zeta_psrf
  ))
}
|
a6f2e7585fec5274954b0b4a15021092f40be274
|
9a92a4aa3d4afc87bfa13ec4145abedc1ab80b7c
|
/Code/CTSim/R/run_sim_N.r
|
3fe8941de4647fbe09403a3d2d3ead1018bad7e3
|
[] |
no_license
|
hurlbertlab/core-transient-simulation
|
68c5d2b8176283a801d77b9aacb7cef7a00e4dd3
|
52ab2aa2d163b5d5dd1e4a8a6aacb1c66d1e4f76
|
refs/heads/master
| 2021-01-16T23:51:11.430063
| 2019-08-20T19:31:45
| 2019-08-20T19:31:45
| 59,146,872
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,354
|
r
|
run_sim_N.r
|
#' Run multiple simulations
#'
#' Runs multiple independent simulations on the same set of parameters.
#'
#' This function runs multiple independent simulations on a set of parameters
#' given in \code{parms}. Each run generates a new landscape, species pool,
#' global species abundance distribution and initial metacommunity.
#' Landscapes, species pools and gsads are saved as lists
#' (object names: \code{lands_N}, \code{species_N}, \code{gsad_N})
#' in \code{<simID>_simobjects.RData}.
#' Simulations can be run in parallel by specifying
#' \code{nparallel > 1}, which requires the \code{\link[doParallel]{doParallel}} and
#' \code{\link[foreach]{foreach}} packages.
#' By default, \code{nparallel = 1} and the simulations proceed serially.
#' Each run of the simulation is temporarily saved to the working directory
#' or permanently saved to the directory specified by \code{save_sim}.
#' If this directory does not exist then it is created. Runs are saved as
#' \code{<simID>_run<i>.RData}. This file contains five objects:
#' \describe{
#' \item{results}{an array of the metacommunity through time returned by
#' \code{\link{run_sim}}. Note that if the parameter \code{calc_rates}
#' is \code{TRUE}, then this will be a list whose first element is
#' the simulation results and whose second element is an array describing
#' rates of species gains and losses through time for each cell.}
#' \item{this_land}{the landscape used for the simulation}
#' \item{this_species}{the species pool used in the simulation}
#' \item{this_gsad}{the global species abundance distribution used in
#' the simulation}
#' \item{this_metacomm}{the initial metacommunity}
#'}
#' If \code{return_results} is \code{TRUE}, then
#' after all simulations are complete, all runs are read back into memory,
#' compiled arrays, and returned as a list by the function.
#' List components are \code{results}, \code{species}, \code{lands},
#' \code{gsads} and the first dimension of each array is the run to which it
#' corresponds. For example, \code{results[1,,,]} is the metacommunity
#' from run 1. If \code{return_results} is \code{FALSE} then temporary
#' files from the simulation runs are NOT removed, even if \code{save_sim} is
#' not specified.
#'
#' This function can be used to restart a set of multiple simulation runs,
#' but does not currently allow users to restart a simulation on an existing
#' run. If \code{restart} is \code{TRUE}, the function navigates to the
#' \code{save_sim} directory and searches for the first instance of
#' \code{i <= nruns} where \code{<simID>_run<i>.RData} does not exist.
#' It then starts simulations for all \code{i} that do not have saved files
#' using the objects saved in \code{[simID]_simobjects.RData}.
#'
#' @note Users should be cautious in specifying \code{return_results=TRUE}
#' for large simulations where memory requirements may not support large
#' arrays.
#'
#' @param nruns (required) number of replicate simulations
#' @param parms (required) list of parameters for running simulation as
#' created by \code{\link{make_parmlist}}
#' @param nparallel number of cores to run the simulations on in parallel.
#' Defaults to 1.
#' @param simID character string identifying this simulation run.
#' Defaults to 'test'.
#' @param save_sim directory in which to save simulation results.
#' If none specified simulation results are not saved.
#' @param report number of timesteps after which to report status.
#' Defaults to 0, no reporting.
#' @param return_results logical indicating whether function should
#' return simulation results and objects. See details. Defaults to TRUE.
#' @param restart logical indicating whether the simulation should continue
#' from a set of saved runs. See details.
#' @param lib_loc location where \code{CTSim} is installed, if not on default
#' search path
#' @return nothing or a list of simulation results and objects. See details.
#'
#' @seealso \code{\link{run_sim}} for details on how each simulation runs \cr
#' \code{\link{make_parmlist}} for parameters that can be passed to the
#' simulation
#' @export
run_sim_N = function(nruns, parms, nparallel=1, simID='test', save_sim=NULL, report=0, return_results=T, restart=F, lib_loc=NULL){
	# Gate for the final collation step: only set to TRUE once every run has
	# finished (or been skipped during a restart).
	sim_complete=F
	# Define working directory to save simulation runs
	save_dir = ifelse(is.null(save_sim), getwd(), save_sim)
	if(!file.exists(save_dir)) dir.create(save_dir)
	save_dir = file.path(save_dir, simID)
	if(!file.exists(save_dir)){
		dir.create(save_dir)
	} else {
		warning('Simulation run already exists and may be overwritten unless restart = TRUE')
	}
	# Set restart to FALSE if no simulation objects
	if(!file.exists(file.path(save_dir, 'sim_objects.RData'))) restart = F
	# Simulate multiple runs in parallel
	if(nparallel > 1){
		# Attempt to load doParallel
		if(requireNamespace('doParallel', quietly=TRUE)&requireNamespace('foreach', quietly=TRUE)){
			# Attach functions in doParallel and foreach
			library(doParallel)
			# Make and register cluster; worker output goes to <simID>.Rout
			cluster = makeCluster(nparallel, outfile=paste0(simID, '.Rout'))
			registerDoParallel(cluster)
			# Send required functions and objects to each node
			clusterExport(cluster, c('parms','simID','save_sim','report','save_dir','lib_loc'), envir=environment())
			clusterEvalQ(cluster, library(CTSim, lib.loc=lib_loc))
			# If this is not a restart of a previous run
			if(!restart){
				# Initialize simulation landscapes
				lands_N = parLapply(cluster, 1:nruns, function(j){
					with(parms, {
						x = dimX
						y = dimY
						if(!exists('vgm_mod')) vgm_mod = NULL
						d = ifelse(exists('vgm_dcorr'), vgm_dcorr, NA)
						# NOTE(review): this branch passes habA_prop directly, but the
						# sequential branch below passes 1-habA_prop; one of the two is
						# almost certainly inverted -- confirm against make_landscape().
						prop = ifelse(exists('habA_prop'), habA_prop, 0.5)
						make_landscape(x, y, vgm_mod, d, prop, draw_plot=F)
					})
				})
				# Report progress
				if(report>0) print(paste0(Sys.time(), ': Finished making landscapes.'))
				# Initialize species vital rates
				species_N = parLapply(cluster, 1:nruns, function(j){
					with(parms, {
						S_AB = ifelse(exists('S_AB'), S_AB, NA)
						if(!exists('dist_b')) dist_b = NULL
						m = m_rates
						r = r_rates
						if(!exists('dist_d')){
							if(exists('d_kernel')){
								dist_d = list(type=d_kernel$type)
							} else { dist_d = NULL }
						} else {
							if(exists('d_kernel')) dist_d = c(dist_d, type=d_kernel$type)
						}
						if(!exists('dist_v')){
							if(exists('v_kernel')){
								dist_v = list(type=v_kernel$type)
							} else { dist_v = NULL }
						} else {
							if(exists('v_kernel')) dist_v = c(dist_v, type=v_kernel$type)
						}
						make_species(S_A, S_B, S_AB, dist_b, m, r, dist_d, dist_v)
					})
				})
				# Report progress
				if(report>0) print(paste0(Sys.time(), ': Finished making species pools.'))
				# Send initial landscapes and species to all cluster cores
				clusterExport(cluster, c('lands_N','species_N'), envir=environment())
				# Initialize global species abundance distribution
				# dist_gsad can be a generic distribution or 'b_rates' indicating that it should be the same as species birth rates
				gsad_N = parLapply(cluster, 1:nruns, function(j){
					with(parms, {
						N_S = dim(species_N[[j]])[1]
						if(exists('dist_gsad')){
							if(is.list(dist_gsad)){
								# Use specified distribution to generate abundances
								distribution = dist_gsad
								gsad_vec = make_sad(N_S, distribution)
							} else {
								# Make global abundances equal to species birth rates
								if(dist_gsad=='b_rates'){
									A_rates = species_N[[j]][1:S_A,'A','b']
									B_rates = species_N[[j]][(S_A+1):(S_A+S_B),'B','b']
									gsad_vec = c(A_rates, B_rates)
									if(exists('S_AB')) if(S_AB > 0) gsad_vec = c(gsad_vec, rowMeans(species_N[[j]][(S_A+S_B+1):(S_A+S_B+S_AB),,'b']))
								} else {
									stop('Unrecognized value for parameter dist_gsad.')
								}
							}
						# Defaults to same abundance for each species
						} else {
							distribution = list(type='same')
							gsad_vec = make_sad(N_S, distribution)
						}
						# Return vector of global abundances
						gsad_vec
					})
				})
				# Report progress
				if(report>0) print(paste0(Sys.time(), ': Finished making gsads.'))
				# Save for later restart
				save(lands_N, species_N, gsad_N, file=file.path(save_dir, 'sim_objects.RData'))
				# Send global species abundance distributions to cluster
				clusterExport(cluster, 'gsad_N', envir=environment())
			# If this is a restart of a previous run
			} else {
				# Read in lands, species, gsads from directory where simulation results saved
				load(file.path(save_dir, 'sim_objects.RData'))
				# Export objects to cluster
				clusterExport(cluster, c('lands_N','species_N','gsad_N'), envir=environment())
			}
			# Run simulations using foreach to reduce memory requirements
			foreach(j=1:nruns) %dopar% {
				# Define file to save results
				this_runfile = file.path(save_dir, paste0(simID, '_run', j, '.RData'))
				# Check whether this is a restart and whether this run has already be done
				if(restart & file.exists(this_runfile)){
					if(report>0) print(paste0(Sys.time(), ': Skipping run ', j))
				} else {
					if(report>0) print(paste0(Sys.time(), ': Start run ', j))
					# Define this landscape and species pool
					this_land = lands_N[[j]]
					this_species = species_N[[j]]
					this_gsad = gsad_N[[j]]
					# Distribute species across landscape
					this_metacomm = with(parms, {
						p = ifelse(exists('prop_full'), prop_full, NA)
						# NOTE(review): ifelse() keeps only the first element when
						# init_distribute is not a scalar -- confirm callers pass scalars.
						distribution = ifelse(exists('init_distribute'), init_distribute, NA)
						if(exists('cells_distribute')){
							which_cells = cells_distribute
						} else {
							which_cells = NULL
						}
						populate_landscape(this_land, this_species, this_gsad, K, distribution, p, which_cells)
					})
					# Run simulation
					results = with(parms, {
						if(!exists('d_kernel')) d_kernel = NULL
						if(!exists('v_kernel')) v_kernel = NULL
						imm_rate = ifelse(exists('imm_rate'), imm_rate, NA)
						if(!exists('save_steps')) save_steps = NULL
						if(!exists('calc_rates')) calc_rates = F
						run_sim(nsteps, this_metacomm, this_land, this_species, this_gsad, d_kernel, v_kernel, imm_rate,
							save_steps, report, ID=j, calc_rates=calc_rates)
					})
					# Save results
					save(results, this_species, this_land, this_metacomm, this_gsad, file=this_runfile)
					gc()
				}
			}
			sim_complete=T
			stopCluster(cluster)
		} else {
			stop('doParallel or foreach not found. Cannot run simulation in parallel without these package.')
			# (not reached: stop() above aborts before any sequential fallback)
		}
	} else {
		# Simulate runs sequentially on a single core
		# If this is not a restart of a previous simulation
		if(!restart){
			# Initialize simulation landscapes
			lands_N = lapply(1:nruns, function(j){
				with(parms, {
					x = dimX
					y = dimY
					if(!exists('vgm_mod')) vgm_mod = NULL
					d = ifelse(exists('vgm_dcorr'), vgm_dcorr, NA)
					# NOTE(review): 1-habA_prop here vs habA_prop in the parallel
					# branch above -- one of the two is almost certainly inverted.
					prop = ifelse(exists('habA_prop'), 1-habA_prop, 0.5)
					make_landscape(x, y, vgm_mod, d, prop, draw_plot=F)
				})
			})
			# Report progress
			if(report>0) print(paste0(Sys.time(), ': Finished making landscapes.'))
			# Initialize species vital rates
			species_N = lapply(1:nruns, function(j){
				with(parms, {
					S_AB = ifelse(exists('S_AB'), S_AB, NA)
					if(!exists('dist_b')) dist_b = NULL
					m = m_rates
					r = r_rates
					if(!exists('dist_d')) dist_d = NULL
					if(!exists('dist_v')) dist_v = NULL
					make_species(S_A, S_B, S_AB, dist_b, m, r, dist_d, dist_v)
				})
			})
			# Report progress
			if(report>0) print(paste0(Sys.time(), ': Finished making species pools.'))
			# Initialize global species abundance distribution
			gsad_N = lapply(1:nruns, function(j){
				with(parms, {
					N_S = dim(species_N[[j]])[1]
					if(exists('dist_gsad')){
						if(is.list(dist_gsad)){
							# Use specified distribution to generate abundances
							distribution = dist_gsad
							gsad_vec = make_sad(N_S, distribution)
						} else {
							# Make global abundances equal to species birth rates
							if(dist_gsad=='b_rates'){
								A_rates = species_N[[j]][1:S_A,'A','b']
								B_rates = species_N[[j]][(S_A+1):(S_A+S_B),'B','b']
								gsad_vec = c(A_rates, B_rates)
								if(exists('S_AB')) if(S_AB > 0) gsad_vec = c(gsad_vec, rowMeans(species_N[[j]][(S_A+S_B+1):(S_A+S_B+S_AB),,'b']))
							} else {
								stop('Unrecognized value for parameter dist_gsad.')
							}
						}
					# Defaults to same abundance for each species
					} else {
						distribution = list(type='same')
						gsad_vec = make_sad(N_S, distribution)
					}
					# Return vector of global abundances
					gsad_vec
				})
			})
			# Report progress
			if(report>0) print(paste0(Sys.time(), ': Finished making gsads.'))
			# Save simulation objects
			save(lands_N, species_N, gsad_N, file=file.path(save_dir, 'sim_objects.RData'))
		# If this is a restart of a previous run
		} else {
			# Read in lands, species, gsads from directory where simulation results saved
			load(file.path(save_dir, 'sim_objects.RData'))
		}
		# Run simulations
		for(j in 1:nruns){
			# Define file to save results
			this_runfile = file.path(save_dir, paste0(simID, '_run', j, '.RData'))
			# Check whether this is a restart and whether this run has already be done
			if(restart & file.exists(this_runfile)){
				if(report>0) print(paste0(Sys.time(), ': Skipping run ', j))
			} else {
				# Report progress
				if(report>0) print(paste0(Sys.time(), ': Start run ', j))
				# Define this landscape and species pool
				this_land = lands_N[[j]]
				this_species = species_N[[j]]
				this_gsad = gsad_N[[j]]
				# Distribute species across landscape
				this_metacomm = with(parms, {
					p = ifelse(exists('prop_full'), prop_full, NA)
					distribution = ifelse(exists('init_distribute'), init_distribute, NA)
					if(exists('cells_distribute')){
						which_cells = cells_distribute
					} else {
						which_cells = NULL
					}
					populate_landscape(this_land, this_species, this_gsad, K, distribution, p, which_cells)
				})
				# Run simulation
				results = with(parms, {
					if(!exists('d_kernel')) d_kernel = NULL
					if(!exists('v_kernel')) v_kernel = NULL
					imm_rate = ifelse(exists('imm_rate'), imm_rate, NA)
					if(!exists('save_steps')) save_steps = NULL
					if(!exists('calc_rates')) calc_rates = F
					run_sim(nsteps, this_metacomm, this_land, this_species, this_gsad, d_kernel, v_kernel, imm_rate,
						save_steps, report, ID=j, calc_rates=calc_rates)
				})
				# Save results (same path as this_runfile defined above)
				save(results, this_species, this_land, this_metacomm, this_gsad, file=file.path(save_dir, paste0(simID, '_run', j, '.RData')))
				gc()
			}
		}
		sim_complete=T
	}
	# Read individual runs back into a list
	if(return_results & sim_complete){
		sim_results = lapply(1:nruns, function(j){
			this_run = file.path(save_dir, paste0(simID, '_run', j, '.RData'))
			load(this_run)
			# If results were not meant to be persisted, the per-run file is
			# treated as a temporary and removed after loading.
			if(is.null(save_sim)) file.remove(this_run)
			results
		})
		# Save results
		sim_results = list(results = sim_results, species = species_N, lands = lands_N, gsads = gsad_N)
		if(!is.null(save_sim)){
			save(sim_results, file=file.path(save_dir, paste0(simID, '_results.RData')))
		}
		# Return results
		sim_results
	}
}
|
833e4c1bc4176bf1febbb7f9cd76f1d6d95d0dbc
|
403582dddb1f3bbe0d3abe51aa0feca62e471985
|
/tests/testthat/test-examples.R
|
ef29b67a08fd320a8c2afed2e5efaeb02e11f725
|
[] |
no_license
|
rkillick/changepoint
|
836ac7e64e1bba962ca8ccece1072e1ec54b41e0
|
5253a05c63b67230d9c78cf5e3914c81c67a52ec
|
refs/heads/main
| 2023-07-19T19:02:19.293558
| 2022-11-08T22:50:07
| 2022-11-08T22:50:07
| 49,007,800
| 121
| 50
| null | 2022-11-08T22:50:08
| 2016-01-04T16:08:49
|
R
|
UTF-8
|
R
| false
| false
| 13,724
|
r
|
test-examples.R
|
# Regression tests pinning down the examples shipped in the package's .Rd
# help files.  Each section reproduces one documented example under a fixed
# RNG seed and asserts the exact changepoint locations / slot values, so a
# change in any algorithm or class default will be caught here.
# NOTE(review): context() is soft-deprecated in testthat 3e; harmless here.
context("man file example tests")
# From changepoint-package.Rd
# change in variance
set.seed(1)
x=c(rnorm(100,0,1),rnorm(100,0,10))
ansvar=cpt.var(x)
test_that('var1',expect_identical(cpts(ansvar),100))
# change in mean
set.seed(1)
y=c(rnorm(100,0,1),rnorm(100,5,1))
ansmean=cpt.mean(y)
test_that('mean1',expect_identical(cpts(ansmean),100))
# change in mean and variance
set.seed(1)
z=c(rnorm(100,0,1),rnorm(100,2,10))
ansmeanvar=cpt.meanvar(z)
test_that('meanvar1',expect_identical(cpts(ansmeanvar),100))
# From cpt.mean.Rd
# Example of a change in mean at 100 in simulated normal data
set.seed(1)
x=c(rnorm(100,0,1),rnorm(100,10,1))
test_that('mean2',expect_equivalent(cpt.mean(x,penalty="SIC",method="AMOC",class=FALSE),c(100,1)))
ans=cpt.mean(x,penalty="Asymptotic",pen.value=0.01,method="AMOC")
test_that('mean3',expect_identical(cpts(ans),100))
ans=cpt.mean(x,penalty="Manual",pen.value=0.8,method="AMOC",test.stat="CUSUM")
test_that('mean4',expect_equivalent(cpts(ans),101))
# Example of multiple changes in mean at 50,100,150 in simulated normal data
set.seed(1)
x=c(rnorm(50,0,1),rnorm(50,5,1),rnorm(50,10,1),rnorm(50,3,1))
test_that('mean5',expect_identical(cpt.mean(x,penalty="Manual",pen.value="2*log(n)",method="BinSeg",Q=5,class=FALSE),c(50,100,150,200)))
# Example of using the CROPS penalty in data set above
set.seed(1)
x=c(rnorm(50,0,1),rnorm(50,5,1),rnorm(50,10,1),rnorm(50,3,1))
out=cpt.mean(x, pen.value = c(4,1500),penalty = "CROPS",method = "PELT")
# Expected cpts.full() matrix: NA-padded, presumably one row per optimal
# segmentation recovered by CROPS -- see cpts.full() documentation.
truth=matrix(NA,ncol=7,nrow=7); truth[1:6,1]=50;truth[1:5,2]=c(96,96,100,100,150)
truth[1:4,3]=c(100,100,133,150);truth[1:3,4]=c(133,133,150);truth[1:2,5]=c(150,150)
truth[1,6]=159;truth[1,7]=180
test_that('crops1',expect_equivalent(cpts.full(out),truth))
truth=c(4.000000, 4.332496, 4.385247, 4.684254 ,559.366988, 646.962719,1311.335695)
test_that('crops2',expect_equal(pen.value.full(out),truth,tolerance=1e-6))
# Example multiple datasets where the first row has multiple changes in mean and the second row has
#no change in mean
set.seed(1)
x=c(rnorm(50,0,1),rnorm(50,5,1),rnorm(50,10,1),rnorm(50,3,1))
y=rnorm(200,0,1)
z=rbind(x,y)
test_that('mean6',expect_equal(cpt.mean(z,penalty="Asymptotic",pen.value=0.01,method="SegNeigh",Q=5,class=FALSE),list(c(50,100,150,200),200)))
ans=cpt.mean(z,penalty="Asymptotic",pen.value=0.01,method="PELT")
test_that('mean7',expect_equal(cpts(ans[[1]]),c(50,100,150)))
test_that('mean8',expect_equal(cpts(ans[[2]]),numeric()))
# From cpt.meanvar.Rd
# Example of a change in scale parameter (mean and variance) at 100 in simulated gamma data
set.seed(1)
x=c(rgamma(100,shape=1,rate=1),rgamma(100,shape=1,rate=5))
test_that('meanvar2',expect_equivalent(cpt.meanvar(x,penalty="SIC",method="AMOC",test.stat="Gamma",class=FALSE,shape=1),98))
ans=cpt.meanvar(x,penalty="AIC",method="AMOC",test.stat="Gamma",shape=1)
test_that('meanvar3',expect_equivalent(cpts(ans),98))
# Example of multiple changes in mean and variance at 50,100,150 in simulated normal data
set.seed(1)
x=c(rnorm(50,0,1),rnorm(50,5,3),rnorm(50,10,1),rnorm(50,3,10))
test_that('meanvar4',expect_equal(cpt.meanvar(x,penalty="Manual",pen.value="4*log(n)",method="BinSeg",Q=5,class=FALSE),c(50,100,150,152,200)))
# Example of using the CROPS penalty in the above example
set.seed(1)
x=c(rnorm(50,0,1),rnorm(50,5,3),rnorm(50,10,1),rnorm(50,3,10))
out=cpt.meanvar(x,pen.value=c(2*log(length(x)),100*log(length(x))),penalty="CROPS",method="PELT")
truth=matrix(NA,ncol=9,nrow=6);truth[1:5,1]=c(rep(15,2),rep(50,3));truth[1:4,2]=c(17,17,100,100)
truth[1:4,3]=c(22,22,133,150);truth[1:3,4]=c(44,50,151);truth[1:2,5]=c(46,100)
truth[1:2,6]=c(50,133);truth[1:2,7]=c(100,151);truth[1,8]=133;truth[1,9]=151
test_that('crops3',expect_equal(cpts.full(out),truth))
truth=c(10.59663, 10.68431, 11.31088, 11.38307, 119.78669, 191.42622)
test_that('crops4',expect_equal(pen.value.full(out),truth,tolerance=1e-6))
# Example multiple datasets where the first row has multiple changes in mean and variance and the
#second row has no change in mean or variance
set.seed(1)
x=c(rnorm(50,0,1),rnorm(50,5,3),rnorm(50,10,1),rnorm(50,3,10))
y=rnorm(200,0,1)
z=rbind(x,y)
test_that('meanvar5',expect_equivalent(cpt.meanvar(z,penalty="Asymptotic",pen.value=0.01,method="SegNeigh",Q=5,class=FALSE),list(c(50,100,150,200),200)))
ans=cpt.meanvar(z,penalty="Asymptotic",pen.value=0.01,method="PELT")
test_that('meanvar6',expect_equivalent(cpts(ans[[1]]),c(50,100,150)))
test_that('meanvar7',expect_equivalent(cpts(ans[[2]]),numeric()))
# From cpt.range-class.Rd
x=new("cpt.range")
test_that('class1',expect_is(x,"cpt.range"))
cpts(x)<-c(10,50,100)
test_that('class2',expect_equivalent(cpts(x),c(10,50,100)))
# Example of multiple changes in variance at 50,100,150 in simulated normal data
set.seed(1)
x=c(rnorm(50,0,1),rnorm(50,0,10),rnorm(50,0,5),rnorm(50,0,1))
out=cpt.var(x,pen.value=c(log(length(x)),10*log(length(x))),penalty="CROPS",method="PELT")
test_that('class3',expect_equivalent(logLik(out,ncpts=3),c(925.8085, 947.0578))) # raw likelihood of the data with changepoints, second value is likelihood + penalty
# From cpt.reg-class.Rd
x=new("cpt.reg") # creates a new object with the cpt.reg class defaults
test_that('class4',expect_is(x,"cpt.reg"))
test_that('class5',expect_is(data.set(x),"matrix"))
data.set(x)<-matrix(1:10,nrow=5,ncol=2) # replaces the data.set slot from x with a matrix
test_that('class6',expect_equivalent(data.set(x),matrix(1:10,nrow=5,ncol=2)))
# From cpt.var.Rd
# Example of a change in variance at 100 in simulated normal data
set.seed(1)
x=c(rnorm(100,0,1),rnorm(100,0,10))
test_that('var2',expect_equivalent(cpt.var(x,penalty="SIC",method="AMOC",class=FALSE),c(100,1)))
ans=cpt.var(x,penalty="Asymptotic",pen.value=0.01,method="AMOC")
test_that('var3',expect_equivalent(cpts(ans),100))
# Example of multiple changes in variance at 50,100,150 in simulated data
set.seed(1)
x=c(rnorm(50,0,1),rnorm(50,0,10),rnorm(50,0,5),rnorm(50,0,1))
out=cpt.var(x,penalty="Manual",pen.value="log(2*log(n))",method="BinSeg",test.stat="CSS",Q=5,
            class=FALSE)
truth=list();truth$cps=matrix(c(99,53,150,50,140,3.156304,3.156304,3.156304,3.074743,1.254542),byrow=T,nrow=2)
truth$cpts=c(50,53,99,150,200);truth$op.cpts=4;truth$pen=2.360536
test_that('var4',expect_equal(out,truth,tolerance=0.00001))
# Example of using CROPS in the above example
set.seed(1)
x=c(rnorm(50,0,1),rnorm(50,0,10),rnorm(50,0,5),rnorm(50,0,1))
out=cpt.var(x,pen.value=c(log(length(x)),100*log(length(x))),penalty="CROPS",method="PELT")
truth=matrix(NA,ncol=7,nrow=7);truth[1:6,1]=50;truth[1:5,2]=c(77,rep(99,3),150);truth[1:4,3]=c(79,114,140,150)
truth[1:3,4]=c(99,133,150);truth[1:2,5]=c(114,150);truth[1,6]=133;truth[1,7]=150
test_that('var5',expect_equivalent(cpts.full(out),truth))
truth=c(5.298317,5.548538,6.149305,7.083099,26.592259,142.417161,145.146279)
test_that('var6',expect_equivalent(pen.value.full(out),truth))
# Example multiple datasets where the first row has multiple changes in variance and the second row
#has no change in variance
set.seed(10)
x=c(rnorm(50,0,1),rnorm(50,0,10),rnorm(50,0,5),rnorm(50,0,1))
y=rnorm(200,0,1)
z=rbind(x,y)
truth=list();truth[[1]]=c(50,100,149,200);truth[[2]]=200
test_that('var7',expect_equivalent(cpt.var(z,penalty="Asymptotic",pen.value=0.01,method="SegNeigh",Q=5,class=FALSE),truth))
ans=cpt.var(z,penalty="Asymptotic",pen.value=0.01,method="PELT")
test_that('var8',expect_equivalent(cpts(ans[[1]]),c(50,100,149)))
test_that('var9',expect_equivalent(cpts(ans[[2]]),numeric()))
# From cpt-class.Rd
x=new("cpt") # creates a new object with the cpt class defaults
test_that('class7',expect_is(x,"cpt"))
test_that('class8',expect_equivalent(cpts(x),numeric()))
cpts(x)<-c(10,50,100) # replaces the cpts slot from x with c(10,50,100)
test_that('class9',expect_equivalent(cpts(x),c(10,50,100)))
# Example of a change in variance at 100 in simulated normal data
set.seed(1)
x=c(rnorm(100,0,1),rnorm(100,0,10))
ans=cpt.var(x)
test_that('class10',expect_equivalent(logLik(ans),c(1003.2283241358,1012.438665)))
# From cpts.full.Rd
x=new("cpt.range") # new cpt.range object
test_that('class11',expect_is(x,"cpt.range"))
test_that('class12',expect_is(cpts.full(x),"matrix")) # retrieves the cpts.full slot from x
# From cpts.full-.Rd
x=new("cpt.range") # new cpt.range object
cpts.full(x)<-matrix(c(10,20,10,NA),nrow=2,byrow=TRUE)
test_that('class13',expect_equivalent(cpts.full(x),matrix(c(10,20,10,NA),nrow=2,byrow=TRUE) ))
# From cpts.Rd
x=new("cpt") # new cpt object
test_that('class14',expect_equivalent(cpts(x),numeric())) # retrieves the cpts slot from x
# From cpts.ts.Rd
x=new("cpt") # new cpt object
test_that('class15',expect_equivalent(cpts.ts(x),numeric()))
# From cpts-.Rd
x=new("cpt") # new cpt object
cpts(x)<-10 # replaces the vector of changepoint in object x with 10
test_that('class16',expect_equivalent(cpts(x),10))
# From cpttype.Rd
x=new("cpt") # new cpt object
test_that('class17',expect_equivalent(cpttype(x),"Not Set")) # retrieves the cpttype slot from x
# From cpttype-.Rd
x=new("cpt") # new cpt object
cpttype(x)<-"mean" # replaces the existing cpttype in object x with "mean"
test_that('class18',expect_equivalent(cpttype(x),'mean'))
# From data.set.Rd
x=new("cpt") # new cpt object
test_that('class19',expect_equivalent(data.set(x),ts()))
# From data.set.ts.Rd
x=new("cpt") # new cpt object
test_that('class20',expect_equivalent(data.set.ts(x),ts()))
# From data.set-.Rd
x=new("cpt") # new cpt object
data.set(x)<-c(1,2,3,4,5) # replaces the existing data.set slot in x with c(1,2,3,4,5)
test_that('class21',expect_equivalent(data.set(x),1:5))
# From distribution.Rd
x=new("cpt") # new cpt object
test_that('class22',expect_equivalent(distribution(x),character()))
# From distribution-.Rd
x=new("cpt") # new cpt object
distribution(x)<-"normal" # replaces the current distribution slot of x with "normal"
test_that('class23',expect_equivalent(distribution(x),"normal"))
# From likelihood.Rd
set.seed(1)
x=c(rnorm(50,0,1),rnorm(50,0,10),rnorm(50,0,5),rnorm(50,0,1))
out=cpt.var(x,penalty="Manual",pen.value="2*log(n)",method="BinSeg",Q=5)
test_that('logLik1',expect_equivalent(likelihood(out),c(925.8085, 957.5984)))
# From logLik-methods.Rd
set.seed(1)
x=c(rnorm(50,0,1),rnorm(50,0,10),rnorm(50,0,5),rnorm(50,0,1))
out=cpt.var(x,penalty="Manual",pen.value="2*log(n)",method="BinSeg",Q=5)
test_that('logLik1',expect_equivalent(logLik(out),c(925.8085, 957.5984)))
# From method.Rd
x=new("cpt") # new cpt object
test_that('class24',expect_equivalent(method(x),character()))
# From method-.Rd
x=new("cpt") # new cpt object
method(x)<-"mean" # replaces the existing method slot in x with "mean"
test_that('class25',expect_equivalent(method(x),"mean"))
# From minseglen.Rd
x=new("cpt") # new cpt object
test_that('class26',expect_equivalent(minseglen(x),numeric()))
# From minseglen-.Rd
x=new("cpt") # new cpt object
minseglen(x)<-5 # replaces the existing minseglen slot in x with 5
test_that('class27',expect_equivalent(minseglen(x),5))
# From ncpts.max.Rd
x=new("cpt") # new cpt object
test_that('class28',expect_equivalent(ncpts.max(x),numeric()))
# From ncpts.max-.Rd
x=new("cpt") # new cpt object
ncpts.max(x)<-10 # replaces the vector of changepoint in object x with 10
test_that('class29',expect_equivalent(ncpts.max(x),10))
# From ncpts.Rd
x=new("cpt") # new cpt object
test_that('class30',expect_equivalent(ncpts(x),0)) # returns the number of changepoints (i.e. length of the cpts slot in x minus 1)
# From nseg.Rd
x=new("cpt") # new cpt object
test_that('class31',expect_equivalent(nseg(x),1))
# From param.est.Rd
x=new("cpt") # new cpt object
test_that('class32',expect_equivalent(param.est(x),list()))
# From param.est-.Rd
x=new("cpt") # new cpt object
param.est(x)<-list(mean=0) # replaces the current param.est list in x with list(mean=0)
test_that('class33',expect_equivalent(param.est(x),list(mean=0)))
# From param.Rd
set.seed(1)
x=c(rnorm(100,0,1),rnorm(100,0,10))
ans=cpt.var(x,penalty="Asymptotic",pen.value=0.01,method="AMOC",param.estimates=FALSE)
ans=param(ans) # fills the param.est slot with the parameter estimes.
test_that('class34',expect_equivalent(param.est(ans),list(variance=c(0.7986945, 90.8356989))))
# From pen.type.Rd
x=new("cpt") # new cpt object
test_that('class35',expect_equivalent(pen.type(x),character()))
# From pen.type-.Rd
x=new("cpt") # new cpt object
pen.type(x)<-"SIC" # replaces the existing pen.type slot in x with "SIC"
test_that('class36',expect_equivalent(pen.type(x),"SIC"))
# From pen.value.full.Rd
x=new("cpt.range") # new cpt.range object
test_that('class37',expect_equivalent(pen.value.full(x),numeric()))
# From pen.value.full-.Rd
x=new("cpt.range") # new cpt.range object
pen.value.full(x)<-5 # replaces the existing pen.value.full slot in x with 5
test_that('class38',expect_equivalent(pen.value.full(x),5))
# From pen.value.Rd
x=new("cpt") # new cpt object
test_that('class39',expect_equivalent(pen.value(x),numeric()))
# From pen.value-.Rd
x=new("cpt") # new cpt object
pen.value(x)<-5 # replaces the existing pen.value slot in x with 5
test_that('class40',expect_equivalent(pen.value(x),5))
# From seglen.Rd
x=new("cpt") # new cpt object
test_that('class41',expect_equivalent(seg.len(x),numeric()))
# From test.stat.Rd
x=new("cpt") # new cpt object
test_that('class42',expect_equivalent(test.stat(x),character()))
# From test.stat-.Rd
x=new("cpt") # new cpt object
test.stat(x)<-"normal" # replaces the current test.stat slot of x with "normal"
test_that('class43',expect_equivalent(test.stat(x),"normal"))
|
a9e5ecdfa09ff2a08e071956d756e494af8c8054
|
510bc25ad2b6e67e4a3c13043cacd4424b75552e
|
/R/class_cnsVariable.r
|
2b6419d4ccce71402424442166855080365aeba3
|
[] |
no_license
|
orangeluc/energyRt
|
ff7423a2010d8edc3915034c396f079662ea4315
|
c72d1a528a95ef8fada215e0abef45d523383758
|
refs/heads/master
| 2020-04-24T06:04:06.819280
| 2019-02-20T13:26:26
| 2019-02-20T13:26:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,639
|
r
|
class_cnsVariable.r
|
#---------------------------------------------------------------------------------------------------------
# cnsVariable
#---------------------------------------------------------------------------------------------------------
# S4 class describing one constraint variable: the model parameter it refers
# to, the sets it is indexed over, and its default / realised numeric values.
setClass('cnsVariable',
         representation(
            parameter = "character",
            set_name = "character",
            #use_set = "list", # set that initialize by parent class
            set = "list", # other sets, created by function create_set
            ext_set = "list", # external sets, known to be used in constraints
            defVal = "numeric",
            value = "data.frame",
            misc = "list"
            # parameter= list() # For the future
         ),
         prototype(
            parameter = "",
            set_name = NULL,
            #use_set = list(),
            set = list(),
            ext_set = list(), # external sets, known to be used in constraints
            defVal = 1,
            value = NULL,#data.frame(value = numeric(), stringsAsFactors = FALSE),
            # parameter= list() # For the future
            #! Misc
            misc = list(
            )),
         # NOTE(review): the S3methods argument to setClass() is defunct in
         # modern versions of the methods package -- confirm target R version.
         S3methods = TRUE
);
# Initializer: tag every new instance with a fixed GUID attribute.
# NOTE(review): callNextMethod() is not invoked, so slot values passed via
# ... are silently ignored -- confirm this is intentional.
setMethod("initialize", "cnsVariable", function(.Object, ...) {
   attr(.Object, 'GUID') <- 'a7c5f404-7107-405e-9aec-8684118f23ba'
   .Object
})
# Append a set (built via create_set) to a cnsVariable's list of sets.
#
# Args:
#   vrb: a cnsVariable object.
#   ...: arguments forwarded to create_set().
# Returns the modified cnsVariable (value semantics: caller must reassign).
add_set.cnsVariable <- function(vrb, ...) {
  # BUG FIX: the original indexed length(cns@set), referencing an object
  # 'cns' that is undefined in this scope (copy-paste from the constraint
  # class version of add_set); the length of vrb's own set list is intended.
  vrb@set[[length(vrb@set) + 1]] <- create_set(...)
  vrb
}
# Test
##print(create_cnsVariable('vTecInp'))
##technology <- c('TEC1', 'TEC2', 'TEC3')
### supply <- 'MINCOA'
### group <- as.character(1:3)
##comm <- c('COA', 'GAS')
### region <- 'RUS'
### year <- as.character(2005:2050)
### slice <- 'ANNUAL'
##level <- list(comm = comm, technology = technology)
##print(create_cnsVariable('vTecInp', set = list(create_set('technology', c('TEC1', 'TEC2')))))
##print(create_cnsVariable('vTecInp', set = list(create_set('technology', c('TEC1', 'TEC2')),
## create_set('comm', c('COA', 'GAS'))), defVal = 5))
##print(create_cnsVariable('vTecInp', ext_set = list(create_set('technology', c('TEC1', 'TEC2')),
## create_set('comm', c('COA', 'GAS'))), defVal = 5))
##
##vrb <- create_cnsVariable('vTecInp', set = list(create_set('technology', c('TEC1', 'TEC2')),
## create_set('comm', c('COA', 'GAS'))), defVal = 5)
##
##
##prepare(vrb, level)
# cns@name <- 'constr1'
# cns <- add_set(cns, 'group')
# cns <- add_set(cns, 'comm', alias = c('hgj'))
# cns <- add_set(cns, 'region', set = c('RUS'))
# cns <- add_set(cns, 'technology', set = c('TEC1', 'TEC2'))
# cns <- create_data_frame(cns)
#
|
3f6f988142e00dfb6d80cb82a0699804a0ab1147
|
a06da6adbad285ae75e75666323e33bdd0f02de4
|
/old/8_delele_tmp_files.r
|
92c365d30b4cb3b5cf30915a83d8d356cb78a835
|
[] |
no_license
|
lkaiser7/IS_V2
|
5c3ab012576a935444b854300703ba382e80f942
|
ef6d498f644db615dee11af580ea62a96b54bf0b
|
refs/heads/master
| 2023-06-16T23:10:38.880561
| 2023-05-30T16:41:35
| 2023-05-30T16:41:35
| 37,231,310
| 0
| 1
| null | 2022-10-27T18:02:47
| 2015-06-11T01:04:33
|
R
|
UTF-8
|
R
| false
| false
| 562
|
r
|
8_delele_tmp_files.r
|
# Delete temporary files left over from earlier pipeline steps.
#
# Relies on globals defined by upstream scripts: all_sp_nm (vector of species
# names), project_path (trailing-slash project directory), and stringr being
# attached (str_replace_all).
# FIX: removed the dead `sp_nm = all_sp_nm[1]` leftover from interactive
# development (the loop immediately overwrites it); use TRUE over T.
for (sp_nm in all_sp_nm) {
  sp_nm <- as.character(sp_nm)
  # Species directories use '.' in place of '_' in the species name.
  sp_dir <- paste0(str_replace_all(sp_nm, "_", "."), "/")
  temp_sp_files_to_delete <- paste0(project_path, sp_dir, "delete_temp_sp_files/", "*")
  unlink(temp_sp_files_to_delete, recursive = TRUE, force = TRUE) # delete previous frames
  # Loc <- "mydir"
  # system(paste0("rm -r ", temp_sp_files_to_delete))
}
# Also clear the shared temp/ directory.
temp_loc_to_delete <- paste0(project_path, "temp/", "*")
unlink(temp_loc_to_delete, recursive = TRUE, force = TRUE) # delete previous frames
# system(paste0("rm -r ", temp_loc_to_delete))
|
28465d7a167b1654f90857a4ebdc6ebeebb88e5e
|
b58aa88001f89d3e1576994ac209f0edf65db444
|
/CoD/main-madelon.R
|
01373d6bec29c385dd5cbfb2538aa3c5d4878acd
|
[] |
no_license
|
lesasi/CoD-gradient-descent
|
0f3e56c84bdd4a4de107546ce30b04a9d5d8da44
|
d149be700273c4448f76ce756b895b8cb9df4efc
|
refs/heads/master
| 2022-04-18T00:28:52.713322
| 2020-04-19T11:58:30
| 2020-04-19T11:58:30
| 256,986,266
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,458
|
r
|
main-madelon.R
|
# Madelon benchmark: compare SVM classification accuracy on the raw data
# against several dimensionality-reduction front ends (SVD, PCA, PCA+FLD,
# PCA+PLS).  All report strings are kept identical to the original script.

# Work relative to this script's location (requires RStudio).
setwd(dirname(rstudioapi::getSourceEditorContext()$path))

# Project helpers defining the *_reduce() functions and classifySVM().
source("FLD.r")
source("SVD.r")
source("PCA.r")
source("PLS.r")
source("classifySVM.r")

# Third-party dependencies.
library(e1071)
library(pls)
library(MASS)
library(caTools)
library(kernlab)

# Madelon training set; column 501 is dropped (trailing-separator artifact
# of the space-delimited file -- assumed, confirm against the raw data).
features <- read.table("madelon_train.data", sep = " ")[, -501]
labels <- read.table("madelon_train.labels", sep = " ")
full_data <- cbind(labels, features)

# Baseline: SVM on the unreduced dataset.
res_original <- classifySVM(full_data)

# SVD reduction to 20 components.
svd_data <- svd_reduce(features, labels, 20)
res_svd <- classifySVM(svd_data)

# PCA reduction to 40 components.
pca_data <- pca_reduce(features, labels, 40)
res_pca <- classifySVM(pca_data)

# FLD applied to the PCA scores (15 discriminants).
fld_data <- fld_reduce(pca_data[, -1], pca_data[, 1], 15)
res_fld <- classifySVM(fld_data)

# PLS applied to the PCA scores (15 components).
pls_data <- pls_reduce(pca_data, 15)
res_pls <- classifySVM(pls_data)

# Report accuracy and confusion matrix for every variant.
cat("Accuracy for original dataset: ", res_original$acc, "\n")
cat("Confusion matrix for original dataset\n")
print(res_original$cm)
cat("Accuracy for SVD reduced dataset: ", res_svd$acc, "\n")
cat("Confusion matrix for SVD reduced dataset\n")
print(res_svd$cm)
cat("Accuracy for PCA reduced dataset: ", res_pca$acc, "\n")
cat("Confusion matrix for PCA reduced dataset\n")
print(res_pca$cm)
cat("Accuracy for PCA+FLD reduced dataset: ", res_fld$acc, "\n")
cat("Confusion matrix for PCA+FLD reduced dataset\n")
print(res_fld$cm)
cat("Accuracy for PCA+PLS reduced dataset: ", res_pls$acc, "\n")
cat("Confusion matrix for PCA+PLS reduced dataset\n")
print(res_pls$cm)
|
67546b8df9905c108d546c88d7ed5a31017cc2fa
|
5f89a48ae87264a697476717b543215e1ef496aa
|
/scripts/kmeans.R
|
9b3199ac349e3b653b6277fa12a8a6acf8846e64
|
[] |
no_license
|
miloknowles/17.835-finalproject
|
3c9c2d97efa306f9d72401a55339ac4e24aff29e
|
885f80345d2ff9d6903b751f5acf7b53b86bbd93
|
refs/heads/master
| 2020-03-08T23:31:17.939211
| 2018-05-19T00:16:57
| 2018-05-19T00:16:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,102
|
r
|
kmeans.R
|
# AJ's work on kmeans
# Get XLSX package.
# install.packages('openxlsx') # Uncomment this to install the package.
library('openxlsx')
library('rworldmap')
setwd('C:/Users/aroot/Desktop/Classes/17.835/Project/gitstuff/17.835-finalproject/images/kmeans/')
# MAKE SURE CURRENT WORKING DIRECTORY IS SET TO THIS FILE!
PROCESSED_DATA_DIR = 'C:/Users/aroot/Desktop/Classes/17.835/Project/gitstuff/17.835-finalproject/data/processed/'
ORIGINAL_DATA_DIR = 'C:/Users/aroot/Desktop/Classes/17.835/Project/gitstuff/17.835-finalproject/data/original/'
FINAL_DATA_DIR = 'C:/Users/aroot/Desktop/Classes/17.835/Project/gitstuff/17.835-finalproject/data/final'
processed.codes = read.csv(file.path(PROCESSED_DATA_DIR, 'country_codes.csv'))
processed.terror = read.csv(file.path(PROCESSED_DATA_DIR, 'terrorism_incidents.csv'))
processed.wdi = read.csv(file.path(PROCESSED_DATA_DIR, 'wdi.csv'))
processed.wgi = read.csv(file.path(PROCESSED_DATA_DIR, 'wgi.csv'))
# data.full = read.csv(file.path(FINAL_DATA_DIR, 'data_matched_all.csv'))
data.copy <- read.csv(file.path(FINAL_DATA_DIR, 'data_matched_all.csv'))
#remove columns with more than 100 NAs
# removed_columns <- function(data.stuff){
#
# fourty_percent <- .40 * nrow(data.stuff)
#
# counter <- 0
# for(i in names(data.stuff)){
#
# is.val <- sum(is.na(data.stuff[[i]]))
# if (is.val > fourty_percent){
# #print("removed")
# #print(i)
# data.stuff[[i]] <- NULL
# counter <- (counter + 1)
# }
# else
# {
# #print("saved")
# #print(i)
# }
# }
# return(counter)
# }
# library(cluster)
# library(HSAUR)
#
# data(pottery)
# km <- kmeans(pottery,4)
# dissE <- daisy(pottery)
# dE2 <- dissE^2
# sk2 <- silhouette(km$cl, dE2)
# plot(sk2)
# data(iris)
# dat <- iris[, -5] # without known classification
# # Kmeans clustre analysis
# clus <- kmeans(dat, centers=4)
# plotcluster(dat, clus$cluster)
library(cluster)
library(fpc)
# Re-read the matched data set (overwrites the earlier data.copy).
data.full = read.csv(file.path(FINAL_DATA_DIR, 'data_matched_all.csv'))
data.copy <- as.data.frame(data.full)
# Years for which a per-year clustering is produced.
years = c(1996, 1998, 2000, 2002, 2003, 2004, 2005, 2006, 2007, 2008,
2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016)
#
# for (year_val in years) {
#   dir.create(toString(year_val))
# }
# For each year: (1) drop columns with >20% missing values, (2) drop rows with
# any remaining NA, (3) run k-means for k = 2..10 on the scaled features, and
# (4) plot cluster assignment against the political-stability estimate.
# NOTE(review): the loop writes jpegs into per-year directories whose creation
# (dir.create above) is commented out — the jpeg() calls fail if the
# directories do not already exist. TODO confirm directories exist.
for (year_val in years) {
temp.data <- subset(data.copy, data.copy$year == year_val)
#print(removed_columns(temp.data))
# Missingness threshold: any column with more than 20% NA is dropped.
twenty_percent <- .20 * nrow(temp.data)
counter <- 0
for(i in names(temp.data)){
is.val <- sum(is.na(temp.data[[i]]))
if (is.val > twenty_percent){
#print("removed")
#print(i)
# Setting the column to NULL removes it from the data frame.
temp.data[[i]] <- NULL
counter <- (counter + 1)
}
}
# Number of columns dropped for this year (progress/diagnostic output).
print(counter)
# Complete-case subset of the surviving columns.
full.year <- na.omit(temp.data)
altered.year <- as.data.frame(full.year)
# Use the country code as the row label so clusters can be mapped to countries.
row.names(altered.year) <- altered.year$code
# Keep the stability-index columns aside; they are excluded from clustering
# features and used only as the outcome to compare clusters against.
altered.index <- altered.year[, c("stability_index_numsrc", "stability_index_rank", "stability_index_lower", "stability_index_upper", "stability_index_estimate",
"stability_index_stderr")]
maintain.stab <- altered.year$stability_index_estimate
# Columns removed from the feature matrix: the outcome (stability index)
# plus identifier/bookkeeping columns.
remove.names <- c("stability_index_numsrc", "stability_index_rank", "stability_index_lower", "stability_index_upper", "stability_index_estimate",
"stability_index_stderr", "X", "country", "year", "code")
for(i in remove.names){
if ( i %in% names(altered.year))
{
altered.year[[i]] <- NULL
}
}
# K-means cluster analysis on standardized features (zero mean, unit sd).
scaled.year <- scale(altered.year)
scaled.index <- scale(altered.index)
# for(i in 1:20) {
#   jpeg(paste0(year_val,"/", "kmeans_", year_val, "_",i,".jpg"))
#   clus <- kmeans(scaled.year, centers=i)
#   clusplot(scaled.year, clus$cluster, color=TRUE, shade=TRUE,
#            labels=2, lines=0)
#   dev.off()
#
#   jpeg(paste0(year_val, "/", "kmeans_", year_val, "_",i,"_index.jpg"))
#   clus_ind <- kmeans(scaled.index, centers=i)
#   clusplot(scaled.index, clus_ind$cluster, color=TRUE, shade=TRUE,
#            labels=2, lines=0)
#   dev.off()
# }
# Sweep the number of clusters k from 2 to 10.
for(i in 2:10) {
# jpeg(paste0(year_val,"/", "kmaps_", year_val, "_",i,".jpg"))
clus <- kmeans(scaled.year, centers=i)
# One-column data frame of cluster assignments, keyed by country code.
world_data = as.data.frame(clus$cluster)
world_data$code = row.names(world_data)
# Heat palette (one color per cluster) for the commented-out world map below.
heatcolors = heat.colors(i, alpha = 1)
revheatcolors = rev(heatcolors)
# sPDF <- joinCountryData2Map(world_data, joinCode = "NAME", nameJoinColumn = "code", verbose=TRUE)
#
# par(mai=c(0,0,0.2,0),xaxs="i",yaxs="i")
#
# map.params <- mapCountryData(sPDF, nameColumnToPlot='clus$cluster', addLegend='TRUE', catMethod = "pretty", missingCountryCol = "grey",
#               mapTitle=paste0("K Means Clustering - Year: ", year_val, " Clusters: ", i), oceanCol="lightblue", colourPalette =heatcolors)
#
#
#
# dev.off()
# Scatter plot: cluster assignment vs. the stability-index estimate,
# written to <year>/corre_<year>_<k>.jpg.
jpeg(paste0(year_val,"/", "corre_", year_val, "_",i,".jpg"))
plot(world_data$`clus$cluster`, maintain.stab)
dev.off()
}
}
|
1bda197849c3d715ca0850c8c60355e7d74474db
|
320ddefc84d992475db8b83befde46e2b780314f
|
/man/stan.error.Rd
|
d5ec6e25ee38075f50b59f53a9845490a93cac8e
|
[] |
no_license
|
cran/asbio
|
bca146e402058cd67ff5fc42423cb0c0544f942b
|
3cb01b7cb1a8dec60a5f809f91bc460a6566954d
|
refs/heads/master
| 2023-08-31T23:43:08.304864
| 2023-08-20T02:22:36
| 2023-08-20T04:30:48
| 17,694,492
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 516
|
rd
|
stan.error.Rd
|
\name{stan.error}
\alias{stan.error}
\alias{stan.error.sq}
\title{Variance and standard error estimators for the sampling distribution of the sample mean}
\description{Estimator of the variance of the sampling distribution of the sample mean, i.e. \eqn{S^2/n}, and of the standard deviation of the sampling distribution, i.e. \eqn{S/\sqrt{n}}.
}
\usage{
stan.error(x)
stan.error.sq(x)
}
\arguments{
\item{x}{A vector of quantitative data.}}
\author{Ken Aho}
\seealso{\code{\link{sd}}}
\keyword{univar}
|
925376c5f677ccf06c9c5d4ad6ef902d418664c7
|
154b7c69214c2005d6746f832ca317a8dde85ce4
|
/Rlabkey/R/labkey.selectRows.R
|
c3c124a61f02365460dbd93413bbcfdba49ac75a
|
[
"Apache-2.0"
] |
permissive
|
SRenan/Rlabkey
|
22e6f9dd765c47b66d27146afe92dfaf05d2a2fa
|
470820f4d5414844e2da480abb3dae62821f354f
|
refs/heads/master
| 2020-06-02T20:37:01.828711
| 2013-09-24T17:27:22
| 2013-09-24T17:27:22
| 13,071,617
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,226
|
r
|
labkey.selectRows.R
|
##
# Copyright (c) 2008-2013 Fred Hutchinson Cancer Research Center
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
# Retrieve rows from a LabKey Server query via the selectRows API.
#
# Arguments:
#   baseUrl, folderPath, schemaName, queryName - required; identify the server,
#     container and query to read.
#   viewName, colSelect, maxRows, rowOffset, colSort, colFilter,
#   containerFilter - optional query modifiers (empty strings are treated as
#     "not supplied").
#   showHidden  - include hidden columns in the result (default FALSE).
#   colNameOpt  - how result columns are named (default 'caption').
# Returns: a data frame built from the server's JSON response (via makeDF).
# Raises: an error for any HTTP status >= 400, with the server's exception
#   message when the response body is JSON.
labkey.selectRows <- function(baseUrl, folderPath, schemaName, queryName, viewName=NULL, colSelect=NULL,
maxRows=NULL, rowOffset=NULL, colSort=NULL, colFilter=NULL, showHidden=FALSE, colNameOpt='caption', containerFilter=NULL)
{
## Fail fast if a required argument was omitted.
## NOTE: the previous check used exists("baseUrl") etc., which always succeeds
## inside the function body (the formal argument itself is found), so a missing
## argument was never caught here; missing() is the correct test for an
## unsupplied formal argument.
if (missing(baseUrl) || missing(folderPath) || missing(schemaName) || missing(queryName))
stop (paste("A value must be specified for each of baseUrl, folderPath, schemaName and queryName."))
## Treat empty strings passed for optional arguments as "not supplied".
if(is.null(viewName)==FALSE) {if(nchar(viewName)<1) {viewName<-NULL}}
if(is.null(colSelect)==FALSE) {if(nchar(colSelect[1])<1) {colSelect<-NULL}}
if(is.null(maxRows)==FALSE) {if(nchar(maxRows)<1) {maxRows<-NULL}}
if(is.null(rowOffset)==FALSE) {if(nchar(rowOffset)<1) {rowOffset<-NULL}}
if(is.null(colSort)==FALSE) {if(nchar(colSort)<1) {colSort<-NULL}}
if(is.null(colFilter)==FALSE) {if(nchar(colFilter[1])<1) {colFilter<-NULL}}
if(is.null(showHidden)==FALSE) {if(nchar(showHidden)<1) {showHidden<-FALSE}}
if(is.null(containerFilter)==FALSE) {if(nchar(containerFilter[1])<1) {containerFilter<-NULL}}
## URL-encode identifiers, but only if they are not already encoded
## (an already-encoded value differs from its unescaped form).
if(schemaName==curlUnescape(schemaName)) {schemaName <- curlEscape(schemaName)}
if(queryName==curlUnescape(queryName)) {queryName <- curlEscape(queryName)}
if(folderPath==URLdecode(folderPath)) {folderPath <- URLencode(folderPath)}
if(is.null(viewName)==FALSE) {if(viewName==curlUnescape(viewName)) viewName <- curlEscape(viewName)}
if(is.null(containerFilter)==FALSE) {if(containerFilter==curlUnescape(containerFilter)) containerFilter<- curlEscape(containerFilter)}
if(is.null(colSort)==FALSE) {if(colSort==curlUnescape(colSort)) colSort <- curlEscape(colSort)}
## Collapse colSelect into one comma-separated, URL-escaped string.
## lencolSel is remembered so we can warn if the server returns fewer columns.
if(is.null(colSelect)==FALSE)
{
lencolSel <- length(colSelect)
colSelect <- paste(curlEscape(colSelect), collapse=",")
}
## Normalize path separators and guarantee "/" at the URL/path boundaries.
baseUrl <- gsub("[\\]", "/", baseUrl)
folderPath <- gsub("[\\]", "/", folderPath)
if(substr(baseUrl, nchar(baseUrl), nchar(baseUrl))!="/"){baseUrl <- paste(baseUrl,"/",sep="")}
if(substr(folderPath, nchar(folderPath), nchar(folderPath))!="/"){folderPath <- paste(folderPath,"/",sep="")}
if(substr(folderPath, 1, 1)!="/"){folderPath <- paste("/",folderPath,sep="")}
## Build the selectRows.api URL, appending only the options actually supplied.
myurl <- paste(baseUrl,"query",folderPath,"selectRows.api?schemaName=",schemaName,"&query.queryName=",queryName,"&apiVersion=8.3",sep="")
if(is.null(viewName)==FALSE) {myurl <- paste(myurl,"&query.viewName=",viewName,sep="")}
if(is.null(colSelect)==FALSE) {myurl <- paste(myurl,"&query.columns=",colSelect,sep="")}
if(is.null(maxRows)==FALSE) {myurl <- paste(myurl,"&query.maxRows=",maxRows,sep="")}
if(is.null(maxRows)==TRUE) {myurl <- paste(myurl,"&query.showRows=all",sep="")}
if(is.null(rowOffset)==FALSE) {myurl <- paste(myurl,"&query.offset=",rowOffset,sep="")}
if(is.null(colSort)==FALSE) {myurl <- paste(myurl,"&query.sort=",colSort,sep="")}
if(is.null(colFilter)==FALSE) {for(j in seq_along(colFilter)) myurl <- paste(myurl,"&query.",colFilter[j],sep="")}
if(is.null(containerFilter)==FALSE) {myurl <- paste(myurl,"&containerFilter=",containerFilter,sep="")}
## Configure curl: capture body and headers, use ~/.netrc credentials,
## follow redirects, skip SSL verification (historic default of this package).
reader <- basicTextGatherer()
header <- basicTextGatherer()
myopts <- curlOptions(writefunction=reader$update, headerfunction=header$update, netrc=1, ssl.verifyhost=FALSE, ssl.verifypeer=FALSE, followlocation=TRUE)
## Merge in user-settable curl options (debugging, proxies, etc.).
if(exists(".lksession"))
{
userOpt <- .lksession[["curlOptions"]]
if (!is.null(userOpt))
{myopts<- curlOptions(.opts=c(myopts, userOpt))}
}
## HTTP GET, using a session cookie when one exists, else basic auth.
handle <- getCurlHandle()
clist <- ifcookie()
if(clist$Cvalue==1)
{
mydata <- getURI(myurl, .opts=myopts, cookie=paste(clist$Cname,"=",clist$Ccont,sep=""))
}
else
{
myopts <-curlOptions(.opts=c(myopts, httpauth=1L))
mydata <- getURI(myurl, .opts=myopts, curl=handle)
}
## Error checking: surface the server-side exception message when available.
h <- parseHeader(header$value())
status <- getCurlInfo(handle)$response.code
message <- h$statusMessage
if(status==500)
{decode <- fromJSON2(mydata); message <- decode$exception; stop(paste("HTTP request was unsuccessful. Status code = ",status,", Error message = ",message,sep=""))}
if(status>=400)
{
contTypes <- which(names(h)=='Content-Type')
## BUGFIX: the old code read h[contTypes[2]] unconditionally; with a single
## Content-Type header contTypes[2] is NA, making the if() condition NA and
## raising "missing value where TRUE/FALSE needed". Check all Content-Type
## headers with any(), ignoring NAs.
isJson <- length(contTypes)>0 && any(tolower(h[contTypes])=="application/json;charset=utf-8", na.rm=TRUE)
if(isJson)
{
decode <- fromJSON2(mydata);
message<-decode$exception;
stop (paste("HTTP request was unsuccessful. Status code = ",status,", Error message = ",message,sep=""))
} else
{
stop(paste("HTTP request was unsuccessful. Status code = ",status,", Error message = ",message,sep=""))
}
}
## Decode the JSON payload into a data frame.
newdata <- makeDF(mydata, colSelect, showHidden, colNameOpt)
## Warn when fewer columns came back than were requested via colSelect
## (usually a column-name vs. column-caption mistake).
if(is.null(colSelect)==FALSE){if(ncol(newdata)<lencolSel)warning("Fewer columns are returned than were requested in the colSelect variable. The column names may be invalid. Be sure to use the column name and not the column caption. See the documentation for further explaination.")}
return(newdata)
}
|
ea5c177a79153434e27bbedd1b75c889476257cc
|
be5dacb84d9a27c35e9faae863e1603b4946709b
|
/AltruisticLanguage/regressionFinal.R
|
fd57aa46cd012bf52c87e25e99168bc9214841af
|
[] |
no_license
|
ghllee/CS6742
|
7e878e510264c91b00ef0c0adb6109b820bcf13c
|
f0815e907dfa5ea6126f94e94a8081cbf30ae511
|
refs/heads/master
| 2021-01-14T11:00:27.540115
| 2015-01-15T06:37:09
| 2015-01-15T06:37:09
| 28,832,687
| 0
| 0
| null | 2015-01-05T21:30:40
| 2015-01-05T21:30:40
| null |
UTF-8
|
R
| false
| false
| 4,360
|
r
|
regressionFinal.R
|
# --- Environment setup --------------------------------------------------------
# NOTE(review): hard-coded absolute setwd() makes the script non-portable;
# consider running from the project root instead — TODO confirm.
setwd("/home/jack/Desktop/NLP/CS6742/AltruisticLanguage")
library('Matrix')      # readMM() for the sparse feature matrix
library('glmnet')      # cv.glmnet() lasso logistic regression
library('foreach')
library('doParallel')
# Register a parallel backend so cv.glmnet(..., parallel = TRUE) works.
registerDoParallel()
rsort <- function(x) {
  ## Return the elements of `x` sorted from largest to smallest.
  descending <- sort(x, decreasing = TRUE)
  descending
}
# --- Load the design matrix and build the target ------------------------------
setwd("regressionData")
# Sparse feature matrix (Matrix Market format) and its column headers.
data <- readMM("all.mtx")
headers <- read.csv("all.csv")
setwd("..")
colnames(data) <- colnames(headers)
# NOTE(review): dataControl / headersControl are never defined in this file
# (the control variant appears to have been removed); this line errors as
# written — TODO confirm and delete or restore the control data load.
colnames(dataControl) <- colnames(headersControl)
# Target: a project is "altruistic" when >10% of its backers used altruistic
# language (numAltruistic / numBackers > 0.1); 0/0 projects count as FALSE.
target <- data[,c("numBackers", "numAltruistic")]
target <- target[,2]/target[,1]
target[is.nan(target)] <- 0
target <- target > .1
# --- Cross-validated lasso logistic regressions on the full data --------------
# Predictor sets (column-name prefixes): T* = text features, L* = LIWC features.
# "control": no T* and no L* columns (controls only).
control <- cv.glmnet(data[,c(-grep("^T|^L", colnames(data)), -which(colnames(data) %in% c("numBackers","numAltruistic", "Osuccess")))], target,
family = "binomial",
type.measure = "class",
alpha = 1, parallel = T)
# "langControl": controls + LIWC features (T* still excluded).
langControl <- cv.glmnet(data[,c(-grep("^T", colnames(data)), -which(colnames(data) %in% c("numBackers","numAltruistic", "Osuccess")))], target,
family = "binomial",
type.measure = "class",
alpha = 1, parallel = T)
# "total": all features except the target components and Osuccess.
total <- cv.glmnet(data[,-which(colnames(data) %in% c("numBackers","numAltruistic", "Osuccess"))], target,
family = "binomial",
type.measure = "class",
alpha = 1, parallel = T)
######
# --- Holdout evaluation: train on the first 40k rows, predict the rest --------
train <- 40000
holdoutControl <- cv.glmnet(data[1:train,c(-grep("^T|^L", colnames(data)), -which(colnames(data) %in% c("numBackers","numAltruistic", "Osuccess")))],
target[1:train], family = "binomial", type.measure = "class", alpha = 1, parallel = T)
holdoutLangControl <- cv.glmnet(data[1:train,c(-grep("^T", colnames(data)), -which(colnames(data) %in% c("numBackers","numAltruistic", "Osuccess")))],
target[1:train], family = "binomial", type.measure = "class", alpha = 1, parallel = T)
holdoutTotal <- cv.glmnet(data[1:train,-which(colnames(data) %in% c("numBackers","numAltruistic", "Osuccess"))],
target[1:train], family = "binomial", type.measure = "class", alpha = 1, parallel = T)
# Class predictions on the holdout rows for each predictor set.
predControl <- predict(holdoutControl,
data[(train+1):nrow(data),c(-grep("^T|^L", colnames(data)), -which(colnames(data) %in% c("numBackers","numAltruistic", "Osuccess")))],
type='class')
predLangControl <- predict(holdoutLangControl,
data[(train+1):nrow(data),
c(-grep("^T", colnames(data)), -which(colnames(data) %in% c("numBackers","numAltruistic", "Osuccess")))],
type='class')
predTotal <- predict(holdoutTotal,
data[(train+1):nrow(data), -which(colnames(data) %in% c("numBackers","numAltruistic", "Osuccess"))],
type='class')
topBottomK <- function(cvReg, k) {
  ## Print the cross-validated accuracy of a cv.glmnet fit at lambda.min,
  ## followed by the k smallest ("BOTTOM") and k largest ("TOP") coefficients.
  lambda_best <- cvReg$lambda.min
  cv_error <- cvReg$cvm[cvReg$lambda == lambda_best]
  print(1 - cv_error)
  coef_mat <- as.matrix(coef(cvReg, s = lambda_best))
  ascending <- apply(coef_mat, 2, sort)
  descending <- apply(coef_mat, 2, rsort)
  print("BOTTOM")
  print(ascending[1:k, ])
  print("TOP")
  print(descending[1:k, ])
}
checkModel <- function(pred) {
  ## Print holdout accuracy, precision, recall and F-measure for `pred`
  ## against the global target/train/data objects defined earlier in this
  ## script (the holdout rows are (train+1):nrow(data)).
  ##
  ## Improvement over the original: the holdout labels were recomputed with
  ## target[(train+1):nrow(data)] nine separate times; hoist them into one
  ## local. The unused negCor (true negatives) local is dropped.
  actual <- target[(train + 1):nrow(data)]
  print("Accuracy")
  print(sum(pred == actual) / length(actual))
  # Confusion-matrix counts (positives = actual TRUE/1).
  posCor <- sum(pred[actual == 1] == actual[actual == 1])   # true positives
  posFail <- sum(pred[actual == 1] != actual[actual == 1])  # false negatives
  negFail <- sum(pred[actual == 0] != actual[actual == 0])  # false positives
  perc <- posCor / (posCor + negFail)  # precision = TP / (TP + FP)
  rec <- posCor / (posCor + posFail)   # recall    = TP / (TP + FN)
  f <- 2 * perc * rec / (perc + rec)   # harmonic mean of precision and recall
  print("Precision")
  print(perc)
  print("Recall")
  print(rec)
  print("F-measure")
  print(f)
}
orderedCoefs <- function(cvReg) {
  ## Print the cross-validated accuracy of a cv.glmnet fit at lambda.min and
  ## return its coefficient matrix with each column sorted in ascending order.
  lambda_best <- cvReg$lambda.min
  cv_error <- cvReg$cvm[cvReg$lambda == lambda_best]
  print(1 - cv_error)
  coef_mat <- as.matrix(coef(cvReg, s = lambda_best))
  apply(coef_mat, 2, sort)
}
# --- Report results ------------------------------------------------------------
# Holdout metrics for each predictor set (controls only / +LIWC / all features).
checkModel(predControl)
checkModel(predLangControl)
checkModel(predTotal)
# Top/bottom coefficients of the full-data "total" model, and the full
# ordered coefficient table written to disk.
topK <- 50
topBottomK(total, topK)
write.table(orderedCoefs(total), file = "orderedCoefs")
|
ac8a297c2c16ff69eaef810715b4ff9afb5b4983
|
79f5edebf760abb8ccc4e58f246586ad71f5eb6c
|
/man/remove_question_type.Rd
|
712acca608f56575d086683c1e56a8ce7c46b71c
|
[
"MIT"
] |
permissive
|
rwash/surveys
|
3b09bd12633ed5322b35224cc79d90cc5b2443d7
|
4cded70f3aabd097f2f1e9ccb75a1c9d1639f40f
|
refs/heads/master
| 2023-04-29T05:50:11.471493
| 2023-04-20T13:41:56
| 2023-04-20T13:41:56
| 31,321,921
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 379
|
rd
|
remove_question_type.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/detector.R
\name{remove_question_type}
\alias{remove_question_type}
\title{Remove a question type from the auto-detector}
\usage{
remove_question_type(name)
}
\arguments{
\item{name}{Character string name of the question type to remove}
}
\description{
Remove a question type from the auto-detector
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.