blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b3eeeb454d07771d7fd845e01822b44152f05ff0
|
51ce3d248c7e6dc9d0a12bb2cd7e8d8c0dd8b1af
|
/h2o-r/tests/testdir_misc/runit_quantile.R
|
3f8876c0a82f1c4453d7b1b7f61047ee15ae7728
|
[
"Apache-2.0"
] |
permissive
|
KR8T3R/h2o-3
|
019336a122904c323cdb0043529ea681a0d90513
|
9adc6e35fb2771dbb86b7b147a840ef09be71ff2
|
refs/heads/master
| 2020-04-10T22:18:32.557475
| 2018-12-11T10:23:18
| 2018-12-11T10:38:33
| 161,320,436
| 2
| 0
|
Apache-2.0
| 2018-12-11T10:58:20
| 2018-12-11T10:58:20
| null |
UTF-8
|
R
| false
| false
| 1,564
|
r
|
runit_quantile.R
|
# Locate this script on disk and load the shared h2o R test harness.
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../../scripts/h2o-r-test-setup.R")
#This tests quantile and weighted quantile on synthetic data by comparing with R
test.quantile <- function(conn){
# set random seed to generate random dataset
set.seed(1234)
N = 1000
# Case 1: heavily right-skewed gamma sample, irregular probability grid
# (note 88.83 and 99 appear before 90 — the grid need not be sorted).
x = rgamma(N, shape=0.067, scale = 0.008)
aa = as.h2o(x)
r_q = quantile(x, probs = c(0.1, 0.5, 1, 2, 5, 10, 50,88.83,99,90)/100,na.rm=T)
h_q = h2o.quantile(aa,probs = c(0.1, 0.5, 1, 2, 5, 10, 50,88.83,99,90 )/100,na.rm=T)
expect_equal(r_q,h_q )
# Case 2: extreme-variance lognormal sample, regular 5% grid.
x = rlnorm(N,meanlog = 12,sdlog = 132)
aa = as.h2o(x)
r_q = quantile(x, probs = seq(0,1,.05),na.rm=T)
h_q = h2o.quantile(aa,probs = seq(0,1,.05),na.rm=T)
expect_equal(r_q,h_q )
# Case 3: exponential sample with 10% of entries set to NA, exercising na.rm.
x = rexp(N, rate = 12.3)
ss = sample(1:N,size = N/10,replace = F)
x[ss]=NA
aa = as.h2o(x)
r_q = quantile(x, probs = seq(0,1,.05),na.rm=T)
h_q = h2o.quantile(aa,probs = seq(0,1,.05),na.rm=T)
expect_equal(r_q,h_q )
#weighted quantiles
# Case 4: weighted quantiles. The reference vector r_q below was
# precomputed with Hmisc::wtd.quantile (see the commented-out call), so the
# test does not depend on Hmisc at run time; compared with a relative
# tolerance rather than expect_equal.
#library(Hmisc)
set.seed(1)
N=1e5
x = runif(N)
aa = as.h2o(x)
wts = sample(1:6, N, TRUE)
aa$h_wts = as.h2o(wts)
#r_q = wtd.quantile(x, wts, probs = seq(0,1,.05))
r_q=c(3.895489e-06,4.863379e-02,9.789691e-02,1.470487e-01,1.977443e-01,2.473365e-01,2.975013e-01,3.482667e-01,3.980460e-01,4.483631e-01,4.990024e-01,5.489128e-01,5.986945e-01,6.486255e-01,6.991498e-01,7.500031e-01,8.001472e-01,8.504057e-01,8.996923e-01,9.498159e-01,9.999471e-01)
h_q = h2o.quantile(aa,probs = seq(0,1,.05),weights_column = "h_wts")
expect_true(max(abs((r_q-h_q)/r_q)) < 1e-5)
}
doTest("Test quantile",test.quantile )
|
a2fb3ca032b1120889061f1c9b05252a33d2d2e8
|
f5dd1b06166bae7b674a4784d258c50784287359
|
/plot1.R
|
ec2b5e5ffac31c44ca935a36e5b8f1eebe7b9c05
|
[] |
no_license
|
DawitHabtemariam/ExData_Plotting1
|
bfd4d65868ab0d3d9554838c5bee9da9b5f2cbe8
|
9ecb7a88a656cb5e146857df083b2bd7b11072b8
|
refs/heads/master
| 2021-01-09T05:41:00.031469
| 2014-08-06T15:06:45
| 2014-08-06T15:06:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 514
|
r
|
plot1.R
|
# Plot 1: histogram of Global_active_power for the dates 1/2/2007 and 2/2/2007.
# NOTE(review): the downloaded zip archive is saved under the misleading name
# "power.csv"; unzip() handles it regardless of extension.
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip","power.csv")
unzip("power.csv")
raw_power <- read.table("household_power_consumption.txt", sep = ";", header = TRUE, stringsAsFactors = FALSE)
# Keep only the two days of interest, then combine date and time into POSIXlt.
two_days <- subset(raw_power, Date == "1/2/2007" | Date == "2/2/2007")
two_days$Date <- strptime(paste(two_days$Date, two_days$Time), "%d/%m/%Y %H:%M:%S")
hist(as.numeric(two_days$Global_active_power), col = "red",
     xlab = "Global Active Power (in kilowatts)", main = "Global Active Power")
# Copy the screen device to a PNG file, then close that copy.
dev.copy(png, "plot1.png")
dev.off()
|
84d7d0097317d98c58a9e58de824a2944afe2bfb
|
0e7cff005b3300f2e8bbfd1beabedf2ffc4e276c
|
/R/cat.R
|
a8964cb0b26068e0bf7ce8324723f67d136d8d9e
|
[] |
no_license
|
gui11aume/vtrackR
|
afdda1958d7c88db87e707151410d365ee49c9c6
|
0d33c243708396cc52927a38e0195f1e402d153d
|
refs/heads/master
| 2020-05-01T00:59:16.834568
| 2015-03-02T22:30:17
| 2015-03-02T22:34:10
| 2,506,372
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,022
|
r
|
cat.R
|
# A wrapper that shadows base::cat(), prepending a "vheader" provenance block
# when a fresh (non-append) file is written by a non-root user.
cat <- function(x, file = "", sep = " ", fill = FALSE, labels = NULL,
append = FALSE, ...) {
# Extra parameters are passed to 'vheader(...)'.
isroot <- Sys.info()[["user"]] == "root";
if (append || file == "" || isroot) {
# Just an append, a screen display, or root user:
# call 'base::cat()' directly, with no vheader.
base::cat(x, file=file, sep=sep, fill=fill, labels=labels,
append=append);
}
else {
# Otherwise there should be a vheader.
if (is.null(attr(x, "vtag"))) {
# Oops... Forgot to vtag the variable?  Fall back to a session-level
# vtag so the file still records some provenance.
warning('no vtag, writing session vheader');
attr(x, "vtag") <- vsessionInfo();
attr(x, "vtag")[["self"]][["self SHA1"]] <- SHA1(x);
}
# Now 'x' has a vtag, format a vheader and write.  This first cat()
# creates/truncates the file (append=FALSE here).
base::cat(vheader(x, ...), file=file, sep=sep, fill=fill,
labels=labels, append=append);
# And finally write 'x' after the header (append=TRUE).
base::cat(x, file=file, sep=sep, fill=fill,
labels=labels, append=TRUE);
}
}
|
06c7e511c9014333eb3b4ef6d96076e41361bacb
|
bba58777bb8370709c297c5e400eab6601df92ff
|
/BasicFunctions.R
|
dfa447e4f7d08b84e647fc5e9ad4d024e7ea6621
|
[] |
no_license
|
srivasrrahul/StatisticsFunction
|
2e2661672dec5659824f8ec6521464af8b1595d5
|
b36235bf793fb2bda5dd9e7e577854f5d124de3b
|
refs/heads/master
| 2021-01-01T06:17:20.344612
| 2015-05-10T06:44:41
| 2015-05-10T06:44:41
| 35,360,316
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,635
|
r
|
BasicFunctions.R
|
#Statistics basic tools
# Two-sample t-type statistic: difference of the group means divided by a
# pooled standard-error term.
# NOTE(review): the pooled variance weights each group's variance by n rather
# than n - 1; confirm this estimator is intentional before changing it.
tsquare <- function(x,y) {
  n1 <- length(x)
  n2 <- length(y)
  s1 <- sd(x)
  s2 <- sd(y)
  pooled <- sqrt((n1 * s1 * s1 + n2 * s2 * s2) / (n1 + n2 - 2))
  size_term <- sqrt((n1 + n2) / (n1 * n2))
  (mean(x) - mean(y)) / (pooled * size_term)
}
# Degrees of freedom for a two-sample comparison: (n1 - 1) + (n2 - 1).
findDf <- function(x,y) {
  length(x) + length(y) - 2
}
#' Sum of squared deviations of a numeric vector about its mean.
#'
#' Fixes: the original computed sd(x) into a local `s1` that was never used,
#' and accumulated the squared deviations in a scalar loop; both replaced by
#' a single vectorized expression. Empty input still returns 0.
sumOfSquareDeviation <- function(x) {
  deviations <- x - mean(x)
  sum(deviations * deviations)
}
# Sum of squares of the elements of x (name kept, typo and all, because
# callers may rely on it).
sumOfSuare <- function(x) {
  sum(x * x)
}
# Grand mean over every element of a list of numeric vectors.
overallMean <- function(lst) {
  values <- unlist(lst)
  sum(values) / length(values)
}
# Between-group sum of squares: sum over groups of n_g * (mean_g - grand)^2.
# Uses overallMean() defined elsewhere in this file.
sumOfSquaresBetween <- function(lst) {
  grand <- overallMean(lst)
  per_group <- vapply(lst, function(g) {
    delta <- mean(g) - grand
    length(g) * (delta * delta)
  }, numeric(1))
  sum(per_group)
}
#' Per-group sums for a list of numeric vectors.
#'
#' Bug fix: the original loop assigned into `sumLst` (shadowing the function
#' name) instead of the accumulator `sumLsts`, so the accumulator never grew
#' and the function always returned the initial empty c() (NULL). Replaced
#' the grow-in-a-loop pattern with vapply, which also fixes the bug.
sumLst <- function(lst) {
  vapply(lst, sum, numeric(1), USE.NAMES = FALSE)
}
# Per-group means for a list of numeric vectors.
meanLst <- function(lst) {
  vapply(lst, mean, numeric(1), USE.NAMES = FALSE)
}
# Per-group sums of squares for a list of numeric vectors.
squareLst <- function(lst) {
  vapply(lst, function(arr) sum(arr * arr), numeric(1), USE.NAMES = FALSE)
}
# Total number of elements across all vectors in the list.
overallCount <- function(lst) {
  n_items <- 0
  for (arr in lst) {
    n_items <- n_items + length(arr)
  }
  n_items
}
#' Total sum of squares from precomputed per-group quantities,
#' assuming balanced groups of size itemsPerGroup.
#'
#' Fix: removed the unused locals `s` and `l` from the original.
#' @param overallMean grand mean across all observations
#' @param meanArr per-group means (only its length is used here)
#' @param meanSquareArr per-group sums of squares
#' @param itemsPerGroup common group size
sstInternal <- function(overallMean,meanArr,meanSquareArr,itemsPerGroup) {
  sum(meanSquareArr) - itemsPerGroup * (length(meanArr) * (overallMean * overallMean))
}
# Total sum of squares: sum of all squared values minus N * grand_mean^2.
# Relies on squareLst(), overallCount() and overallMean() from this file.
sst <- function(lst) {
  total_sq <- sum(squareLst(lst))
  n_total <- overallCount(lst)
  grand <- overallMean(lst)
  total_sq - n_total * (grand * grand)
}
# Between-group SS from precomputed group means, assuming balanced groups.
# (meanSquareArr is accepted for signature compatibility but not used.)
ssbInternal <- function(overallMean,meanArr,meanSquareArr,itemsPerGroup) {
  diffs <- overallMean - meanArr
  sum(itemsPerGroup * (diffs * diffs))
}
# Between-group sum of squares computed directly from the raw groups.
# Uses overallMean() from this file.
ssb <- function(lst) {
  grand <- overallMean(lst)
  total <- 0
  for (grp in lst) {
    delta <- grand - mean(grp)
    total <- total + length(grp) * (delta * delta)
  }
  total
}
# Within-group SS as total SS minus between-group SS.
ssw <- function(lst) {
  sst(lst) - ssb(lst)
}
# Within-group degrees of freedom: N - number of groups.
dfw <- function(lst) {
  overallCount(lst) - length(lst)
}
# Between-group degrees of freedom: number of groups minus one.
dfb <- function(lst) {
  length(lst) - 1
}
# Mean square within: SSW / df_within.
msw <- function(lst) {
  ssw(lst) / dfw(lst)
}
# Mean square between: SSB / df_between.
msb <- function(lst) {
  ssb(lst) / dfb(lst)
}
# One-way ANOVA F statistic: MSB / MSW.
fstats <- function(lst) {
  msb(lst) / msw(lst)
}
# Stub: declared but never implemented; always returns NULL.
# NOTE(review): basicRegressionModel() below computes its own elementwise
# product instead of calling this — either implement or remove.
scalarProduct <- function(x,y) {
}
#' Least-squares slope of yAxis regressed on xAxis:
#' (n*sum(xy) - sum(x)*sum(y)) / (n*sum(x^2) - sum(x)^2).
#'
#' Fix: removed the unused local `m` (mean of xAxis) from the original.
basicRegressionModel <- function(xAxis,yAxis) {
  n <- length(xAxis)
  sum_x <- sum(xAxis)
  numerator <- n * sum(xAxis * yAxis) - sum_x * sum(yAxis)
  denominator <- n * sum(xAxis * xAxis) - sum_x * sum_x
  numerator / denominator
}
#' Intercept of the least-squares line of yAxis on xAxis.
#'
#' Bug fix: the original computed mean(y) and mean(x), referencing globals
#' `x` and `y` that are not function parameters (an error, or silently wrong
#' results, unless such globals happen to exist). It now uses the actual
#' arguments yAxis and xAxis.
alphaStat <- function(xAxis,yAxis) {
  slope <- basicRegressionModel(xAxis, yAxis)
  mean(yAxis) - slope * mean(xAxis)
}
#' Standard-error-style statistic for the regression slope:
#' sqrt(RSS / (n - 2)) / sqrt(sum((x - mean(x))^2)).
#'
#' Fixes: removed the stray debug output (print("gello") and two print(s)
#' calls), the unused local `meanY`, and the grow-in-a-loop construction of
#' the predicted values; residuals are now computed vectorized.
#' Uses alphaStat() and basicRegressionModel() from this file.
smallSigmaForRegression <- function(xAxis,yAxis) {
  intercept <- alphaStat(xAxis, yAxis)
  slope <- basicRegressionModel(xAxis, yAxis)
  # Residual sum of squares of the fitted line.
  residuals <- (intercept + slope * xAxis) - yAxis
  rss <- sum(residuals * residuals)
  numerator <- sqrt(rss / (length(yAxis) - 2))
  # Sum of squared deviations of x about its mean.
  centered <- mean(xAxis) - xAxis
  denominator <- sqrt(sum(centered * centered))
  numerator / denominator
}
# t-like statistic for the slope: slope divided by its standard error.
significanceValueForRegression <- function(xAxis,yAxis) {
  basicRegressionModel(xAxis, yAxis) / smallSigmaForRegression(xAxis, yAxis)
}
|
fa4f3f441e6400a511807a0f534d6d19f6818646
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/europepmc/examples/epmc_profile.Rd.R
|
37d4221a17964f4a9b83df91a644c1681cd19e73
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 345
|
r
|
epmc_profile.Rd.R
|
# Extracted example code from the europepmc epmc_profile() help page, as
# emitted by R's example extraction. Lines prefixed "##D" sit inside the
# '## Not run:' guard and are not executed when this file is run.
library(europepmc)
### Name: epmc_profile
### Title: Obtain a summary of hit counts
### Aliases: epmc_profile
### ** Examples
## Not run:
##D epmc_profile('malaria')
##D # use field search, e.g. query materials and reference section for
##D # mentions of "ropensci"
##D epmc_profile('(METHODS:"ropensci")')
##D
## End(Not run)
|
4d404a17b10135ddcda9c8320beb2852b7db545e
|
0ea92a0e1eace26312972c5d2be22ae49b59c99c
|
/R/AnnualReporting/dup_wlg_2019_2020.R
|
9de38411532a6ad066c1019c155e03e081183871
|
[] |
no_license
|
HARPgroup/vahydro
|
852425ccf9271ebe2ff95fb6b48d8b7eb23e473f
|
73ea02207eee96f574f24db95fad6a2f6a496e69
|
refs/heads/master
| 2023-09-04T00:11:29.732366
| 2023-09-01T18:13:46
| 2023-09-01T18:13:46
| 147,414,383
| 0
| 0
| null | 2023-09-08T13:52:26
| 2018-09-04T20:37:13
|
R
|
UTF-8
|
R
| false
| false
| 515
|
r
|
dup_wlg_2019_2020.R
|
# Inspect duplicate 2019-2020 records by running SQL queries over a CSV
# via sqldf.
# NOTE(review): hard-coded absolute Windows path — parameterize before reuse.
library(sqldf)
dup_wlg_2019_2020 <- read.csv("C:/Users/maf95834/Downloads/dup_wlg_2019-2020.csv", header = TRUE)
# Distinct (feature id, a value, b value, timestamp) combinations.
dup_wlg <- sqldf("SELECT distinct a_featureid, a_tsvalue, b_tsvalue, to_timestamp
FROM dup_wlg_2019_2020
ORDER BY a_featureid")
# Distinct (feature id, timestamp) pairs.
dup_wlg2 <- sqldf("SELECT distinct a_featureid, to_timestamp
FROM dup_wlg_2019_2020
ORDER BY a_featureid")
# Distinct feature ids (result printed, not stored).
sqldf("SELECT distinct a_featureid
FROM dup_wlg_2019_2020
")
|
df9ef3aa5b02fcee8310f537c6ecbdeaecda6ff7
|
eac3daa61e88a1c6d8a4681c021b03c59a5cc2ad
|
/man/coef.regimix.Rd
|
cdf0b9f01f3cd9b0b4d1cd75bc1e746aba1c80eb
|
[] |
no_license
|
GrantDornan/RCPmod
|
10082ac51346ed216a669258dedc4fdbf4a49c1b
|
6c03f910c7f82c8afdd91cd68f83286e6874b699
|
refs/heads/master
| 2021-01-17T18:20:47.627051
| 2013-11-01T00:00:00
| 2013-11-01T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 645
|
rd
|
coef.regimix.Rd
|
\name{coef.regimix}
\alias{coef.regimix}
\title{A regimix object's coefficients.}
\description{Returns coefficients from a regimix object.
}
\section{Method}{ coef( object, \dots{})
}
\arguments{
\item{ object}{an object obtained from fitting a regions of common profile mixture model. Such as that generated from a call to regimix(qv).}
\item{ ...}{ignored}
}
\value{Returns a list of three elements, one each for the estimates for the species prevalence (alpha), the deviations from alpha for the first (nRCP-1) regional profiles (tau), and the (nRCP-1) sets of region regression coefficients (beta).
}
\author{Scott D. Foster}
\keyword{misc}
|
a8643f435a617961aa1a04a174ee833b9770ac69
|
4f43b8f323fcf9f902933a2500fea7689a14fc4e
|
/ui.R
|
c847c0631272d3d6b95c6d37546fb99e143abc67
|
[] |
no_license
|
Rdfricker/NormalProbDemos
|
59b267908e2afce9bb90c695d8d544137043d52b
|
30e177d820bb3976390b7fe032f6c275de42aa1a
|
refs/heads/master
| 2016-09-06T15:05:42.292012
| 2013-08-20T23:54:00
| 2013-08-20T23:54:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 673
|
r
|
ui.R
|
library(shiny)

# Choices for which tail probability to display.
tail_options <- list("F(y) or the 'left tail'" = "left",
                     "1-F(y) or the 'right tail'" = "right")

# UI: the sidebar collects the tail type and the inputs y, Mu and Sigma;
# the main panel shows the plot rendered by the server as "normal_pdf_plot".
shinyUI(pageWithSidebar(
  # Application title
  headerPanel("Probabilities from Normal Distributions"),
  # Sidebar with the tail selector and numeric parameter inputs
  sidebarPanel(
    radioButtons("typeTail", label = "", choices = tail_options),
    br(),
    numericInput("y", "y:", min = -6, max = 6, value = 1, step = 0.01),
    br(),
    numericInput("mean", "Mu:", min = -10, max = 10, value = 0, step = 0.1),
    numericInput("sd", "Sigma:", min = 0.01, max = 10, value = 1, step = 0.1)
  ),
  mainPanel(
    plotOutput("normal_pdf_plot")
  )
))
|
9bffc254fafd3e16aa305e518bacfe88d1dd01f4
|
277dbb992966a549176e2b7f526715574b421440
|
/R_training/실습제출/권용진/10.29/(10.29)scraping.R
|
7fd06eb77116a682fb6ccdb880abbe46716bed35
|
[] |
no_license
|
BaeYS-marketing/R
|
58bc7f448d7486510218035a3e09d1dd562bca4b
|
03b500cb428eded36d7c65bd8b2ee3437a7f5ef1
|
refs/heads/master
| 2020-12-11T04:30:28.034460
| 2020-01-17T08:47:38
| 2020-01-17T08:47:38
| 227,819,378
| 0
| 0
| null | 2019-12-13T12:06:33
| 2019-12-13T10:56:18
|
C++
|
UTF-8
|
R
| false
| false
| 967
|
r
|
(10.29)scraping.R
|
# CSS-selector scraping practice against a static exercise page.
# NOTE(review): relies on rvest (read_html / html_nodes / html_text /
# html_attr) being attached by an earlier script; there is no library(rvest)
# call in this file.
url = "http://unico2013.dothome.co.kr/crawling/exercise_bs.html"
text = read_html(url)
text
html_text(html_nodes(text, "h1"))
nodes = html_nodes(text, "a")
nodes
html_attr(html_nodes(text, "a"), "href")
html_attr(html_nodes(text, "img"), "src")
html_text(html_nodes(text, "h2:nth-of-type(1)"))
html_text(html_nodes(text, "ul>li[style$=green]"))
# '>' means the match is searched among direct children
html_text(html_nodes(text, "h2:nth-of-type(2)"))
html_text(html_nodes(text, "h2:nth-of-type(2)"))
html_text(html_nodes(text, "ol > *"))
# '>' : use when you want direct children only
html_text(html_nodes(text, "table *"))
# a space (descendant combinator): use when you want all descendants
html_text(html_nodes(text, "tr[class=name]"))
html_text(html_nodes(text, "tr.name"))
html_text(html_nodes(text, "td#target"))
# ids are selected with '#', not '.'
html_text(html_nodes(text, "#target"))
# an id attribute is unique, so no other element can share the td's id
html_text(html_nodes(text, "td[id=target]"))
|
0ff8659358ba3a7c76a5cb5cd3cd6e503e3226d8
|
273e68a7608d20da07288085583983979146fde7
|
/R/loadMetaQ.R
|
b8cd7e2ec8598b06f4fb8bed09607dcbd575bb4c
|
[] |
no_license
|
HCBravoLab/metagenomeSeq
|
cd3a36e82a508dc3ac44f37d20b713f9ea4c7abc
|
df8a28214fa9cb25870dee0e5cc909c160ce8da2
|
refs/heads/master
| 2023-04-08T19:09:04.362067
| 2020-06-10T13:41:35
| 2020-06-10T13:41:35
| 8,764,233
| 48
| 23
| null | 2023-03-27T18:39:38
| 2013-03-13T23:55:45
|
R
|
UTF-8
|
R
| false
| false
| 990
|
r
|
loadMetaQ.R
|
#' Load a count dataset associated with a study set up in a Qiime format.
#'
#' Load a matrix of OTUs in Qiime's format
#'
#'
#' @aliases loadMetaQ qiimeLoader
#' @param file Path and filename of the actual data file.
#' @return An list with 'counts' containing the count data, 'taxa' containing the otu annotation, and 'otus'.
#' @seealso \code{\link{loadMeta}} \code{\link{loadPhenoData}}
#' @examples
#'
#' # see vignette
#'
loadMetaQ <- function(file) {
  # Peek at the header row (line 2 of the file; line 1 is skipped) to learn
  # the column count and the sample names.
  header_row <- read.delim(file, header = FALSE, stringsAsFactors = FALSE,
                           nrows = 1, skip = 1)
  n_cols <- ncol(header_row)
  subjects <- as.character(header_row[1, -c(1, n_cols)])
  # First column = OTU id, last column = annotation, counts in between.
  col_classes <- c("character", rep("numeric", (n_cols - 2)), "character")
  full_tab <- read.delim(file, header = TRUE, colClasses = col_classes, skip = 1)
  # as.matrix() so the resulting data.frame column is named "V1",
  # matching the original behavior.
  taxa <- as.matrix(full_tab[, n_cols])
  count_tab <- full_tab[, -c(1, n_cols)]
  colnames(count_tab) <- subjects
  otus <- full_tab[, 1]
  rownames(count_tab) <- otus
  list(counts = as.data.frame(count_tab), taxa = as.data.frame(taxa),
       otus = as.data.frame(otus))
}
|
507a908743cdcf0e8a1f1d3441348da85761109e
|
2c38fc71287efd16e70eb69cf44127a5f5604a81
|
/man/tar_runtime_object.Rd
|
a9ace11b1b6fad87d5c8c4c4f2a4bdff3dcb0eb6
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
ropensci/targets
|
4ceef4b2a3cf7305972c171227852338dd4f7a09
|
a906886874bc891cfb71700397eb9c29a2e1859c
|
refs/heads/main
| 2023-09-04T02:27:37.366455
| 2023-09-01T15:18:21
| 2023-09-01T15:18:21
| 200,093,430
| 612
| 57
|
NOASSERTION
| 2023-08-28T16:24:07
| 2019-08-01T17:33:25
|
R
|
UTF-8
|
R
| false
| true
| 520
|
rd
|
tar_runtime_object.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/class_runtime.R
\name{tar_runtime_object}
\alias{tar_runtime_object}
\title{Get the \code{tar_runtime} object.}
\usage{
tar_runtime_object()
}
\value{
The internal \code{tar_runtime} object of class \code{"tar_runtime"}.
}
\description{
For internal purposes only. Not a user-side function.
Do not invoke directly.
}
\details{
Manages internal settings
that targets need while they run.
}
\examples{
tar_runtime_object()
}
\keyword{internal}
|
b20cbd3c939c844bb6b351dd58aa418beb2d9d74
|
06e566344a44b240870cd5455798634baf8a4fcd
|
/go_hyperG_entrez.R
|
e9f83faa73844fe915579e5df75ed320afa58b2d
|
[] |
no_license
|
raagbtitl/bio_genomics
|
460895e71b03c1524cb1ac4ba12600806537fd55
|
9d6ff08a865829ab429b687e5cc596bf76fd416a
|
refs/heads/master
| 2020-05-29T08:49:45.582403
| 2018-10-13T04:04:30
| 2018-10-13T04:04:30
| 68,956,572
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,730
|
r
|
go_hyperG_entrez.R
|
library(GOstats)
library( "org.Hs.eg.db" )
# Map identifiers between key types of an AnnotationDb object (e.g. gene
# SYMBOL -> ENTREZID).  With ifMultiple="putNA", ids that map to several
# targets are dropped (and so come back NA); with "useFirst", the first match
# returned by select() is kept (via the match() below).
# NOTE(review): `cols=` is the legacy argument name for AnnotationDbi::select;
# newer releases use `columns=` — confirm against the installed version.
convertIDs <- function( ids, fromKey, toKey, db, ifMultiple=c( "putNA", "useFirst" ) ) {
stopifnot( inherits( db, "AnnotationDb" ) )
ifMultiple <- match.arg( ifMultiple )
suppressWarnings( selRes <- AnnotationDbi::select(
db, keys=ids, keytype=fromKey, cols=c(fromKey,toKey) ) )
if( ifMultiple == "putNA" ) {
# Drop every id that matched more than one row; match() below then
# yields NA for those ids.
duplicatedIds <- selRes[ duplicated( selRes[,1] ), 1 ]
selRes <- selRes[ ! selRes[,1] %in% duplicatedIds, ] }
# Reorder results to the input order; unmatched ids become NA.
return( selRes[ match( ids, selRes[,1] ), 2 ] )
}
# GO (Biological Process) over-representation analysis of genes listed in a
# differential-expression CSV, using GOstats' hypergeometric test.
# NOTE(review): hard-coded absolute input path — parameterize before reuse.
dt<- read.csv("/home/rahul/ITLIVER/MACE/all_comparisons_MACE_ITliver.csv", header=T,sep="\t")
# Convert gene symbols to Entrez ids (NA for unmapped/ambiguous symbols).
a<-as.vector(dt$gene);dt$entrez <- convertIDs( a, "SYMBOL", "ENTREZID", org.Hs.eg.db )
selected <- unique(dt$entrez)
# Over-representation test: BP ontology, p < 0.05, unconditional test.
# NOTE(review): no gene universe is supplied (the universe= line is commented
# out), so the annotation package's default universe is used.
param <- new("GOHyperGParams", geneIds=selected,
#universe=universe,
annotation="org.Hs.eg.db", ontology="BP",pvalueCutoff=0.05,
conditional=FALSE, testDirection="over")
hyp <- hyperGTest(param)
sumTable <- summary(hyp)
# subset the output table to get the columns of interest
# (GO ID, GO description, p-value)
out <- subset(sumTable, select=c(1, 7, 2))
# retrieve input genes associated with each GO identifier
# use the org.Hs.eg data mapping to get GO terms for each ID
goMaps <- lapply(out$GOBPID, function(x) unlist(mget(x, org.Hs.egGO2ALLEGS)))
# subset the selected genes based on those in the mappings
goSelected <- lapply(goMaps, function(x) selected[selected %in% x])
# join together with a semicolon to make up the last column
out$inGenes <- unlist(lapply(goSelected, function(x) paste(x, collapse=";")))
# write the final data table as a tab separated file
write.table(out, file="go_results.csv", sep="\t", row.names=FALSE)
|
ecf3f602ad962399b9947a9049883b5999606f6c
|
450bd26d71984a23ff5412cf17026fa13cc33098
|
/R/mod_plot_settings.R
|
e4b465c0e6faf34d7565a5c1a4a4a65479b83dd8
|
[
"MIT"
] |
permissive
|
cparsania/funomeView
|
e18e8424bd59fac63b6140b7695b5792f8fca6e5
|
dca6c8baab6018c4ee1bd5a29485c85f64579c9a
|
refs/heads/master
| 2021-01-04T07:00:55.222815
| 2020-02-24T13:27:57
| 2020-02-24T13:27:57
| 240,440,851
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,227
|
r
|
mod_plot_settings.R
|
# Module UI
#' @title mod_plot_settings_ui and mod_plot_settings_server
#' @description A shiny Module.
#'
#' @param id shiny id
#' @param input internal
#' @param output internal
#' @param session internal
#'
#' @rdname mod_plot_settings
#'
#' @keywords internal
#' @export
#' @importFrom shiny NS tagList
mod_plot_settings_ui <- function(id){
  ns <- NS(id)
  # Four collapsible boxes of plot controls: labels/title, theme/legend,
  # advanced options, and export.
  tagList(
    shinydashboardPlus::boxPad(
      color = "gray" ,
      shinydashboardPlus::boxPlus(width = '75%',
        title = "Labels and Title",
        status = "danger", closable = FALSE,
        collapsible = T,
        shinyWidgets::sliderTextInput(inputId = ns("plot1_slider1"),
          label = "Slider 1 value 1:",
          choices = seq(from = 10, to = 1, by = -1),
          grid = TRUE
        )
      ),
      shinydashboardPlus::boxPlus(width = '75%',
        title = "Theme and Legend",
        status = "danger",
        closable = FALSE,
        collapsible = T,
        collapsed = T,
        shinyWidgets::sliderTextInput(inputId = ns("plot1_slider2"),
          label = "Slider 2 value 2:",
          choices = seq(from = 10, to = 1, by = -1),
          grid = TRUE)
      ),
      shinydashboardPlus::boxPlus(width = '75%',
        title = "Advance option" ,
        status = "danger",
        closable = FALSE,
        collapsible = T,
        collapsed = T,
        shinyWidgets::pickerInput(inputId = ns("plot1_picker1") ,
          label = "picker1", choices = LETTERS[1:5])
      ),
      shinydashboardPlus::boxPlus(width = '75%',
        closable = FALSE,
        collapsible = T,
        title = "Export" ,
        status = "danger",
        collapsed = T,
        # Bug fix: this picker previously reused inputId ns("plot1_picker1"),
        # duplicating the "Advance option" picker's ID — duplicate input IDs
        # are invalid in Shiny and only one binding would report a value.
        # Also removed the stray trailing comma after `choices`, which passed
        # an empty argument slot to pickerInput().
        shinyWidgets::pickerInput(inputId = ns("plot1_picker2") ,
          label = "picker1", choices = LETTERS[1:5])
      )
    )
  )
}
# Module Server
#' @rdname mod_plot_settings
#' @export
#' @keywords internal
# Module server: currently a stub — it captures the namespace function but
# wires up no outputs or observers yet.
mod_plot_settings_server <- function(input, output, session){
ns <- session$ns
}
## To be copied in the UI
# mod_plot_settings_ui("plot_settings_ui_1")
## To be copied in the server
# callModule(mod_plot_settings_server, "plot_settings_ui_1")
|
585e9a0fac0efa7a197a584e1b4ef457cb0886ca
|
ee531f00ad32845f124142f1c60ebc4b48846cf2
|
/cdfplots.R
|
bae5b4d4d8c3722909f779d1992738b2fd9e9dc3
|
[
"CC0-1.0"
] |
permissive
|
kandarpRJ/epi_psych_metaanalysis
|
f27ff8b7d3df1a1531e2b79b128fcb438ae0546a
|
2218b676b78888b0499b17f2b1273e873eb1446d
|
refs/heads/main
| 2023-02-25T03:46:24.824405
| 2021-02-05T08:10:36
| 2021-02-05T08:10:36
| 333,604,718
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,961
|
r
|
cdfplots.R
|
library(plyr)
library(dplyr)
library(ggplot2)
library(cowplot)
## Inputs: per-gene m6a site counts (two columns: gene symbol, count).
## NOTE(review): "cblm"/"crbm" presumably cerebellum/cerebrum per the file
## names — confirm; note the calls below use `crbm` for nearly every dataset.
cblm<-"human_cblm_m6a_containing_genes_with_m6a_counts.txt"
crbm<-"human_crbm_m6a_containing_genes_with_m6a_counts.txt"
## Functions
# Build a long-format data frame of DEG log fold changes binned by the
# number of m6a sites per gene (0, 1, 2-5, >5), with the empirical CDF of
# lfc computed within each bin.
#   m6agl: two-column table (V1 = gene symbol, V2 = site count per gene)
#   fn:    DEG table with Gene.symbol, logFC and P.Value columns (tab-sep)
#   grp:   label stored in the `grp` column, used for facetting in pl()/pl1()
makeplot<-function (m6agl, fn, grp) {
m6a_gl<-read.table(m6agl, header = FALSE, row.names = NULL)
degl<-read.table(fn, header = TRUE, row.names = NULL, sep = "\t")
# Keep nominally significant genes only.
degl<-degl[degl$P.Value<0.05,]
# Bin genes by their m6a count from the reference list.
m6a_1<-degl[degl$Gene.symbol %in% m6a_gl[m6a_gl$V2==1,1],]
m6a_2_5<-degl[degl$Gene.symbol %in% m6a_gl[m6a_gl$V2>1 & m6a_gl$V2<=5,1],]
m6a_6<-degl[degl$Gene.symbol %in% m6a_gl[m6a_gl$V2>5,1],]
`%notin%` <- Negate(`%in%`)
# Genes absent from the m6a list form the zero-count bin.
m6a_0<-degl[degl$Gene.symbol %notin% m6a_gl$V1,]
newdf<-data.frame(m6a_counts = "m6a_0", lfc = m6a_0$logFC, grp = grp)
newdf<-rbind(newdf, data.frame(m6a_counts = "m6a_1", lfc = m6a_1$logFC, grp = grp))
newdf<-rbind(newdf, data.frame(m6a_counts = "m6a_2_5", lfc = m6a_2_5$logFC, grp = grp))
newdf<-rbind(newdf, data.frame(m6a_counts = "m6a_6", lfc = m6a_6$logFC, grp = grp))
# Per-bin empirical CDF of lfc; the value of this assignment is the
# function's (implicit) return value.
newdf <- ddply(newdf, .(m6a_counts), transform, ecd = ecdf(lfc)(lfc))
}
# ECDF plot of log2 fold changes, one line per m6a-count bin, facetted by
# the `grp` label, with the x axis limited to [-1, 1].
pl<-function (p) {
  cdf_plot <- ggplot(p, aes(x = lfc, y = ecd,
                            group = m6a_counts, colour = m6a_counts)) +
    geom_line() +
    geom_vline(xintercept = 0, color = "grey", alpha = 0.5, linetype = "dashed") +
    geom_hline(yintercept = 0.5, color = "grey", alpha = 0.5, linetype = "dashed") +
    facet_wrap(~grp, ncol = 3) +
    xlab("Log2FoldChange") +
    ylab("Cumulative Distribution") +
    xlim(-1, 1) +
    ylim(0, 1) +
    theme_classic()
  cdf_plot
}
# Same plot as pl() but with a wider x range of [-2, 2].
pl1<-function (p) {
  cdf_plot <- ggplot(p, aes(x = lfc, y = ecd,
                            group = m6a_counts, colour = m6a_counts)) +
    geom_line() +
    geom_vline(xintercept = 0, color = "grey", alpha = 0.5, linetype = "dashed") +
    geom_hline(yintercept = 0.5, color = "grey", alpha = 0.5, linetype = "dashed") +
    facet_wrap(~grp, ncol = 3) +
    xlab("Log2FoldChange") +
    ylab("Cumulative Distribution") +
    xlim(-2, 2) +
    ylim(0, 1) +
    theme_classic()
  cdf_plot
}
#######################################################################################################################################
## Make plots for all DEGs
## Function takes arguments such as file with gene name and corresponding m6a sites, DEG list and group/sample/condition name as input
p1<-makeplot(crbm, "GSE54570.txt", "DLPFC_BA9")
p2<-makeplot(crbm, "GSE54568.txt", "DLPFC_BA9_F")
p3<-makeplot(crbm, "GSE54567.txt", "DLPFC_BA9_M")
p4<-makeplot(crbm, "GSE87610_DLPFC_MDD_L3.txt", "DLPFC_L3")
p5<-makeplot(crbm, "GSE87610_DLPFC_MDD_L5.txt", "DLPFC_L5")
p6<-makeplot(crbm, "GSE54571.txt", "ACC_BA25_F")
p7<-makeplot(crbm, "GSE54572.txt", "ACC_BA25_M")
p8<-makeplot(crbm, "GSE92538_DLPFC_male", "DLPFC_M")
p9<-makeplot(crbm, "GSE92538_DLPFC_female", "DLPFC_F")
p10<-makeplot(crbm, "GSE17440_HIV_MDD.txt", "FC_HIV_M")
p11<-makeplot(crbm, "GSE54575.txt", "OVPFC_BA47")
p12<-makeplot(crbm, "GSE35977_CORT.txt", "PariCORT_Mix")
p13<-makeplot(crbm, "GSE53987_PFC_MDD.txt", "PFC_BA46")
p14<-makeplot(crbm, "GSE12654.txt", "PFC_BA10")
# NOTE(review): p15 and p17 (and p16/p18) reuse the same grp labels for
# different datasets — confirm the labels are intended to repeat.
p15<-makeplot(crbm, "biorxiv_DLPFC_MDD.csv", "DLPFC_Jaffe_et.al.")
p16<-makeplot(crbm, "biorxiv_ACC_MDD.csv", "ACC_Jaffe_et.al.")
p17<-makeplot(crbm, "biorxiv_DLPFC_PTSD.csv", "DLPFC_Jaffe_et.al.")
p18<-makeplot(crbm, "biorxiv_ACC_PTSD.csv", "ACC_Jaffe_at.al.")
p19<-makeplot(cblm, "GSE35974_CBLM.txt", "CBLM_Mix")
## MDD CDF plot
# Two multi-panel grids: pg1 (main figure) and pg2 (supplement); each panel
# suppresses its own legend and a single shared legend is added at the end.
pg1<-plot_grid(pl(p2) + theme(legend.position="none"),
pl(p3) + theme(legend.position="none"),
pl(p6) + theme(legend.position="none"),
pl(p7) + theme(legend.position="none"),
pl(p9) + theme(legend.position="none"),
pl(p8) + theme(legend.position="none"),
pl(p15) + theme(legend.position="none"),
pl(p16) + theme(legend.position="none"),
pl(p4) + theme(legend.position="none"),
pl(p5) + theme(legend.position="none"), ncol = 2, labels = "AUTO", label_size = 12 )
pg2<-plot_grid(pl(p11) + theme(legend.position="none"),
pl(p13) + theme(legend.position="none"),
pl(p14) + theme(legend.position="none"),
pl1(p10) + theme(legend.position="none"),
pl(p19) + theme(legend.position="none"), ncol = 2, labels = "AUTO", label_size = 12 )
legend <- get_legend(
# create some space to the left of the legend
pl(p2) + theme(legend.box.margin = margin(0, 0, 0, 12))
)
png("figure5.png", res = 300, height = 6000, width = 3000)
plot_grid(pg1, legend, rel_widths = c(3, .4))
dev.off()
png("figureS1.png", res = 300, height = 6000, width = 3000)
plot_grid(pg2, legend, rel_widths = c(3, .4))
dev.off()
## PTSD CDF plot
# Two-panel grid (DLPFC and ACC PTSD datasets) sharing a single legend.
pgptsd<-plot_grid(pl(p17) + theme(legend.position="none"),
pl(p18) + theme(legend.position="none"), ncol = 2, labels = "AUTO", label_size = 12 )
legendptsd <- get_legend(
pl(p17) + theme(legend.box.margin = margin(0, 0, 0, 12))
)
png("figure7.png", res = 300, height = 3000, width = 6000)
# Bug fix: this previously passed `legend` (the MDD legend built earlier)
# instead of `legendptsd` constructed just above, leaving legendptsd unused.
plot_grid(pgptsd, legendptsd, rel_widths = c(3, .4))
dev.off()
|
4960d517ba31feb49c56d318cee0a00bf02724fe
|
f7aee635c39f4ab9cb7922263150e793e5361ab2
|
/JSON/Google-Geocode-Example-JSON.R
|
177428466d091830d6fab863b8e1af17153307f8
|
[] |
no_license
|
EarlGlynn/google-geocode-json-xml
|
9f3dac1081b4010e61345bc412371716008f971b
|
e7dce0c2343886eccd1ccb3b374a1b2fad02cfa7
|
refs/heads/master
| 2021-01-23T13:53:07.688329
| 2015-01-06T16:06:37
| 2015-01-06T16:06:37
| 28,834,168
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 481
|
r
|
Google-Geocode-Example-JSON.R
|
# JSON Google Geocoding tests
# efg, UMKC Center for Health Insights, 2014-01-05
# Exercises get.geocode() (defined in Google-Geocode-JSON.R) once without and
# once with a ZIP code, teeing all console output to a log file via sink().
# NOTE(review): setwd() to an absolute path makes this machine-specific.
setwd( "C:/2015/R/Geocoding-Google-JSON-XML/JSON/")
sink("Google-Geocode-Example-JSON.txt", split=TRUE)
source("Google-Geocode-JSON.R")
# No ZIP
d <- get.geocode("IDNumber", "2411 Holmes St", "Kansas City", "MO", "")
d
# With ZIP
id <- "IDNumber"
street <- "2411 Holmes St"
city <- "Kansas City"
state <- "MO"
zip <- "64108"
d <- get.geocode(id, street, city, state, zip)
t(d)
# Stop teeing output to the log file.
sink()
|
6434595722e18c3ea0aac3debceda937293ecfb4
|
10b73f8b17fbe3034da332815b08d816ba5aa60e
|
/server.R
|
61b500211fd9001a0dacb00288db1037080f17b9
|
[] |
no_license
|
calefin/ShinyApp_Cell_count
|
d155bc0612bd6db181b2efd78986958740e37a2b
|
9da3be18f818477f8ff6fa91d3cc18750e1a7ac8
|
refs/heads/master
| 2021-01-22T02:53:55.443181
| 2015-08-20T13:57:52
| 2015-08-20T13:57:52
| 41,097,386
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 279
|
r
|
server.R
|
# Cell-count calculator server.
# Fix: the expression (input$num/input$num1)*input$num2*10000 was written out
# twice — once for `valor` and again inside `count` — so it is factored into a
# single reactive expression evaluated once per input change.
# NOTE(review): the *10000 factor looks like a counting-chamber conversion —
# confirm the intended units with the UI before renaming anything.
shinyServer(
  function(input, output) {
    concentration <- reactive((input$num / input$num1) * input$num2 * 10000)
    output$valor <- renderText(concentration())
    output$count <- renderText((input$num3 * 1000) / concentration())
  }
)
|
6ba9cb9c51c0424a31c2eef2adc67b1a3f80ff40
|
1d02eb4f73d255f39ef1ae97945e81259af4060b
|
/R/BALESTRIVOL2CAP7EXER13.R
|
5073a52ec87b1eb7f84cc08658b222a5d8bce882
|
[] |
no_license
|
alexandrenovo/stats4eb
|
79416a0b8e8750c559f15ca6f3251c76048e9c88
|
ba3e9036d2449a89ef75bc6fdbb41858ebe9e981
|
refs/heads/main
| 2023-05-09T16:52:07.452491
| 2021-06-15T22:41:00
| 2021-06-15T22:41:00
| 368,988,889
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 287
|
r
|
BALESTRIVOL2CAP7EXER13.R
|
#' BALESTRIVOL2CAP7EXER13
#'
#' Ages of first-year mathematics students in 2016
#' (original Portuguese: "IDADES DOS ALUNOS DO 1 ANO EM MATEMATICA EM 2016").
#'
#' @format A data frame with 7 rows and 2 variables:
#' \describe{
#'   \item{IDADE}{students' ages, in years}
#'   \item{FREQUENCIA}{number of students with that age}
#' }
"BALESTRIVOL2CAP7EXER13"
|
bf7e59083cbedf4dc49a2366ea1b8f6df31118f7
|
15c9ccaadce922531f241d57c29d60d96d47286a
|
/man/trunc.Rd
|
9a7329e525a96ce3841736f1998849a22ffb3271
|
[] |
no_license
|
walkabillylab/activityCounts
|
9b15db61216ca2ee35c5279ef1713dac9de3074b
|
12e9d782c51eef1f57cb4e21caccbe7bda1b96e1
|
refs/heads/master
| 2023-06-02T00:04:57.064561
| 2023-05-11T18:40:25
| 2023-05-11T18:40:25
| 187,262,813
| 5
| 2
| null | 2023-05-11T18:40:27
| 2019-05-17T18:21:36
|
R
|
UTF-8
|
R
| false
| true
| 451
|
rd
|
trunc.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/trunc.R
\name{trunc}
\alias{trunc}
\title{trunc}
\usage{
trunc(data, min_value)
}
\arguments{
\item{data}{The input value, which will be altered if it is less than the threshold}
\item{min_value}{the threshold below which the input will be set to zero}
}
\value{
returns zero if the "data" is less than the "min_value", otherwise returns the "data"
}
\description{
trunc
}
|
169a22eae20cf8920cfbe19916f9bc8bfb78d3dd
|
7441a5909020383eb5b328439c2025367c9375ae
|
/R/dualpathFusedL1X.R
|
a33897d79150c0db1c1fc9f24a1282531b7f2c12
|
[] |
no_license
|
cran/genlasso
|
1c306ff866222fd38561173aa936de8fe64e4d47
|
c2367f08977cfcc615f3e0e33ad885ab3d72a94e
|
refs/heads/master
| 2022-08-29T09:01:57.881638
| 2022-08-22T07:10:10
| 2022-08-22T07:10:10
| 17,696,330
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,174
|
r
|
dualpathFusedL1X.R
|
# We compute a solution path of the sparse fused lasso dual problem:
#
# \hat{u}(\lambda) =
# \argmin_u \|y - (X^+)^T D^T u\|_2^2 \rm{s.t.} \|\u\|_\infty \leq \lambda
#
# where D is (a multiple of) incidence matrix of a given graph, row-
# binded with (a multiple of) the identity matrix, and X is a full column
# rank predictor matrix, X^+ being its pseudoinverse.
#
# Fortuitously, we never have to fully invert X (i.e. compute its pseudo-
# inverse).
#
# Note: the df estimates at each lambda_k can be thought of as the df
# for all solutions corresponding to lambda in (lambda_k,lambda_{k-1}),
# the open interval to the *right* of the current lambda_k.
dualpathFusedL1X <- function(y, X, D, D0, gamma, approx=FALSE, maxsteps=2000,
minlam=0, rtol=1e-7, btol=1e-7, eps=1e-4,
verbose=FALSE, object=NULL) {
# If we are starting a new path
if (is.null(object)) {
m = nrow(D)
p = ncol(D)
n = length(y)
numedges = m-p
numnodes = p
# Modify y,X,n in the case of a ridge penalty, but
# keep the originals
y0 = y
X0 = X
if (eps>0) {
y = c(y,rep(0,p))
X = rbind(X,diag(sqrt(eps),p))
n = n+p
}
# Find the minimum 2-norm solution, using some linear algebra
# tricks and a little bit of graph theory
L = abs(crossprod(D0))
diag(L) = 0
gr = graph.adjacency(L,mode="upper") # Underlying graph
cl = clusters(gr)
q = cl$no # Number of clusters
i = cl$membership # Cluster membership
# First we project y onto the row space of D*X^+
xy = t(X)%*%y
g = xy
# Here we perform our usual fused lasso solve but
# with g in place of y
x = numeric(p)
# For efficiency, don't loop over singletons
tab = tabulate(i)
oo = which(tab[i]==1)
if (length(oo)>0) {
x[oo] = g[oo]/(diag(L)[oo])
}
# Now all groups with at least two elements
oi = order(i)
cs = cumsum(tab)
grps = which(tab>1)
for (j in grps) {
oo = oi[Seq(cs[j]-tab[j]+1,cs[j])]
Lj = crossprod(Matrix(D[,oo],sparse=TRUE))
x[oo] = as.numeric(solve(Lj,g[oo]))
}
uhat = as.numeric(D%*%x) # Dual solution
betahat = numeric(p) # Primal solution
ihit = which.max(abs(uhat)) # Hitting coordinate
hit = abs(uhat[ihit]) # Critical lambda
s = sign(uhat[ihit]) # Sign
if (verbose) {
cat(sprintf("1. lambda=%.3f, adding coordinate %i, |B|=%i...",
hit,ihit,1))
}
# Now iteratively find the new dual solution, and
# the next critical point
# Things to keep track of, and return at the end
buf = min(maxsteps,1000)
lams = numeric(buf) # Critical lambdas
h = logical(buf) # Hit or leave?
df = numeric(buf) # Degrees of freedom
lams[1] = hit
h[1] = TRUE
df[1] = 0
u = matrix(0,m,buf) # Dual solutions
beta = matrix(0,p,buf) # Primal solutions
u[,1] = uhat
beta[,1] = betahat
# Special interior set over nodes
I0 = rep(TRUE,numnodes)
# Update the graph if we need to, otherwise
# update the special interior set
if (ihit <= numedges) {
ed = which(D[ihit,]!=0)
gr[ed[1],ed[2]] = 0 # Delete edge
newcl = subcomponent(gr,ed[1]) # New cluster
oldcl = which(i==i[ed[1]]) # Old cluster
# If these two clusters aren't the same, update
# the memberships
if (length(newcl)!=length(oldcl) || any(sort(newcl)!=sort(oldcl))) {
i[newcl] = q+1
q = q+1
}
}
else {
I0[ihit-numedges] = FALSE
}
# Other things to keep track of, but not return
r = 1 # Size of boundary set
B = ihit # Boundary set
I = Seq(1,m)[-ihit] # Interior set
D1 = D[-ihit,,drop=FALSE] # Matrix D[I,]
D2 = D[ihit,,drop=FALSE] # Matrix D[B,]
k = 2 # What step are we at?
}
# If iterating already started path
else {
# Grab variables from outer object
lambda = NULL
for (j in 1:length(object)) {
if (names(object)[j] != "pathobjs") {
assign(names(object)[j], object[[j]])
}
}
# Trick: save y,X from outer object
y0 = y
X0 = X
# Grab variables from inner object
for (j in 1:length(object$pathobjs)) {
assign(names(object$pathobjs)[j], object$pathobjs[[j]])
}
lams = lambda
# In the case of a ridge penalty, modify X
if (eps>0) X = rbind(X,diag(sqrt(eps),p))
}
tryCatch({
while (k<=maxsteps && lams[k-1]>=minlam) {
##########
# Check if we've reached the end of the buffer
if (k > length(lams)) {
buf = length(lams)
lams = c(lams,numeric(buf))
h = c(h,logical(buf))
df = c(df,numeric(buf))
u = cbind(u,matrix(0,m,buf))
beta = cbind(beta,matrix(0,p,buf))
}
##########
Ds = as.numeric(t(D2)%*%s)
# Precomputation for the hitting times: first we project
# y and Ds onto the row space of D1*X^+
A = matrix(0,n,q)
z = matrix(0,q,2)
nz = rep(FALSE,q)
# For efficiency, don't loop over singletons
tab = tabulate(i)
oo = which(tab[i]==1)
oo2 = oo[!I0[oo]]
if (length(oo2)>0) {
j = i[oo2]
A[,j] = X[,oo2,drop=FALSE]
z[j,1] = xy[oo2]
z[j,2] = Ds[oo2]
nz[j] = TRUE
}
# Now consider all groups with at least two elements
grps = which(tab>1)
for (j in grps) {
oo = which(i==j)
if (all(!I0[oo])) {
A[,j] = rowMeans(X[,oo,drop=FALSE])
z[j,1] = mean(xy[oo])
z[j,2] = mean(Ds[oo])
nz[j] = TRUE
}
}
nzq = sum(nz)
e = matrix(0,q,2)
if (nzq>0) {
R = qr.R(qr(A[,nz]))
e[nz,] = backsolve(R,forwardsolve(R,z[nz,,drop=FALSE],upper.tri=TRUE,transpose=TRUE))
# Note: using a QR here is preferable than simply calling
# e[nz,] = solve(crossprod(A[,nz]),z[nz,,drop=FALSE]), for
# numerical stablity. Plus, it's not really any slower
}
ea = e[,1]
eb = e[,2]
ga = xy-t(X)%*%(A%*%ea)
gb = Ds-t(X)%*%(A%*%eb)
# If the interior is empty, then nothing will hit
if (r==m) {
fa = ea[i]
fb = eb[i]
hit = 0
}
# Otherwise, find the next hitting time
else {
# Here we perform our usual fused lasso solve but
# with ga in place of y and gb in place of Ds
xa = xb = numeric(p)
fa = fb = numeric(p)
# For efficiency, don't loop over singletons
oo = which(tab[i]==1)
fa[oo] = ea[i][oo]
fb[oo] = eb[i][oo]
oo1 = oo[I0[oo]]
if (length(oo1)>0) {
Ldiag = diag(crossprod(Matrix(D1[,oo1],sparse=TRUE)))
xa[oo1] = ga[oo1]/Ldiag
xb[oo1] = gb[oo1]/Ldiag
}
# Now all groups with at least two elements
oi = order(i)
cs = cumsum(tab)
grps = which(tab>1)
for (j in grps) {
oo = oi[Seq(cs[j]-tab[j]+1,cs[j])]
fa[oo] = ea[j]/length(oo)
fb[oo] = eb[j]/length(oo)
gaj = ga[oo]
gbj = gb[oo]
if (any(I0[oo])) {
Lj = crossprod(Matrix(D1[,oo],sparse=TRUE))
xa[oo] = as.numeric(solve(Lj,gaj))
xb[oo] = as.numeric(solve(Lj,gbj))
}
else {
Lj = crossprod(Matrix(D1[,oo[-1]],sparse=TRUE))
xa[oo][-1] = as.numeric(solve(Lj,(gaj-mean(gaj))[-1]))
xb[oo][-1] = as.numeric(solve(Lj,(gbj-mean(gbj))[-1]))
}
}
a = as.numeric(D1%*%xa)
b = as.numeric(D1%*%xb)
shits = Sign(a)
hits = a/(b+shits)
# Make sure none of the hitting times are larger
# than the current lambda (precision issue)
hits[hits>lams[k-1]+btol] = 0
hits[hits>lams[k-1]] = lams[k-1]
ihit = which.max(hits)
hit = hits[ihit]
shit = shits[ihit]
}
##########
# If nothing is on the boundary, then nothing will leave
# Also, skip this if we are in "approx" mode
if (r==0 || approx) {
leave = 0
}
# Otherwise, find the next leaving time
else {
c = as.numeric(s*(D2%*%fa))
d = as.numeric(s*(D2%*%fb))
leaves = c/d
# c must be negative
leaves[c>=0] = 0
# Make sure none of the leaving times are larger
# than the current lambda (precision issue)
leaves[leaves>lams[k-1]+btol] = 0
leaves[leaves>lams[k-1]] = lams[k-1]
ileave = which.max(leaves)
leave = leaves[ileave]
}
##########
# Stop if the next critical point is negative
if (hit<=0 && leave<=0) break
# If a hitting time comes next
if (hit > leave) {
# Record the critical lambda and properties
lams[k] = hit
h[k] = TRUE
df[k] = nzq
uhat = numeric(m)
uhat[B] = hit*s
uhat[I] = a-hit*b
betahat = fa-hit*fb
# Update our graph if we need to, otherwise
# update the special interior set
if (I[ihit] <= numedges) {
ed = which(D1[ihit,]!=0)
gr[ed[1],ed[2]] = 0 # Delete edge
newcl = subcomponent(gr,ed[1]) # New cluster
oldcl = which(i==i[ed[1]]) # Old cluster
# If these two clusters aren't the same, update
# the memberships
if (length(newcl)!=length(oldcl) || any(sort(newcl)!=sort(oldcl))) {
i[newcl] = q+1
q = q+1
}
}
else {
I0[I[ihit]-numedges] = FALSE
}
# Update all other variables
r = r+1
B = c(B,I[ihit])
I = I[-ihit]
s = c(s,shit)
D2 = rbind(D2,D1[ihit,])
D1 = D1[-ihit,,drop=FALSE]
if (verbose) {
cat(sprintf("\n%i. lambda=%.3f, adding coordinate %i, |B|=%i...",
k,hit,B[r],r))
}
}
# Otherwise a leaving time comes next
else {
# Record the critical lambda and properties
lams[k] = leave
h[k] = FALSE
df[k] = nzq
uhat = numeric(m)
uhat[B] = leave*s
uhat[I] = a-leave*b
betahat = fa-leave*fb
# Update our graph if we need to, otherwise
# update the special interior set
if (B[ileave] <= numedges) {
ed = which(D2[ileave,]!=0)
gr[ed[1],ed[2]] = 1 # Add edge
newcl = subcomponent(gr,ed[1]) # New cluster
oldcl = which(i==i[ed[1]]) # Old cluster
# If these two clusters aren't the same, update
# the memberships
if (length(newcl)!=length(oldcl) || !all(sort(newcl)==sort(oldcl))) {
newno = i[ed[2]]
oldno = i[ed[1]]
i[oldcl] = newno
i[i>oldno] = i[i>oldno]-1
q = q-1
}
}
else {
I0[B[ileave]-numedges] = TRUE
}
# Update all other variables
r = r-1
I = c(I,B[ileave])
B = B[-ileave]
s = s[-ileave]
D1 = rbind(D1,D2[ileave,])
D2 = D2[-ileave,,drop=FALSE]
if (verbose) {
cat(sprintf("\n%i. lambda=%.3f, deleting coordinate %i, |B|=%i...",
k,leave,I[m-r],r))
}
}
u[,k] = uhat
beta[,k] = betahat
# Step counter
k = k+1
}
}, error = function(err) {
err$message = paste(err$message,"\n(Path computation has been terminated;",
" partial path is being returned.)",sep="")
warning(err)})
# Trim
lams = lams[Seq(1,k-1)]
h = h[Seq(1,k-1)]
df = df[Seq(1,k-1)]
u = u[,Seq(1,k-1),drop=FALSE]
beta = beta[,Seq(1,k-1),drop=FALSE]
# If we reached the maximum number of steps
if (k>maxsteps) {
if (verbose) {
cat(sprintf("\nReached the max number of steps (%i),",maxsteps))
cat(" skipping the rest of the path.")
}
completepath = FALSE
}
# If we reached the minimum lambda
else if (lams[k-1]<minlam) {
if (verbose) {
cat(sprintf("\nReached the min lambda (%.3f),",minlam))
cat(" skipping the rest of the path.")
}
completepath = FALSE
}
# Otherwise, note that we completed the path
else completepath = TRUE
# The least squares solution (lambda=0)
bls = NULL
if (completepath) bls = fa
if (verbose) cat("\n")
# Save needed elements for continuing the path
pathobjs = list(type="fused.l1.x" ,r=r, B=B, I=I, Q1=NA, approx=approx,
Q2=NA, k=k, df=df, D1=D1, D2=D2, Ds=Ds, ihit=ihit, m=m, n=n, p=p, q=q,
h=h, q0=NA, rtol=rtol, btol=btol, eps=eps, s=s, y=y, gr=gr, i=i,
numedges=numedges, I0=I0, xy=xy)
colnames(u) = as.character(round(lams,3))
colnames(beta) = as.character(round(lams,3))
return(list(lambda=lams,beta=beta,fit=X0%*%beta,u=u,hit=h,df=df,y=y0,X=X0,
completepath=completepath,bls=bls,gamma=gamma,pathobjs=pathobjs))
}
|
d7c08965ba79e6be4e5746b3aa6b75ee43227cb5
|
ec16d99b02295eb346af885cfe3cd21c9a827f01
|
/plot4.R
|
144fcb63afbaa9bbf52ec676657833aa1018affb
|
[] |
no_license
|
lindalest/ExData_Plotting1
|
c96d570b9f33adf711d5ade5f53aaae4c7fc4cec
|
c36d48e03a04f555d31a1324ede73155a034f049
|
refs/heads/master
| 2021-01-17T20:26:09.885428
| 2015-04-11T21:03:07
| 2015-04-11T21:03:07
| 33,792,332
| 0
| 0
| null | 2015-04-11T20:58:56
| 2015-04-11T20:58:55
| null |
UTF-8
|
R
| false
| false
| 1,286
|
r
|
plot4.R
|
library(dplyr)
library(lubridate)
data <- read.csv("household_power_consumption.txt", header = T,
sep =";", stringsAsFactors = F, na.strings= "?", dec=".")
glimpse(data)
spec_data<-filter(data, Date == "1/2/2007" | Date == "2/2/2007")
spec_data$ssd_date <- paste(spec_data$Date, spec_data$Time, sep=" ")
spec_data$ss_data <- strptime(spec_data$ssd_date, "%d/%m/%Y %H:%M:%S")
glimpse(spec_data)
png(filename="plot4.png", width=480, height=480)
par(mfrow=c(2,2))
plot(spec_data$ss_data, spec_data$Global_active_power,
xlab=" ", ylab="Global Active Power",
type="l")
plot(spec_data$ss_data, spec_data$Voltage, type = "l",
ylab = "Voltage", xlab= "datetime")
plot(spec_data$ss_data, spec_data$Sub_metering_1, type ="l",
ylab= "Energy sub metering", xlab=" ")
lines(spec_data$ss_data, spec_data$Sub_metering_2, col = "red")
lines(spec_data$ss_data, spec_data$Sub_metering_3, col = "blue")
legend ("topright", lty=1, col = c("black", "red", "blue"),
c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), bty ="n")
plot(spec_data$ss_data, spec_data$Global_reactive_power,
type="l", xlab="datetime", ylab="Global_reactive_power")
dev.off()
|
4e5ffcd5ab73e2193a12a233f733bfee7ca8bc0a
|
fa44e7a7a231c54078a56ead85da8cd26eef2e64
|
/Codici/CONSEGNA FINALE/Provincia di Venezia (caso puntuale)/Genera Triangolazione (per avere triangolazione più fitta)/Aumenta Triangolazioni.R
|
dddce3f4f945cf0c0e674522900d8c73a5805d56
|
[] |
no_license
|
GabrieleMazza/TesiDiLaurea
|
1018d2d7aeaba3894f4042488a04f8923bb0a759
|
952e34355c2e718f180c4b79b55f1e48af33c887
|
refs/heads/master
| 2021-01-23T07:21:28.876038
| 2015-04-24T13:38:10
| 2015-04-24T13:38:10
| 26,230,512
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,670
|
r
|
Aumenta Triangolazioni.R
|
# This script is used to create the riangulation
# INPUT FILES
# Boundary.txt -> Boundary of Venice Province
# CoordinateCovariate.txt -> Points of municipalities
# OUTPUT FILES
# TriangulationPoints.txt -> Spatial points of triangulation
# Triangulation.txt -> Index of TriangulationPoints for each triangle
library(RTriangle)
Boundary<-read.table(file="Boundary.txt",header=T)
CoordinateCovariate<-read.table(file="CoordinateCovariate.txt",header=T)
#Need total vectors of x and y, for triangulation
x<-c(CoordinateCovariate$Longitudine,Boundary$xBound)
y<-c(CoordinateCovariate$Latitudine,Boundary$yBound)
#Boundaries object for RTriangle
Boundaries<-NULL
for(i in (length(CoordinateCovariate$Longitudine)+1):(length(x)-1))
{
Boundaries<-rbind(Boundaries, c(i,i+1))
}
Boundaries<-rbind(Boundaries, c(length(x),length(CoordinateCovariate$Longitudine)+1))
# pslg object
pslg_obj<-pslg(cbind(x,y),S=Boundaries)
#Maximum value for area
amax=0.003
mesh<-triangulate(pslg_obj,Y=FALSE,D=TRUE,a=amax)
Triang<-mesh$T
dimnames(Triang)[[2]]<-c("Vertex1","Vertex2","Vertex3")
xnew<-mesh$P[,1]
ynew<-mesh$P[,2]
Points<-data.frame(xTriang=xnew,yTriang=ynew)
# Plot
png(filename="Triangulation.png")
plot(xnew,ynew,type="n",xlab=" ",ylab=" ",main=paste("Triangulation (",length(xnew), " points, ",dim(Triang)[1]," triangles)",sep=""))
for (ne in 1:dim(Triang)[1])
{
polygon(c(xnew[Triang[ne,1]],xnew[Triang[ne,2]],xnew[Triang[ne,3]]),c(ynew[Triang[ne,1]],ynew[Triang[ne,2]],ynew[Triang[ne,3]]))
}
dev.off()
#Save new files
write.table(file="Triangulation.txt",Triang,row.names=F)
write.table(file="TriangulationPoints.txt",Points,row.names=F)
|
a03981b2137a984b4f39d1af0d2ff6db3f924adb
|
8085d4fd6acc4a912c669e9b3cb67545ac90fc3d
|
/Experiments/PURSUIT.r
|
3e7cacdca861b0d18e850cb10709a252f13e8581
|
[] |
no_license
|
jonscottstevens/Pursuit
|
9d523ede33045e1e496c0b187d729043da7bfa0f
|
dca3a8bf46935684ef881fbf31464c484ffc8e13
|
refs/heads/master
| 2021-01-09T20:45:06.228952
| 2016-06-02T15:22:25
| 2016-06-02T15:22:25
| 60,275,087
| 0
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,779
|
r
|
PURSUIT.r
|
#A REINFORCEMENT-BASED WORD LEARNING ALGORITHM
library(plyr)
################
#preliminaries
#MAKE SURE YOU HAVE GENERATED DATA BEFORE YOU LOAD THIS SCRIPT
#initialize data structures
words<-c()
objects<-c()
observed_words<-c()
#construct an empty matrix for storing probability scores
for(n in 1:length(uttered)){for(w in uttered[[n]]){if(is.element(w,words)==F){words<-c(words,w)}}}
for(n in 1:length(visible)){for(o in visible[[n]]){if(is.element(o,objects)==F){objects<-c(objects,o)}}}
values<-matrix(nrow=length(words),ncol=length(objects))
rownames(values)<-words
colnames(values)<-objects
values[,]<-0
################
#parameter values
gamma<-0.02
tau<-0.79
lambda<-0.001
################
#reward functions
#return the reward amount (0 or 1) for a mapping for a given instance
rewarded<-FALSE
reward<-function(w,o,x){
if(is.element(o,visible[[x]])){ #<-hypothesis confirmed?
rewarded<<-TRUE
return(1)
}
else{ #<-hypothesis refuted?
rewarded<<-FALSE
return(0)
}
}
#incrementally adjust the association score
adjust<-function(w,o,x){
if(sum(values[w,])>0){values[w,o]<<-values[w,o]+(gamma*(reward(w,o,x)-values[w,o]))}
}
################
#the word learning algorithm
#choose meaning(s) for INITIALIZATION based on mutual exclusivity
first_guess<-function(x){
scene<-visible[[x]]
if(length(scene)==1){return(scene)}
else{
maxes<-apply(values[,scene],2,max)
return(names(maxes[maxes==min(maxes)])) #<-"arg min max"
}
}
#INITIALIZATION of an association vector for a brand new word
introduce<-function(x){
for(w in uttered[[x]]){
if(is.element(w,observed_words)==F){
for(o in first_guess(x)){values[w,o]<<-gamma} #<--initialize to gamma
observed_words<<-c(observed_words,w) #<--log which words have been heard
}
}
}
#choose a single hypothesis to test against the current scene
choose_meaning<-function(w){
return(sample(names(values[w,][values[w,]==max(values[w,])]),1)) #<--pick a single mapping w/ the highest association score
}
#INITIALIZATION plus PURSUIT: introduce any new words, select a hypothesis for each word, adjust associations
analyze<-function(x){
introduce(x)
for(w in uttered[[x]]){
choice<-choose_meaning(w)
if(choice!="NULL"){adjust(w,choice,x)}
if(rewarded==FALSE){adjust(w,sample(visible[[x]],1),x)} #<--if failure has occurred, reward a visible meaning at random
}
}
#LEXICON step is unnecessary for experimental sims (subjs must make a guess even if there is no lexical entry)
################
#the experimental task
remember<-1
guess<-function(w,x){ #<-choose best hypothesis; fall back on random guess
hypothesis<-choose_meaning(w)
if(is.element(hypothesis,visible[[x]])==FALSE){hypothesis<-sample(visible[[x]],1)}
if(sample(c(FALSE,TRUE),1,prob=c(remember,1-remember))){hypothesis<-sample(visible[[x]],1)} #the subject can forget their hypothesis!
return(hypothesis)
}
evaluate<-function(w,x){ #<-did the learner guess the correct referent?
best_guess<-guess(w,x)
guess_list<<-c(guess_list,best_guess)
if(best_guess==gold(w)){return(1)}
else{return(0)}
}
#initialize results lists
guess_list<-c() #<-sequence of all guesses
results_master<-c() #<-sequence of all guess evaluations
#simulate an experimental run
simulate<-function(id){
values[,]<<-0
observed_words<<-c()
for(n in 1:length(uttered)){
analyze(n)
if(gold(uttered[[n]])!="NULL"){ #<-don't record fillers
eval<-evaluate(uttered[[n]],n)
results_master<<-c(results_master,eval)
}
print(c(id,n))
}
}
#aggregate guesses and evals from multiple experiments; guesses and evals are dumped into "guess_list" and "results_master", respectively
aggregate<-function(num){
for(n in 1:num){
values[,]<<-0
observed_words<<-c()
simulate(n)
}
}
#aggregate simulation data and write the results to a CSV
runsims<-function(numsims,memory,filename){
remember<<-memory
results_master<<-c()
guess_list<<-c()
#import generic data frame with subject/item data, repeat data "numsims" times
output<-do.call("rbind", replicate(numsims, blank_data, simplify = FALSE))
Simulation<-c()
for(n in 1:numsims){Simulation<-c(Simulation,rep(n,nrow(blank_data)))}
output$Simulation<-Simulation
#run simulations and replace "Correct" column with simulation output
aggregate(numsims)
output$Correct<-results_master
#add the recorded guesses for each instance
output$Guess<-guess_list
#add a "model" column
output$Model<-rep("PURSUIT",length(guess_list))
#write data frame as a CSV to "filename" in the working directory
write.csv(output,paste(filename,".csv",sep=""))
}
|
36b6c46ff0538df8cca0ac80c6cf4c7d2628f2e6
|
7a95abd73d1ab9826e7f2bd7762f31c98bd0274f
|
/meteor/inst/testfiles/ET0_PenmanMonteith/AFL_ET0_PenmanMonteith/ET0_PenmanMonteith_valgrind_files/1615842172-test.R
|
5cd16c3dd6750d0c3aedeb669b3ff4f3294ddd1f
|
[] |
no_license
|
akhikolla/updatedatatype-list3
|
536d4e126d14ffb84bb655b8551ed5bc9b16d2c5
|
d1505cabc5bea8badb599bf1ed44efad5306636c
|
refs/heads/master
| 2023-03-25T09:44:15.112369
| 2021-03-20T15:57:10
| 2021-03-20T15:57:10
| 349,770,001
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 700
|
r
|
1615842172-test.R
|
testlist <- list(G = numeric(0), Rn = numeric(0), atmp = numeric(0), ra = numeric(0), relh = numeric(0), rs = numeric(0), temp = c(1.81037701089217e+87, -2.93112217825115e-158, 9.03412394302482e-46, 4.56997008477285e+255, -1.93925524631694e-68, -1.9689320904705e+208, 1.10818199152215e-09, 4.00294354529823e-221, -1.15261897385911e+41, -8.10849672500667e+229, -4.0826884167278e-277, -3.91881664584645e-291, 1.05691417767735e+254, -1.44288984971022e+71, -7.00861543724366e-295, -4.55414938106482e-200, -6.87353716589742e-83, 179.214603488924, -6.67707850404722e+133, 5.32948612168953e-320, 0))
result <- do.call(meteor:::ET0_PenmanMonteith,testlist)
str(result)
|
78d97809bc9040f1d5fc7be63f78a357c4ab1e5d
|
edee4a9c4cf3c35a52dfc99ac53279ab23e069ab
|
/examples/Join/spatial_joins.R
|
227af7f2f1380a8af1d4a2b379a79450b9df4835
|
[
"Apache-2.0"
] |
permissive
|
benardonyango/rgee
|
a8dd22a72f2c77a0d1e88f6177c740942fe2cfbc
|
e9e0f2fa7065e79c1c794bd7387fd0af633031ff
|
refs/heads/master
| 2022-04-09T18:10:23.689798
| 2020-03-31T10:56:00
| 2020-03-31T10:56:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 929
|
r
|
spatial_joins.R
|
library(rgee)
# ee_reattach() # reattach ee as a reserved word
ee_Initialize()
# Load a primary 'collection': protected areas (Yosemite National Park).
primary <- ee$FeatureCollection("WCMC/WDPA/current/polygons")$
filter(ee$Filter$eq("NAME", "Yosemite National Park"))
# Load a secondary 'collection': power plants.
powerPlants <- ee$FeatureCollection("WRI/GPPD/power_plants")
# Define a spatial filter, with distance 100 km.
distFilter <- ee$Filter$withinDistance(
distance = 100000,
leftField = ".geo",
rightField = ".geo",
maxError = 10
)
# Define a saveAll join.
distSaveAll <- ee$Join$saveAll(
matchesKey = "points",
measureKey = "distance"
)
# Apply the join.
spatialJoined <- distSaveAll$apply(primary, powerPlants, distFilter)
# Print the result.
# print(spatialJoined.getInfo())
Map$centerObject(spatialJoined, zoom = 9)
Map$addLayer(ee$Image()$paint(spatialJoined, 1, 3), name = "Spatial Joined")
|
8ddca526344a06ab0d6236dcec8021dfd5c2342c
|
485f0a84c87e2cbd0f8ca91b3fe945dd8dc3de30
|
/src/data-prep/download-file.R
|
41e56c8456a2bcdd0035231a55f22c6331a1d439
|
[] |
no_license
|
bramvdbemt/DPREP_teamproject_group14
|
65411cb72a3440c24d3079ff0f7191ce765bc833
|
cc6c8aa318d2407f80f3cdd1eb8ab6fd4dcc5d47
|
refs/heads/main
| 2023-08-30T18:16:06.056186
| 2021-10-08T12:41:30
| 2021-10-08T12:41:30
| 404,618,017
| 0
| 0
| null | 2021-09-09T06:55:39
| 2021-09-09T06:55:38
| null |
UTF-8
|
R
| false
| false
| 1,320
|
r
|
download-file.R
|
######################
### DOWNLOAD DATA ####
######################
# all listings
url_listings12.20 <- "http://data.insideairbnb.com/the-netherlands/north-holland/amsterdam/2020-12-12/data/listings.csv.gz"
url_listings01.21 <- "http://data.insideairbnb.com/the-netherlands/north-holland/amsterdam/2021-01-09/data/listings.csv.gz"
url_listings02.21 <- "http://data.insideairbnb.com/the-netherlands/north-holland/amsterdam/2021-02-08/data/listings.csv.gz"
url_listings03.21 <- "http://data.insideairbnb.com/the-netherlands/north-holland/amsterdam/2021-03-04/data/listings.csv.gz"
url_listings04.21 <- "http://data.insideairbnb.com/the-netherlands/north-holland/amsterdam/2021-04-09/data/listings.csv.gz"
url_listings05.21 <- "http://data.insideairbnb.com/the-netherlands/north-holland/amsterdam/2021-05-19/data/listings.csv.gz"
# downloading all files and saving them in the data folder
download.file(url_listings12.20, "../../data/listings-12.20.csv.gz")
download.file(url_listings01.21, "../../data/listings-01.21.csv.gz")
download.file(url_listings02.21, "../../data/listings-02.21.csv.gz")
download.file(url_listings03.21, "../../data/listings-03.21.csv.gz")
download.file(url_listings04.21, "../../data/listings-04.21.csv.gz")
download.file(url_listings05.21, "../../data/listings-05.21.csv.gz")
|
85e0b7518262e73ca1e9c0083ef12aafeab898a2
|
a7c217ddf1e942f50829a5c85517b9461d65534e
|
/observational_metrics.R
|
c063d571382b7f281f4dc2a898ed71cb3768206c
|
[] |
no_license
|
mandycoston/counterfactual
|
3d6a2a555de7c088a8d97999f98c5b4683f7c2eb
|
eaa4cad7d3f52ef633848c578954fc47f930f2d2
|
refs/heads/master
| 2021-12-14T00:40:34.869680
| 2021-12-01T16:35:36
| 2021-12-01T16:35:36
| 205,283,106
| 4
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,065
|
r
|
observational_metrics.R
|
# Compute fpr
compute_fpr_obs <- function(t, pred, label) {
pred_label <- ifelse(pred >= t, 1, 0)
neg <- 1-label
return(sum(pred_label*neg)/sum(neg))
}
# Compute fnr
compute_fnr_obs <- function(t, pred, label) {
pred_label <- ifelse(pred >= t, 1, 0)
return(sum((1-pred_label)*label)/sum(label))
}
# Compute fpr_cost
compute_fpr_cost_obs <- function(pred, label) {
return(mean(pred[label==0]))
}
# Compute fnr_cost
compute_fnr_cost_obs <- function(pred, label) {
return(1-mean(pred[label==1]))
}
# Compute Recall
compute_recall_obs <- function(t, pred, label) {
pred_label <- ifelse(pred >= t, 1, 0)
return(sum(pred_label*label)/sum(label))
}
compute_precision_thresh <- function(t, pred, label) {
pred_label <- ifelse(pred >= t, 1, 0)
return(sum(pred_label*label)/sum(pred_label))
}
compute_pr_df <- function(dat.eval, y, pred_caus, pred_batch, group) {
#t_vals <- seq(0.1, 1, 0.01)
t_vals <- seq(0, 1, 0.01)
r.caus <- sapply(t_vals, compute_recall_obs, pred = pull(dat.eval,!! pred_caus), label = pull(dat.eval, !! y))
r.batch <- sapply(t_vals, compute_recall_obs, pred = pull(dat.eval, !! pred_batch), label = pull(dat.eval, !! y))
p.caus <- sapply(t_vals, compute_precision_thresh, pred = pull(dat.eval, !! pred_caus), label = pull(dat.eval, !! y))
p.batch <- sapply(t_vals, compute_precision_thresh, pred = pull(dat.eval, !! pred_batch), label = pull(dat.eval,!! y))
obs.all.caus <- data.frame("Recall" = r.caus, "Precision" = p.caus, "Threshold"= t_vals, "Method" = "Counterfactual")
obs.all.batch <- data.frame("Recall" = r.batch, "Precision" = p.batch,"Threshold"= t_vals, "Method" = "Observational")
obs.all <- rbind(obs.all.caus, obs.all.batch)
obs.all$Group <- group
return(obs.all)
}
compute_roc_df <- function(dat.eval, y, pred_caus, pred_batch, group) {
#t_vals <- seq(0.1, 1, 0.01)
t_vals <- seq(0, 1, 0.01)
r.caus <- sapply(t_vals, compute_recall_obs, pred = pull(dat.eval,!! pred_caus), label = pull(dat.eval, !! y))
r.batch <- sapply(t_vals, compute_recall_obs, pred = pull(dat.eval, !! pred_batch), label = pull(dat.eval, !! y))
fpr.caus <- sapply(t_vals, compute_fpr_obs, pred = pull(dat.eval, !! pred_caus), label = pull(dat.eval, !! y))
fpr.batch <- sapply(t_vals, compute_fpr_obs, pred = pull(dat.eval, !! pred_batch), label = pull(dat.eval,!! y))
obs.all.caus <- data.frame("Recall" = r.caus, "FPR" = fpr.caus, "Threshold"= t_vals, "Method" = "Counterfactual")
obs.all.batch <- data.frame("Recall" = r.batch, "FPR" = fpr.batch,"Threshold"= t_vals, "Method" = "Observational")
obs.all <- rbind(obs.all.caus, obs.all.batch)
obs.all$Group <- group
return(obs.all)
}
# Calibration based on deciles (default) or ventiles if num_bins = 20
compute_calibration_obs <- function(num_bins = 10, mu_hat, Y) {
steps <- seq(1/num_bins, 1, 1/num_bins)
threshs <- sapply(steps, function(x) {unname(quantile(mu_hat, probs=x))})
calib <- data.frame(matrix(nrow=0, ncol=4))
colnames(calib) <- c("Average score", "Obs.rate", "Low", "High")
i <- 1
while(i <= num_bins) {
prev <- ifelse(i >1, threshs[i-1], 0)
sub <- (mu_hat > prev & mu_hat <= threshs[i])
calib <- rbind(calib, data.frame("Ventile risk score" = i, "Average score" = mean(mu_hat[sub]), "Rate"= mean(Y[sub]), "Low" = mean(Y[sub]) - 1.96*sqrt(var(Y[sub])/length(Y[sub])), "High" = mean(Y[sub]) + 1.96*sqrt(var(Y[sub])/length(Y[sub]))))
i= i+1
}
return(calib)
}
# num_bins is a scalar. dat.eval is a dataframe. pred_caus/batch is the counterfactual/observational predictions as a quosure. y is a quosure of the outcome
# group is a string
compute_calib_df <- function(num_bins, dat.eval, pred_caus, pred_batch, y, group) {
calib.batch <- compute_calibration_obs(num_bins, pull(dat.eval, !! pred_batch), pull(dat.eval, !! y))
calib.batch$Method <- "Observational"
calib.caus <- compute_calibration_obs(num_bins, pull(dat.eval, !! pred_caus), pull(dat.eval, !! y))
calib.caus$Method <- "Counterfactual"
calib <- rbind(calib.caus, calib.batch)
calib$Group <- group
return(calib)
}
|
f1b9b82d64c9252786cea973eed9630c8b0ad56c
|
2e2d277765b284c31083d967ea7b5a64d7005e99
|
/sampleScript.R
|
5559cbecf4eceb7c5770a9512c583b3e10d60d34
|
[] |
no_license
|
mchikina/SIRageStratified
|
06f8ce2ee3cd009d552d8a5aa661221d09b6319b
|
657c19b421ebf6dd0f07cfb64fffb2bed16fcd90
|
refs/heads/master
| 2022-12-10T14:17:32.097686
| 2020-09-06T12:41:30
| 2020-09-06T12:41:30
| 279,653,102
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,750
|
r
|
sampleScript.R
|
# Driver script for an age-stratified SIR model with an ICU compartment.
# Runs four mitigation scenarios and overlays cumulative mortality curves
# (the "M" compartment, summed over age brackets) on one plot.
source("simSIRmultiPopSingleIwithICUcomp.R")
intMat=readRDS("interactMatsymmetric.RDS")  # symmetric age-group interaction matrix
popTable=readRDS("popParams.RDS")           # per-age-bracket population parameters
#crushing
# Scenario 1: strong initial suppression (R0init = 0.9 < 1).
out=simulateSIRmultiPopICUcompartment( R0default = 2.7,R0init=0.9, popParams=popTable, restriction.effect = 1, interactionMat = intMat, free.bracket.fraction = 1, free.bracket=17, forceNormalAt = round(365*1.25) , step.up.days=round(365*.5),nstep.max = 3*365, nstep.min = 3*365)
#plot mortality
plot(apply(out$data[, "M",],1,sum), type="l", ylim=c(0, 2.5e6))
#flatten
# Scenario 2: milder suppression (R0init = 1.6); overlaid in red.
out=simulateSIRmultiPopICUcompartment( R0default = 2.7,R0init=1.6, popParams=popTable, restriction.effect = 1, interactionMat = intMat, free.bracket.fraction = 1, free.bracket=17, forceNormalAt = round(365*1.25) , step.up.days=round(365*.5),nstep.max = 3*365, nstep.min = 3*365)
lines(apply(out$data[, "M",],1,sum), col=2)
#no interaction matrix
# Scenario 3: same as scenario 2 but with interactionMat = NULL; blue.
out=simulateSIRmultiPopICUcompartment( R0default = 2.7,R0init=1.6, popParams=popTable, restriction.effect = 1, interactionMat = NULL, free.bracket.fraction = 1, free.bracket=17, forceNormalAt = round(365*1.25) , step.up.days=round(365*.5),nstep.max = 3*365, nstep.min = 3*365)
lines(apply(out$data[, "M",],1,sum), col=4) #will be off the scale
#heterogeneous
#relax through 50 (free.bracket=10), row 10 in popTable
#relax only 70% of the <50 population (free.bracket.faction)
#rest is at 10% interactions (restriction.effect=0.1)
#R0default and R0init are the same
# Scenario 4: age-heterogeneous relaxation; green. NOTE(review): the comment
# above says 10% interactions but the call uses restriction.effect = 0.3 --
# confirm which is intended.
out=simulateSIRmultiPopICUcompartment( R0default = 2.7,R0init=2.7, popParams=popTable, restriction.effect = 0.3, interactionMat = intMat, free.bracket.fraction = 0.7, free.bracket=10, forceNormalAt = round(365*1.25) , step.up.days=round(365*.5),nstep.max = 3*365, nstep.min = 3*365)
lines(apply(out$data[, "M",],1,sum), col=3)
|
bdf85fddd48f0fb99db8712f19f54f3465e046cb
|
257f253f9fd8fd6a385abaaaa9d9509409137631
|
/Figures/Figure - gs(w) (ESS).r
|
811a3041fcb6f8a2b7b859fbc3be545c4b2b5912
|
[] |
no_license
|
YaojieLu/Optimal-model-with-cost
|
ee16d16e159687e0b7d06e132101422cd7f3a788
|
0a66573bc221034f02c0554c3ff63e371d025a85
|
refs/heads/master
| 2020-05-22T01:17:15.500934
| 2017-01-24T22:45:10
| 2017-01-24T22:45:10
| 50,156,177
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,225
|
r
|
Figure - gs(w) (ESS).r
|
# Power-law transform ps = pe * w^(-b); per the variable naming this maps
# relative soil moisture w to a (soil) water potential -- TODO confirm units.
psf <- function(w, pe = -1.58 * 10^-3, b = 4.38) {
  pe * w^(-b)
}
# Inverse of psf(): recovers w from ps via w = (ps / pe)^(-1/b).
wf <- function(ps, pe = -1.58 * 10^-3, b = 4.38) {
  ratio <- ps / pe
  ratio^(-1 / b)
}
# Loss of conductivity (as a fraction) from a Weibull-shaped curve with
# shape c and scale d; px is expected to be non-positive.
PLCf <- function(px, c = 2.64, d = 3.54) {
  weibull_arg <- (-px / d)^c
  1 - exp(-weibull_arg)
}
# Xylem conductance at potential px: maximum conductance kxmax scaled by
# the Weibull survival term exp(-(-px/d)^c).
kxf <- function(px, kxmax = 5, c = 2.64, d = 3.54) {
  relative_conductance <- exp(-(-px / d)^c)
  kxmax * relative_conductance
}
# Minimum xylem water potential at a given soil moisture w.
# Maximizes the flux (ps - px) * h2 * kxf(px) over px in [-20, 0] and
# returns the argmax. Defaults h and h2 are composite constants built from
# the other defaults; h is computed but not used in this body.
pxminf <- function(w, a=1.6, nZ=0.5, p=43200, l=1.8e-5, LAI=1, h=l*a*LAI/nZ*p, h2=l*LAI/nZ*p/1000){
  ps <- psf(w)  # water potential corresponding to w
  f1 <- function(px)(ps-px)*h2*kxf(px)  # flux through the xylem at potential px
  res <- optimize(f1, c(-20, 0), tol=.Machine$double.eps, maximum=T)$maximum
  return(res)
}
# Stomatal conductance implied by water balance: xylem supply
# (ps - px) * h2 * kxf(px) divided by atmospheric demand h * VPD.
gswpxf <- function(w, px, a = 1.6, nZ = 0.5, p = 43200, l = 1.8e-5, LAI = 1,
                   h = l*a*LAI/nZ*p, h2 = l*LAI/nZ*p/1000, VPD = 0.02) {
  soil_potential <- psf(w)
  supply <- (soil_potential - px) * h2 * kxf(px)
  supply / (h * VPD)
}
# Numerically invert pxminf(): the w in [0, 1] whose pxminf(w) is closest
# to the target px (least-squares objective).
inversepxminf <- function(px) {
  objective <- function(w) (pxminf(w) - px)^2
  optimize(objective, c(0, 1), tol = .Machine$double.eps)$minimum
}
# Stomatal conductance along the pxmin(w) frontier, vectorized over px.
gspxminf <- Vectorize(function(px) {
  w_at_px <- inversepxminf(px)
  gswpxf(w_at_px, px)
})
# Maximum stomatal conductance at soil moisture w: supply evaluated at the
# minimum attainable xylem potential pxmin, divided by demand h * VPD.
gsmaxf <- function(w, a=1.6, nZ=0.5, p=43200, l=1.8e-5, LAI=1, h=l*a*LAI/nZ*p, VPD=0.02, h2=l*LAI/nZ*p/1000){
  ps <- psf(w)        # water potential at w
  pxmin <- pxminf(w)  # most negative attainable xylem potential
  res <- (ps-pxmin)*h2*kxf(pxmin)/(h*VPD)
  return(res)
}
# Numerically invert gsmaxf(): the w in [0, 1] minimizing the squared
# residual against the target gs.
inversegsmaxf <- function(gs) {
  objective <- function(w) (gsmaxf(w) - gs)^2
  optimize(objective, c(0, 1), tol = .Machine$double.eps)$minimum
}
# Percent loss of conductivity along the gsmax frontier (scaled to %).
PLCgsmaxf <- Vectorize(function(gs) {
  w_at_gs <- inversegsmaxf(gs)
  100 * PLCwgsf(w_at_gs, gs)
})
# Xylem water potential balancing supply and demand at (w, gs): minimizes
# the squared supply-demand mismatch over px in [pxmin, ps]. When
# pxmin >= ps the soil potential ps is returned directly.
# NOTE(review): ifelse() on a scalar condition works but if/else would be
# more idiomatic here.
pxf <- function(w, gs, a=1.6, LAI=1, nZ=0.5, p=43200, l=1.8e-5, h=l*a*LAI/nZ*p, VPD=0.02, h2=l*LAI/nZ*p/1000){
  ps <- psf(w)
  pxmin <- pxminf(w)
  f1 <- function(px)((ps-px)*h2*kxf(px)-h*VPD*gs)^2  # squared supply-demand mismatch
  res <- ifelse(pxmin<ps, optimize(f1, c(pxmin, ps), tol=.Machine$double.eps)$minimum, ps)
  return(res)
}
# Assimilation A(gs, ca): the smaller root of the quadratic in the closed
# form below (appears to be a Farquhar-style supply/demand solution --
# TODO confirm), scaled by LAI.
Af <- function(gs, ca, Vcmax = 50, cp = 30, Km = 703, Rd = 1, LAI = 1) {
  linear_part <- Vcmax + (Km + ca) * gs - Rd
  discriminant <- (Vcmax)^2 + 2 * Vcmax * (Km - ca + 2 * cp) * gs +
    ((ca + Km) * gs + Rd)^2 - 2 * Rd * Vcmax
  LAI * 1 / 2 * (linear_part - discriminant^(1 / 2))
}
# Loss of conductivity at (w, gs): solve for the xylem potential, then
# evaluate the vulnerability curve at it.
PLCwgsf <- function(w, gs) {
  PLCf(pxf(w, gs))
}
# PLCwgsf variant for the regime px < pxmin: the root search for the xylem
# potential runs over (-1000, pxmin) instead of (pxmin, ps).
PLCwgsf2 <- function(w, gs,
a=1.6, LAI=1, nZ=0.5, p=43200, l=1.8e-5, h=l*a*LAI/nZ*p, VPD=0.02, h2=l*LAI/nZ*p/1000){
  # xylem water potential function (local variant with lower search bound)
  pxf1 <- function(w, gs){
    ps <- psf(w)
    pxmin <- pxminf(w)
    f1 <- function(x)((ps-x)*h2*kxf(x)-h*VPD*gs)^2  # squared supply-demand mismatch
    res <- optimize(f1, c(-1000, pxmin), tol=.Machine$double.eps)$minimum
    return(res)
  }
  px <- pxf1(w, gs)
  res <- PLCf(px)
  return(res)
}
# Hydraulic cost m(w, gs): loss of conductivity scaled by the cost
# coefficient h3.
mf <- function(w, gs, h3 = 10) {
  PLCwgsf(w, gs) * h3
}
# Hydraulic cost for the px < pxmin regime (uses PLCwgsf2).
mf2 <- function(w, gs, h3 = 10) {
  PLCwgsf2(w, gs) * h3
}
# Hydraulic cost along the gsmax frontier, vectorized over gs.
mgsmaxf <- Vectorize(function(gs) {
  w_at_gs <- inversegsmaxf(gs)
  mf(w_at_gs, gs)
})
# Net benefit B(w, gs, ca): assimilation gain minus hydraulic cost.
Bf <- function(w, gs, ca) {
  gain <- Af(gs, ca)
  cost <- mf(w, gs)
  gain - cost
}
# ESS gs(w): the stomatal conductance in [0, gsmax(w)] maximizing the net
# benefit B(w, gs, ca); returns the argmax of the 1-D optimization.
ESSf <- function(w, ca){
  f1 <- function(gs)Bf(w, gs, ca)
  res <- optimize(f1, c(0, gsmaxf(w)), tol=.Machine$double.eps, maximum=T)
  return(res$maximum)
}
# ESS gs parameterized by water potential ps instead of moisture w.
ESSpsf <- function(ps, ca) {
  ESSf(wf(ps), ca)
}
# Assimilation realized at the ESS stomatal conductance.
ESSAf <- function(w, ca) {
  gs_ess <- ESSf(w, ca)
  Af(gs_ess, ca)
}
# Hydraulic cost realized at the ESS stomatal conductance.
ESSmf <- function(w, ca) {
  gs_ess <- ESSf(w, ca)
  mf(w, gs_ess)
}
# Net benefit at the ESS: gain minus cost.
ESSBf <- function(w, ca) {
  ESSAf(w, ca) - ESSmf(w, ca)
}
# Net benefit at the ESS, parameterized by water potential ps.
ESSBpsf <- function(ps, ca) {
  moisture <- wf(ps)
  Bf(moisture, ESSf(moisture, ca), ca)
}
# Loss of conductivity realized at the ESS stomatal conductance.
ESSPLCf <- function(w, ca) {
  gs_ess <- ESSf(w, ca)
  PLCf(pxf(w, gs_ess))
}
# ESS PLC parameterized by water potential ps.
ESSPLCpsf <- function(ps, ca) {
  ESSPLCf(wf(ps), ca)
}
# Integral of the un-normalized stationary PDF of w over [wL, 1];
# presumably its reciprocal yields the normalizing constant cPDF used by
# PDFf() -- confirm with the caller.
integralfnocf <- function(ca, k, MAP, wL,
LAI=1, a=1.6, nZ=0.5, p=43200, l=1.8e-5, h=l*a*LAI/nZ*p, VPD=0.02,
gamma=1/((MAP/365/k)/1000)*nZ){
  ESSf1 <- Vectorize(function(w)ESSf(w, ca))
  Ef <- function(w){h*VPD*ESSf1(w)}  # transpiration at the ESS gs
  rEf <- function(w){1/Ef(w)}
  integralrEf <- Vectorize(function(w){integrate(rEf, wL, w, rel.tol=.Machine$double.eps^0.3)$value})#
  # un-normalized density; exponent combines rainfall (gamma) and loss (k) terms
  fnoc <- function(w){1/Ef(w)*exp(-gamma*(w-wL)/(1-wL)+k*integralrEf(w)*1/(1-wL))*1/(1-wL)}
  res <- integrate(fnoc, wL, 1, rel.tol=.Machine$double.eps^0.3)
  return(res)
}
# Stationary probability density of soil moisture w, scaled by the
# externally supplied normalizing constant cPDF.
PDFf <- function(w, ca, k, MAP, wL, cPDF,
LAI=1, a=1.6, nZ=0.5, p=43200, l=1.8e-5, h=l*a*LAI/nZ*p, VPD=0.02,
gamma=1/((MAP/365/k)/1000)*nZ){
  ESSf1 <- Vectorize(function(w)ESSf(w, ca))
  Ef <- function(w){h*VPD*ESSf1(w)}  # transpiration at the ESS gs
  rEf <- function(w){1/Ef(w)}
  integralrEf <- Vectorize(function(w){integrate(rEf, wL, w, rel.tol=.Machine$double.eps^0.3)$value})#
  res <- cPDF/Ef(w)*exp(-gamma*(w-wL)/(1-wL)+k*integralrEf(w)*1/(1-wL))*1/(1-wL)
  return(res)
}
# PDF-weighted average assimilation over [wL, 1].
# Returns the full integrate() result object, not just $value.
averAf <- function(ca, k, MAP, wL, cPDF){
  ESSAf1 <- Vectorize(function(w)ESSAf(w, ca))
  f1 <- function(w)ESSAf1(w)*PDFf(w, ca, k, MAP, wL, cPDF)
  res <- integrate(f1, wL, 1, rel.tol=.Machine$double.eps^0.3)
  return(res)
}
# PDF-weighted average hydraulic cost over [wL, 1].
# Returns the full integrate() result object, not just $value.
avermf <- function(ca, k, MAP, wL, cPDF){
  ESSmf1 <- Vectorize(function(w)ESSmf(w, ca))
  f1 <- function(w)ESSmf1(w)*PDFf(w, ca, k, MAP, wL, cPDF)
  res <- integrate(f1, wL, 1, rel.tol=.Machine$double.eps^0.3)
  return(res)
}
# PDF-weighted average transpiration (h * VPD * gs at the ESS) over [wL, 1].
# Returns the full integrate() result object, not just $value.
averEf <- function(ca, k, MAP, wL, cPDF,
LAI=1, a=1.6, nZ=0.5, p=43200, l=1.8e-5, h=l*a*LAI/nZ*p, VPD=0.02){
  ESSf1 <- Vectorize(function(w)ESSf(w, ca))
  Ef <- function(w)h*VPD*ESSf1(w)
  f1 <- function(w)Ef(w)*PDFf(w, ca, k, MAP, wL, cPDF)
  res <- integrate(f1, wL, 1, rel.tol=.Machine$double.eps^0.3)
  return(res)
}
# PDF-weighted average soil moisture over [wL, 1].
# Returns the full integrate() result object, not just $value.
averwp1f <- function(ca, k, MAP, wL, cPDF){
  f1 <- function(w)w*PDFf(w, ca, k, MAP, wL, cPDF)
  res <- integrate(f1, wL, 1, rel.tol=.Machine$double.eps^0.3)
  return(res)
}
# PDF-weighted average of 1 - A/(gs * ca) at the ESS over [wL, 1]
# (per the function name, a ci/ca-type ratio -- TODO confirm).
# Returns the full integrate() result object, not just $value.
avercicaf <- function(ca, k, MAP, wL, cPDF){
  ESSAf1 <- Vectorize(function(w)ESSAf(w, ca))
  ESSf1 <- Vectorize(function(w)ESSf(w, ca))
  f1 <- function(w)1-ESSAf1(w)/ESSf1(w)/ca
  f2 <- function(w)f1(w)*PDFf(w, ca, k, MAP, wL, cPDF)
  res <- integrate(f2, wL, 1, rel.tol=.Machine$double.eps^0.3)
  return(res)
}
# Initializing: fix atmospheric CO2, vectorize the ESS functions, locate the
# moisture wL where the ESS net benefit crosses zero, then draw gs(w).
ca <- 400
ESSf1 <- Vectorize(function(w) ESSf(w, ca))
ESSBf1 <- Vectorize(function(w) ESSBf(w, ca))
wL <- uniroot(ESSBf1, c(0.12, 1), tol = .Machine$double.eps)$root
# NOTE(review): add = TRUE assumes a plot is already open on the active
# device -- confirm this script runs after a base plot has been created.
curve(ESSf1, wL, 1, add = TRUE, col = "blue")  # was add=T; TRUE avoids the reassignable T
|
48fa3050b9b3b00700da0465f40b88e14fca41c4
|
321d64b8075c68a8472aa712114ea9f5131607d1
|
/plot6.R
|
6628afb23f647f150a70fd91be25630af62d545b
|
[] |
no_license
|
fengkehh/Exploratory_Data_Final
|
9ac6a4239450a70cfec0be4373512346e2a3e6e3
|
c28fb065569451be4397e307f8c40f0a59a1618f
|
refs/heads/master
| 2021-01-19T11:18:30.936074
| 2017-02-18T22:19:54
| 2017-02-18T22:19:54
| 82,237,867
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,806
|
r
|
plot6.R
|
# Plot 6
# Fetch (if missing) and read the NEI emissions data.
# Returns list(code, data): the classification codes and the PM2.5 summary.
data_load <- function() {
    needed <- c('Source_Classification_Code.rds', 'summarySCC_PM25.rds')
    # Download and unpack the archive unless both .rds files already exist
    if (!all(needed %in% dir())) {
        download.file('https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip',
                      destfile = 'dataset.zip')
        unzip('dataset.zip')
    }
    list(readRDS(needed[1]), readRDS(needed[2]))
}
# Load the data only if not already present in the workspace.
if (!all(c('code', 'data') %in% ls())) {
    result <- data_load()
    code <- result[[1]]
    data <- result[[2]]
}
# Design: match the phrase 'Mobile' and 'On-Road' at least once with any other
# stuff in front, in between or after from the code list to determine proper
# source codes to extract. The definition of "motor vehicle" is from M-W:
# https://www.merriam-webster.com/dictionary/motor%2Bvehicle
expr <- '(Mobile)+.*(On-Road)+'
target <- code[grep(expr, code$EI.Sector),]
# Compute total emission from MV in Baltimore City & Los Angeles County
mv <- data[data$SCC %in% target$SCC &
               (data$fips == '24510' | data$fips == '06037'),]
mv_annual <- aggregate(mv$Emissions, by = list(mv$year, mv$fips), FUN = sum)
# FIX: was a redundant double assignment `names(mv_annual) <- names(mv_annual) <- ...`
names(mv_annual) <- c('year', 'fips', 'totalemission')
# Compute CHANGE in total emission per year.
# NOTE: reads `mv_annual` from the global environment built above.
# -1 * diff() yields (year N) - (year N+1), so positive values mean
# emissions DECREASED from one year to the next.
delta <- function(fips) {
    # Extract portion of the data frame that matches fips
    matched <- mv_annual[mv_annual$fips == fips,]
    # Compute emission delta, aligned with the later of each year pair
    result <- data.frame(deltaemission = -1*diff(matched$totalemission),
                         year = matched$year[2:nrow(matched)],
                         fips = matched$fips[2:nrow(matched)])
    return(result)
}
# Stack the per-county yearly deltas into one frame.
delta_annual <- rbind(delta('24510'), delta('06037'))
# Plot and save to a png file
library(ggplot2)
# Relabel the fips codes with readable county names for the legend.
delta_annual$fips <- factor(delta_annual$fips,
                            labels = c('Baltimore City', 'LA County'))
qplot(x = year, y = deltaemission, data = delta_annual,
      color = fips,
      geom = 'line',
      xlab = 'Year',
      ylab = 'Delta Total Annual Emission (tons)') +
    labs(title = 'Change of Total Annual Emissions between Baltimore City and
Los Angeles County by Year End',
         subtitle = 'Positive indicates DECREASE') +
    theme(plot.title = element_text(hjust = 0.5),
          plot.subtitle = element_text(hjust = 0.5))
ggsave('plot6.png')
# Compute total delta over the years (net change per county)
total <- aggregate(delta_annual$deltaemission, by = list(delta_annual$fips),
                   FUN = sum)
cat(sprintf('Total for Baltimore City: %f tons\nTotal for LA County: %f tons',
            total$x[1], total$x[2]))
# Variable clean up -- removes everything except the cached raw data
rm(list = setdiff(ls(), c('data', 'code')))
|
06eb181ae041e03b3357d22916481933e1ed1c10
|
5669abed76a620d6ceca9196df2f5aa5403f5d1e
|
/plot4.R
|
4eb40adccdc465295a8e35970847dd287af9d232
|
[] |
no_license
|
katsai81/ExData_Plotting1
|
f681947e38ca51c870565ddcaad17546799fd919
|
9ff8a02e763153a7bee0b8907730872e6af7f12f
|
refs/heads/master
| 2021-01-21T06:02:10.947960
| 2016-01-11T04:56:07
| 2016-01-11T04:56:07
| 49,402,832
| 0
| 0
| null | 2016-01-11T04:51:39
| 2016-01-11T04:51:39
| null |
UTF-8
|
R
| false
| false
| 1,642
|
r
|
plot4.R
|
# plot4.R: four-panel power-consumption figure for 2007-02-01/02.
# Read the semicolon-separated dataset; columns arrive as factors/characters.
hpc<-read.table("household_power_consumption.txt",sep = ";",header = TRUE,fill = TRUE)
# Coerce the measurement columns to numeric (via character to avoid
# factor-level integer codes).
hpc$Global_active_power <- as.numeric(as.vector(hpc$Global_active_power))
hpc$Sub_metering_1 <- as.numeric(as.vector(hpc$Sub_metering_1))
hpc$Sub_metering_2 <- as.numeric(as.vector(hpc$Sub_metering_2))
hpc$Sub_metering_3 <- as.numeric(as.vector(hpc$Sub_metering_3))
hpc$Global_reactive_power <- as.numeric(as.vector(hpc$Global_reactive_power))
hpc$Voltage <- as.numeric(as.vector(hpc$Voltage))
# NOTE(review): the date column is named X.Date here -- presumably the raw
# header was mangled on import; confirm against the source file.
hpc$X.Date <- as.Date(hpc$X.Date,"%d/%m/%Y")
# Keep only the two target days.
hpcplot2<- hpc[hpc$X.Date %in% c(as.Date("01/02/2007","%d/%m/%Y"),as.Date("02/02/2007","%d/%m/%Y")),]
hpcplot2 <- cbind(hpcplot2,DayOfWeek=format(hpcplot2$X.Date, "%a"))
# Build a combined POSIX timestamp for the x-axis.
dateTime <- paste(hpcplot2$X.Date, hpcplot2$Time)
#head(dateTime)
hpcplot2 <-cbind(hpcplot2,dateTime=strptime(dateTime, "%Y-%m-%d %H:%M:%S"))
#lapply(hpcplot2, class)
# 2x2 panel layout written straight to PNG.
png(filename="plot4.png",width = 480, height = 480, units = "px")
par(mfrow=c(2,2))
plot(hpcplot2$dateTime,hpcplot2$Global_active_power,type = "l",xlab="",ylab="Global Active Power")
plot(hpcplot2$dateTime,hpcplot2$Voltage,type = "l",xlab="datetime",ylab="Voltage")
plot(hpcplot2$dateTime,hpcplot2$Sub_metering_1,type = "l",xlab="",ylab="Energy sub metering")
lines(hpcplot2$dateTime,hpcplot2$Sub_metering_2,type = "l",xlab="",col="RED")
lines(hpcplot2$dateTime,hpcplot2$Sub_metering_3,type = "l",xlab="",col="BLUE")
legend("topright",lwd=2,col=c("black","red","blue"), legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),bty = "n")
plot(hpcplot2$dateTime,hpcplot2$Global_reactive_power,type = "l",xlab="datetime",ylab="Global_reactive_power")
dev.off()
|
2c547ef6d2c5b0869cd61ade82eafb934daba2db
|
68f98adc577eeb49af175e3b28ddff9d1c816181
|
/funfacts.R
|
3217ac38a1d661a6d812f5fa1ed846aadac89ec8
|
[] |
no_license
|
AF2-Informagics/Final-Project
|
8fb1bfb52d71d28f844aa110b5cb9c00338283e5
|
b2ac4afe06e69b46f8cb447b443d614c0e5d7088
|
refs/heads/master
| 2020-12-30T11:52:49.063870
| 2017-06-02T02:20:29
| 2017-06-02T02:20:29
| 91,435,921
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,182
|
r
|
funfacts.R
|
# funfacts.R: derives "fun fact" subsets from the course-catalog data frame
# built in scripts/dataframe.R, then draws a pie chart of credit counts.
library(dplyr)
library(stringr)
library(plotly)
library(ggplot2)
source("scripts/dataframe.R")
improved.df <- df
# Strip the leading character (per the gsub below, presumably a currency
# symbol -- confirm) from the Additional-fee column.
improved.df$new.additional <- substring(improved.df$Additional, 2)
# Parse to numeric, dropping thousands separators; empty strings become 0.
improved.df$new.additional <- ifelse(improved.df$new.additional != "", as.numeric(gsub(",", "", improved.df$new.additional)), 0)
#Select the most expensive additional class
most.expensive.additional <- improved.df %>% filter(new.additional == max(new.additional))
#Longest time of the class
longest.time.class <- improved.df %>% filter(is.na(time_diff) == FALSE) %>% filter(time_diff == max(time_diff))
#Most Capacity
most.capacity <- improved.df %>% filter(is.na(Capacity) == FALSE) %>% filter(Capacity == max(Capacity))
#very expensive small capacity class
expensive.smallest.capacity <- improved.df %>% filter(is.na(Capacity) == FALSE & Capacity != 0) %>% filter(Capacity < 5 & new.additional > 1000)
# Pie chart of credit distribution (NOTE: `pie` shadows graphics::pie here).
improved.df$credits <- as.numeric(improved.df$Sub.Credit)
new.improved.credit <- improved.df %>% filter(is.na(credits) == FALSE)
pie <- ggplot(new.improved.credit, aes(x = factor(1), fill = factor(new.improved.credit$credits))) + geom_bar(width = 1)
p <- pie + coord_polar(theta = "y")
p
|
7d88ed6c4467a94c675fd5e67856020a8a790247
|
6ae2d6b27576cc8c75a7e02256db410e5007a8b2
|
/man/dev_copy2a4.Rd
|
7ee3e8b448b962ddb8ee2961a964a34543ebc308
|
[] |
no_license
|
HughParsonage/hutils
|
3c2cec1b1a01179219deb47df0dc9e6a3a127435
|
11e6f828876bbc87a42d43a0ee8084ee6d9a6765
|
refs/heads/master
| 2023-02-20T04:27:01.996815
| 2023-02-10T05:18:04
| 2023-02-10T05:18:04
| 88,268,552
| 11
| 2
| null | 2022-04-13T08:31:57
| 2017-04-14T13:09:05
|
R
|
UTF-8
|
R
| false
| true
| 572
|
rd
|
dev_copy2a4.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dev_copy2a4.R
\name{dev_copy2a4}
\alias{dev_copy2a4}
\title{Copy device to an A4 PDF}
\usage{
dev_copy2a4(filename, ...)
}
\arguments{
\item{filename}{A string giving the name of the PDF file to write to, must end in \code{.pdf}.}
\item{...}{Other parameters passed to \code{\link[grDevices]{pdf}}.}
}
\value{
As in \code{\link[grDevices]{dev2}}.
}
\description{
Simply a wrapper around \code{dev.copy2pdf}, but without the need to remember that an A4 sheet of paper is 8.27 in by 11.69 in.
}
|
8dc9a7adf6cc29a9bcd389e4c0f9a0dc9f9fb21d
|
07c0538a8d869c175cd7dda2de462bf5b41295f9
|
/functions/basic/download_AWAP_vp3pm_data.R
|
6f2e0f83dba5f7282bcc53a68626c34784a66453
|
[] |
no_license
|
mingkaijiang/Australia_drought_storm_risk
|
065d45fb13914f84ed1c24de8c0dbce58f966d2e
|
7c2a4aa37048041cbc14ee43a6cd2ab670314d40
|
refs/heads/master
| 2023-08-16T00:17:45.592504
| 2021-10-26T04:27:33
| 2021-10-26T04:27:33
| 269,794,353
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 884
|
r
|
download_AWAP_vp3pm_data.R
|
# Download daily AWAP gridded files (3pm vapour pressure, per the
# "vprph15" URL path -- confirm) into per-year subfolders of destDir.
download_AWAP_vp3pm_data <- function(destDir) {
    ### remote url
    url1 <- "http://www.bom.gov.au/web03/ncc/www/awap/vprp/vprph15/daily/grid/0.05/history/nat/"
    ### file names: daily dates for Apr-Nov 2020, reformatted to YYYYMMDD
    day.list <- seq.Date(as.Date("2020/04/01"),
                         as.Date("2020/11/30"),
                         by="day")
    day.list <- gsub("-", "", day.list)
    ### create storage directories
    # NOTE(review): folders are created for 1971-2020 although day.list only
    # covers 2020 -- presumably other ranges are fetched elsewhere; confirm.
    yr.list <- c(1971:2020)
    for (i in yr.list) {
        if(!dir.exists(paste0(destDir, i, "/"))) {
            dir.create(paste0(destDir, i, "/"), showWarnings = FALSE)
        }
    }
    ### download command
    # Remote files are named <date><date>.grid.Z (date doubled), hence i twice.
    for (i in day.list) {
        subDir <- substr(i, 1, 4)  # year component of the YYYYMMDD string
        download.file(url=paste0(url1, i, i, ".grid.Z"),
                      destfile=paste0(destDir, subDir, "/", i, i, ".grid.Z"))
    }
}
|
b489e49e2b734acaaf075d47578631ddae6b1bf6
|
8826ea55530c96aa11f5d66dd6ea4bb15c0feb5a
|
/man/set_model_library_path.Rd
|
92c5b2dfeb839c578c983d934329d78915acdd62
|
[
"MIT"
] |
permissive
|
odaniel1/stanworkflowR
|
37a6d5f43d6803f7cf13db10c0579c0d88c12191
|
52fa15b371a7193128a9bea6b66850d909a4591c
|
refs/heads/master
| 2020-08-28T03:28:58.044736
| 2019-10-26T14:01:45
| 2019-10-26T14:01:45
| 217,564,146
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 385
|
rd
|
set_model_library_path.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paths.R
\name{set_model_library_path}
\alias{set_model_library_path}
\title{Fix a path in global options for stan models to be saved.}
\usage{
set_model_library_path(path)
}
\arguments{
\item{path}{A file path}
}
\value{
Invisible
}
\description{
Fix a path in global options for stan models to be saved.
}
|
9ac881f2034111fb81555620f3ec8f6912ac4ed0
|
1e2580d2b2771ea6f5d5c07827d9679507af1011
|
/R/nih.R
|
f47d731e8536de1f448da96283a6dae91416eed3
|
[] |
no_license
|
louisahsmith/louisahstuff
|
5500e7476f62903059d357927e43550df0d5ce43
|
75ee938ce59551999e17145580de2aec33add646
|
refs/heads/master
| 2022-08-29T00:53:32.687843
| 2022-08-03T15:46:16
| 2022-08-03T15:46:16
| 170,620,522
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,148
|
r
|
nih.R
|
#' @title NIH grant template
#'
#' @description Produces an R Markdown output format that corresponds
#' approximately to what an NIH grant may look like.
#' @param ... Arguments forwarded to the underlying format constructor.
#' @param metadata Extra yaml metadata to include. Defaults to NULL.
#' @param type Either "pdf" or "word". Defaults to "pdf".
#' @keywords template
#' @export
#' @examples
#' rmarkdown::render("doc.Rmd", nih())
nih <- function(..., metadata = NULL, type = "pdf") {
  # Reject unsupported formats up front.
  if (!type %in% c("pdf", "word")) stop("Other formats not yet supported")
  if (type == "pdf") {
    pdf_document_format(...,
                        format = "nih",
                        template = "nih.tex",
                        metadata = metadata,
                        bibstyle = "nih",
                        bibliography = "library")
  } else {
    word_document_format(...,
                         format = "nih",
                         template = "nih.docx",
                         metadata = metadata,
                         bibstyle = "nih",
                         bibliography = "library")
  }
}
|
2f6f3d0db258568d876cf7a9da915307bbbd6f6b
|
8a46fd6a2acd85260015a4ece2a12ed7fbcf14e4
|
/man/PDFtexp.Rd
|
ec5bf0d873e961a28a4cde741a2989c217e673bc
|
[] |
no_license
|
petrkeil/spasm
|
3c85faee44b25dfd81cb9587a0214960aa036d71
|
bfda334b6870f3850936b07a0f363582c9cc8b2b
|
refs/heads/master
| 2021-06-22T01:44:52.548414
| 2021-01-11T09:17:45
| 2021-01-11T09:17:45
| 160,325,563
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 623
|
rd
|
PDFtexp.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/point_patterns_simulated_communities.R
\name{PDFtexp}
\alias{PDFtexp}
\title{Truncated exponential probability density function}
\usage{
PDFtexp(d, alpha, dlim = c(0, 1))
}
\arguments{
\item{d}{a vector or scalar representing spatial distance}
\item{alpha}{the main parameter of the function}
\item{dlim}{the two truncation points of the function (default is 0 and 1)}
}
\value{
Probabiliy densities for each of the d values.
}
\description{
This is the probability density function from Keil (2014) PeerJ: https://peerj.com/preprints/446/.
}
|
8cade118acf2e2ae3ea35c05b4e85db3e66c8540
|
4e929f4a92a2533e713b87adb06f773748814126
|
/R/RProjects/HITHATStats/R/ml17.R
|
bb2bc695d9f54358dcf33d03b651e3341fc32bbf
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
jlthomps/EflowStats
|
f4fe56f17cb675bcc1d618bc838003c2f2e9f81b
|
016c9cb65a2f13a041af3eb87debb4f83793238a
|
refs/heads/master
| 2021-01-01T05:33:44.189671
| 2013-12-04T23:30:21
| 2013-12-04T23:30:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,024
|
r
|
ml17.R
|
#' Function to return the ML17 hydrologic indicator statistic for a given data frame
#'
#' Computes the annual base-flow values via \code{bfi()} for the supplied
#' daily-flow data frame and returns their central tendency, using the mean
#' or the median according to the \code{pref} option.
#'
#' @param qfiletempf data frame containing a "discharge" column containing daily flow values
#' @param pref string containing a "mean" or "median" preference
#' @return ml17 numeric value of the ML17 statistic for the given data frame
#' @export
#' @examples
#' load_data<-paste(system.file(package="HITHATStats"),"/data/obs_data.csv",sep="")
#' qfiletempf<-read.csv(load_data)
#' ml17(qfiletempf)
ml17 <- function(qfiletempf, pref = "mean") {
  annual_bfi <- bfi(qfiletempf)
  # "median" is the only alternative; anything else falls back to the mean,
  # matching the original behavior.
  if (pref == "median") median(annual_bfi) else mean(annual_bfi)
}
|
a4769ac843eca27aa0ddc724b8a276ef5522e3af
|
09b2ffbd39efa3fc96449ce538d729b0a454fa33
|
/data-analytics-skills/scripts/src/RAM/sumcol.R
|
430f4244c9d63481b76d23c4f4a5c0b5c634c4bd
|
[] |
no_license
|
sgelias/bacterial-16s-metabarcoding
|
9bdccf9ec089c471374ad7179388bc19e5dd3244
|
5b6162755871c3c7bebc88761d96bc4df35b30f4
|
refs/heads/main
| 2023-02-21T22:13:43.577038
| 2020-11-10T02:03:56
| 2020-11-10T02:03:56
| 310,366,424
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 113
|
r
|
sumcol.R
|
# Column sums of X (coerced to a matrix). When X has a single row, the
# ORIGINAL object is returned unchanged -- not its matrix coercion.
sumcol <- function(X) {
  mat <- as.matrix(X)
  if (nrow(mat) > 1) {
    colSums(mat)
  } else {
    X
  }
}
|
00f7ae5fdea070fd78e857afc0b1fd9d53a1189c
|
6dc2d9ef6198ede44345bdea09aad12107e4d5d9
|
/cuantiAtr.R
|
467415644083ed5a39c29bef685700bcaefa3402
|
[] |
no_license
|
laparcela/modelo_red_booleana_milpa_rafa
|
0785af645855f393712c69fb26ceb6b4447cd75f
|
2c593eebe211f9af2443e74b33446583a276e049
|
refs/heads/master
| 2021-07-17T01:28:33.787325
| 2017-10-23T21:12:18
| 2017-10-23T21:12:18
| 108,016,086
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,497
|
r
|
cuantiAtr.R
|
# Description: quantifies the frequencies of the attractors reached from a
# logic network (Boolean, multivalued or infinitely valued) under different
# criteria. Input x is a nested list of labeled attractor matrices whose
# row names are milpa species/stage labels (Maiz = maize, Frijol = bean,
# Calabaza = squash, Quelites = edible greens; G/J suffixes presumably
# distinguish growth stages -- TODO confirm). Each attractor's list name
# encodes its basin size as a numeric prefix.
cuantiAtr<-function(x){
  require(MASS)
  # Function that obtains the singular values, taken from the "svd" library.
  # NOTE: this local definition masks base::svd inside this function; it is
  # used below only for length(svd(m)$d), i.e. the matrix rank proxy.
  svd<-function (x, nu = min(n, p), nv = min(n, p), LINPACK = FALSE)
  {
    x <- as.matrix(x)
    if (any(!is.finite(x)))
      stop("infinite or missing values in 'x'")
    dx <- dim(x)
    n <- dx[1L]
    p <- dx[2L]
    if (!n || !p)
      stop("a dimension is zero")
    La.res <- La.svd(x, nu, nv)
    res <- list(d = La.res$d)
    if (nu)
      res$u <- La.res$u
    if (nv) {
      if (is.complex(x))
        res$v <- Conj(t(La.res$vt))
      else res$v <- t(La.res$vt)
    }
    res
  }
  # Accumulators: one inner list per ensemble e, one entry per attractor f.
  lista.prop.mzGJ<-list()
  lista.prop.cbGJ<-list()
  lista.prop.freGA<-list()
  lista.prop.quelites<-list()
  lista.prop.conj<-list()
  lista.prop.mzGJ.e<-list()
  lista.prop.freGA.e<-list()
  lista.prop.cbGJ.e<-list()
  lista.prop.quelites.e<-list()
  lista.prop.conj.e<-list()
  for(e in 1:length(x)){
    for(f in 1:length(x[[e]])){
      # Only consider attractors with more than 5 singular values; smaller
      # ones are recorded as NA below.
      if(length(svd(x[[e]][[f]])$d)>5){
        # Ratio of mature to juvenile maize, weighted by this attractor's
        # basin size (numeric prefix of its name) relative to the summed
        # basin sizes of all qualifying attractors in ensemble e.
        if(sum(x[[e]][[f]]["MaizJ",])!=0){
          lista.prop.mzGJ.e[[f]]<-(sum(x[[e]][[f]]["MaizG",])/sum(x[[e]][[f]]["MaizJ",]))*(as.numeric(substr((names(x[[e]][f])),1,nchar(names(x[[e]][f]))-1))/(sum(as.numeric(sapply(names(x[[e]][which(lapply(x[[e]],function(x){length(svd(x)$d)})>5)]),function(x){(substr(x,1,nchar(x)-1))})))))
          #*(1/length(x))
          #print(as.numeric(substr((names(x[[e]][f])),1,nchar(names(x[[e]][f]))-1))/(sum(as.numeric(sapply(names(x[[e]][which(lapply(x[[e]],length)>5)]),function(x){(substr(x,1,nchar(x)-1))})))))
          lista.prop.mzGJ[[e]]<-lista.prop.mzGJ.e
        }
        if(sum(x[[e]][[f]]["MaizJ",])==0){
          lista.prop.mzGJ.e[[f]]<-0
          lista.prop.mzGJ[[e]]<-lista.prop.mzGJ.e
        }
        # Same weighted ratio for bean (FrijolEG over FrijolE).
        if(sum(x[[e]][[f]]["FrijolE",])!=0){
          lista.prop.freGA.e[[f]]<-(sum(x[[e]][[f]]["FrijolEG",])/sum(x[[e]][[f]]["FrijolE",]))*(as.numeric(substr((names(x[[e]][f])),1,nchar(names(x[[e]][f]))-1))/(sum(as.numeric(sapply(names(x[[e]][which(lapply(x[[e]],function(x){length(svd(x)$d)})>5)]),function(x){(substr(x,1,nchar(x)-1))})))))
          lista.prop.freGA[[e]]<-lista.prop.freGA.e
        }
        if(sum(x[[e]][[f]]["FrijolE",])==0){
          lista.prop.freGA.e[[f]]<-0
          lista.prop.freGA[[e]]<-lista.prop.freGA.e
        }
        # Same weighted ratio for squash (CalabazaG over CalabazaJ).
        if(sum(x[[e]][[f]]["CalabazaJ",])!=0){
          lista.prop.cbGJ.e[[f]]<-(sum(x[[e]][[f]]["CalabazaG",])/sum(x[[e]][[f]]["CalabazaJ",]))*(as.numeric(substr((names(x[[e]][f])),1,nchar(names(x[[e]][f]))-1))/(sum(as.numeric(sapply(names(x[[e]][which(lapply(x[[e]],function(x){length(svd(x)$d)})>5)]),function(x){(substr(x,1,nchar(x)-1))})))))
          lista.prop.cbGJ[[e]]<-lista.prop.cbGJ.e
        }
        if(sum(x[[e]][[f]]["CalabazaJ",])==0){
          lista.prop.cbGJ.e[[f]]<-0
          lista.prop.cbGJ[[e]]<-lista.prop.cbGJ.e
        }
        # Weighted fraction of quelites presence relative to the attractor
        # size (number of singular values).
        if(sum(x[[e]][[f]]["Quelites",])!=0){
          lista.prop.quelites.e[[f]]<-(sum(x[[e]][[f]]["Quelites",])/length(svd(x[[e]][[f]])$d))*(as.numeric(substr((names(x[[e]][f])),1,nchar(names(x[[e]][f]))-1))/(sum(as.numeric(sapply(names(x[[e]][which(lapply(x[[e]],function(x){length(svd(x)$d)})>5)]),function(x){(substr(x,1,nchar(x)-1))})))))
          lista.prop.quelites[[e]]<-lista.prop.quelites.e
        }
        if(sum(x[[e]][[f]]["Quelites",])==0){
          lista.prop.quelites.e[[f]]<-0
          lista.prop.quelites[[e]]<-lista.prop.quelites.e
        }
        # Joint criterion: pooled mature/juvenile ratio across all species.
        lista.prop.conj.e[[f]]<-((sum(x[[e]][[f]]["MaizG",])+sum(x[[e]][[f]]["FrijolEG",])+sum(x[[e]][[f]]["CalabazaG",])+sum(x[[e]][[f]]["Quelites",]))/(sum(x[[e]][[f]]["MaizJ",])+sum(x[[e]][[f]]["FrijolE",])+sum(x[[e]][[f]]["CalabazaJ",])+length(svd(x[[e]][[f]])$d)))*(as.numeric(substr((names(x[[e]][f])),1,nchar(names(x[[e]][f]))-1))/(sum(as.numeric(sapply(names(x[[e]][which(lapply(x[[e]],function(x){length(svd(x)$d)})>5)]),function(x){(substr(x,1,nchar(x)-1))})))))
        lista.prop.conj[[e]]<-lista.prop.conj.e
      }else{
        # Attractor too small: record NA under every criterion.
        lista.prop.mzGJ.e[[f]]<-NA
        lista.prop.mzGJ[[e]]<-lista.prop.mzGJ.e
        lista.prop.freGA.e[[f]]<-NA
        lista.prop.freGA[[e]]<-lista.prop.freGA.e
        lista.prop.cbGJ.e[[f]]<-NA
        lista.prop.cbGJ[[e]]<-lista.prop.cbGJ.e
        lista.prop.quelites.e[[f]]<-NA
        lista.prop.quelites[[e]]<-lista.prop.quelites.e
        lista.prop.conj.e[[f]]<-NA
        lista.prop.conj[[e]]<-lista.prop.conj.e
        next
      }
    }
  }
  # Per criterion: mean over ensembles of the summed weighted proportions,
  # plus the variance of those sums divided by the number of ensembles.
  PropPond.MzG_MzJ<-c(mean(sapply(lista.prop.mzGJ,function(x){sum(unlist(x),na.rm=T)})),var(sapply(lista.prop.mzGJ,function(x){sum(unlist(x),na.rm=T)}))/length(lista.prop.mzGJ))
  PropPond.FreG_Fre<-c(mean(sapply(lista.prop.freGA,function(x){sum(unlist(x),na.rm=T)})),var(sapply(lista.prop.freGA,function(x){sum(unlist(x),na.rm=T)}))/length(lista.prop.freGA))
  PropPond.CbG_CbJ<-c(mean(sapply(lista.prop.cbGJ,function(x){sum(unlist(x),na.rm=T)})),var(sapply(lista.prop.cbGJ,function(x){sum(unlist(x),na.rm=T)}))/length(lista.prop.cbGJ))
  PropPond.quelites<-c(mean(sapply(lista.prop.quelites,function(x){sum(unlist(x),na.rm=T)})),var(sapply(lista.prop.quelites,function(x){sum(unlist(x),na.rm=T)}))/length(lista.prop.quelites))
  PropPond.conj<-c(mean(sapply(lista.prop.conj,function(x){sum(unlist(x),na.rm=T)})),var(sapply(lista.prop.conj,function(x){sum(unlist(x),na.rm=T)}))/length(lista.prop.conj))
  prop<-list(PropPond.MzG_MzJ,PropPond.FreG_Fre,PropPond.CbG_CbJ,PropPond.quelites,PropPond.conj)
  names(prop)<-c("MzG_MzJ","FreG_Fre","CbG_CbJ","quelites","conj")
  return(prop)
}
|
323109fe8723eaa966af19959f31fd1d0004bada
|
01b14f97b730f8a05c74d04c3779c7c21ee599de
|
/man/leave.Rd
|
e024e3b86144dda36f1d3d1b65c34b71b2983f7f
|
[] |
no_license
|
sifuentesrigo/simmer
|
8fdbdc1dac485169a248cff4c5faff0e342154db
|
72e73591e4af1152706c43936294efd579d7b586
|
refs/heads/master
| 2021-01-21T23:21:00.594696
| 2017-06-05T15:45:42
| 2017-06-05T15:45:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 404
|
rd
|
leave.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/trajectory-activities.R
\name{leave}
\alias{leave}
\title{Add a leave activity}
\usage{
leave(.trj, prob)
}
\arguments{
\item{.trj}{the trajectory object.}
\item{prob}{a probability or a function returning a probability.}
}
\value{
Returns the trajectory object.
}
\description{
Leave the trajectory with some probability.
}
|
c921f88c4d03ba5b8c2067ae3406d8bcb6d55faa
|
f70b257d50e83ee8308398ca8ca6fa369316c5c5
|
/project_summary/r proj.R
|
cf7625d947d595d129bb806dc08e4e5c8273d81c
|
[] |
no_license
|
MeilingDai/rmarkdown_template_stephen
|
2f2db1cea8c55beb617aeffad2b0d8f3f5c51b3c
|
04c774923e8cd5e0e10289b33d35475d5725cdca
|
refs/heads/master
| 2020-09-20T15:58:53.726612
| 2019-11-27T11:21:42
| 2019-11-27T11:21:42
| 224,101,353
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,095
|
r
|
r proj.R
|
# Load, tidy and join the field-horse microRNA count data, then plot.
library("tidyverse")
read_csv("data/field_horses_metadata.csv")
field_horses_metadata <- read_csv("data/field_horses_metadata.csv")
field_horses_metadata
read_csv("data/field_horses_raw_counts.csv")
field_horses_raw_counts <- read_csv("data/field_horses_raw_counts.csv")
field_horses_raw_counts
field_horses_raw_counts_modified <- read_csv("data/field_horses_raw_counts_modified.csv")
field_horses_raw_counts_modified
field_horses_metadata_modified <- read_csv("data/field_horses_metadata_modified.csv")
field_horses_metadata_modified
# Reshape wide counts (one column per horse) to long form.
tidy_field_horses_raw_counts <- field_horses_raw_counts_modified %>%
  gather(key = horse_id, value = counts, -gene)
tidy_field_horses_raw_counts
# Attach per-horse metadata (infection status etc.) to the long counts.
full_data <- full_join(tidy_field_horses_raw_counts, field_horses_metadata_modified)
full_data
write.csv(full_data,"data/field_horses_full_data.csv")
# Boxplot + jitter of raw counts per horse, colored by infection status.
full_data %>% ggplot (aes(x = horse_id,
                          y = counts,
                          color = infection,
                          group = horse_id))+
  geom_boxplot() +
  geom_jitter()+
  scale_y_log10()+
  scale_color_manual(values = c(mock = "grey", infected = "red")) +
  theme_bw() +
  labs(title = "microRNA raw counts of field horses")
# NOTE(review): this re-read duplicates the load above -- confirm it is needed.
field_horses_raw_counts <- read_csv("data/field_horses_raw_counts.csv")
field_horses_raw_counts
#use geom_boxplot and geom_jitter to plot microRNA raw counts of field horses
# NOTE(review): this plot repeats the one above verbatim.
full_data %>% ggplot (aes(x = horse_id,
                          y = counts,
                          color = infection,
                          group = horse_id))+
  geom_boxplot() +
  geom_jitter()+
  scale_y_log10()+
  scale_color_manual(values = c(mock = "grey", infected = "red")) +
  theme_bw() +
  labs(title = "microRNA raw counts of field horses")
#use geom_boxplot and geom_jitter to plot microRNA raw counts of field horses with raw counts > 10,000.
# BUG FIX: the filtered frame was assigned to `aw_count_more_than_10000`
# (typo) but the plot referenced `raw_count_more_than_10000`, which would
# not exist; use one consistent name.
raw_count_more_than_10000 <- full_data %>%
  filter(counts > 10000)
raw_count_more_than_10000 %>% ggplot(aes(x = horse_id,
                                         y = counts,
                                         color = infection,
                                         group = horse_id))+
  geom_boxplot() +
  geom_jitter()+
  scale_y_log10()+
  scale_color_manual(values = c(mock = "grey", infected = "red")) +
  theme_bw() +
  labs(title = "microRNA raw counts more than 1000 of 6 horses")
#PCA analysis
# Genes become rownames so scale() standardises each remaining (horse) column;
# the matrix here is genes x horses.
field_horses_raw_counts_PCA <- field_horses_raw_counts %>%
  column_to_rownames("gene") %>%
  scale()
# Perform the PCA on the already scaled heat_stress data
microRNA_PCA <- prcomp(field_horses_raw_counts_PCA)
# Check the summary
summary(microRNA_PCA)
plot(microRNA_PCA)
# NOTE(review): `gene` holds gene identifiers; `gene > 0` compares them as
# strings against 0 — confirm this filter does what was intended.
data_1 <- field_horses_raw_counts %>%
  filter(gene > 0) %>%
  column_to_rownames("gene")
data_1
# Transpose the rows and columns
data_2 <- t(data_1)
data_2
head(data_2)
# Remove first column from data
data_without_gene <- field_horses_raw_counts[,-(1)]
data_without_gene
# Store GeneID as rownames
rownames(data_without_gene) <- field_horses_raw_counts$gene
View(data_without_gene)
head(data_without_gene)
|
d8976a343563de89945cb775b808f843e2afab2d
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/lqr/examples/best.lqr.Rd.R
|
bfa3575ad18a037f61530d8680dc3419ddd74d47
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 612
|
r
|
best.lqr.Rd.R
|
library(lqr)
### Name: best.lqr
### Title: Best Fit in Robust Linear Quantile Regression
### Aliases: best.lqr
### Keywords: package quantile regression skew
### ** Examples
## Not run:
##D data(crabs,package = "MASS")
##D
##D crabs$sex <- as.character(crabs$sex)
##D crabs$sex[crabs$sex=="M"]=0
##D crabs$sex[crabs$sex=="F"]=1
##D crabs$sex = as.numeric(crabs$sex)
##D
##D attach(crabs)
##D
##D ##Setting
##D y <- BD
##D x <- cbind(1,FL,sex)
##D
##D #Finding the best model for the 3rd quartile based on Akaike criterion
##D res = best.lqr(y, x, p = 0.75, criterion = "AIC")
##D
## End(Not run)
|
f7222773842e1bd14bd84057fb0ed965b2608578
|
a3018c84b625e2a15876509c21428e225fa02385
|
/Sepsis_Reports.R
|
24982dadaf031f270c213f6eb3055ecd5c96cb6c
|
[] |
no_license
|
dhidru/ED_code
|
ab850e2fc4384ef6a1105e9ec06264f0c8d42edc
|
e32a67c4b6aaafaa74cec3f7eb08a890cf865805
|
refs/heads/master
| 2020-08-07T10:03:29.859837
| 2019-09-04T20:30:20
| 2019-09-04T20:30:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,244
|
r
|
Sepsis_Reports.R
|
library(plotROC)
library(dplyr)
require(gridExtra)
library(pROC)
library(lubridate)
library(reshape2)
library(scales)
library(RColorBrewer)
library(data.table)
# load and process data
path <- "./data/EPIC_DATA/"
# ============ 1. Load Files ================= #
# a. Get Nurses files
RN_files = Sys.glob(paste0(path, "Sepsis_Reports/*RN.csv")); RN_files
# First apply read.csv, then rbind
RN_reports = do.call(rbind, lapply(RN_files, function(x) read.csv(x, stringsAsFactors = FALSE)))
# Arrival timestamps are "dd/mm/yy HHMM"; rows are then sorted chronologically.
RN_reports$Arrived <- as.POSIXct(RN_reports$Arrived, tz="EST", format="%d/%m/%y %H%M")
RN_reports <- RN_reports[order(RN_reports$Arrived),]
# b. Get doctors files
MD_files = Sys.glob(paste0(path, "/Sepsis_Reports/*MD.csv")); MD_files
MD_reports = do.call(rbind, lapply(MD_files, function(x) read.csv(x, stringsAsFactors = FALSE)))
MD_reports$Arrived <- as.POSIXct(MD_reports$Arrived, tz="EST", format="%d/%m/%y %H%M")
MD_reports <- MD_reports[order(MD_reports$Arrived),]
# c. Get EPIC file
EPIC <- fread(paste0(path, "EPIC.csv"))
EPIC$Arrived <- as.POSIXct(EPIC$Arrived, tz="EST", format="%d/%m/%y %H%M")
EPIC <- EPIC[order(EPIC$Arrived),]
EPIC <- EPIC[!is.na(EPIC$Arrived),]
EPIC$Month <- factor(month.abb[month(EPIC$Arrived)])
# Flag visits mentioning "sepsis"/"Sepsis" in any of the three diagnosis fields.
diagnosis <- grepl('*(S|s)epsis*', EPIC$Diagnosis); length(diagnosis)
diagnoses <- grepl('*(S|s)epsis*', EPIC$Diagnoses); length(diagnoses)
primary.dx <- grepl('*(S|s)epsis*', EPIC$Primary.Dx); length(primary.dx)
potential.Sepsis <- EPIC[(diagnosis | diagnoses | primary.dx),c("MRN", "CSN", "Diagnosis", "Diagnoses", "Primary.Dx")]
dim(potential.Sepsis)
nrow(potential.Sepsis[grepl("epsis", potential.Sepsis$Primary.Dx),])
# Cross-tabulate cases where sepsis appears in Diagnosis/Diagnoses but not in
# the primary diagnosis.
table(potential.Sepsis[grepl("(S|s)epsis", potential.Sepsis$Diagnosis) &
                         !grepl("(S|s)epsis", potential.Sepsis$Primary.Dx),c("Diagnoses", "Primary.Dx")])
table(potential.Sepsis[grepl("(S|s)epsis", potential.Sepsis$Diagnoses) &
                         !grepl("(S|s)epsis", potential.Sepsis$Primary.Dx),c("Diagnoses", "Primary.Dx")])
# Normalise the mixed-unit age strings ("m.o.", "y.o.", "days", "wk.o") into
# a numeric age in years.
EPIC$Age.at.Visit <- as.character(EPIC$Age.at.Visit)
month.indicies <- grep("m.o", EPIC$Age.at.Visit)
year.indicies <- grep("y.o", EPIC$Age.at.Visit)
day.indicies <- grep("days", EPIC$Age.at.Visit)
week.indicies <- grep("wk.o", EPIC$Age.at.Visit)
EPIC$Age.at.Visit[month.indicies] <- as.numeric(gsub("m\\.o\\.", "", EPIC$Age.at.Visit[month.indicies]))/12
EPIC$Age.at.Visit[year.indicies] <- as.numeric(gsub("y\\.o\\.", "", EPIC$Age.at.Visit[year.indicies]))
EPIC$Age.at.Visit[day.indicies] <- as.numeric(gsub("days", "", EPIC$Age.at.Visit[day.indicies])) / 365
EPIC$Age.at.Visit[week.indicies] <- as.numeric(gsub("wk.o", "", EPIC$Age.at.Visit[week.indicies])) / 52
EPIC$Age.at.Visit <- as.numeric(EPIC$Age.at.Visit)
# filter RN_reports and MD_reports to end where EPIC data ends
max.date <- min(c(EPIC$Arrived[which.max(EPIC$Arrived)],
                  RN_reports$Arrived[which.max(RN_reports$Arrived)],
                  MD_reports$Arrived[which.max(MD_reports$Arrived)])); max.date
EPIC <- EPIC %>% dplyr::filter(Arrived <= max.date)
RN_reports <- RN_reports %>% dplyr::filter(Arrived <= max.date)
MD_reports <- MD_reports %>% dplyr::filter(Arrived <= max.date)
# filter all data to start in october ---> when tools actually started working!
min.date <- max(c(EPIC$Arrived[which.min(EPIC$Arrived)],
                  RN_reports$Arrived[which.min(RN_reports$Arrived)],
                  MD_reports$Arrived[which.min(MD_reports$Arrived)])); min.date
EPIC <- EPIC %>% dplyr::filter(Arrived >= min.date)
RN_reports <- RN_reports %>% dplyr::filter(Arrived >= min.date)
MD_reports <- MD_reports %>% dplyr::filter(Arrived >= min.date)
# Find Sepsis labels --> need to change to correct label!
# "Diagnosed" subsets: encounters whose Diagnosis field mentions sepsis.
RN_diagnosed_sepsis <- RN_reports[grep('*(S|s)epsis*', RN_reports$Diagnosis),]; dim(RN_diagnosed_sepsis)
MD_diagnosed_sepsis <- MD_reports[grep('*(S|s)epsis*', MD_reports$Diagnosis),]; dim(MD_diagnosed_sepsis)
EPIC_diagnosed_sepsis <- EPIC[grep('*(S|s)epsis*', EPIC$Diagnosis, useBytes = TRUE),]; dim(EPIC_diagnosed_sepsis)
# Tool "predictions": an alert fired iff the encounter (CSN) appears in the
# corresponding report file; "True_Sepsis" labels come from the diagnosis text.
EPIC$RN_prediction <- ifelse(EPIC$CSN %in% RN_reports$CSN, 1, 0)
EPIC$RN_True_Sepsis <- ifelse(EPIC$CSN %in% RN_diagnosed_sepsis$CSN, 1, 0)
EPIC$MD_prediction <- ifelse(EPIC$CSN %in% MD_reports$CSN, 1, 0)
EPIC$MD_True_Sepsis <- ifelse(EPIC$CSN %in% MD_diagnosed_sepsis$CSN, 1, 0)
EPIC$True_Sepsis <- ifelse(EPIC$CSN %in% EPIC_diagnosed_sepsis$CSN, 1, 0)
# Remove super young false negatives as their Sepsis labels could be incorrect
tool.false.negs <- (EPIC %>% dplyr::filter(RN_prediction==0 &
                                             MD_prediction==0 &
                                             True_Sepsis==1 &
                                             Age.at.Visit <= 0.33))$CSN; tool.false.negs
EPIC <- EPIC %>% dplyr::filter(!CSN %in% tool.false.negs)
RN_diagnosed_sepsis <- RN_diagnosed_sepsis %>% dplyr::filter(!CSN %in% tool.false.negs)
MD_diagnosed_sepsis <- MD_diagnosed_sepsis %>% dplyr::filter(!CSN %in% tool.false.negs)
EPIC_diagnosed_sepsis <- EPIC_diagnosed_sepsis %>% dplyr::filter(!CSN %in% tool.false.negs)
#check for differences between RN and MD tool firing
RN_MD_Discrepency_CSN <- setdiff(RN_diagnosed_sepsis$CSN, MD_diagnosed_sepsis$CSN); RN_MD_Discrepency_CSN # 3 alerts fired by nurses tool and missed by MD tool
MD_RN_Discrepency_CSN <- setdiff(MD_diagnosed_sepsis$CSN, RN_diagnosed_sepsis$CSN); MD_RN_Discrepency_CSN # no patients fired by MD tool and missed by RN tool
# BUG FIX: EPIC_RN_Discrepency_CSN / EPIC_MD_Discrepency_CSN were used below
# without ever being defined. Define them as the sepsis encounters (per the
# EPIC diagnoses) missed by the RN / MD reports, which is what the result
# comments describe.
EPIC_RN_Discrepency_CSN <- setdiff(EPIC_diagnosed_sepsis$CSN, RN_diagnosed_sepsis$CSN); EPIC_RN_Discrepency_CSN
EPIC_MD_Discrepency_CSN <- setdiff(EPIC_diagnosed_sepsis$CSN, MD_diagnosed_sepsis$CSN); EPIC_MD_Discrepency_CSN
EPIC %>% filter(CSN %in% EPIC_RN_Discrepency_CSN) %>% select(MRN, CSN, Diagnosis)
# No RN alert fired for 37 patients who had Sepsis
EPIC %>% filter(CSN %in% EPIC_MD_Discrepency_CSN) %>% select(MRN, CSN, Diagnosis)
# No MD alert fired for 40 patients who had Sepsis
nrow(EPIC_diagnosed_sepsis)
# Stats
# Summarise a 0/1 classifier against 0/1 labels: confusion counts, error
# rates, sensitivity/specificity and AUROC (computed with pROC's roc()/auc()).
# Returns a one-row data.frame.
generateStats <- function(predictions, labels) {
  tp <- sum(predictions == 1 & labels == 1)
  tn <- sum(predictions == 0 & labels == 0)
  fp <- sum(predictions == 1 & labels == 0)
  fn <- sum(predictions == 0 & labels == 1)
  data.frame(
    TP = tp, TN = tn, FP = fp, FN = fn,
    FPR = fp / (fp + tn),
    FNR = fn / (fn + tp),
    Sensitivity = tp / (tp + fn),
    Specificity = tn / (tn + fp),
    AUROC = auc(roc(labels, predictions))
  )
}
# Sanity-check cohort sizes.
nrow(EPIC); nrow(RN_diagnosed_sepsis); nrow(MD_diagnosed_sepsis); nrow(EPIC_diagnosed_sepsis)
# MD predictions
# Performance of the MD tool against (a) MD-report labels and (b) EPIC labels.
MDSepsis_MD_Predictions <- generateStats(EPIC$MD_prediction, EPIC$MD_True_Sepsis); MDSepsis_MD_Predictions
TrueSepsis_MD_Predictions <- generateStats(EPIC$MD_prediction, EPIC$True_Sepsis); TrueSepsis_MD_Predictions
# RN predictions
RNSepsis_RN_Predictions <- generateStats(EPIC$RN_prediction, EPIC$RN_True_Sepsis); RNSepsis_RN_Predictions
TrueSepsis_RN_Predictions <- generateStats(EPIC$RN_prediction, EPIC$True_Sepsis); TrueSepsis_RN_Predictions
# ROC plots
# RN predictions
ggplot(EPIC, aes(d=RN_True_Sepsis, m = RN_prediction)) + geom_roc(n.cuts=10) + style_roc()
ggplot(EPIC, aes(d=True_Sepsis, m = RN_prediction)) + geom_roc() + style_roc()
# MD predictions
ggplot(EPIC, aes(d=MD_True_Sepsis, m = MD_prediction)) + geom_roc() + style_roc()
ggplot(EPIC, aes(d=True_Sepsis, m = MD_prediction)) + geom_roc() + style_roc()
# If you want to create some disturbance around age so not plotted as discrete
# set.seed(0)
# EPIC$Age.at.Visit.Disturbed <- EPIC$Age.at.Visit + rnorm(nrow(EPIC), mean = 0, sd = 0.2)
# EPIC$Age.at.Visit.Disturbed[EPIC$Age.at.Visit.Disturbed<0] <- 0.1
# plot
# Toggle: classify outcomes using either tool (TRUE) or the MD tool alone (FALSE).
plot.with.RN <- TRUE##FALSE#
if (plot.with.RN) {
  EPIC.FN <- EPIC$RN_prediction==0 & EPIC$MD_prediction==0 & EPIC$True_Sepsis==1
  EPIC.TP <- (EPIC$RN_prediction==1 | EPIC$MD_prediction==1) & EPIC$True_Sepsis==1
  EPIC.FP <- (EPIC$RN_prediction==1 | EPIC$MD_prediction==1) & EPIC$True_Sepsis==0
} else {
  # NOTE(review): FP uses RN_prediction although this branch is the "MD only"
  # view (TP/FN use MD_prediction) — confirm this is intentional.
  EPIC.FN <- EPIC$MD_prediction==0 & EPIC$True_Sepsis==1
  EPIC.TP <- EPIC$MD_prediction==1 & EPIC$True_Sepsis==1
  EPIC.FP <- EPIC$RN_prediction==1 & EPIC$True_Sepsis==0
}
# Create Colour column
EPIC$Colour <- "No Sepsis"
EPIC$Colour[EPIC.FN] <- "Sepsis Not Detected (False Negative)"
EPIC$Colour[EPIC.TP] <- "Sepsis Correctly Detected"
EPIC$Colour[EPIC.FP] <- "Incorrectly Flagged As Sepsis (False Positive)"
# Keep true-sepsis encounters plus any encounter either tool flagged.
plot.data <- EPIC %>% filter(True_Sepsis == 1 | (True_Sepsis == 0 & (RN_prediction == 1 | MD_prediction == 1)))
Tool <- factor(plot.data$Colour, levels=c("Incorrectly Flagged As Sepsis (False Positive)",
                                          "Sepsis Correctly Detected",
                                          "Sepsis Not Detected (False Negative)"))
plot.data.long <- table(plot.data[,c("Month", "Colour")])
plot.data.long <- melt(plot.data.long, id="Month")
# plot another version with no False Positives
plot.data.no.FPs <- EPIC %>% filter(True_Sepsis == 1)
Tool.no.FPs <- factor(plot.data.no.FPs$Colour, levels=c("Sepsis Correctly Detected",
                                                        "Sepsis Not Detected (False Negative)"))
plot.data.long.no.FPs <- table(plot.data.no.FPs[,c("Month", "Colour")])
plot.data.long.no.FPs <- melt(plot.data.long.no.FPs, id="Month")
# order correctly
plot.data.long$Month <- factor(plot.data.long$Month, levels = month.abb)
plot.data.long.no.FPs$Month <- factor(plot.data.long.no.FPs$Month, levels = month.abb)
# plots
# Scatter of age vs arrival date coloured by alert outcome.
# (Title typo "Wihtout" is a runtime string and left unchanged here.)
all.points <- qplot(plot.data$Arrived, plot.data$Age.at.Visit, colour=Tool, size=I(4)) +
  ylab("Age at Visit (years)") + xlab("Date Arrived") + theme_bw() +
  scale_color_manual(values=c("red2", "green3", "purple2"), name = "") +
  theme(legend.position="bottom") +
  ylim(-0.5, 18.5) +
  scale_fill_manual(values = alpha(c("gray30", "gray50", "gray70"), .8)) +
  ggtitle("Outcome of Sepsis Alerts: Patients Wihtout Sepsis Incorrectly Identified, Patients With Sepsis Correctly \nIdentified and Patients With Sepsis Missed By Current RN and MD Tools"); all.points
no.FPs <- qplot(plot.data.no.FPs$Arrived, plot.data.no.FPs$Age.at.Visit, colour=Tool.no.FPs, size=I(4)) +
  ylab("Age at Visit (years)") + xlab("Date Arrived") + theme_bw() +
  scale_color_manual(values=c("green3", "purple2"), name = "") +
  theme(legend.position="bottom") +
  ylim(-0.5, 18.5) +
  scale_fill_manual(values = alpha(c("gray30", "gray50", "gray70"), .8)) +
  ggtitle("Outcome of Sepsis Alerts: Patients With Sepsis Correctly Identified and Patients With Sepsis\nMissed By Current RN and MD Tools"); no.FPs
# two below were going to be used in a paper
# --> looks strange because currently Sepsis Report data only runs Oct --> Feb
# Monthly outcome mix as percentages...
bar.percent <- ggplot(plot.data.long, aes(x = Month, y = value, fill = Colour)) +
  geom_bar(position = "fill",stat = "identity") +
  scale_y_continuous(labels = percent_format()) + theme_bw() +
  ylab("Percentage") + ggtitle("Outcome of Sepsis Alerts") +
  scale_fill_manual(values=c("red2", "green3", "purple2"), name = "") +
  theme(legend.position="bottom",
        legend.text=element_text(size=22),
        plot.title = element_text(size=26),
        axis.text=element_text(size=22),
        axis.title=element_text(size=24,face="bold"),
        axis.title.y = element_text(margin = margin(t = 0, r = 30, b = 0, l = 0)),
        axis.title.x = element_text(margin = margin(t = 30, r = 0, b = 0, l = 0))) +
  guides(fill=guide_legend(ncol=1,
                           keywidth=0.4,
                           keyheight=0.4,
                           default.unit="inch")); bar.percent
# ... and the same data as absolute counts.
bar.count <- ggplot(plot.data.long, aes(x = Month, y = value, fill = Colour)) +
  geom_bar(stat = "identity") + theme_bw() +
  ylab("Number of Patients") + ggtitle("Outcome of Sepsis Alerts") +
  scale_fill_manual(values=c("red2", "green3", "purple2"), name = "") +
  theme(legend.position="bottom",
        legend.text=element_text(size=24),
        plot.title = element_text(size=26),
        axis.text=element_text(size=24),
        axis.title=element_text(size=24,face="bold"),
        axis.title.y = element_text(margin = margin(t = 0, r = 30, b = 0, l = 0)),
        axis.title.x = element_text(margin = margin(t = 30, r = 0, b = 0, l = 0))) +
  guides(fill=guide_legend(ncol=1,
                           keywidth=0.4,
                           keyheight=0.4,
                           default.unit="inch")); bar.count
|
c1d11e3073c483b74a7eefc004230ce397e11284
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/CRPClustering/vignettes/CRPClustering-vignette.R
|
65d38e0d87be6f089e470628d20c84a81b0c67b9
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 71
|
r
|
CRPClustering-vignette.R
|
### This is an R script tangled from 'CRPClustering-vignette.pdf.asis'
|
2764ae34c886b4302c9af3e47f14a6b8172c22f4
|
62351b2ae150595a926b379adf17c23aa4c44f56
|
/R/wool_weigth_needed.R
|
1dba487dea3c3589f7ed5d3a8b7b212fdd872d75
|
[] |
no_license
|
Vaugoyeau/tricot
|
a3f0bb98200e9b73232c141794e7059ef27f2ef8
|
a4f3461fa08cfa4dfcf9b6ef66e9c4d7c2b7f1f6
|
refs/heads/master
| 2023-03-18T08:27:51.579653
| 2021-02-27T21:18:28
| 2021-02-27T21:18:28
| 335,958,095
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,957
|
r
|
wool_weigth_needed.R
|
#' Calculation of the wool quantity needed
#'
#' @param img_k ggplot image from knitting_image()
#' @param rg Row number to have 10 cm
#' @param m Stitch number to have 10 cm
#' @param p Weight (g) to make the test square of 10*10
#' @inheritParams dplyr::arrange
#' @inheritParams magick::image_raster
#' @inheritParams magick::image_scale
#' @inheritParams ggplot2::ggplot
#' @inheritParams ggplot2::aes
#' @inheritParams ggplot2::geom_point
#' @inheritParams ggplot2::scale_color_manual
#' @inheritParams dplyr::distinct
#' @inheritParams ggplot2::theme
#' @inheritParams ggplot2::xlab
#' @inheritParams ggplot2::ylab
#'
#' @return
#' Table with wool weight needed by color
#' @export
#'
#' @examples
#'
#' tricot::wool_weigth_needed(
#' tricot::knitting_image(
#' tricot::image_load(
#' here::here("img", "Montagne.pdf")
#' ),
#' tricot::grid_size(
#' 20,
#' 30,
#' tricot::square_size(
#' 35,
#' 26
#' )
#' )
#' ),
#' 35,
#' 26,
#' 100
#' )
#'
wool_weigth_needed <- function(img_k, rg, m, p) {
  # BUG FIX: the previous check, `sum(class(img_k) != "ggplot") == 0`, stopped
  # precisely when img_k's class was exactly "ggplot" and let every other
  # object through (a real ggplot has class c("gg", "ggplot"), so the sum is
  # never 0). inherits() implements the intended validation.
  if (!inherits(img_k, "ggplot")) {stop("img_k must be ggplot")}
  if (!is.numeric(rg)) {stop("rg should be numeric")}
  if (length(rg) != 1) {stop("rg should have only one value")}
  if (!is.numeric(m)) {stop("m should be numeric")}
  if (length(m) != 1) {stop("m should have only one value")}
  if (!is.numeric(p)) {stop("p should be numeric")}
  if (length(p) != 1) {stop("p should have only one value")}
  # Count stitches per colour (`col` column of the chart data), then convert
  # n stitches into grams via the 10x10 cm gauge square: p g for rg x m stitches.
  table <- dplyr::mutate(
    dplyr::count(
      img_k$data,
      col
    ),
    "wool_needed (g)" = round(n * p / (rg * m), digits = 2)
  )
  # Render as a datatable with each row shaded in its own colour.
  table_couleur <-
    DT::formatStyle(
      DT::datatable(
        table,
        rownames = FALSE
      ),
      "col",
      backgroundColor =
        DT::styleEqual(
          dplyr::distinct(table, col)$col,
          dplyr::distinct(table, col)$col
        )
    )
  return(table_couleur)
}
|
56685db0927c8dcb4d09ae7404394b6ebcfac3cc
|
2948fc466c0935a4d4d07b8dfc79ee564034aac6
|
/replications/Rastogi_2016/R/rq3_time1-3.R
|
c0beae86e835b7f89153351ad6743eeb5d38e8d7
|
[
"MIT"
] |
permissive
|
micheledinanni/Psychometric-tools-benchmark
|
5a0f8514e1a144787039c62f184a1af7d4fc5283
|
f9074c77c2a6151051a59853c19ce79ade276da7
|
refs/heads/master
| 2020-05-04T08:43:24.907430
| 2019-04-17T09:38:59
| 2019-04-17T09:38:59
| 179,052,341
| 1
| 1
| null | 2019-04-02T10:16:48
| 2019-04-02T10:16:48
| null |
UTF-8
|
R
| false
| false
| 2,014
|
r
|
rq3_time1-3.R
|
# Opens the database connection (creates `con`).
source(file="connectTodb.R")
# Import the personality score data
query="select * from year1_cc"
year1_cc<-dbGetQuery(con,query)
query="select * from year1_pr"
year1_pr<-dbGetQuery(con,query)
query="select * from year3_cc"
year3_cc<-dbGetQuery(con,query)
query="select * from year3_pr"
year3_pr<-dbGetQuery(con,query)
rm(con,query)
# Treat user ids as factors before stacking the two cohorts for each year.
year1_pr$user_id=as.factor(year1_pr$user_id)
year1_cc$user_id=as.factor(year1_cc$user_id)
year3_pr$user_id=as.factor(year3_pr$user_id)
year3_cc$user_id=as.factor(year3_cc$user_id)
year1<-rbind(year1_cc,year1_pr)
year3<-rbind(year3_cc,year3_pr)
# Per-user mean Big-Five scores (O, C, E, A, N) for each year.
first_year<-aggregate(cbind(O,C,E,A,N)~user_id,data=year1,mean)
third_year<-aggregate(cbind(O,C,E,A,N)~user_id,data=year3,mean)
# NOTE(review): truncating to 374 rows presumably matches the year-1 sample
# size so the paired t-tests below line up row-by-row; this assumes both
# frames are sorted identically — confirm.
third_year<-head(third_year,374)
s_r13<-rbind(cbind(first_year,type="y1"),cbind(third_year,type="y3"))
# Side-by-side boxplots of each trait, year 1 vs year 3.
par(mfrow=c(1,5))
boxplot(O~type,data=s_r13,notch=T,outline=F,xlab="O")
boxplot(C~type,data=s_r13,notch=T,outline=F,xlab="C")
boxplot(E~type,data=s_r13,notch=T,outline=F,xlab="E")
boxplot(A~type,data=s_r13,notch=T,outline=F,xlab="A")
boxplot(N~type,data=s_r13,notch=T,outline=F,xlab="N")
# Mean values per trait and year
mean(first_year$O)
mean(third_year$O)
mean(first_year$C)
mean(third_year$C)
mean(first_year$E)
mean(third_year$E)
mean(first_year$A)
mean(third_year$A)
mean(first_year$N)
mean(third_year$N)
# Standard deviations
sd(first_year$O)
sd(third_year$O)
sd(first_year$C)
sd(third_year$C)
sd(first_year$E)
sd(third_year$E)
sd(first_year$A)
sd(third_year$A)
sd(first_year$N)
sd(third_year$N)
# Paired t-tests: year-1 vs year-3 trait means.
t.test(first_year$O,third_year$O,paired = T)
t.test(first_year$C,third_year$C,paired = T)
t.test(first_year$E,third_year$E,paired = T)
t.test(first_year$A,third_year$A,paired = T)
t.test(first_year$N,third_year$N,paired = T)
# Cohen's d: standardized difference between the two sample means
library(lsr)
cohensD(first_year$O,third_year$O)
cohensD(first_year$C,third_year$C)
cohensD(first_year$E,third_year$E)
cohensD(first_year$A,third_year$A)
cohensD(first_year$N,third_year$N)
|
bcfee18fab210335e3afe6ee7a9131e063555182
|
5c5d9d2dc10df401a70e525a742755fde71ac646
|
/LD heatmap of Vole SNPs_before.R
|
59603cbc099dbcf45bc13f35a520019bd08abb49
|
[] |
no_license
|
dcossyleon/Vole_Project
|
96ef5424f7c6907e11a43854d560a29348940ec1
|
b7e3566c2a0a719e5ea2eb74bbfb5b64ba334e14
|
refs/heads/master
| 2020-04-24T17:19:34.640061
| 2019-03-08T22:52:44
| 2019-03-08T22:52:44
| 172,139,598
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 733
|
r
|
LD heatmap of Vole SNPs_before.R
|
#Feb, 28, 2019
#LD heatmap of Vole SNPs
# Exploratory script: pairwise linkage-disequilibrium heatmap for vole SNPs.
#install package SnpStats in bioConductor
# if (!requireNamespace("BiocManager", quietly = TRUE))
#   install.packages("BiocManager")
# BiocManager::install("snpStats", version = "3.8")
# NOTE(review): install.packages() at the top of a script reinstalls on every
# run — usually better commented out once installed.
install.packages("LDheatmap")
library(LDheatmap)
# Inspect the package's example CEU data.
str(data(CEUData))
str(CEUSNP)
CEUData
g # df of SNP data from geno.R --> says it is not a genotype object
# All LDheatmap() arguments are spelled out at their defaults except the
# input `g`; per the note above, `g` is not accepted as a genotype object.
LDheatmap(g, genetic.distances=NULL, distances="physical",
          LDmeasure="r", title="Pairwise LD", add.map=TRUE, add.key=TRUE,
          geneMapLocation=0.15, geneMapLabelX=NULL, geneMapLabelY=NULL,
          SNP.name=NULL, color=NULL, newpage=TRUE,
          name="ldheatmap", vp.name=NULL, pop=FALSE, flip=NULL, text=FALSE)
head(g)
f3222063ffd41389ef67262cc1cc700118c8fdc7
|
c2c98c7973f7723f467f58d284f45b6126497e5b
|
/hmm/hist-length.r
|
fc8ac4a2413f2a976e31fcee35af320e4b17a815
|
[] |
no_license
|
EnzymeFunctionInitiative/GNT
|
57e5123eda5d1c26e9c2bfbcde12d22d5105f302
|
c8edb13e30a17f6af8feb9d4c3962ca5ba90881b
|
refs/heads/master
| 2023-04-22T22:46:01.904171
| 2023-02-02T03:35:19
| 2023-02-02T03:35:19
| 91,615,525
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,005
|
r
|
hist-length.r
|
# Command-line driven bar plot of "number of sequences at each length".
# Usage: Rscript hist-length.r <type> <data_file> <png_file> [jobnum]
#        [extra_text] [width] [height]
args <- commandArgs(trailingOnly = TRUE)
type = args[1]
arg_offset = 0
data_file = ""
data_file = args[2]
png_file = args[3]
data_dir = ""
start = 0
stop = 0
# Input: tab-separated, first column = length labels, remaining columns = counts.
raw_data = read.delim(data_file, header=FALSE, sep="\t", check.names = FALSE)
cols = raw_data[, 1]
plot_data = t(raw_data[, -1])
arg_offset = 3
jobnum = ""
extra_text = ""
# NOTE(review): args are character, so `args[arg_offset+1] > 0` is a string
# comparison — it excludes "0" lexicographically, but confirm behaviour for
# unusual job-id inputs.
if (length(args) > arg_offset && args[arg_offset+1] > 0) {
  jobnum = paste("for Job ID", args[arg_offset+1])
}
if (length(args) > arg_offset+1) {
  extra_text = args[arg_offset+2];
}
# Image size defaults, overridable by the 6th/7th arguments.
im_width = 1800
if (length(args) > arg_offset+2) {
  im_width = strtoi(args[arg_offset+3])
}
im_height = 900
if (length(args) > arg_offset+3) {
  im_height = strtoi(args[arg_offset+4])
}
colnames(plot_data) = cols
png(png_file, width=im_width, height=im_height, type="cairo");
par(mar=c(4,4,4,4))
barplot(plot_data, main = paste("Number of Sequences at Each Length", jobnum, extra_text), ylab = "Number of Sequences", xlab = "Length", col = "red", border = "blue")
dev.off()
d31f1ca89288da8b9c9de11480fc95ca9c43628e
|
ea0d40c303a1a4597f1b507c9746d485c866738b
|
/scaping.R
|
dcfd8f88637bdd2204a4271e12bc8546605b7ea9
|
[] |
no_license
|
venkatram64/rwork
|
38bdc7d399dc5bce113b807344ea78ae8d23a8f8
|
b6584b5356c9ca91581adb6cd71d30fd1aca19cc
|
refs/heads/master
| 2020-05-27T19:58:05.518963
| 2019-06-03T04:19:40
| 2019-06-03T04:19:40
| 188,770,228
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 110
|
r
|
scaping.R
|
#install.packages('rvest')
library(rvest)
#demo(package='rvest')
# Run the tripadvisor scraping demo bundled with rvest.
demo(package='rvest', topic='tripadvisor')
|
c1cc88485807d86bf179c2d60aca1627887a3fc2
|
9faa3cfb92ff2cb58db8739fefffe4fd248bcf48
|
/lib/R/ema.R
|
8249a60919b255b302b03b540ed8cf0d21a8d305
|
[
"MIT"
] |
permissive
|
joshterrell805-historic/StockPrediction
|
fd135e9b0d6f8033207511c2c5b6b2ba24cf150b
|
15795889d21421b2ab59c3a4669c36e0956a888d
|
refs/heads/master
| 2021-01-10T09:20:24.380517
| 2017-07-04T19:00:20
| 2017-07-04T19:00:20
| 49,034,347
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 377
|
r
|
ema.R
|
source('lib/sma.R');
# Exponential moving average of data[, column], seeded with the simple moving
# average of the first `count` rows (via sma() from lib/sma.R).
#
# Args:
#   data:   data frame of observations in chronological order.
#   column: name or index of the column to average.
#   count:  period; positions 1..count-1 of the result are NA.
#
# Returns a numeric vector of length nrow(data): NA for the first count-1
# rows, the count-period SMA at row `count`, then the usual recursion
#   ema[i] = (x[i] - ema[i-1]) * 2/(count+1) + ema[i-1].
ema = function(data, column, count) {
  multiplier = (2 / (count + 1));
  n = nrow(data);
  out = rep(NA_real_, n);
  if (n >= count) {
    # Seed the recursion with the SMA at position `count`.
    out[count] = sma(data[1:count,], column, count)[count];
    # Explicit loop replaces the original sapply + `<<-` accumulator: the
    # values are identical, but there are no hidden side effects, and empty
    # input is now handled (the old `1:nrow(data)` iterated over c(1, 0)
    # when nrow(data) == 0, producing a spurious length-2 result).
    for (i in seq(count + 1, length.out = n - count)) {
      out[i] = (data[i, column] - out[i - 1]) * multiplier + out[i - 1];
    }
  }
  return(out);
};
|
ea4a9bdaeccb62fd31bde8434afe3d1d8de338c2
|
e5d5a917043fe280a2cfb1d058a800eefe2ce3d6
|
/man/pRojects.Rd
|
79d93ec1d464c30f211e336e43d72bc685f43cfc
|
[] |
no_license
|
mdozmorov/pRojects
|
2ee8d3c715f57c9527b0d1c4757538963fdf24dc
|
8df07e9d1f5d506a73743a7d5019490a63d1a985
|
refs/heads/master
| 2021-01-19T08:59:09.906103
| 2017-04-25T01:22:40
| 2017-04-25T01:22:40
| 87,704,857
| 0
| 0
| null | 2017-04-09T11:53:18
| 2017-04-09T11:53:18
| null |
UTF-8
|
R
| false
| true
| 217
|
rd
|
pRojects.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pRojects-package.r
\docType{package}
\name{pRojects}
\alias{pRojects}
\alias{pRojects-package}
\title{pRojects.}
\description{
pRojects.
}
|
51924545a3fa65853e9e2c8249ee81abaa3d7d4b
|
c09fa54a0adfeca7d21270960ccdd82ebb61a882
|
/lib/general_functions.R
|
78c09568fa6adb82fef87be6f7934e4f83c93eef
|
[] |
no_license
|
eclarke/scid_tcrb_analysis
|
717741513a2c8a778365ee090350a0c5fd16b198
|
e0cdb3f314d185fcf07890b36ce6cb65c8a75ae5
|
refs/heads/master
| 2016-08-11T09:11:23.889170
| 2015-10-08T21:37:43
| 2015-10-08T21:37:43
| 43,841,572
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 415
|
r
|
general_functions.R
|
#' Reorder a factor's levels to first-appearance order.
#'
#' Coerces the input to a factor if necessary, then rebuilds it so that the
#' levels follow the order in which the values first occur in the data rather
#' than the default alphabetical order. Handy for character columns that are
#' already sorted the way you want them displayed. Unused levels are dropped.
reorder_levels_by_uniq <- function(f) {
  fac <- if (is.factor(f)) f else as.factor(f)
  first_seen <- match(unique(fac), levels(fac))
  factor(fac, levels = levels(fac)[first_seen])
}
|
799cf148fe81f8d88ac9959d71ebed9b4a820f0a
|
d8b128ac81ac68a78e8247e9f0027a62b62d5a3b
|
/STRING.db/man/getAllLinks.Rd
|
a683ae2a0df9e069192bca00ff84c652f4ce8b47
|
[] |
no_license
|
stefanedwards/r-string
|
6e2c51c9cba9b9ac8a2fb700a254fc383f1d4fe0
|
2fa569174a2989c3eb129aec397936555f796847
|
refs/heads/master
| 2021-01-23T16:40:00.804970
| 2013-04-05T11:35:44
| 2013-04-05T11:35:44
| 32,138,809
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 729
|
rd
|
getAllLinks.Rd
|
\name{getAllLinks}
\alias{getAllLinks}
\alias{getAllLinks.0.2}
\title{Generic function for getting all protein-protein links}
\usage{
getAllLinks(conn, encoding, cutoff = 0)
getAllLinks.0.2(conn, encoding, cutoff)
}
\arguments{
\item{conn}{Database connection to STRING.db-sqlite
database.}
\item{encoding}{String of which encoding \code{proteins}
is set in. Defaults to primary encoding.}
\item{cutoff}{Cut-off of score, i.e. get all links with
score greater than or equal to.}
}
\value{
data.frame with three columns.
}
\description{
  \code{getAllLinks.x.y} are database schema-dependent
  functions that do the actual work.
}
\seealso{
\link{getNames}, \link{getPPI}
}
|
14e5764d8549e52a8f7a00acbce0551f590d5f44
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/itsmr/examples/deaths.Rd.R
|
bcbef1451236ac3ab7a66b0a46fd95a5a9b8b20e
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 159
|
r
|
deaths.Rd.R
|
library(itsmr)
### Name: deaths
### Title: USA accidental deaths, 1973 to 1978
### Aliases: deaths
### Keywords: datasets
### ** Examples
# Plot the monthly accidental-deaths series shipped with itsmr.
plotc(deaths)
|
c72a6255b3f0e2f39ef44c8a5dea4505103f5861
|
e47a4995c1f02d90521f4cdb5a3becdba2520d49
|
/R/WriteStripeProp.R
|
66e776cd0425ee4a998eebd6c089c0ef0266178c
|
[] |
no_license
|
SenguptaLab/MF.matR
|
9af3fbf77628250748af1a2a4a4515c09093e936
|
c64b0097131c6a2103e9489aa76692ae78085533
|
refs/heads/master
| 2023-01-24T22:06:28.450669
| 2023-01-12T22:19:19
| 2023-01-12T22:19:19
| 167,600,474
| 0
| 4
| null | 2023-01-12T22:19:20
| 2019-01-25T19:17:28
|
R
|
UTF-8
|
R
| false
| false
| 686
|
r
|
WriteStripeProp.R
|
#' Proportion of worm tracks inside a stripe
#'
#' Using an all_track_data.csv file (chosen interactively via file.choose()),
#' calculates the proportion of worm tracks in (1) or out (0) of a stripe.
#' Uses GetStripeProp to determine whether tracks are in or out, then writes
#' the proportion (together with the source file name) to prop_in_stripe.csv
#' in the same folder.
#'
#' @param ... Currently unused.
#' @return The written one-row data frame (prop, filename); called mainly for
#'   its side effect of writing prop_in_stripe.csv.
#' @importFrom magrittr %>%
#' @importFrom magrittr %<>%
#' @export
#' @examples
#' WriteStripeProp()
WriteStripeProp <- function(...) {
  filename <- file.choose()
  file.pref <- basename(filename)
  folder <- dirname(filename)
  data <- read.csv(filename)
  # BUG FIX: was GetSripeProp() (typo) — the helper is named GetStripeProp,
  # as this function's own documentation states.
  data %>% GetStripeProp() %>%
    summarise(prop = sum(InStripe)/nrow(.),
              filename = file.pref) -> data
  data.table::fwrite(data, file.path(folder, "prop_in_stripe.csv"))
}
|
b34029f2ae0b4e2d8d69e1f6759dc95e8b08ea4a
|
4b871231c9007b3527198e9208243f2a78fd1cf1
|
/ThePlantGenome_Version/U_Calculations/U_Code_Calculation.R
|
0d9e95321787235b2a6117c591a3602d3158f132
|
[] |
no_license
|
mdzievit/Genomic_Prediction
|
33c9a7be55bc99b22159cb3f6d62a8b6511f9bad
|
60d3b6aa1dfec35bb4344b0ade70e12846a8f61d
|
refs/heads/master
| 2021-12-28T10:49:32.186901
| 2021-12-21T19:05:40
| 2021-12-21T19:05:40
| 141,373,493
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,764
|
r
|
U_Code_Calculation.R
|
#### This script was setup to run on a server, you'll need to configure it for that accordingly.
#lib_loc <- "~/R/x86_64-pc-linux-gnu-library/3.4/"
##Use this to run on desktop R studio
#lib_loc <- NULL
# library(MASS, lib.loc = (lib_loc))
# library(data.table, lib.loc = (lib_loc))
# library(doSNOW, lib.loc = (lib_loc))
# library(readr, lib.loc = (lib_loc))
library(MASS)
library(data.table)
library(doSNOW)
library(readr)
#### Input data ####
#### These are gen x markers
#### In 1 and -1 format
#### Example Data Sets
# Genotype matrices: rows = individuals, columns = markers (coded 1 / -1).
train <- as.matrix(fread("RRBLUP_Input_Files/Examples/Training_Imputed.rrblup"))
vali <- as.matrix(fread("RRBLUP_Input_Files/Examples/Validation_Imputed.rrblup"))
#### Location of the full data if it was available. Check `Prepare_Imputed_File-RR-BLUP.md`
# train <- as.matrix(fread("RRBLUP_Input_Files/Training_Imputed.rrblup"))
# vali <- as.matrix(fread("RRBLUP_Input_Files/Validation_Imputed.rrblup"))
#### Parallelized U-Value Function ####
# For every individual in the validation set, computes the U statistic
# ||kHat||^2 / ||k||^2, where k is the individual's marker vector and
# kHat = t(train) %*% ginv(train %*% t(train)) %*% train %*% k is its
# projection through the training marker matrix.
#
# train: training genotype matrix (individuals x markers, coded 1/-1)
# vali:  validation genotype matrix (individuals x markers)
# cores: number of doSNOW worker processes
#
# Returns a matrix with one row per validation individual and columns
# "Indv" (row index in vali) and "Result" (the U value).
par_U <- function(train,
                  vali,
                  cores) {
  ##Number of individuals in the validation population
  nind <- nrow(vali)
  ##Generalized inverse of train %*% t(train), computed once and reused
  invTTp <- ginv(tcrossprod(train))
  ##Start the worker cluster.
  ##FIX: previously the cluster and progress bar were never released when an
  ##error occurred mid-run (and the progress bar was never closed at all);
  ##on.exit() guarantees cleanup on every exit path.
  cl <- makeSOCKcluster(cores)
  on.exit(stopCluster(cl), add = TRUE)
  registerDoSNOW(cl)
  ##Progress bar, advanced once per completed individual
  pb <- txtProgressBar(max = nind, style = 3)
  on.exit(close(pb), add = TRUE)
  progress <- function(n) setTxtProgressBar(pb, n)
  opts <- list(progress = progress)
  ##One U calculation per validation individual, combined row-wise;
  ##'i' keeps track of each individual
  U_Results <- foreach(i = 1:nind,
                       .combine = 'rbind',
                       .packages = c('MASS'),
                       .options.snow = opts) %dopar% {
    k <- as.matrix(vali[i,])
    kHat <- crossprod(train,(invTTp %*% (train %*% k)))
    U <- crossprod(kHat)/(crossprod(k))
    c("Indv" = i, "Result" = U[1,1])
  }
  return(U_Results)
}
#### Running the function ####
# Hard-coded to 2 worker cores; increase for larger runs.
U_Results <- par_U(train = train,
                   vali = vali,
                   cores = 2)
#### Output ####
#### Example output
write_tsv(x = as.data.frame(U_Results),
          path = "U_Calculations/Example_U_Results_Valid.txt")
#### Full dataset output
# write_tsv(x = as.data.frame(U_Results),
#           path = "U_Calculations/U_Results_Valid.txt")
|
dc830408c5ee74c46ebbf67afc0582d56d85fe67
|
dfea55706e15d6fded905df685f6b756b4052836
|
/R/mass.R
|
9e82e4d1975b7bf4bad2bfb89d8434a5f4785cac
|
[] |
no_license
|
muschellij2/massr
|
d5691169a37b80a87da2d84ab02dfbeb0716f561
|
1cd6044e50a729671c65564249712bfb47790c0d
|
refs/heads/master
| 2021-01-22T21:18:00.296509
| 2015-03-23T17:00:25
| 2015-03-23T17:00:25
| 32,252,744
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,572
|
r
|
mass.R
|
#' @title Run MASS
#'
#' @description Runs MASS on \code{file}, registering against the templates in
#' \code{template_directory} and writing results into \code{outdir}.
#' @param file Filename (or nifti) to match to target
#' @param outdir Output directory for results
#' @param nregs Number of registrations to do (default is 6)
#' @param template_directory Directory of templates to register against
#' @param cerebellum Use templates with cerebellum?
#' @import fslr
#' @export
#' @return Result of \code{\link{system}} command (0; the function stops on a
#' non-zero exit status).
#' @examples \dontrun{
#' require(fslr)
#' temp = file.path(fsldir(), "data", "standard", "MNI152_T1_1mm.nii.gz")
#' res = mass(file = temp)
#' }
mass <- function(
  file, # Filename (or nifti) to match to target
  outdir = dirname(file),
  nregs = NULL, # number of template to use, default 6
  template_directory =
    system.file(file.path("mass/share/data/Templates",
                          ifelse(cerebellum, "WithCerebellum",
                                 "WithoutCerebellum")),
                package="massr"),
  cerebellum =TRUE # use templates with cerebellum
  ){
  suppressWarnings({x = fix_mass()})
  file = checkimg(file)
  outdir = path.expand(outdir)
  if (!is.null(template_directory)){
    template_directory = path.expand(template_directory)
  }
  if (outdir %in% c(".", "./")){
    outdir = shQuote(getwd())
  }
  # Assemble the command-line flags for the external `mass` executable.
  args = c("-in"=file,
           "-ref"=template_directory,
           "-dest"=outdir,
           "-regs"=nregs,
           ifelse(!cerebellum, "-noCere", ""))
  cmd = "mass"
  cmd = mass_cmd_maker(cmd=cmd, args = args)
  res = system(cmd)
  if (res != 0){
    stop("MASS command failed")
  }
  # BUG FIX: previously returned the undefined `outimg`, which errored on
  # every successful run; return the system() status as documented. (The
  # roxygen block also documented a nonexistent `retimg` parameter and
  # omitted `cerebellum`; both corrected above.)
  return(res)
}
|
4c135c70e44948827db839828a047c44a7218063
|
7520bef4b0336f89b6d9706f3456ecd9894a3153
|
/RecruitPackage/recruitjapan/man/xgboost_predict.Rd
|
ccc3a0ecbbbddff93377af312871ee2539c75a40
|
[
"MIT"
] |
permissive
|
ALaks96/Kaggle_competition_Restaurant_Visit_Forecast
|
fb918beea46132aca7be47072e0b671a7cb9d40e
|
6d3fc45aaebe722d8151f24aa92a46f9a9b98a34
|
refs/heads/master
| 2020-08-03T10:01:45.338795
| 2020-07-19T20:52:34
| 2020-07-19T20:52:34
| 211,711,943
| 0
| 0
|
MIT
| 2020-07-19T20:52:36
| 2019-09-29T19:04:02
|
R
|
UTF-8
|
R
| false
| true
| 454
|
rd
|
xgboost_predict.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb_fcts.R
\name{xgboost_predict}
\alias{xgboost_predict}
\title{xgboost_predict}
\usage{
xgboost_predict(xgb1, dTEST)
}
\arguments{
\item{xgb1}{a classifier trained with xgboost}
\item{dTEST}{a test matrix in the xgbboost_matrix format}
}
\value{
returns the prediction
}
\description{
xgboost_predict
}
\examples{
#xgboost_predict(xgb_classifier, test_matrix_xgb_matrix)
}
|
70215fd23e6d76f06c5edca7203942394b719aaa
|
3112da48891821abba3ff40c7d1e7e40fdc80f0e
|
/code_notforpackage/Prior_sampling_plotting.R
|
16e49ef91b400181e8a6934a8b033aba60ba8b52
|
[] |
no_license
|
TheoreticalEcology/NUCOMBog
|
a337d3d5177edef22d84ee4e4906dabf152c5c96
|
648c598c6823f343a84d174c197824abcd3e249d
|
refs/heads/master
| 2023-03-15T18:44:22.226411
| 2018-05-20T15:15:28
| 2018-05-20T15:15:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,288
|
r
|
Prior_sampling_plotting.R
|
rm(list=ls())
library(NUCOMBog)
library(BayesianTools)
# library(Rmpi)
setwd("~/")
setup_SMC<- setupNUCOM(mainDir = "/home/jeroen/MERBLEUE_decomp_JENKINSON/", climate = "MERBLEUE_RANDOM_soiltemp_1766_2013.txt",environment = "Env_Mer_Bleue.txt",inival = "Inival_Mer_Bleue.txt",start=1766,end=2013,type=c("NEE","WTD"),numFolders=numFolders,separate = F,startval=2797,parallel = F)
setwd(setup_SMC[[1]]$mainDir)
data<-read.csv("input/NEE_WTD_GPP_MERBLEUE_1999_2013.csv",sep="\t",as.is=T)
data<-data[2:nrow(data),]
data<-as.data.frame(lapply(data,as.numeric))
data[data==-9999]<-NA
names <- c("gram_KExt","gram_MaxGr","gram_MortFrLvmsleaf","gram_SLA","gram_TOpt1Gr","gram_TOpt2Gr","gram_TmaxGr","eric_KExt","eric_MaxGr","eric_MortFrLvmsleaf","eric_SLA","eric_TOpt1Gr","eric_TOpt2Gr","eric_TmaxGr","eric_WLOpt1","humm_CAllocFrshoot","humm_MaxGr","humm_MortFrAcroshoot","humm_TMaxGr","humm_TOpt1Gr","humm_TOpt2Gr","lawn_CAllocFrshoot","lawn_MaxGr","lawn_MortFrAcroshoot","lawn_TMaxGr","lawn_TOpt1Gr","lawn_TOpt2Gr","holl_CAllocFrshoot","holl_MaxGr","holl_MortFrAcroshoot","holl_TMaxGr","holl_TOpt1Gr","holl_TOpt2Gr","sd_NEE1","sd_NEE2","sd_WTD1")
values<-c(0.5,70,0.08,0.012,12,20,25,0.8,60,0.04,0.012,5,14,25,300,1,45,0.04,25,14,18,1,50,0.04,25,14,18,1,60,0.08,25,10,18,1,1,0.1)
min<- 0.1*values
max<- c(2,5,5,2,0.4,1.7,1.4,1.25,5,5,2,0.4,2.2,1.4,1.67,5,5,5,1.4,1.4,2.2,5,5,5,1.4,1.4,2.2,5,5,5,1.4,1.4,2.2,30,30,5)*values
# names <- c("gram_KExt","gram_MaxGr","gram_MortFrLvmsleaf","gram_SLA","eric_KExt","eric_MaxGr","eric_MortFrLvmsleaf","eric_SLA","eric_WLOpt1","humm_CAllocFrshoot","humm_MaxGr","humm_MortFrAcroshoot","humm_TMaxGr","humm_TOpt1Gr","humm_TOpt2Gr","lawn_CAllocFrshoot","lawn_MaxGr","lawn_MortFrAcroshoot","lawn_TMaxGr","lawn_TOpt1Gr","lawn_TOpt2Gr","holl_CAllocFrshoot","holl_MaxGr","holl_MortFrAcroshoot","holl_TMaxGr","holl_TOpt1Gr","holl_TOpt2Gr","sd_NEE1","sd_NEE2","sd_WTD1")
# values<-c(0.5,70,0.08,0.012,0.8,60,0.04,0.012,300,1,45,0.04,25,14,18,1,50,0.04,25,14,18,1,60,0.08,25,10,18,1,1,0.1)
# min<- 0.1*values
# max<- c(2,5,5,2,1.25,5,5,2,1.67,5,5,5,1.4,1.4,2.2,5,5,5,1.4,1.4,2.2,5,5,5,1.4,1.4,2.2,30,30,5)*values
outprior<-NULL
pb<-txtProgressBar(min = 0,max = 10000,style=3)
for(i in 1:10000){
parameters<-data.frame(names,as.data.frame(runif(36,min=min,max=max)))
names(parameters)<-c("names","values")
# parameters<-parameters[1:27,]
outprior[[i]]<-runNUCOM(setup=setup_SMC,parameters = parameters)
setTxtProgressBar(pb, i)
}
close(pb)
save.image(file="outprior_decomp_JENKINSON2_10000.Rdata")
pdf("NEE_prior_10000_JENKINSON.pdf")
for(i in 1:length(outprior)){
plot(outprior[[i]]$NEE,ylim=c(-60,60),col="lightgray",type="l")
par(new=T)
}
points(data$NEE,col=2,type="l")
par(new=F)
dev.off()
pdf("WTD_prior_10000_JENKINSON.pdf")
for(i in 1:length(outprior)){
plot(outprior[[i]]$WTD,ylim=c(-0.7,0.1),col="lightgray",type="l")
par(new=T)
}
points(data$WTD/100,col=2,type="l")
par(new=F)
dev.off()
pdf("hetero_resp_prior_10000_JENKINSON.pdf")
for(i in 1:length(outprior)){
plot(outprior[[i]]$hetero_resp,ylim=c(0,150),col="lightgray",type="l")
par(new=T)
}
dev.off()
pdf("npp_10000_JENKINSON.pdf")
for(i in 1:length(outprior)){
plot(outprior[[i]]$NPP,ylim=c(0,150),col="lightgray",type="l")
par(new=T)
}
dev.off()
# {}
|
625a8aacd0830d17530c24b8f2df76de5eb8503b
|
e624cc888255935e3369a2236269ba12068e4892
|
/man/construct_yml_config.Rd
|
5e3a5baea8532f1d9ad189edc2909894b39e2fbb
|
[] |
no_license
|
cran/DataPackageR
|
95e9a4005d070897a499c16a97c087ef07af830d
|
aada7b8e22af5b6c307844daca778a75b53d53ca
|
refs/heads/master
| 2021-06-09T19:09:11.147397
| 2021-03-17T08:50:03
| 2021-03-17T08:50:03
| 145,911,847
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,097
|
rd
|
construct_yml_config.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/yamlR.R
\name{construct_yml_config}
\alias{construct_yml_config}
\title{Construct a datapackager.yml configuration}
\usage{
construct_yml_config(code = NULL, data = NULL, render_root = NULL)
}
\arguments{
\item{code}{A vector of filenames}
\item{data}{A vector of quoted object names}
\item{render_root}{The root directory where the package data processing code will be rendered.
Defaults to is set to a randomly generated named subdirectory of \code{tempdir()}.}
}
\value{
a datapackager.yml configuration represented as an R object
}
\description{
Constructs a datapackager.yml configuration object from a vector of file names and a vector of object names (all quoted).
Can be written to disk via \code{yml_write}.
\code{render_root} is set to a randomly generated named subdirectory of \code{tempdir()}.
}
\examples{
conf <- construct_yml_config(code = c('file1.rmd','file2.rmd'), data=c('object1','object2'))
tmp <- normalizePath(tempdir(), winslash = "/")
yml_write(conf,path=tmp)
}
|
c2b992cb24c59144be297ea51b0df4ff29eb966f
|
83c43061a32c1c899aefb564ba6c63c32f48b210
|
/General Code/CMcCode/man/reglines.Rd
|
5355f189e6bc3ec1213be194081088b95a3886ae
|
[] |
no_license
|
chacemcneil/Personal
|
c694b71439d4884884a22a599f7dfd6e3549f49c
|
021c2f270f644654730c601b33458fad23185c26
|
refs/heads/master
| 2021-12-10T22:49:48.057640
| 2021-11-09T01:36:40
| 2021-11-09T01:36:40
| 45,845,740
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,227
|
rd
|
reglines.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Graphing.R
\name{reglines}
\alias{reglines}
\title{Plot Regression Lines}
\usage{
reglines(mod)
}
\arguments{
\item{mod}{Linear regression model as returned by \code{lm()}.}
}
\description{
Plots response variable against all explanatory variables with estimated regression lines.
}
\examples{
library(data.table)
n <- 1e3
# Three independent explanatory variables
coef <- c(2.1, -0.8, 1.2)
dt <- data.table(x1 = rnorm(n), x2 = rnorm(n, , 2), x3 = rnorm(n, 4))
dt[, Mean := cbind(x1, x2, x3) \%*\% coef]
dt[, y := rnorm(.N, Mean, .5)]
mod <- lm(y ~ x1 + x2 + x3, data = dt)
reglines(mod)
# Three correlated explanatory variables
coef <- c(2.1, -0.8, 1.2)
dt <- data.table(x1 = rnorm(n))
dt[, x2 := rnorm(.N, -.8 + .7*x1)]
dt[, x3 := rnorm(.N, 1.2 + 1.1*x1 + .5*x2)]
dt[, Mean := cbind(x1, x2, x3) \%*\% coef]
dt[, y := rnorm(.N, Mean, .5)]
mod <- lm(y ~ x1 + x2 + x3, data = dt)
reglines(mod)
# Additional interaction term
coef <- c(2.1, -0.8, 1.2, -0.9)
dt <- data.table(x1 = rnorm(n))
dt[, x2 := rnorm(.N, -.8 + .7*x1)]
dt[, x3 := rnorm(.N, 1.2 + 1.1*x1 + .5*x2)]
dt[, Mean := cbind(x1, x2, x3, x1*x2) \%*\% coef]
dt[, y := rnorm(.N, Mean, .5)]
mod <- lm(y ~ x1 + x2 + x3 + x1*x2, data = dt)
reglines(mod)
# Additional categorical variable
coef <- c(2.1, -0.8, 1.2, -0.9, 2)
dt <- data.table(x1 = rnorm(n))
dt[, x2 := rnorm(.N, -.8 + .7*x1)]
dt[, Group := letters[rbinom(.N, 2, .5) + 1]]
dt[, x3 := rnorm(.N, 1.2 + 1.1*x1 + .5*x2)]
dt[, Mean := cbind(x1, x2, x3, x1*x2, factor(Group)) \%*\% coef]
dt[, y := rnorm(.N, Mean, .5)]
mod <- lm(y ~ x1 + x2 + Group + x3 + x1*x2, data = dt)
reglines(mod)
# Interaction with poorly fitted model
coef <- c(2.1, -0.8, 1.2, -0.9, 2, -3)
dt[, Mean := cbind(x1, x2, x3, x1*x2, factor(Group), as.numeric(factor(Group))*x3) \%*\% coef]
dt[, y := rnorm(.N, Mean, .5)]
mod <- lm(y ~ x1 + x2 + Group*x3 + x3 + x1*x2, data = dt)
reglines(mod)
# Interaction with better model
coef <- c(2.1, -0.8, 1.2, -0.9, 2, -3)
dt[, Mean := cbind(x1, x2, x3, x1*x2, factor(Group), as.numeric(factor(Group))*x3) \%*\% coef]
dt[, y := rnorm(.N, Mean, .5)]
mod <- lm(y ~ (x1 + x2 + x3)*Group + x1*x2, data = dt)
reglines(mod)
}
|
0605f0625c2bbccaf092c5fb0c9455a7f529a388
|
7d2d4cdcea2e65e4d93ee7d052de1af938d3eaf3
|
/man/hor.Rd
|
324676dca851b56273636e8e0f85486781b3b167
|
[] |
no_license
|
yangxhcaf/astsa
|
4b9472d9e2bab77d8e697d268c5d9e0d35014f0b
|
6a0ac28d5c2c31311421a089f19c02eec8f52cf4
|
refs/heads/master
| 2020-07-01T15:43:24.406600
| 2019-05-08T04:20:07
| 2019-05-08T04:20:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 685
|
rd
|
hor.Rd
|
\name{hor}
\alias{hor}
\docType{data}
\title{Hawaiian occupancy rates}
\description{Quarterly Hawaiian hotel occupancy rate (percent of rooms occupied) from
1982-I to 2015-IV
}
\format{
The format is:
Time-Series [1:136] from 1982 to 2015: 79 65.9 70.9 66.7 ...
}
\source{\url{http://dbedt.hawaii.gov/economic/qser/tourism/}
}
\references{\url{http://www.stat.pitt.edu/stoffer/tsa4/} and \url{http://www.stat.pitt.edu/stoffer/tsda/}
}
\examples{
plot(hor, type='c') # plot data and
text(hor, labels=1:4, col=c(1,4,2,6), cex=.9) # add quarter labels
#
plot(stl(hor, s.window=15)) # fit structural model
}
\keyword{datasets}
|
d9a8b67710ebac805902a83ee52d766a125dcb0d
|
5a393f7931112161745cb0fd8b142de22432d54b
|
/SC.edgeR.perm.Pval.01.02.18.R
|
921b8074385504c1e3e8d406812c4b67325b7ecf
|
[] |
no_license
|
bukhariabbas/stickleback-paternal-care
|
6de7adcc4b2260891358b3f00052e2af6ca469e2
|
a4cc41852b880303c98240d6f5686cad0193b918
|
refs/heads/master
| 2020-06-20T08:29:16.449781
| 2019-07-16T04:13:58
| 2019-07-16T04:13:58
| 197,059,308
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,696
|
r
|
SC.edgeR.perm.Pval.01.02.18.R
|
rm (list =ls())
setwd ("/home/n-z/sbukhar/eFDR/sc_counts/")
stopifnot(require("edgeR"))
stopifnot(require("plyr"))
#### Parameters
Brain_region = "D"
####
targets = readTargets("Targets.txt")
targets = targets[which(targets$BrainRegion == Brain_region),]
get_DGE = function(targets)
{
Raw_DGE = readDGE(targets)
MetaTags <- grep("^__", rownames(Raw_DGE))
Raw_DGE = Raw_DGE[-MetaTags,]# removing meta tags
keep <- rowSums(cpm(Raw_DGE)>1) >= 2
print(table(keep))
Filtered_DGE = Raw_DGE[keep,]
Filtered_DGE = calcNormFactors(Filtered_DGE)
return(Filtered_DGE)
}
DGE_all = get_DGE(targets)
time = as.factor(targets$TimePoint)
intruder = as.factor(targets$Treatment)
intruder = relevel(intruder, ref="Control")
group = paste(time, intruder, sep = ".")
get_fit = function(y, design)
{
y = estimateGLMCommonDisp(y,design)
y = estimateGLMTrendedDisp(y,design)
y = estimateGLMTagwiseDisp(y,design)
fit = glmFit (y, design)
return(fit)
}
design.pairwise = model.matrix(~time+time:intruder)
fit = get_fit(DGE_all,design.pairwise)
library(doMC)
ncore = parallel::detectCores()
registerDoMC(cores = ncore)
c30_o = as.data.frame(topTags(glmLRT(fit, coef=4), n=Inf, sort.by="PValue"))
c30_o$names = rownames(c30_o)
c60_o = as.data.frame(topTags(glmLRT(fit, coef=5), n=Inf, sort.by="PValue"))
c120_o = as.data.frame(topTags(glmLRT(fit, coef=6), n=Inf, sort.by="PValue"))
setwd("/home/n-z/sbukhar/eFDR/sc_perm/")
c30_null = read.delim(paste(Brain_region,"_null30_2017-10-20.txt", sep = ""), header = T, sep = "\t")
c60_null = read.delim(paste(Brain_region,"_null60_2017-10-20.txt", sep = ""), header = T, sep = "\t")
c120_null = read.delim(paste(Brain_region,"_null120_2017-10-20.txt", sep = ""), header = T, sep = "\t")
get_eFDR = function(c30_null, c30_o)
{
rownames(c30_null) = c30_null$names
c30_null = c30_null[,c(-2)]
R = dim(c30_null)[2]
qval = c()
qval <- foreach(i = 1:dim(c30_o)[1], .combine=c) %dopar% {
# how many p-value are lower among the random matrix (lower is better)
length(which(c30_null <= c30_o$PValue[i])) / R
}
#to avoide getting a q value as +1 in both sides.
qval <- (qval+1)/(length(qval)+1)
return(qval)
}
c30_o$eFDR = get_eFDR (c30_null, c30_o)
c60_o$eFDR = get_eFDR (c60_null, c60_o)
c120_o$eFDR = get_eFDR (c120_null, c120_o)
write.table(c30_o, file = paste(Brain_region, "con_30_eFDR", Sys.Date(),"txt", sep = "."), quote = F, row.names = F, sep = "\t")
write.table(c60_o, file = paste(Brain_region, "con_60_eFDR", Sys.Date(),"txt", sep = "."), quote = F, row.names = F, sep = "\t")
write.table(c120_o, file = paste(Brain_region, "con_120_eFDR", Sys.Date(),"txt", sep = "."), quote = F, row.names = F, sep = "\t")
|
f589bcb73975f5f77aff42e8e36890b3481c1a02
|
a369ea4c55b65c57009fc6f8ac7930d54bd22bd2
|
/get_data.R
|
92529c1b64bad53dbc2b33ae8bcd5a1b92547a47
|
[] |
no_license
|
vadimus202/PDT_2015
|
3af23fe4443a67793202fd85e9704b1482d5dfa4
|
8325fa793b775432a5c9738bddc3475d58e17d85
|
refs/heads/master
| 2021-01-25T06:40:08.673196
| 2015-07-09T02:26:51
| 2015-07-09T02:26:51
| 38,761,396
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,671
|
r
|
get_data.R
|
require(readxl)
require(dplyr)
raw <- read_excel('PDT2015 Attendee List for Sponsors_Exhibitors_v2.xlsx', sheet = 1)
dat <- raw %>%
mutate(
name = paste0(FIRST_NAME, ' ', LAST_NAME),
name = ifelse(is.na(DESIGNATION), name,
paste0(name, ', ', DESIGNATION)),
# clean up DC
city = ifelse(
CITY %in% c('Washington DC','Washington, DC'),
'Washington', CITY),
state = ifelse(
CITY %in% c('Washington DC','Washington, DC'),
'DC',STATE_PROVINCE),
# CLean up AP
city = ifelse(state=='AP',NA,city),
state = ifelse(state=='AP',NA,state),
loc = ifelse(is.na(state), NA,
paste(city, state, sep = ', '))
) %>%
select(name, title = TITLE, company = COMPANY,
loc, state, city = CITY, zip=ZIP,
first = FIRST_NAME, last=LAST_NAME, desig = DESIGNATION)
# Fuzzy matching
unique(agrep('McLean, VA', dat$loc, value = T, ignore.case = T))
unique(agrep('Washington, DC', dat$loc, value = T, ignore.case = T))
unique(agrep('La Plata, MD', dat$loc, value = T, ignore.case = T))
fuzz <- lapply(unique(dat$loc),
function(x) agrep(x, unique(dat$loc), value = T, ignore.case = T))
fuzz <- fuzz[sapply(fuzz, length)>1]
sort(unique(unlist(fuzz)))
# clean up city names
e <- environment()
loc_clean <- function(loc, repl=''){
if(repl=='') repl <- loc
cat(sort(unique(dat$loc[agrep(loc, dat$loc, ignore.case = T)])))
e$dat$loc[agrep(loc, dat$loc, ignore.case = T)] <- repl
}
loc_clean("Atlanta, GA")
loc_clean("Falls Church, VA")
loc_clean("Ft. George G. Meade, MD", "Fort George G. Meade, MD")
loc_clean("Herndon, VA")
loc_clean("Indianapolis, IN")
loc_clean("McLean, VA")
loc_clean("La Plata, MD")
loc_clean("St. Louis, MO")
dat$loc[dat$loc=='Rockivlle, MD'] <- 'Rockville, MD'
dat$loc[dat$loc %in%
c("Washington Navy Yard, DC",
"washington, DC", "WASHINGTON, DC")] <- "Washington, DC"
# add cities latitude/longitude - kindly provided by google:
if(file.exists('geo_codes.RData')){
load('geo_codes.RData')
} else {
loc <- sort(unique(dat$loc))
latlon <- ggmap::geocode(loc)
latlon <- data.frame(loc, latlon, stringsAsFactors = F)
save(latlon, file = 'geo_codes.RData')
}
# Merge final dataset
final <- dat
# Aggregate
agg.city <- final %>%
filter(!is.na(loc)) %>%
group_by(state, loc) %>%
summarize(Attendees=n()) %>%
left_join(latlon, by='loc') %>%
rename(city = loc)
agg.state <- final %>%
filter(!is.na(state)) %>%
group_by(state) %>%
summarize(Attendees=n()) %>%
arrange(-Attendees)
# save datasets
save(final, agg.city, agg.state,
file = 'PDT_2015.RData')
|
9c518f642705884d540066b04a0b171e70d35c0f
|
dc66fe1b9e4c21615711fc5513ebbf954f959da7
|
/Opportunities2.R
|
41a9223bb338574ea9c077559446f367f17965f2
|
[] |
no_license
|
lyonslj/CTS
|
faaf1e7a77fcbca53e549c7ed064849297d5fbe7
|
79991cf56d59f0ec2efcc3dc0a30e619b1e2ca88
|
refs/heads/master
| 2021-01-19T10:14:25.418036
| 2020-11-12T07:29:47
| 2020-11-12T07:29:47
| 82,169,315
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,694
|
r
|
Opportunities2.R
|
Opportunities <- function(DaysAgo = 1 ) {
#*****************************************************************
# Find all instruments whose RSC(alsi40) > than its 24dma #
#*****************************************************************
library(reshape2)
y <- NULL
mydata <- head(y,0)
#only need 24 records
alldata2 <- subset(JSEdat,JSEdat$Date >= "2016-05-01")
#group by instm
z <- aggregate(Close ~ Name, alldata2, length)
#choose instm who have more than 30 record
lst <- subset(z$Name,z$Close > 30)
alsi <- subset(alldata2[,2:6],alldata2$Name=="JH-ALSI40")
alsi <- alsi[,-2:-4] # Drop columns
names(alsi) <- c("Date","Alsi.Close")
alldata2 <- merge(alldata2,alsi,by = "Date")
alldata2$rsc <- (alldata2$Close / alldata2$Alsi.Close) # Add rsc
for(i in lst) {
y <- subset(alldata2,alldata2$Name == i)
y$rsc.ma24 <- SMA(y$rsc,n=24) # Calculate 24 dma of RSC, Need package TTR#
y$pm <- (Delt(y$Close))*100 #Calculate daily %age move
mydata <- rbind(y,mydata)
}
opportunities <- as.character(subset(mydata$Name,mydata$Date == (Sys.Date() - DaysAgo)
& mydata$rsc > mydata$rsc.ma24
& mydata$Volume > 30000
& mydata$pm > 1))
backdt <- Sys.Date()-20 # show over 20 days
pth = "RPlots/Scans/"
for(i in opportunities){
CumulativeReturns(backdt,i,i,pth) # call to function to show %age moves
}
}
|
d52215e41e5f379721fd6755d8f23cbd7e5174cb
|
9edbb0f6dab00623bd0b0042062e0201042090f0
|
/R/normalize.R
|
2b47736956c0b99f1b4d69eb52e9f08e43ff3a0c
|
[] |
no_license
|
bozenne/butils
|
87161bb1e31570984f5a80e00d783f59297516fb
|
358a62cb656bf0b8ac335e71bf8b79102b458d8e
|
refs/heads/master
| 2023-07-23T10:19:58.756236
| 2023-07-18T13:00:10
| 2023-07-18T13:00:10
| 63,180,644
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,174
|
r
|
normalize.R
|
### normalize.R ---
#----------------------------------------------------------------------
## author: Brice Ozenne
## created: apr 25 2017 (14:13)
## Version:
## last-updated: maj 25 2017 (18:52)
## By: Brice Ozenne
## Update #: 17
#----------------------------------------------------------------------
##
### Commentary:
##
### Change Log:
#----------------------------------------------------------------------
##
### Code:
#' @title Transform a variable to obtain approximate normality
#' @description (Slow) implementation of the transformation described in Albada, 2007.
#'
#' @param X the vector of values
#' @param na.rm should the NA values be ignored when estimating the cumulative distribution function.
#'
#' @references Albada et al. Transformation of arbitrary distributions to the normal distribution with application to EEG test-retest reliability (2007, journal of Neuroscience Methods)
#' @examples
#'
#' n <- 1000
#'
#' ## normal distribution ##
#' X <- rnorm(n)
#' Xnorm <- normalize(X)
#' shapiro.test(Xnorm)
#' # plot(X, Xnorm)
#'
#' X.NA <- c(NA,X,NA)
#' Xnorm.NA <- normalize(X.NA, na.rm = TRUE)
#'
#' ## gamma distribution
#' X <- rgamma(n, shape = 1)
#' shapiro.test(X)
#' # hist(X)
#'
#' Xnorm <- normalize(X)
#' shapiro.test(Xnorm)
#' # hist(Xnorm)
#' # plot(X,Xnorm)
#' @export
normalize <- function(X, na.rm = FALSE){
if(!is.numeric(X)){
stop("\'X\' must be a numeric vector \n")
}
if(any(is.na(X))){
if(na.rm){
Xsave <- X
X <- as.numeric(na.omit(X))
test.NA <- TRUE
}else{
stop("\'X\' contains NA \n",
"set na.rm=TRUE to ignore them \n")
}
}else{
test.NA <- FALSE
}
n <- length(X)
X <- sapply(X, function(x){
sqrt(2)*erfinv(2*EDF(X,x,n)-1)
})
if(test.NA){
Xsave[!is.na(Xsave)] <- X
X <- Xsave
}
return(X)
}
EDF <- function(X,x,n){
mean(X<=x)-1/(2*n)
}
erfinv <- function (x){ # from http://stackoverflow.com/questions/29067916/r-error-function-erfz
qnorm((1 + x)/2)/sqrt(2)
}
#----------------------------------------------------------------------
### normalize.R ends here
|
1fafaa56bd33e1627cf3083b75e0f83edb8b4a53
|
2e4174b0ac70ee8b0310fa5b03b9f7556e57c061
|
/port_fhs.R
|
d8bdd16abc6013fd689c95db760094ff0fe0c52c
|
[] |
no_license
|
mathtester/fhs_index
|
5ef4b0ed185c5d70777fe5d4273d0aebf0488383
|
de282737ce1370c31b0dd7bcf37cb812076d6b12
|
refs/heads/main
| 2023-02-25T04:43:23.518195
| 2021-01-31T18:29:03
| 2021-01-31T18:29:03
| 328,405,403
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,152
|
r
|
port_fhs.R
|
library(fGarch)
library(MASS)
options(warn=-1)
# The total run time is about 20 min on a PC.
calc.var <- function(price.df, garch.period, corr.period,
var.period, var.rank, idx.cnt) {
# price.df is a data frame with dates as row names.
# Each column is a series of historical prices.
# idx.cnt is the number of indices in the columns of price.df.
# It is assumed the stock indices are in the first columns.
symbols <- colnames(price.df)
stocks <- symbols[(idx.cnt + 1):length(symbols)] # individual stock tickers.
rtn.df <- price.df[-1, ] / price.df[-nrow(price.df), ] - 1
garch.cols <- c('omega', 'alpha', 'beta', 'sigma', 'residual', 'return')
subcols <- c(garch.cols, 'var')
sym.cols <- outer(symbols, subcols, FUN = 'paste', sep = '.')
sym.cols <- t(sym.cols)
dim(sym.cols) <- NULL # Flatten to a 1D vector.
all.cols <- c('date', 'corr.th', 'corr.ex', 'port.var', sym.cols)
garch.df <- data.frame(matrix(nrow = 0, ncol = length(all.cols)))
colnames(garch.df) <- all.cols
garch.days <- nrow(rtn.df) - garch.period + 1
for (j in 1:garch.days) {
selected = rtn.df[j:(j+garch.period-1), ]
curr.date <- rownames(rtn.df)[j+garch.period-1]
garch.df[j, 'date'] <- curr.date
for (s in symbols) {
# skip outputs of garchFit().
capture.output( {
gm <- garchFit(~garch(1,1), data = selected[, s], include.mean = FALSE)
} )
# This is the forecast sigma using data up to prior period.
sig <- gm@sigma.t[garch.period]
# Residuals are not normalized.
rtn <- gm@residuals[garch.period]
garch.df[j, paste(s, garch.cols, sep = '.')] <-
c(gm@fit$par, sig, rtn / sig, rtn)
}
if (j > var.period) {
# Now compute average correlations.
norm.rtn <- as.matrix(garch.df[(j-corr.period+1):j,
paste(stocks, 'residual', sep = '.')],
nrow = corr.period)
corr.mat <- cor(norm.rtn)
d <- nrow(corr.mat)
garch.df[j, 'corr.ex'] <- (sum(corr.mat) - d) / d / (d-1)
last.rtn <- matrix(norm.rtn[nrow(norm.rtn), ], nrow = 1)
garch.df[j, 'corr.th'] <- (sum(t(last.rtn) %*% last.rtn) - sum(last.rtn^2))/d/(d-1)
# Compute FHS VaR.
residual <- garch.df[(j-var.period):(j-1), paste(symbols, 'residual', sep = '.')]
sigma <- matrix(as.numeric(rep(garch.df[j, paste(symbols, 'sigma', sep = '.')],
var.period)), nrow = var.period, byrow = TRUE)
pl.df <- residual * sigma
sorted.pls <- apply(pl.df, 2, 'sort', partial = var.rank)
garch.df[j, paste(symbols, 'var', sep = '.')] <- -sorted.pls[var.rank, ]
# Compute portfolio VaR.
prices <- data.matrix(price.df[curr.date, stocks])
port.pl <- data.matrix(pl.df[, (idx.cnt+1):length(symbols)]) %*%
t(prices) / sum(prices)
port.pl <- sort(port.pl, partial = var.rank)
garch.df[j, 'port.var'] <- -port.pl[var.rank]
}
}
return(garch.df)
}
load.dji <- function() {
# This file is obtained from DJI component at the end of 2020,
# with DOW removed due to its short history.
compo <- read.csv('input/components.csv', header = TRUE)
start.date <- '2018-01-01'
tickers <- compo[, 1]
price.df <- NULL
for (t in tickers) {
prc.history <- read.csv(paste('input/', t, '.csv', sep=''), header = TRUE)
if (is.null(price.df)) {
price.df <- as.data.frame(
prc.history[prc.history['Date'] >= start.date, 'Adj.Close'],
row.names = prc.history[prc.history['Date'] >= start.date, 'Date'])
} else {
price.df <- cbind(price.df, prc.history[prc.history['Date'] >= start.date, 'Adj.Close'])
}
}
colnames(price.df) <- tickers
port.cor <- cor(price.df)
d <- nrow(port.cor)
avg.cor <- (sum(port.cor) - d) / d / (d-1)
print(sprintf('%s Average return correlation in DJI is %f', Sys.time(), avg.cor))
price.df['index'] <- rowSums(price.df)
prc.history <- read.csv('input/^DJI.csv', header = TRUE)
price.df['DJI'] <- prc.history[prc.history['Date'] >= start.date, 'Adj.Close']
price.df <- price.df[c(d+2, d+1, 1:d)]
return(price.df)
}
simu.port <- function() {
nstock <- 30
regimes <- c( 800, 50, 50, 50, 50, 50, 50, 50, 100 )
sigmas <- c(0.01, 0.02, 0.02, 0.01, 0.01, 0.01, 0.02, 0.02, 0.02 )
correls <- c( 0.3, 0.3, 0.9, 0.9, 0.3, 0, 0, 0.9, 0.4 )
init.prc <- rep(100, nstock)
price.df <- data.frame(matrix(init.prc, nrow = 1))
parm.df <- data.frame(matrix(ncol = 2, nrow = 0))
colnames(parm.df) <- c('sigma', 'correl')
ones <- matrix(rep(1, nstock^2), nrow = nstock)
mu <- rep(0, nstock)
cnt <- 1
for (j in 1:length(regimes)) {
corr.mat <- correls[j] * ones + (1 - correls[j]) * diag(nstock)
rtns <- mvrnorm(regimes[j], mu, corr.mat)
for (d in 1:regimes[j]) {
parm.df[cnt, ] <- c(sigmas[j], correls[j])
cnt <- cnt + 1
price.df[cnt, ] <- price.df[cnt-1, ] * (1 + rtns[d, ] * sigmas[j])
}
}
price.df['index'] <- rowSums(price.df)
price.df <- price.df[c(nstock+1, 1:nstock)]
return(list(price.df, parm.df))
}
print(paste(Sys.time(), 'Start'))
# Construct price data frame for all symbols and the portfolio.
garch.period <- 250
corr.period <- 10
set.seed(5)
if (TRUE) {
# Use historical DJI component prices.
var.period <- 250
var.rank <- 2
price.df <- load.dji()
fhs <- calc.var(price.df, garch.period, corr.period, var.period, var.rank, 2)
outfile <- 'output/fhs_dji.csv'
write.csv(fhs, file = outfile, quote = FALSE, sep = ',')
print(paste(Sys.time(), 'Finished DJI. See', outfile))
}
if (TRUE) {
# Use simulated prices.
var.period <- 500
var.rank <- 5
x <- simu.port()
price.df <- x[[1]]
parm.df <- x[[2]]
fhs <- calc.var(price.df, garch.period, corr.period, var.period, var.rank, 1)
fhs <- cbind(parm.df[garch.period:nrow(parm.df), ], fhs)
fhs <- fhs[, c(3, 1, 2, 4:ncol(price.df))]
outfile <- 'output/fhs_sim.csv'
write.csv(fhs, file = outfile, quote = FALSE, row.names = FALSE, sep = ',')
print(paste(Sys.time(), 'Finished simulation. See', outfile))
}
|
a02b285ab79ad7210fb4b421ac6b85ead8dd2a5c
|
a0d07114ee4061ce98714c914a732dfa41f828ff
|
/Scripts/utilityRules/emptyEventsCollection.r
|
3d244072b75e5b83b6a1216b92d5ac987f8cf287
|
[
"Apache-2.0"
] |
permissive
|
VEuPathDB/EuPathDBIrods
|
6459d2abf86102068c379dde1df03e8de9b77d36
|
750d75b7e06621274dbecfd870e7908a2b34ac6a
|
refs/heads/master
| 2023-07-22T10:50:52.205619
| 2023-03-07T18:24:43
| 2023-03-07T18:24:43
| 201,275,608
| 0
| 0
|
Apache-2.0
| 2023-02-24T15:35:30
| 2019-08-08T14:29:11
|
C++
|
UTF-8
|
R
| false
| false
| 523
|
r
|
emptyEventsCollection.r
|
# Flushes all event files from the events collection
utilEmptyEventsCollection {
writeLine("stdout", "Starting deletion of all events");
*results = SELECT DATA_NAME WHERE COLL_NAME == *eventCollection;
foreach(*results) {
*eventDataObject = *results.DATA_NAME;
*eventDataObjectPath = "*eventCollection/*eventDataObject";
msiDataObjUnlink("objPath=*eventDataObjectPath",*Status);
writeLine("stdout", "Removed *eventDataObject");
}
}
input *eventCollection = "/ebrc/workspaces/events"
output ruleExecOut
|
a9fa1ba2ffa7b16c1d6e80d1f180ceb0f985c197
|
61aec7bf3e3d5908151e3936daf35181f23b979a
|
/02_consulta_apis/01_intro_apis.R
|
5511e5adc76922455718e30fecc5a44af4ad0fba
|
[] |
no_license
|
BlueRober/Rep_BlueRober
|
0149d5a06825ad7134d22ea302b842b10c2d6307
|
621a386b65fd46cb78b23edebf55766d011259c3
|
refs/heads/master
| 2021-08-23T12:47:44.428805
| 2017-12-04T23:58:03
| 2017-12-04T23:58:03
| 110,749,648
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 668
|
r
|
01_intro_apis.R
|
#Instala la librería "httr"
#Consulta la documentación y haz la siguiente petición:
#URL: http://www.cartociudad.es/services/api/geocoder/reverseGeocode
#Verbo: GET
#Parámetros: lat=36.9003409 y lon=-3.4244838
#De la respuesta, imprime:
#El cuerpo
#El código HTTP de estado
#Las cabeceras
#La respuesta a este ejercicio debe llamarse 01_intro_apis.[R/py]
install.packages("httr")
library(httr)
rm(list = ls())
url <- ("http://www.cartociudad.es/services/api/geocoder/reverseGeocode/?lat=36.9003409&lon=-3.4244838")
l_verbo <- httr::GET(url)
#body?
print(l_verbo[["body"]])
content(l_verbo)
print(l_verbo[["status_code"]])
print(l_verbo[["headers"]])
|
4b0120c5f3f0b2f332fe7a70a2033db202898d9f
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/synRNASeqNet/examples/parMIEstimate.Rd.R
|
01e3339c1ba97c1333b8ce292e68d44b7f1db055
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 796
|
r
|
parMIEstimate.Rd.R
|
library(synRNASeqNet)
### Name: parMIEstimate
### Title: Parallel Mutual Information Estimation
### Aliases: parMIEstimate
### Keywords: parMIEstimate
### ** Examples
simData <- simulatedData(p = 5, n = 10, mu = 100, sigma = 0.25,
ppower = 0.73, noise = FALSE)
counts <- simData$counts
adjMat <- simData$adjMat
miML <- parMIEstimate(counts, method = "ML", unit = "nat", nchips = 2)
miBJ <- parMIEstimate(counts, method = "Bayes", unit = "nat",
nchips = 2, priorHyperParam = "Jeffreys")
miSH <- parMIEstimate(counts, method = "Shrink", unit = "nat",
nchips = 2)
miKD <- parMIEstimate(counts, method = "KD", nchips = 2)
miKNN <- parMIEstimate(counts, method = "KNN", unit = "nat", k = 3,
nchips = 2)
|
9042faadc0f0ea510f313f62c31fa508ee46c08d
|
b8c43e421f7216167380682c06ed9040db053627
|
/scripts/13.dc_enrichplot.R
|
76aa028ab2265f2c5b6fa86e271952aab6451396
|
[] |
no_license
|
hmtzg/geneexp_mouse
|
5a896cb4722794c85f464a75d459caf84021ffa0
|
1f2434f90404a79c87d545eca8723d99b123ac1c
|
refs/heads/master
| 2022-02-22T13:31:09.135196
| 2022-02-02T09:02:15
| 2022-02-02T09:02:15
| 267,553,488
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,400
|
r
|
13.dc_enrichplot.R
|
library(tidyverse)
library(ggpubr)
library(RColorBrewer)
library(goseq)
theme_set(theme_pubr(base_size = 6, legend = 'top') +
theme(legend.key.size = unit(2,'pt')))
pntnorm <- (1/0.352777778)
dc = readRDS('./data/processed/raw/dc_gse.rds')
signifGO = dc@result[,1:10] %>%
filter( p.adjust< 0.1 & NES < 0) %>%
dplyr::select(ID, Description, NES) # 184 DiCo enriched
# write.table(signifGO, file = 'results/figure4/signifGO_DiCo.csv', row.names = F, quote = F,
# sep = '\t')
# get dico genes
genes = unique(names(which(readRDS('./data/processed/raw/dev_divergent_genes_dc_rank.rds')<0)))
allgo = getgo(genes,"mm9","ensGene")
allgo = allgo[!sapply(allgo,is.null)] # 4741 DiCo genes present in gos
allgo = reshape2::melt(allgo) %>%
set_names(c('ID','gene'))
#
signifGO_genes = left_join(signifGO,allgo)
gogenemat = signifGO_genes %>%
dplyr::select(ID, gene) %>%
unique() %>%
mutate(value = 1) %>%
spread(ID, value, fill = 0)
gogenemat = as.data.frame(gogenemat)
rownames(gogenemat) = gogenemat$gene
gogenemat$gene = NULL
gogenemat = as.matrix(gogenemat)
# gogenemat: list of all genes present in GO categories
jaccardsim = apply(gogenemat, 2, function(x){
apply(gogenemat,2,function(y){
sum(x==1 & y==1) / sum(x==1 | y==1)
})
})
gocatx = signifGO
simmat = jaccardsim[gocatx$ID,gocatx$ID] # change column/rows orders back
k = 25
treex = hclust(dist( t(gogenemat) ))
treecl = cutree(treex, k)
# choose representative GO groups based on max mean similarity to other groups in the same cluster:
reps = sapply(1:k, function(i){
xx=names(which(treecl==i))
if(length(xx)>1){
xx = simmat[xx,xx]
names(which.max(rowMeans(xx)))
} else{
xx
}
})
repclus = setNames(lapply(1:k,function(i) names(which(treecl==i)) ),reps)
newdf = reshape2::melt(repclus) %>%
set_names(c('ID','rep')) %>%
arrange(rep) %>%
left_join(signifGO) %>%
unique()
# representative categories:
newdf %>%
mutate(rep = ID == rep) %>%
filter(rep) %>%
dplyr::select(ID, Description)
# check median jaccard similarities of categories in the same cluster:
streps = sort(sapply(names(repclus), function(i){ median(jaccardsim[repclus[[i]], i]) }))
sort(sapply(names(repclus), function(i){ mean(jaccardsim[repclus[[i]], i]) }))
newdf %>%
filter(rep%in%names(streps)[1]) %>%
dplyr::select(Description) # unrelated groups
newdf %>%
filter(rep%in%names(streps)[2]) # related groups
newdf %>%
filter(rep%in%names(streps)[3]) # related groups
newdf %>%
filter(rep%in%names(streps)[23]) # related groups
reprgenes = signifGO_genes %>%
filter(ID %in%names(repclus)) %>%
dplyr::select(ID, gene, Description) %>%
set_names('ID','gene_id', 'Description')
reprg = reprgenes %>%
mutate(repNames = ifelse(ID%in%'GO:0030193','Other GO', Description ) )
reprg = reprg %>%
group_by(ID) %>%
summarise(n=n()) %>%
arrange(n) %>%
right_join(reprg)
saveRDS(reprg, file='./results/figure4/gorepresentatives.rds')
#####
#####
#####
## re-cluster the out-group: GO:0030193
sort(sapply(names(repclus), function(i){ median(jaccardsim[repclus[[i]], i]) }))
# out-group: 'GO:0030193'
## Second-pass clustering: split the out-group cluster ('GO:0030193') from the
## first GO-term clustering into its own set of representative terms.
## NOTE(review): this chunk depends on objects created earlier in the file
## (repclus, gogenemat, simmat, signifGO, signifGO_genes, reprg) and cannot
## run standalone.
outg = repclus[['GO:0030193']]
# Gene x GO membership matrix restricted to the out-group terms
gogenemat2 = gogenemat[,outg]
# Jaccard similarity of every out-group term against every GO term
jaccardsim2 = apply(gogenemat2,2,function(x){
  apply(gogenemat,2,function(y){
    sum(x==1 & y==1) / sum(x==1 | y==1)
  })
})
# Hierarchical clustering of the out-group terms on gene-membership distance
treex2 = hclust(dist( t(gogenemat2)))
##
k2 = 20
treecl2 = cutree(treex2,k2)
# For each of the k2 clusters pick the member with the highest mean
# similarity to the rest of its cluster as the representative term
reps2 = sapply(1:k2, function(i){
  xx=names(which(treecl2==i))
  if(length(xx)>1){
    xx = simmat[xx,xx]
    names(which.max(rowMeans(xx)))
  } else{
    xx
  }
})
simmat['GO:0032543','GO:0072655']
# Map representative -> member GO IDs
repclus2 = setNames(lapply(1:k2,function(i)names(which(treecl2==i))),reps2)
# Long table: one row per (member ID, representative), annotated with signifGO
newdf2 = reshape2::melt(repclus2) %>%
  set_names(c('ID','rep')) %>%
  arrange(rep) %>%
  left_join(signifGO) %>%
  unique()
# Representative terms and their descriptions
newdf2 %>%
  mutate(rep = ID == rep) %>%
  filter(rep) %>%
  dplyr::select(ID, Description)
# Median within-cluster Jaccard similarity per representative
sort(sapply(names(repclus2), function(i){ median(jaccardsim2[repclus2[[i]], i]) }))
# outgroup: median jaccardsim: 0.00725
newdf2 %>%
  filter(rep=='GO:0072577')
# Genes behind each representative term
reprgenes2 = signifGO_genes %>%
  filter(ID %in%names(repclus2)) %>%
  dplyr::select(ID, gene, Description) %>%
  set_names('ID','gene_id', 'Description')
# Label the residual out-group representative as 'Other GO'
reprg2 = reprgenes2 %>%
  mutate(repNames = ifelse(ID%in%'GO:0072577','Other GO', Description ) )
# Attach per-term gene counts (n) for ordering in the plot
reprg2 = reprg2 %>%
  group_by(ID) %>%
  summarise(n=n()) %>%
  arrange(n) %>%
  right_join(reprg2)
reprg2
saveRDS(reprg2, file='./results/figure4/gorepresentatives2.rds')
sort(sapply(names(repclus2), function(i){ median(jaccardsim[repclus2[[i]], i]) }))
# out-group: 'GO:0072577'
outg = repclus2[['GO:0072577']]
################
################
################
################
##
# Expression-change table, tidied for plotting (Development vs Ageing periods)
expch = readRDS('./data/processed/tidy/expression_change.rds') %>%
  mutate(period = gsub('aging', 'Ageing', period)) %>%
  mutate(period = str_to_title(period) ) %>%
  mutate(period = factor(period, levels = c('Development', 'Ageing'))) %>%
  dplyr::rename(Period = period)
expch %>%
  inner_join(reprg) %>% head
# Mean/median expression change per representative term, tissue and period
enricplot_dat = expch %>%
  inner_join(reprg) %>%
  group_by(Period, ID, tissue, Description, repNames, n) %>%
  summarise(mrho = mean(`Expression Change`),
            medrho = median(`Expression Change`))
range(enricplot_dat$n)
#GO:0009611 : repsonse to wounding
# gx = reprg %>% filter(ID%in%'GO:0009611') %>% pull(gene_id)
#
# expch %>%
#   filter(gene_id%in%gx) %>%
#   group_by(Period, tissue) %>%
#   mutate(mrho = median(`Expression Change`)) %>%
#   ggplot(aes(fill=Period, y=mrho, x=tissue)) +
#   geom_bar(stat='identity', position= position_dodge()) +
#   geom_point( aes(x=tissue, y=`Expression Change`), inherit.aes = F )
# Same summary for the second-pass (out-group resolved) representatives
enricplot_ogr_dat = expch %>%
  inner_join(reprg2) %>%
  group_by(Period, ID, tissue, Description, repNames, n) %>%
  summarise(mrho = mean(`Expression Change`),
            medrho = median(`Expression Change`))
range(unique(enricplot_ogr_dat$n))
# Figure supplement: mean expression change per representative, faceted by tissue
enricplot_ogr = expch %>%
  inner_join(reprg2) %>%
  group_by(Period, ID, tissue, Description, repNames, n) %>%
  summarise(mrho = mean(`Expression Change`),
            medrho = median(`Expression Change`)) %>%
  ggplot(aes(fill=Period, y=mrho, x=reorder(repNames, n) ) ) +
  geom_bar(stat='identity', position=position_dodge()) +
  facet_wrap(~tissue, ncol=4) +
  scale_fill_manual(values=brewer.pal(3,"Set1")[c(2,1)]) +
  coord_flip() +
  geom_hline(yintercept = 0, size=0.3, linetype='solid', color='gray30') +
  geom_vline(xintercept = seq(1.5,25, by=1), linetype = 'dashed', size = 0.2, color = 'gray') +
  theme(legend.position = 'top',
        axis.text.x = element_text(size=4, vjust=2),
        axis.ticks.length.x = unit(0,'pt'),
        axis.text.y = element_text(size=5),
        panel.border = element_blank(),
        axis.line.y = element_blank(),
        axis.line.x = element_blank(),
        plot.title = element_text(vjust = -0.5),
        legend.background = element_rect(color='black', size=0.1),
        legend.key.size = unit(3, 'pt'),
        axis.title.x = element_text(size=6)) +
  xlab('') +
  ylab(bquote('Mean Expression Change ('*rho*')'))
enricplot_ogr
saveRDS(enricplot_ogr_dat, 'results/source_data/f4/fs1.rds')
ggsave('./results/figure_supplements/fs4/FS1.pdf', enricplot_ogr, units='cm', width = 16, height = 12,
       useDingbats=F)
ggsave('./results/figure_supplements/fs4/FS1.png', enricplot_ogr, units='cm', width = 16, height = 12)
#
|
c6303bfd1b0698cffda19d1c9ce1acaa5ccd9997
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/signal/examples/fftfilt.Rd.R
|
671f33cec90f80aefc90326e4e31548cd1e9a3f9
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 424
|
r
|
fftfilt.Rd.R
|
library(signal)
### Name: fftfilt
### Title: Filters with an FIR filter using the FFT
### Aliases: fftfilt filter.FftFilter FftFilter
### Keywords: math

### ** Examples

# One-second time grid and a 2.3 Hz sinusoid corrupted with Gaussian noise
tm <- seq(0, 1, len = 100)
noisy <- sin(2*pi*tm*2.3) + 0.25*rnorm(length(tm))
# Smooth with a 10-point moving-average FIR filter applied via the FFT
smoothed <- fftfilt(rep(1, 10)/10, noisy)
# Overlay the filtered series on the raw signal
plot(tm, noisy, type = "l")
lines(tm, smoothed, col = "red")
|
03e8260a269d6d23b2801207036ceed94cbc63ad
|
0fd24a91d518bb7fba84bea5b18b9f9f8daeb4c4
|
/Code/exploratory/weather_Jasper.R
|
b184ea33ee9cb3e03c55aa040f9498c471ac1303
|
[] |
no_license
|
hayleykilroy/Dimensions
|
918ee5d76f54dbdc6ab4ad00113f00428f3f090e
|
fffa946fdfa8bba80f0e057f950dad0e8e0e43ad
|
refs/heads/master
| 2021-01-24T06:39:52.294012
| 2014-02-21T15:35:06
| 2014-02-21T15:35:12
| 2,681,118
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,736
|
r
|
weather_Jasper.R
|
###Script to analyse Cape Point weather data
# Breakpoint analysis of monthly rainfall and temperature series with bfast.
# NOTE(review): hard-coded Windows paths; na.spline/na.approx come from zoo,
# which is presumably attached as a dependency of bfast -- confirm.
library(bfast)
dat<-read.table("D:\\Jasper\\Side projects\\Taylor plots\\Climate\\weatherdataforcapepoint\\TM_all_Cpoint_Slangkop.txt", header=T, stringsAsFactors =F)
###Some data summarizing
# Aggregate daily records to monthly totals (rain) and means (temperature)
dat[,1]<-as.Date(dat$Date)
month<-cut(dat[,1],"month")
mmp<-tapply(dat$Rain.1, month, "sum")
mxt<-tapply(dat$MaxTemp.1, month, "mean")
mnt<-tapply(dat$MinTemp.1, month, "mean")
# Keep the month label alongside each aggregated value
mmp<-cbind(mmp,levels(month))
mxt<-cbind(mxt,levels(month))
mnt<-cbind(mnt,levels(month))
###bfast time-series analysis
#Rainfall
x<-na.omit(mmp)
datTS<-ts(as.numeric(x[,1]), frequency =12) #
# h = minimal segment size of one year relative to series length
fit <- bfast(datTS,h=12/length(x[,1]), season="dummy", max.iter=10)
pdf("D:\\SAEON\\Projects\\Cape Point\\Taylor plots\\Results_temp\\monthlyrain.pdf",width=6, height=6)
plot.bfast(fit, main="Mean monthly rainfall")
dev.off()
#Max Temp
# Rows 757:1320 restrict the analysis window; gaps filled by spline
x<-mxt[757:1320,]
#x<-na.omit(x)
datTS<-ts(as.numeric(x[,1]), frequency =12) #
datTS<-na.spline(datTS)#, xout=levels(month))
datTS<-ts(datTS, frequency =12)
fit <- bfast(datTS,h=12/length(x[,1]), season="dummy", max.iter=10)
pdf("D:\\SAEON\\Projects\\Cape Point\\Taylor plots\\Results_temp\\monthlymaxtemp_splined.pdf",width=6, height=6)
plot(fit, main="Mean monthly maximum temperature")
dev.off()
#Min Temp
# Same window as max temp, but gaps filled by linear interpolation
x<-mnt[757:1320,]
#x<-na.omit(mnt)
datTS<-ts(as.numeric(x[,1]), frequency =12) #
datTS<-na.approx(datTS)#, xout=levels(month))
datTS<-ts(datTS, frequency =12)
fit <- bfast(datTS,h=12/length(x[,1]), season="dummy", max.iter=10)
pdf("D:\\SAEON\\Projects\\Cape Point\\Taylor plots\\Results_temp\\monthlymintemp_approxed.pdf",width=6, height=6)
plot(fit, main="Mean monthly minimum temperature")
dev.off()
###Climate indices
#install.packages("RClimDex")
#library(RClimDex)
#
|
4391d2b206754f1c09b0979b095c0fffd7027420
|
7daecd71c0afb957233378de9a01ffe97c977898
|
/app.R
|
dc5a365de5cca2949023ba84632b392a02280c73
|
[] |
no_license
|
simongonzalez/ausflightviewer
|
a73312efab401de35914383f34e05271327167f6
|
cd0f7ab72125083f0116910474a966f06ae0d0e5
|
refs/heads/master
| 2020-08-31T13:01:59.430070
| 2019-11-04T02:02:28
| 2019-11-04T02:02:28
| 218,697,149
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,151
|
r
|
app.R
|
#author: Simon Gonzalez
#email: simon.gonzalez@anu.edu.au
#date: 31 October 2019
library(shiny)
library(shinydashboard)
library(jsonify)
library(mapdeck)
library(tidyverse)
library(highcharter)
library(streamgraph)
library(viridis)
library(shinyjqui)
library(shinyalert)
library(shinyBS)
library(shinyWidgets)
# Dashboard layout: header with author info, sidebar with plotting controls
# (metric, ports, grouping, year range), body with the mapdeck map and a
# streamgraph. Control widgets are rendered server-side (uiOutput) because
# their choices depend on the loaded data.
ui <- dashboardPage(
  dashboardHeader(title = "AU Flight Viewer",
                  dropdownMenu(badgeStatus = NULL,icon = icon('info'), headerText = 'App creator', type = 'messages',
                               notificationItem(
                                 text = "Simon Gonzalez",
                                 icon("user")
                               ),
                               notificationItem(
                                 text = "www.visualcv.com/simongonzalez/",
                                 icon("link"),
                                 status = "success",
                                 href = 'https://www.visualcv.com/simongonzalez'
                               )
                  )),
  dashboardSidebar(
    bsButton("info1", label = "What's this app?", icon = icon("globe"), style = 'info'),
    # Which numeric column of the flights data to visualise
    selectInput('plotVar', label = 'What do you want to plot?', choices = c('Passengers', 'Mail', 'Freight'), selected = 'Passengers'),
    uiOutput('ausportUI'),
    # Group arcs either by individual foreign port or by country
    radioButtons("plotBy", label = 'Plot by Destination or by Country',
                 choices = list("ForeignPort" = "ForeignPort", "Country" = "Country"),
                 selected = "ForeignPort"),
    uiOutput('foreignPortUI'),
    uiOutput('yearUI'),
    checkboxInput("proportionalSize", label = "Proportional Size", value = TRUE)
  ),
  dashboardBody(
    bsButton("btn1", label = "Map not loading?", icon = icon("exclamation-circle"), style = 'danger'),
    # Resizable map panel
    jqui_resizable(mapdeckOutput(
      outputId = 'myMap'
    )),
    streamgraphOutput(outputId = 'yearPlot')
  )
)
# Server logic: loads the flights csv once, builds the dependent input
# widgets, renders the base map, and redraws arcs/points whenever a filter
# changes. Each render/observe guards against NULL inputs during startup.
server <- function(input, output, session) {

  # Load and rescale the flight data (scaling keeps arc widths usable)
  df <- reactive({
    df <- read.csv('airlines.csv')
    df$Passengers <- df$Passengers / 5000
    df$Freight <- df$Freight / 1000
    df$Mail <- df$Mail / 1000
    return(df)
  })

  # Australian-port selector, populated from the data
  output$ausportUI <- renderUI({
    if(is.null(df()))
      return()
    df <- df()
    tmpList <- sort(unique(df$AustralianPort))
    selectInput('ausport', 'Select Australian Port Location', choices = tmpList, selected = tmpList, multiple = T)
  })

  # Foreign-port (or country) selector, restricted to the chosen AU ports
  output$foreignPortUI <- renderUI({
    if(is.null(df()))
      return()
    if(is.null(input$ausport))
      return()
    df <- df()
    df <- df[df$AustralianPort %in% input$ausport,]
    tmpList <- sort(unique(df[[input$plotBy]]))
    selectInput('foreignPort', label = NULL, choices = tmpList, selected = tmpList[1:10], multiple = T)
  })

  # Animated year-range slider, bounded by the filtered data
  output$yearUI <- renderUI({
    if(is.null(df()))
      return()
    if(is.null(input$ausport))
      return()
    if(is.null(input$foreignPort))
      return()
    df <- df()
    df <- df[df$AustralianPort %in% input$ausport,]
    df <- df[df[[input$plotBy]] %in% input$foreignPort,]
    tmpList <- sort(unique(df$Year))
    sliderInput('year', 'Year of the data collected', min = min(tmpList), max = max(tmpList), step = 1, value = c(min(tmpList), max(tmpList)), animate = T)
  })

  # Base map; layers are added/updated by the observer below
  output$myMap <- renderMapdeck({
    mapdeck(style = mapdeck_style('dark'), pitch = 35 )
  })

  # Redraw arcs and destination points whenever any filter changes
  observe({
    if(is.null(df()))
      return()
    if(is.null(input$ausport))
      return()
    if(is.null(input$foreignPort))
      return()
    if(is.null(input$year))
      return()
    df <- df()
    df <- df[df$AustralianPort %in% input$ausport,]
    df <- df[df[[input$plotBy]] %in% input$foreignPort,]
    df <- df[df$Year >= input$year[1] & df$Year <= input$year[length(input$year)],]
    df <- df[df[[input$plotBy]] != 0,]
    if(nrow(df) == 0)
      return()
    #df <- df %>% group_by()
    # NOTE(review): Mapbox token is hard-coded here; `key` below is unused
    set_token('pk.eyJ1Ijoic2ltb25nb256YWxleiIsImEiOiJjazJjc3Y4dGUyMXR3M21vNnp6b29xdGQ0In0.9jrdfph8jioWuTA3XXY1UQ')
    key <- 'pk.eyJ1Ijoic2ltb25nb256YWxleiIsImEiOiJjazJjc3Y4dGUyMXR3M21vNnp6b29xdGQ0In0.9jrdfph8jioWuTA3XXY1UQ'
    # Two branches differ only in how arc width is set: proportional to the
    # chosen metric, or a constant width with metric mapped to stroke colour
    if(input$proportionalSize){
      mapdeck_update(map_id = 'myMap') %>%
        add_arc(
          data = df
          , layer_id = "arc_layer"
          , origin = c("aus_lon", "aus_lat")
          , destination = c("for_lon", "for_lat")
          , stroke_from = 'AusCol'
          , stroke_to = 'ForCol'
          , stroke_width = input$plotVar
          , tooltip = 'pair'
          , auto_highlight = T
        )%>%
        add_scatterplot(
          data = df
          , lon = "for_lon"
          , lat = "for_lat"
          , radius = 100000
          , fill_colour = 'ForCol'
          , layer_id = "scatter"
          , fill_opacity = 1
        )
    }else{
      mapdeck_update(map_id = 'myMap') %>%
        add_arc(
          data = df
          , layer_id = "arc_layer"
          , origin = c("aus_lon", "aus_lat")
          , destination = c("for_lon", "for_lat")
          , stroke_from = 'AusCol'
          , stroke_to = input$plotVar
          , tooltip = 'pair'
          , auto_highlight = T
        )%>%
        add_scatterplot(
          data = df
          , lon = "for_lon"
          , lat = "for_lat"
          , radius = 100000
          , fill_colour = 'ForCol'
          , layer_id = "scatter"
          , fill_opacity = 1
        )
    }
  })

  # Info / troubleshooting popups
  observeEvent(input$info1, {
    sendSweetAlert(
      session = session,
      title = "Australian Flight Viewer",
      text = HTML('This app allows users to see the fligh history from Australian ports to international ones. The data is publically available at https://www.bitre.gov.au/publications/ongoing/airport_traffic_data.aspx'),
      closeOnClickOutside = T, type = 'info'
    )
  })
  observeEvent(input$btn1, {
    sendSweetAlert(
      session = session,
      title = "Reload App",
      text = 'If lines are shown but not the map, please reload the app.',
      closeOnClickOutside = T, type = 'warning'
    )
  })
}
shinyApp(ui, server)
|
ffa01d1aba7a8fed238adbaeba8d02f1dfcfc063
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/koRpus/examples/K.ld.Rd.R
|
f4977b9f0b009bd684248f4e06141fb8c3259cdf
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 179
|
r
|
K.ld.Rd.R
|
# Auto-generated example stub for koRpus::K.ld (Yule's K lexical diversity).
# The example itself is wrapped in 'Not run' because it needs a tagged text.
library(koRpus)
### Name: K.ld
### Title: Lexical diversity: Yule's K
### Aliases: K.ld
### Keywords: LD
### ** Examples
## Not run:
##D K.ld(tagged.text)
## End(Not run)
|
9611a0704552db45dd616fd8a7f0617274acebb6
|
e2e88ba6101c30b4d1a3c7c381f8bd92948744df
|
/R/data_utils.R
|
25c5d6a40db0022fe515f42a55171969585b1200
|
[] |
no_license
|
RoonakR/RandomisedTrialsEmulation
|
d5c94a979658230f10017d4232cff5c24f6b5005
|
309f7bd0fe68f83638de9b6ef67d7f50062c9084
|
refs/heads/main
| 2023-08-03T07:36:34.515635
| 2021-06-02T09:35:45
| 2021-06-02T09:35:45
| 364,294,343
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,502
|
r
|
data_utils.R
|
#' Read Data Function
#'
#' Reads the trial data either from a bigmemory-backed matrix (one id at a
#' time, when \code{id_num} is given) or from a csv file, keeps only the
#' columns required downstream, and renames them to the canonical names
#' (\code{id}, \code{period}, \code{treatment}, \code{outcome},
#' \code{eligible}) used by the rest of the package.
#'
#' @param data_address Address for data read with bigmemory
#' @param data_path Path of the csv file
#' @param id_num Id number
#' @param id Name of the data column for id feature Defaults to id
#' @param period Name of the data column for period feature Defaults to period
#' @param treatment Name of the data column for treatment feature Defaults to treatment
#' @param outcome Name of the data column for outcome feature Defaults to outcome
#' @param eligible Indicator of whether or not an observation is eligible to be expanded about Defaults to eligible
#' @param eligible_wts_0 Eligibility criteria used in weights for model condition Am1 = 0
#' @param eligible_wts_1 Eligibility criteria used in weights for model condition Am1 = 1
#' @param outcomeCov_var List of individual baseline variables used in final model
#' @param cov_switchn Covariates to be used in logistic model for switching probabilities for numerator model
#' @param cov_switchd Covariates to be used in logistic model for switching probabilities for denominator model
#' @param cov_censed Covariates to be used in logistic model for censoring weights for denominator model
#' @param cov_censen Covariates to be used in logistic model for censoring weights for nominator model
#' @param cense Censoring variable
#' @param where_var Variables used in where conditions used in subsetting the data used in final analysis (where_case), the variables not included in the final model
#' @return A data.table restricted to the required columns, with canonical
#'   column names
read_data <- function(data_address, data_path=NA, id_num=NA,
                      id="id",
                      period="period",
                      treatment="treatment",
                      outcome="outcome",
                      eligible="eligible",
                      eligible_wts_0=NA,
                      eligible_wts_1=NA,
                      outcomeCov_var=NA,
                      cov_switchn=NA, cov_switchd=NA,
                      cov_censed=NA, cov_censen=NA, cense=NA, where_var=NA){
  # Collect every covariate referenced by any model specification.  A
  # specification contributes its entire vector when it holds at least one
  # non-NA entry (same rule as the former repeated if-blocks); duplicates
  # are dropped, keeping first-seen order.
  cov_specs <- list(eligible_wts_0, eligible_wts_1, outcomeCov_var,
                    cov_switchd, cov_switchn, cov_censed, cov_censen,
                    cense, where_var)
  covs <- unique(unlist(lapply(cov_specs,
                               function(v) if (any(!is.na(v))) v else NULL)))
  cols = c(id, period, treatment, outcome, eligible, covs)
  # Pull one id from the shared big.matrix, or read the full csv
  if(!is.na(id_num)){
    data = data_address[mwhich(data_address, c("id"), c(id_num), c('eq')),]
  }else{
    data = fread(data_path, header = TRUE, sep = ",")
  }
  # When no eligibility column exists, treat every row as eligible
  if(!eligible %in% colnames(data)){
    data$eligible = 1
  }
  data_new = as.data.table(data)
  data_new = subset(data_new, select=cols)
  # Rename to canonical names; warnings suppressed because some columns may
  # already carry the canonical name (the previous tryCatch had no handlers
  # and was a no-op, so it has been removed)
  suppressWarnings(setnames(data_new,
                            c(id, period, outcome, eligible, treatment),
                            c("id", "period", "outcome", "eligible", "treatment")))
  if(any(!is.na(eligible_wts_0))){
    setnames(data_new, c(eligible_wts_0), c("eligible_wts_0"))
  }
  if(any(!is.na(eligible_wts_1))){
    setnames(data_new, c(eligible_wts_1), c("eligible_wts_1"))
  }
  rm(data, covs, cols)
  return(data_new)
}
#' Period Expanding Function
#'
#' Expands a group's period value into the sequence 0..period, using the
#' position of the last (non-duplicated from the end) period entry.
#' @param y A data.table/list with a \code{period} column
f <- function(y){
  periods <- y$period
  # Position(s) of the final occurrence of each period value
  final_pos <- which(!duplicated(periods, fromLast = TRUE))
  seq(0, periods[final_pos])
}
#' For_period Feature Function
#'
#' Takes a data.table with id and period columns, replicates each row
#' period + 1 times, and generates the for_period counter (0..period)
#' within each (id, period) group via \code{f()}.
#' @param x The data.table with id and period columns
#' @return A vector with the for_period value of every expanded row
for_period_func <- function(x){
  # Replicate every row period + 1 times, keeping only id and period
  x_new = x[rep(1:.N, period+1), .(id, period)]
  # Number the replicates 0..period inside each (id, period) group; .BY
  # carries the group's (id, period) values into f()
  x_new[, for_period := f(.BY), by=.(id, period)]
  return(x_new[, for_period])
}
#' Weight Calculation Function
#'
#' Fits the treatment-switching (and optionally censoring) logistic models
#' and attaches inverse-probability weights (\code{wt}, \code{wtC}) to the
#' data. Four switching models are fitted: numerator/denominator for each
#' previous-treatment stratum (Am1 = 0 via \code{eligible0}, Am1 = 1 via
#' \code{eligible1}). NOTE(review): relies on \code{weight_lr} defined
#' elsewhere in the package to fit each model.
#'
#' @param sw_data A data.table
#' @param cov_switchn List of covariates to be used in logistic model for switching probabilities for numerator model
#' @param model_switchn List of models (functions) to use the covariates from cov_switchn
#' @param class_switchn Class variables used in logistic model for nominator model
#' @param cov_switchd List of covariates to be used in logistic model for switching probabilities for denominator model
#' @param model_switchd List of models (functions) to use the covariates from cov_switchd
#' @param class_switchd Class variables used in logistic model for denominator model
#' @param eligible_wts_0 Eligibility criteria used in weights for model condition Am1 = 0
#' @param eligible_wts_1 Eligibility criteria used in weights for model condition Am1 = 1
#' @param cense Censoring variable
#' @param pool_cense Pool the numerator and denominator models (0: split models by previous treatment Am1 = 0 and Am1 = 1 as in treatment models and 1: pool all observations together into a single numerator and denominator model) Defaults to 0
#' @param cov_censed List of covariates to be used in logistic model for censoring weights in denominator model
#' @param model_censed List of models (functions) to use the covariates from cov_censed
#' @param class_censed Class variables used in censoring logistic regression in denominator model
#' @param cov_censen List of covariates to be used in logistic model for censoring weights in numerator model
#' @param model_censen List of models (functions) to use the covariates from cov_censen
#' @param class_censen Class variables used in censoring logistic regression in numerator model
#' @param include_regime_length If defined as 1 a new variable (time_on_regime) is added to dataset - This variable stores the duration of time that the patient has been on the current treatment value
#' @param numCores Number of cores for parallel programming
#' @return The input data with fitted switching/censoring probabilities and
#'   the combined weight column \code{wt} (= switching weight * \code{wtC})
weight_func <- function(sw_data, cov_switchn=NA, model_switchn=NA,
                        class_switchn=NA, cov_switchd=NA,
                        model_switchd=NA, class_switchd=NA,
                        eligible_wts_0=NA, eligible_wts_1=NA,
                        cense=NA, pool_cense=0, cov_censed=NA,
                        model_censed=NA, class_censed=NA,
                        cov_censen=NA, model_censen=NA, class_censen=NA,
                        include_regime_length=0,
                        numCores=NA){
  # Optionally add time-on-regime terms to both switching models
  if(include_regime_length == 1){
    model_switchd <- c(model_switchd, "time_on_regime", "time_on_regime2")
    model_switchn <- c(model_switchn, "time_on_regime", "time_on_regime2")
  }
  # Build the model formulas: treatment ~ covariates (or intercept only)
  # ------------------- eligible0 == 1 --------------------
  # --------------- denominator ------------------
  if(any(!is.na(cov_switchd))){
    len_d = length(model_switchd)
    regformd <- paste(
      paste("treatment", "~"),
      paste(
        paste(model_switchd, collapse="+"),
        sep="+"
      )
    )
  }else{
    len_d = 0
    regformd <- paste(
      paste("treatment", "~"),
      "1"
    )
  }
  if(any(!is.na(model_switchn))){
    len_n = length(model_switchn)
    regformn <- paste(
      paste("treatment", "~"),
      paste(
        paste(model_switchn, collapse="+"),
        sep="+"
      )
    )
  }else{
    len_n = 0
    regformn <- paste(
      paste("treatment", "~"),
      "1"
    )
  }
  # Four switching-model specs: (denominator, numerator) x (Am1=0, Am1=1),
  # optionally restricted by the eligible_wts_* flags
  d = list(
    list(sw_data[if(any(!is.na(eligible_wts_0)))
      (eligible0 == 1 & eligible_wts_0 == 1) else eligible0 == 1], regformd, class_switchd),
    list(sw_data[if(any(!is.na(eligible_wts_0)))
      (eligible0 == 1 & eligible_wts_0 == 1) else eligible0 == 1], regformn, class_switchn),
    list(sw_data[if(any(!is.na(eligible_wts_1)))
      (eligible1 == 1 & eligible_wts_1 == 1) else eligible1 == 1], regformd, class_switchd),
    list(sw_data[if(any(!is.na(eligible_wts_1)))
      (eligible1 == 1 & eligible_wts_1 == 1) else eligible1 == 1], regformn, class_switchn)
  )
  # Fit serially or in parallel (mclapply); weight_lr fits one model spec
  if(numCores == 1) {
    # cl <- makeCluster(numCores)
    # m = parLapply(cl, d, weight_lr)
    # stopCluster(cl)
    m = lapply(d, weight_lr)
  } else {
    m = mclapply(d, weight_lr, mc.cores=numCores)
  }
  # Fitted probabilities P(treatment=1 | Am1=0), denominator model
  print("P(treatment=1 | treatment=0) for denominator")
  model1 = m[[1]]
  print(summary(model1))
  switch_d0 = data.table(p0_d = model1$fitted.values,
                         eligible0 = unlist(model1$data$eligible0),
                         id = model1$data[, id],
                         period = model1$data[, period])
  # -------------- numerator --------------------
  print("P(treatment=1 | treatment=0) for numerator")
  model2 = m[[2]]
  print(summary(model2))
  switch_n0 = data.table(p0_n = model2$fitted.values,
                         eligible0 = unlist(model2$data$eligible0),
                         id = model2$data[, id],
                         period = model2$data[, period])
  # ------------------- eligible1 == 1 --------------------
  # --------------- denominator ------------------
  print("P(treatment=1 | treatment=1) for denominator")
  model3 = m[[3]]
  print(summary(model3))
  switch_d1 = data.table(p1_d = model3$fitted.values,
                         eligible1 = unlist(model3$data$eligible1),
                         id = model3$data[, id],
                         period = model3$data[, period])
  # -------------------- numerator ---------------------------
  print("P(treatment=1 | treatment=1) for numerator")
  model4 = m[[4]]
  print(summary(model4))
  switch_n1 = data.table(p1_n = model4$fitted.values,
                         eligible1 = unlist(model4$data$eligible1),
                         id = model4$data[, id],
                         period = model4$data[, period])
  # Join numerator/denominator fits per stratum, then merge back onto the
  # full data by (id, period)
  switch_0 = switch_d0[switch_n0, on = .(id=id, period=period,
                                         eligible0=eligible0)]
  switch_1 = switch_d1[switch_n1, on = .(id=id, period=period,
                                         eligible1=eligible1)]
  new_data = Reduce(function(x,y) merge(x, y,
                                        by = c("id", "period"),
                                        all = TRUE),
                    list(sw_data, switch_1, switch_0))
  rm(switch_d0, switch_d1, switch_n0, switch_n1, switch_1, switch_0)
  # Drop the duplicated eligibility columns produced by the merge
  new_data[, eligible0.y := NULL]
  new_data[, eligible1.y := NULL]
  setnames(new_data, c("eligible0.x", "eligible1.x"),
           c("eligible0", "eligible1"))
  # ---------- censoring models (only when cense is given) ----------
  if(!is.na(cense)){
    # Formulas model P(not censored): (1 - cense) ~ covariates
    if(any(!is.na(model_censed))){
      regformd <- paste(
        paste("1", "-"),
        paste(eval(cense), "~"),
        paste(
          paste(model_censed, collapse="+"),
          sep="+"
        )
      )
    }else{
      regformd <- paste(
        paste("1", "-"),
        paste(eval(cense), "~"),
        "1"
      )
    }
    if(any(!is.na(model_censen))){
      regformn <- paste(
        paste("1", "-"),
        paste(eval(cense), "~"),
        paste(
          paste(model_censen, collapse="+"),
          sep="+"
        )
      )
    }else{
      regformn <- paste(
        paste("1", "-"),
        paste(eval(cense), "~"),
        "1"
      )
    }
    if(pool_cense == 1){
      # Pooled: one numerator and one denominator model over all rows
      # -------------------- denominator -------------------------
      print("Model for P(cense = 0 | X ) for denominator")
      # ------------------------------------------------------------
      d = list(
        list(new_data, regformd, class_censed),
        list(new_data, regformn, class_censen)
      )
      if(numCores == 1) {
        # cl <- makeCluster(numCores)
        # m = parLapply(cl, d, weight_lr)
        # stopCluster(cl)
        m = lapply(d, weight_lr)
      } else {
        m = mclapply(d, weight_lr, mc.cores=numCores)
      }
      model1.cense = m[[1]]
      print(summary(model1.cense))
      cense_d0 = data.table( pC_d = model1.cense$fitted.values,
                             id = model1.cense$data[, id],
                             period = model1.cense$data[, period])
      # --------------------- numerator ---------------------------
      print("Model for P(cense = 0 | X ) for numerator")
      # ---------------------------------------------------------
      model2.cense = m[[2]]
      print(summary(model2.cense))
      cense_n0 = data.table( pC_n = model2.cense$fitted.values,
                             id = model2.cense$data[, id],
                             period = model2.cense$data[, period])
      new_data = Reduce(function(x,y) merge(x, y,
                                            by = c("id", "period"),
                                            all.x = TRUE, all.y = TRUE),
                        list(new_data, cense_d0, cense_n0))
      rm(cense_d0, cense_n0)
    }else{
      # Split: censoring models fitted separately per Am1 stratum
      # ---------------------- denominator -----------------------
      print("Model for P(cense = 0 | X, Am1=0) for denominator")
      # ---------------------- eligible0 ---------------------------
      d = list(
        list(new_data[eligible0 == 1], regformd, class_censed),
        list(new_data[eligible0 == 1], regformn, class_censen),
        list(new_data[eligible1 == 1], regformd, class_censed),
        list(new_data[eligible1 == 1], regformn, class_censen)
      )
      if(numCores == 1) {
        # cl <- makeCluster(numCores)
        # m = parLapply(cl, d, weight_lr)
        # stopCluster(cl)
        m = lapply(d, weight_lr)
      } else {
        m = mclapply(d, weight_lr, mc.cores=numCores)
      }
      model1.cense = m[[1]]
      print(summary(model1.cense))
      cense_d0 = data.table( pC_d0 = model1.cense$fitted.values,
                             id = model1.cense$data[, id],
                             period = model1.cense$data[, period])
      # -------------------------- numerator ----------------------
      print("Model for P(cense = 0 | X, Am1=0) for numerator")
      #--------------------------- eligible0 -----------------------
      model2.cense = m[[2]]
      print(summary(model2.cense))
      cense_n0 = data.table( pC_n0=model2.cense$fitted.values,
                             id = model2.cense$data[, id],
                             period = model2.cense$data[, period])
      # ------------------------- denomirator ---------------------
      print("Model for P(cense = 0 | X, Am1=1) for denominator")
      # ------------------------ eligible1 -------------------------
      model3.cense = m[[3]]
      print(summary(model3.cense))
      cense_d1 = data.table( pC_d1=model3.cense$fitted.values,
                             id = model3.cense$data[, id],
                             period = model3.cense$data[, period])
      # ------------------------ numerator -------------------------
      print("Model for P(cense = 0 | X, Am1=1) for numerator")
      # ------------------------- eligible1 -----------------------
      model4.cense = m[[4]]
      print(summary(model4.cense))
      # NOTE(review): built as data.frame while the others are data.table --
      # works with the subsequent merge, but looks like an oversight
      cense_n1 = data.frame( pC_n1 = model4.cense$fitted.values,
                             id = model4.cense$data[, id],
                             period = model4.cense$data[, period])
      cense_0 = cense_d0[cense_n0, on = .(id=id, period=period)]
      cense_1 = cense_d1[cense_n1, on = .(id=id, period=period)]
      new_data = Reduce(function(x,y) merge(x, y,
                                            by = c("id", "period"),
                                            all.x = TRUE, all.y = TRUE),
                        list(new_data, cense_0, cense_1))
      rm(cense_n1, cense_d1, cense_n0, cense_d0, cense_0, cense_1)
    }
  }
  # ---------- switching weight wt ----------
  # wt = P_num / P_den for the observed treatment, within the Am1 stratum;
  # rows outside the eligible_wts_* criteria get weight 1
  if(any(!is.na(eligible_wts_0))){
    new_data[(am_1 == 0 & eligible_wts_0 == 1 & treatment == 0 & !is.na(p0_n) & !is.na(p0_d)),
             wt := (1.0-p0_n)/(1.0-p0_d)]
    new_data[(am_1 == 0 & eligible_wts_0 == 1 & treatment == 1 & !is.na(p0_n) & !is.na(p0_d)),
             wt := p0_n/p0_d]
    new_data[(am_1 == 0 & eligible_wts_0 == 0), wt := 1.0]
  }else{
    new_data[(am_1 == 0 & treatment == 0 & !is.na(p0_n) & !is.na(p0_d)),
             wt := (1.0-p0_n)/(1.0-p0_d)]
    new_data[(am_1 == 0 & treatment == 1 & !is.na(p0_n) & !is.na(p0_d)),
             wt := p0_n/p0_d]
  }
  if(any(!is.na(eligible_wts_1))){
    new_data[(am_1 == 1 & eligible_wts_1 == 1 &treatment == 0 & !is.na(p1_n) & !is.na(p1_d)),
             wt := (1.0-p1_n)/(1.0-p1_d)]
    new_data[(am_1 == 1 & eligible_wts_1 == 1 & treatment == 1 & !is.na(p1_n) & !is.na(p1_d)),
             wt := p1_n/p1_d]
    new_data[(am_1 == 1 & eligible_wts_1 == 0), wt := 1.0]
  }else{
    new_data[(am_1 == 1 & treatment == 0 & !is.na(p1_n) & !is.na(p1_d)),
             wt := (1.0-p1_n)/(1.0-p1_d)]
    new_data[(am_1 == 1 & treatment == 1 & !is.na(p1_n) & !is.na(p1_d)),
             wt := p1_n/p1_d]
  }
  # ---------- censoring weight wtC and final weight ----------
  if(is.na(cense)){
    new_data[, wtC := 1.0]
  }else{
    #new_data[, pC_d := as.numeric(NA)]
    #new_data[, pC_n := as.numeric(NA)]
    if(pool_cense == 0){
      # Pick the stratum-specific censoring probabilities
      new_data[am_1 == 0, ':='(pC_n=pC_n0, pC_d=pC_d0)]
      new_data[am_1 == 1, ':='(pC_n=pC_n1, pC_d=pC_d1)]
    }
    # Missing probabilities contribute a neutral factor of 1
    new_data[is.na(pC_d), pC_d := 1]
    new_data[is.na(pC_n), pC_n := 1]
    new_data[, wtC := pC_n/pC_d]
  }
  new_data[, wt := wt * wtC]
  sw_data <- new_data
  rm(new_data)
  gc()
  return(sw_data)
}
|
74ffdcac86ff9cce9e6a8783612729f01aa5e9ef
|
f1616d090bc8bb0737cb2887c2acf48f37d4292a
|
/server.R
|
9f797956dccca44c23f1c576bdd1d5c63434680f
|
[] |
no_license
|
curtiskam/Developing-Data-Products-Project
|
255805078acd9303844bde32e91518f4995d892c
|
283a15a4f1331e2bec6ea0455f0269e2038ad57f
|
refs/heads/master
| 2021-01-10T13:17:11.655673
| 2016-02-27T01:57:09
| 2016-02-27T01:57:09
| 52,640,264
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 476
|
r
|
server.R
|
# This is the server logic for a Shiny web application.
# This app asks the user to pick a year between 1949 and 1960
# then plots the number of airline passengers for that tear
# by month
# NOTE(review): seasonplot() comes from the forecast package; AirPassengers
# is the base-R monthly airline passenger series (1949-1960).
library(forecast)
data(AirPassengers)
shinyServer(
  function(input, output) {
    # Subset the series to Jan-Dec of the selected year and season-plot it
    output$MonthlyPassengers <- renderPlot({seasonplot(window(AirPassengers,start=c(input$Year,1),
                                                              end=c(input$Year,12)), ylab="Passengers in Thousands", main="Passengers for Year")})
  }
)
|
6faf77a60dfd34cef4e6566bf81935687861f619
|
13caa799685c559aa37ba9e5bd8ebab511489837
|
/scripts_for_CGC/combineAllSamplesVariantStatus.R
|
51b9c72aa6f0752cfb34074db6aa2b123e86bdd4
|
[
"MIT"
] |
permissive
|
ds21uab/STAR_protocols_GV_calling
|
37ea5f97bed4543088727833b16cf667c24cc8f9
|
52146406c7c67d261719358b79795799c128c4cc
|
refs/heads/main
| 2023-04-14T10:16:22.123243
| 2022-11-18T23:07:22
| 2022-11-18T23:07:22
| 419,077,939
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,489
|
r
|
combineAllSamplesVariantStatus.R
|
#!/usr/bin/Rscript
##Copyright Divya Sahu, 2021
# this script will take GT_samdepth_merged.txt file for each sample and merge them to create a large .txt file with mutation status from all samples
# repeat this step for each data types separately
# this script requires: need full input path and full output path.
# the path in this script is set for wxs-normal. User need to set the path accordingly.
##########################################################################################################
# code chunk:1
##########################################################################################################
# load library
library(data.table)
library(readr)
library(dplyr)
##########################################################################################################
# code chunk:2
##########################################################################################################
# set the input path to read GT_samdepth_merged.txt
input_path <- "/STAR_protocols_GV_calling/analysis/variant_status/wxs-normal/"
# set the output path where combined_variants_AllSamples to be saved
output_path <- "/STAR_protocols_GV_calling/analysis/combined_variant_status/wxs-normal/"
# find all .txt files in the path and list them
filelist = list.files(input_path, pattern = "*.txt", full.names=TRUE)
print(paste0("total files to be merged", ":", " ", length(filelist)))
##########################################################################################################
# code chunk:3
##########################################################################################################
# read files as list (fread keeps column types; strip.white trims fields)
datalist = lapply(filelist, function(x)fread(x, stringsAsFactors=F, strip.white=T, check.names=F, header=TRUE))
mylist = lapply(datalist, function(x) setDF(x)) #convert data.table into data.frame
#print(str(mylist))
rm(filelist, datalist)
##########################################################################################################
# code chunk:4
##########################################################################################################
#check equality of columns before dropping columns
# Sanity checks: every sample file must share the first file's CHROM/POS order
print(paste("dimension_before_dropColumns",":",lapply(mylist, function(x) dim(x)), sep=" "))
print(paste("allchromosome_before_dropColumns", ":",
            names(table(unlist(lapply(mylist, function(x) all(mylist[[1]]$CHROM == x$CHROM))))),
            table(unlist(lapply(mylist, function(x) all(mylist[[1]]$CHROM == x$CHROM)))),
            sep=" "))
print(paste("allposition_before_dropColumns", ":",
            names(table(unlist(lapply(mylist, function(x) all(mylist[[1]]$POS == x$POS))))),
            table(unlist(lapply(mylist, function(x) all(mylist[[1]]$POS == x$POS)))),
            sep=" "))
#print(paste("samdepth_before_dropColumns", ":",
#            names(table(unlist(lapply(mylist, function(x) all(mylist[[1]]$samdepth == x$samdepth))))),
#            table(unlist(lapply(mylist, function(x) all(mylist[[1]]$samdepth == x$samdepth)))),
#            sep= " "))
##########################################################################################################
# code chunk:5
##########################################################################################################
# drop columns from each list of dataframe (per-sample QC columns not needed
# in the combined matrix)
drop <- c("samdepth", "TYPE", "DP", "VD", "AF", "MQ", "GT", "mutation_status")
mylist <- lapply(mylist, function(x) x[, !colnames(x) %in% drop])
##########################################################################################################
# code chunk:6
##########################################################################################################
#check equality of columns after dropping columns
print(paste("dimension_after_dropColumns",":",lapply(mylist, function(x) dim(x)),
            sep=" "))
##########################################################################################################
# code chunk:7
##########################################################################################################
# bind columns of dataframe (valid because all files share the same row order,
# verified in chunk 4)
combined_variants <- bind_cols(mylist)
print(dim(combined_variants))
##########################################################################################################
# code chunk:8
##########################################################################################################
# output combined variants file
fwrite(combined_variants, paste0(output_path, "combinedVariantStatusFromAllSamples.txt"), sep="\t", quote=FALSE)
|
03e3d9d7d808bac4f29f5f13b87d0cb2860188eb
|
2f69fcb191d092c540ac96d25709f93185dd6238
|
/Q2.R
|
2e12888235c30327be742bffaad280b7d37d98f3
|
[] |
no_license
|
vallalbe/MoneySmart_POC
|
16aa187367324c335ff3a65bd143363380c79408
|
3ad8ad3e9dc8b44775e348069d34389195812ff7
|
refs/heads/master
| 2020-04-03T09:23:31.163033
| 2018-11-02T08:23:07
| 2018-11-02T08:23:07
| 155,163,477
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,460
|
r
|
Q2.R
|
# Title : www3.moneysmart.sg
# Objective : Data Team Coding Challenge
# Created by: parivallalr
# Created on: 02/11/18

# Attach required packages.
library(arules)
library(dplyr)

# Read the basket data produced by Q2.scala (one comma-separated basket
# per line) as an arules transactions object, and summarise it.
txns <- read.transactions(file = '/tmp/Q2_itempair_tmp.csv', sep = ",")
summary(txns)

# Mine association rules with the apriori algorithm
# (minimum support 0.1%, minimum confidence 50%).
assoc_rules <- apriori(txns, parameter = list(supp = 0.001, conf = 0.5))

# Show the first few mined rules.
inspect(head(assoc_rules))

# Sample Output as below,
# lhs rhs support confidence lift count
# [1] {} => {1} 0.943396226 0.9433962 1.000000 450
# [2] {Wilson Jones Hanging View Binder, White, 1",1} => {Staple envelope} 0.002096436 1.0000000 5.611765 1
# [3] {Office Impressions End Table, 20-1/2H x 24W x 20D,1} => {KI Adjustable-Height Table} 0.002096436 1.0000000 9.937500 1
# [4] {ACCOHIDE 3-Ring Binder, Blue, 1",1} => {Staple envelope} 0.002096436 1.0000000 5.611765 1
# [5] {Seth Thomas 13 1/2 Wall Clock,1} => {Staples} 0.002096436 1.0000000 4.917526 1
# [6] {Avery Framed View Binder, EZD Ring (Locking), Navy, 1 1/2",1} => {Staples} 0.002096436 1.0000000 4.917526 1
|
3db7981ffb77c283924ac8bc6ea99e372a7648c5
|
b201f1f182b1828a66a2d97baf28224b39d70564
|
/man/create_tm_named_list.Rd
|
d94ddb24a6bff1c92cad0966ee88d852b6e947af
|
[
"MIT"
] |
permissive
|
Drinchai/iatlas-app
|
147294b54f64925fb4ee997da98f485965284744
|
261b31224d9949055fc8cbac53cad1c96a6a04de
|
refs/heads/master
| 2023-02-08T08:17:45.384581
| 2020-07-20T23:27:08
| 2020-07-20T23:27:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 349
|
rd
|
create_tm_named_list.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/til_map_distributions_functions.R
\name{create_tm_named_list}
\alias{create_tm_named_list}
\title{Create Tilmap Named List}
\usage{
create_tm_named_list(tbl)
}
\arguments{
\item{tbl}{A tibble with columns class, display and id}
}
\description{
Create Tilmap Named List
}
|
ff2498506b10e2a7f7799a764650554d38aa8a8c
|
475c630b54fd299748034dd1a0b042d4c37a5a32
|
/R/lanniScript.R
|
39c1f522e953bd62da079e009dc0feb62b5845fd
|
[] |
no_license
|
cat2tom/moddicom
|
2546b2317c4cd954cbd4b58cd236517c1911c629
|
f78af7bd8cfe7ff86f5318b0adf76f6936c5eede
|
refs/heads/master
| 2021-04-29T11:52:00.744484
| 2016-04-07T07:35:33
| 2016-04-07T07:35:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,528
|
r
|
lanniScript.R
|
#
#
# path.pre<-"/progetti/immagini/lanni/pre/22872220";
# path.post<-"/progetti/immagini/lanni/post/22872220"
# ROIName<-"PET+"
#
# # istanzia gli oggetti
# obj.pre <- geoLet();
# obj.post <- geoLet();
# obj.pet <- geoLet();
#
# # carica il post e prendi il dataStorage: mi servità per prendere il nome della serie
# obj.post$openDICOMFolder( path.post );
# ds.post<-obj.post$getAttribute("dataStorage")
# serieName<-names(ds.post$info)
#
# # carica il PRE utilizzando il nome della serie del post
# obj.pre$openDICOMFolder(pathToOpen = path.pre,setValidCTRMNSeriesInstanceUID = serieName)
#
# # fai le verifiche per vedere che non ci siano troppe differenze nelle geometrie
# vox.post<-obj.post$getImageVoxelCube();
#
# # CHIODO per la PET (dato che ne hanno inserite due serie)
# petSeries<-"1.3.12.2.1107.5.1.4.11088.30000014021409165065600002670";
#
# obj.pet$openDICOMFolder(pathToOpen = path.pre,setValidCTRMNSeriesInstanceUID = petSeries)
# ds.pet<-obj.pet$getAttribute("dataStorage")
#
#
# ss<-services();
# voxelVolume.pet<-obj.pet$getImageVoxelCube()
# dx.pet<-ds.pet$info[[1]][[1]]$pixelSpacing[1];
# dy.pet<-ds.pet$info[[1]][[1]]$pixelSpacing[2];
# dz.pet<-as.numeric(abs(ds.pet$info[[1]][[1]]$ImagePositionPatient[3]-ds.pet$info[[1]][[2]]$ImagePositionPatient[3]))
#
#
# dx.post<-ds.post$info[[1]][[1]]$pixelSpacing[1]
# dy.post<-ds.post$info[[1]][[1]]$pixelSpacing[2]
# dz.post<-as.numeric(abs(ds.post$info[[1]][[1]]$ImagePositionPatient[3]-ds.post$info[[1]][[2]]$ImagePositionPatient[3]))
#
# coordsTop.post<-ds.post$info[[1]][[1]]$ImagePositionPatient
# coordsTop.pet<-ds.pet$info[[1]][[1]]$ImagePositionPatient
# top.x.post<-coordsTop.post[1]; top.y.post<-coordsTop.post[2]; top.z.post<-coordsTop.post[3]
# top.x.pet<-coordsTop.pet[1]; top.y.pet<-coordsTop.pet[2]; top.z.pet<-coordsTop.pet[3]
#
# top.x<-top.x.post - top.x.pet
# top.y<-top.y.post - top.y.pet
# top.z<-top.z.post - top.z.pet
# seq.x<-seq(from=top.x,to = dim(vox.post)[1]*dx.post+top.x,by = dx.post)
# seq.y<-seq(from=top.y,to = dim(vox.post)[2]*dy.post+top.y,by = dy.post)
# seq.z<-seq(from=0,to = dim(vox.post)[3]*dz.post,by = dz.post)
#
#
# VoxelCubePointPos.post<-expand.grid(seq.x,seq.y,seq.z)
#
# aa<-ss$new.SV.trilinearInterpolator.onGivenPoints(
# voxelCube = voxelVolume.pet, pixelSpacing.old = c(dx.pet,dy.pet,dz.pet),
# newPointCoords.x = seq.x,newPointCoords.y = seq.y,newPointCoords.z = seq.z)
#
# bbb<-aa
# for(i in seq(1,dim(bbb)[3])) {
# bbb[,,i]<-aa[,,dim(bbb)[3]-i+1]
# }
# aa<-bbb
# rm(bbb)
#
|
84c6c9912c97082380c75ef75dc2aa2d203aeb37
|
557cda9a1cb3fd04da7ef15c9adec69bb3df9888
|
/R/wtshare.R
|
b583bbac830f984a3848b3872da5ca933f8a3c9c
|
[] |
no_license
|
cran/SDAResources
|
7e4cb27a87fa4e8e334f641c419fcc6e912e33a2
|
addafccfb82d962f234606fc6fcb2386fc8f60f3
|
refs/heads/master
| 2023-08-22T23:38:51.589732
| 2021-10-22T08:20:13
| 2021-10-22T08:20:13
| 368,240,812
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,047
|
r
|
wtshare.R
|
#' wtshare data
#'
#'
#' Hypothetical sample of size 114, with indirect sampling. The data set has
#' multiple records for adults with more than one child; if adult 254 has 3 children, adult 254
#' is listed 3 times in the data set. Note that to obtain \eqn{L_k}, you need to take numadult + 1.
#'
#'
#' @format This data frame contains the following columns:
#'
#' \describe{
#' \item{id:}{identification number of adult in sample}
#'
#' \item{child:}{= 1 if record is for a child
#'
#' = 0 if adult has no children}
#'
#' \item{preschool:}{= 1 if child is in preschool
#'
#' = 0 otherwise}
#'
#' \item{numadult:}{number of other adults in population who link to that child}
#'
#' }
#'
#' @docType data
#'
#' @usage data(wtshare)
#'
#'
#'
#' @keywords datasets
#'
#' @references Lohr (2021), Sampling: Design and Analysis, 3rd Edition. Boca Raton, FL: CRC Press.
#'
#' @references Lu and Lohr (2021), R Companion for \emph{Sampling: Design and Analysis, 3rd Edition}, 1st Edition. Boca Raton, FL: CRC Press.
#'
#'
#'
#'
#'
"wtshare"
|
b204fe2b9e026c94387a172d3c4f66b61b545dab
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/ffstream/man/isInteger.Rd
|
971247c8b3271f4a30719fec7efdd06f4d7d11cf
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 317
|
rd
|
isInteger.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{isInteger}
\alias{isInteger}
\title{Check if a value is an integer}
\usage{
isInteger(x)
}
\arguments{
\item{x}{The value to check.}
}
\description{
Function to check that a value \code{x} is an integer.
}
\keyword{internal}
|
46324d74bbcf1cace3436e49962922e73f258c59
|
dd7b1e7337a4e344d4754f3f036e04c5975df256
|
/man/detect_long_lines.Rd
|
7f0b9503532b79f09fd8f490484c693468d4ae56
|
[] |
no_license
|
bnaras/SUtools
|
2aa8e9de0c74c3bc122c720c14f6330b38388963
|
2eb1b3a6fe15181f18db7e5d1d0774a106f74c73
|
refs/heads/master
| 2022-11-25T06:06:41.985476
| 2022-11-18T01:17:57
| 2022-11-18T01:17:57
| 132,827,545
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 605
|
rd
|
detect_long_lines.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/process.R
\name{detect_long_lines}
\alias{detect_long_lines}
\title{Detect if there are long lines in mortran or fortran sections}
\usage{
detect_long_lines(mortran_lines)
}
\arguments{
\item{mortran_lines}{the mortran file lines resulting from a
\code{base::readLines()}, say.}
}
\value{
a possibly empty data frame of the approximate line number
and the offending line if any
}
\description{
Detect if there are long lines in mortran or fortran sections
}
\examples{
\dontrun{
detect_long_lines(readLines("./pcLasso.m"))
}
}
|
2d33c6f97d4f72f97ef52f1e34e49ae78bd679d1
|
dcd5b1085d31c69b874803413b346f5a6a942dde
|
/161017_KatjaNew15Vars.R
|
0b73bacdd8a3ceca981482b3ff7799cce692be04
|
[] |
no_license
|
neiljun/TAI-Volta
|
ab73a1d772d1023615eed8cb7021ec13cfeac3af
|
6dc1dd1debd93c91e38973b1961d5ab47f154c63
|
refs/heads/master
| 2021-01-03T18:06:15.670173
| 2019-04-11T06:54:42
| 2019-04-11T06:54:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,607
|
r
|
161017_KatjaNew15Vars.R
|
## load libraries
library ('gdata')
library ('dplyr')
library ('reshape')
library (corrgram)
library(vegan)
library (ggplot2)
# load libraries for mapping
library(maptools)
library(rgeos)
library(RColorBrewer)
# load libraries for clustering
# NOTE(review): vegan is loaded a second time here (already attached above);
# harmless, but one of the two calls is redundant.
library (vegan)
# library(rgl)
# library(cluster)
library(NbClust)
library(clValid)
# library(MASS)
library(kohonen)
## Functions
# NOTE(review): hard-coded absolute path -- this script only runs on the
# original author's machine as written.
setwd("~/Documents/Projects/TAI/TransformedData/Data_Burkina&Ganha")
# load map for visualizations and data
# Volta basin shapefile; its attribute table (TAI_ID1) is used by volta.only()
# below to filter district-level data.
volta.shp <- readShapeSpatial("~/Documents/Projects/TAI/TransformedData/Bundling/Volta_bundling1.shp")
##### Useful functions for later:
### Select only districts on the Volta Basin
# Keeps only the rows of `x` whose TAI_ID1 occurs in the Volta shapefile's
# attribute table, and drops the factor levels that disappear with them.
# NOTE: relies on the global `volta.shp` object loaded above.
volta.only <- function(x){
  keep <- is.element(x$TAI_ID1, volta.shp@data$TAI_ID1)
  # Return the result explicitly: the original version ended on an
  # assignment (`x <- droplevels(x)`), which returns its value invisibly,
  # so interactive calls printed nothing.
  droplevels(x[keep, ])
}
### Normalizing by scaling everything to 0:1
# Linear rescale of a numeric vector onto [0, 1] (Hadley Wickham's rescale01
# from "R for Data Science"). NA and non-finite values are excluded when
# computing the range but propagate through the arithmetic.
rescale_hw <- function (x){
  lims <- range(x, na.rm = TRUE, finite = TRUE)
  (x - lims[[1]]) / (lims[[2]] - lims[[1]])
}
### Correct the numeric format from excel files
# Coerces every column after the first two ID columns of a data frame to
# numeric and returns the modified data frame.
correct.num <- function (x){
  l <- length(x)
  # Guard: with fewer than 3 columns, 3:l counts backwards (e.g. 3:2) and
  # indexes a non-existent column, so the original errored; here there is
  # simply nothing to convert.
  if (l >= 3) {
    # Convert column by column. Going through as.character() first makes
    # factor columns map to their labels -- matching the old behaviour, where
    # apply() coerced the data frame to a character matrix -- instead of to
    # their internal integer codes.
    x[3:l] <- lapply(x[3:l], function(col) as.numeric(as.character(col)))
  }
  return(x)
}
## Data
## Read data
#### New file with raw values from Katja: 160901 / 161017
file <- 'TAI_Variables_1610_2.xlsx'
sn <- sheetNames(xls=file)
dat <- read.xls (file, sheet = 2)
str(dat)
## Different tries with distance
# Mahalanobis distance on everything except the first (ID) column; the tiny
# tolerance works around a near-singular covariance matrix.
d <- vegdist (dat[-1], method = "mahalanobis", tol= 10e-20)
# d <- mahalanobis(dat[-1], center = c(0,0), cov = cov(dat[-1]), tol = 10e-20)
# d <- dist (dat[-1], method = 'euclidean', diag = F)
# Validating number of clusters
# Number of clusters with NdClust: uses 30 different index and compare them to decide the optimal partition
library (NbClust)
# euclidean and manhattan are not good for gradient separation (see help vegdist)
clust_num <- NbClust( data = dat[-1], diss = d, dist = NULL,
min.nc = 2, max.nc = 12, method = 'ward.D2', alphaBeale = 0.1, index = 'all')
#dist = 'manhattan', # diss = designdist(full [-c(2,23)], '1-J/sqrt(A*B)' )
library (clValid)
## Internal validation
# Compare 9 clustering algorithms over 2..9 clusters on internal criteria
# (connectivity, Dunn, silhouette).
intern <- clValid(obj=as.data.frame(dat[-c(1)]), nClust=c(2:9),
clMethods=c('hierarchical', 'kmeans', 'diana', 'fanny','som',
'pam', 'sota', 'clara', 'model'),
validation='internal')
## Stability validation
# Same algorithm grid, this time scored on stability measures (APN, AD, ...).
stab <- clValid(obj=as.data.frame(dat[-c(1)]), nClust=c(2:9),
clMethods=c('hierarchical', 'kmeans', 'diana', 'fanny', 'som',
'pam', 'sota', 'clara', 'model'),
validation='stability')
summary (intern)
summary (stab)
## Prepare the clustering result dataset
# Non-metric multidimensional scaling for later visualisation of the groups.
mds <- metaMDS(dat[-c(1)], distance = 'manhattan', trymax = 1000, verbose = FALSE)
setwd('~/Documents/Projects/TAI/scripts/TAI-Volta')
### Explore the correlation problem
library (corrgram)
# quartz() is macOS-only; on other platforms use x11()/windows().
quartz(height = 4, width = 4)
## users
corrgram(dat[c(1:9)], type = "data", order = "PCA", lower.panel = panel.cor,
upper.panel = panel.pts, diag.panel = panel.density, main = 'users')
## interactions
corrgram(dat[23:36], type = "data", order = "PCA", lower.panel = panel.cor,
upper.panel = panel.pts, diag.panel = panel.density, main = 'interactions')
## biophysical
corrgram(dat[17:21], type = "data", order = "PCA", lower.panel = panel.cor,
upper.panel = panel.pts, diag.panel = panel.density, main = 'biophysical')
## resource
corrgram(dat[c(10:16, 37)], type = "data", order = "PCA", lower.panel = panel.cor, upper.panel = panel.pts, diag.panel = panel.density, main = 'resource')
|
0388a63953bb49433e805be9eda2a5bcdbf8eda4
|
85628bfeaa2dcb582176ea57ef4db15ea583d928
|
/tests/testthat/test_cluster.R
|
25c27932152f385d2e528eabddf12d61dcf47dd1
|
[
"MIT"
] |
permissive
|
roryk/DEGreport
|
d635134ff23c754df69209ca869e83ecd1854eec
|
75ba67483581e46e9666747f6a7532688f4a97f3
|
refs/heads/master
| 2020-04-06T06:51:09.691661
| 2018-08-10T19:39:46
| 2018-08-10T19:39:46
| 27,650,011
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,301
|
r
|
test_cluster.R
|
context("Clustering")
# Fixture: a small DESeq2 data set built from 11 samples of the packaged
# humanGender data (first 1000 genes only, to keep the tests fast).
data(humanGender)
idx <- c(1:5, 75:80)
counts <- assays(humanGender)[[1]]
dse <- DESeqDataSetFromMatrix(counts[1:1000, idx],
colData(humanGender)[idx,],
design = ~group) %>% DESeq
# The dot-prefixed helpers (.scale, .summarize_scale, ...) are package
# internals -- presumably exercised here via devtools-style loading of the
# full namespace; TODO confirm they stay in sync with the package source.
test_that("transform", {
expect_gte(mean(.scale(counts(dse)[1,])), -0.5)
expect_lte(mean(.scale(counts(dse)[1,])), 0.5)
countsGroup <- .summarize_scale(counts(dse)[1:100,],
colData(dse)[["group"]])
# scaled/summarised values are expected to be centred at zero
expect_equal(mean(countsGroup), 0)
hc <- .make_clusters(countsGroup)
cluster0 <- .select_genes(hc, countsGroup, minc = 5, reduce = TRUE)
cluster <- .select_genes(hc, countsGroup, minc = 5)
# both with and without reduction, exactly two clusters should come out
expect_equal(unique(cluster), c(1, 2))
expect_equal(unique(cluster0), c(1, 2))
df <- data.frame(cluster = cluster, genes = names(cluster))
expect_equal(.median_per_cluster(countsGroup, df) %>% mean, 0)
# NOTE(review): 69 is a snapshot value tied to this exact fixture.
expect_equal(.filter(df, 50) %>% nrow, 69)
expect_equal(.group_metadata(as.data.frame(colData(dse)),
"group", "group", "group") %>% nrow, 2)
})
test_that("groupDifference", {
# 25x2 matrix where 20 rows differ between columns by ~1000; rows whose
# between-column difference is below 500 should be removed.
ma <- matrix(rnorm(50), ncol = 2)
ma[1:20, 2] <- 1000 + ma[1:20, 2]
expect_equal(.remove_low_difference(ma, 500, FALSE) %>% nrow(), 20)
})
|
92f9bdb7557fae2d479dc1cb1160ef45194d3271
|
dd35bc9b79781c34443878243f7678f5232d6872
|
/R/final_speed.R
|
30a4ef74a1d04631b1dcd64206006ce4383728a4
|
[] |
no_license
|
aadler/midsprint
|
cf97421c73054a4d2637ab413f25602b8fa6b96c
|
4aaa5f3de62fea208e468f89a5bd24983ac611d1
|
refs/heads/master
| 2023-04-07T15:20:45.496594
| 2021-04-14T01:04:47
| 2021-04-14T01:04:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 591
|
r
|
final_speed.R
|
#' Final speed reached after a given distance
#'
#' S3 generic; dispatch is on the class of \code{player_profile}.
#'
#' @param player_profile A player sprint-profile object, as used by the other
#'   profile functions in this package.
#' @param current_speed The speed the player is currently moving at.
#' @param distance The distance over which the player continues to accelerate.
#'
#' @return The speed reached after covering \code{distance}, rounded to two
#'   decimal places (see \code{final_speed.default}).
#' @export
#'
#' @examples
#' # final_speed(profile, current_speed = 3, distance = 10)
final_speed <- function (player_profile, current_speed, distance)
{
UseMethod("final_speed")
}
#' @export
final_speed.default <- function(player_profile, current_speed, distance) {
  # Time needed to cover `distance` starting from the current speed, plus the
  # time already "spent" reaching the current speed on the profile's
  # acceleration curve.
  t_cover <- time_to_position(player_profile, current_speed, distance)
  t_offset <- time_speed(player_profile, current_speed)
  # Read the speed off the profile's speed-time curve at the combined time,
  # rounded to two decimal places.
  round(speed_time(player_profile, t_cover + t_offset), 2)
}
|
27bbf15f0449dcfed489e6c5e7fddf787ca2cba1
|
4035bf606855a8e389d0f184b5cf5cd4dcf077f8
|
/Scripts/Utilidades/limma_utils.R
|
4d2a7467ca71f162b63333f8d848e5cd67bcacaf
|
[] |
no_license
|
serogi/TFM
|
6215a3a6a6de91b7c11cc913776ae60bef12361d
|
316f863ff417b1856b86eb530c37ab95aa853204
|
refs/heads/master
| 2020-07-25T08:32:18.623742
| 2019-09-13T09:52:03
| 2019-09-13T09:52:03
| 207,012,094
| 0
| 0
| null | 2019-09-09T12:49:12
| 2019-09-07T18:49:58
| null |
UTF-8
|
R
| false
| false
| 500
|
r
|
limma_utils.R
|
### Two-way ANOVA (with interaction) built on limma's linear-model machinery.
# g1, g2:    factors giving the two experimental conditions (one per sample)
# expr_data: expression matrix, genes in rows and samples in columns
# Returns limma's topTable() for the two main effects plus their interaction.
Anova2way <- function(g1, g2, expr_data){
  design <- model.matrix(~ g1 * g2)
  rownames(design) <- colnames(expr_data)
  cutoff <- 0.05
  fit <- lmFit(expr_data, design)
  res <- eBayes(fit)
  # Benjamini-Hochberg adjustment, applied per coefficient column.
  res$p.adj <- apply(res$p.value, 2, p.adjust, method = "BH")
  # BUG FIX: limma's function is decideTests() (plural); the original called
  # the non-existent decideTest() and would have stopped with an error here.
  # The result is currently unused but kept for interactive inspection.
  results <- decideTests(res, p.value = cutoff)
  tt <- topTable(res, coef = 1:3)
  return(tt)
}
# Placeholder for the planned second variant (the file header mentions a
# with/without-intercept pair); the body is not implemented yet, so calling
# this currently returns NULL.
Anova2way2 <- function(g1, g2, expr_data){
}
|
b41835863700933953cac55b574196b571e63e43
|
577f03954ec69ed82eaea32c62c8eba9ba6a01c1
|
/R/ensemble/h2oEnsemble-package/man/h2o.ensemble.Rd
|
79cb4e2f601ab7ed8576986bbe5a3b84d297b2a8
|
[
"Apache-2.0"
] |
permissive
|
ledell/h2o
|
21032d784a1a4bb3fe8b67c9299f49c25da8146e
|
34e271760b70fe6f384e106d84f18c7f0adb8210
|
refs/heads/master
| 2020-02-26T13:53:01.395087
| 2014-12-29T04:14:29
| 2014-12-29T04:14:29
| 24,823,632
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,286
|
rd
|
h2o.ensemble.Rd
|
\name{h2o.ensemble}
\alias{h2o.ensemble}
\title{
H2O Ensemble
}
\description{
This function creates a "super learner" ensemble using the H2O base learning algorithms specified by the user.
}
\usage{
h2o.ensemble(x, y, data, family = "binomial",
learner, metalearner = "h2o.glm.wrapper",
cvControl = list(V=5, shuffle=TRUE),
seed = 1, parallel = "seq")
}
\arguments{
\item{x}{
A vector containing the names of the predictors in the model.
}
\item{y}{
The name of the response variable in the model.
}
\item{data}{
An \code{\linkS4class{H2OParsedData}} object containing the variables in the model.
}
\item{family}{
A description of the error distribution and link function to be used in the model. This must be a character string. Currently supports \code{"binomial"} and \code{"gaussian"}.
}
\item{learner}{
A string or character vector naming the prediction algorithm(s) used to train the base models for the ensemble. The functions must have the same format as the h2o wrapper functions.
}
\item{metalearner}{
A string specifying the prediction algorithm used to learn the optimal combination of the base learners. Supports both h2o and SuperLearner wrapper functions.
}
\item{cvControl}{
A list of parameters to control the cross-validation process. The \code{V} parameter is an integer representing the number of cross-validation folds and defaults to 5. Other parameters are \code{stratifyCV} and \code{shuffle}, which are not yet enabled.
}
\item{seed}{
A random seed to be set (integer); defaults to 1. If \code{NULL}, then a random seed will not be set. The seed is set prior to creating the CV folds and prior to model training for base learning and metalearning.
}
\item{parallel}{
A character string specifying optional parallelization. Use \code{"seq"} for sequential computation (the default) of the cross-validation and base learning steps. Use \code{"multicore"} to perform the V-fold (internal) cross-validation step as well as the final base learning step in parallel over all available cores. Or parallel can be a snow cluster object. Both parallel options use the built-in functionality of the R core "parallel" package. Currently, only \code{"seq"} is compatible with the parallelized H2O algorithms, so this argument may be removed or modified in the future.
}
}
\value{
\item{x}{
A vector containing the names of the predictors in the model.
}
\item{y}{
The name of the response variable in the model.
}
\item{family}{
Returns the \code{family} argument from above.
}
\item{cvControl}{
Returns the \code{cvControl} argument from above.
}
\item{folds}{
A vector of fold ids for each observation, ordered by row index. The number of unique fold ids is specified in \code{cvControl$V}.
}
\item{ylim}{
Returns range of \code{y}.
}
\item{seed}{
An integer. Returns \code{seed} argument from above.
}
\item{parallel}{
A character string. Returns the \code{parallel} argument from above.
}
\item{basefits}{
A list of H2O models, each of which are trained using the \code{data} object. The length of this list is equal to the number of base learners in the \code{learner} argument.
}
\item{metafit}{
The predictive model which is learned by regressing \code{y} on \code{Z} (see description of \code{Z} below). The type of model is specified using the \code{metalearner} argument.
}
\item{Z}{
The Z matrix (the cross-validated predicted values for each base learner). In the stacking ensemble literature, this is known as the "level-one" data and is the design matrix used to train the metalearner.
}
\item{runtime}{
A list of runtimes for various steps of the algorithm. The list contains \code{cv}, \code{metalearning}, \code{baselearning} and \code{total} elements. The \code{cv} element is the time it takes to create the \code{Z} matrix (see above). The \code{metalearning} element is the training time for the metalearning step. The \code{baselearning} element is a list of training times for each of the models in the ensemble. The time to run the entire \code{h2o.ensemble} function is given in \code{total}.
}
}
\references{
van der Laan, M. J., Polley, E. C. and Hubbard, A. E. (2007) Super Learner, Statistical Applications of Genetics and Molecular Biology, 6, article 25. \cr
\url{http://dx.doi.org/10.2202/1544-6115.1309}\cr
\url{http://biostats.bepress.com/ucbbiostat/paper222}\cr
\cr
Breiman, L. (1996) Stacked Regressions, Machine Learning, 24:49–64.\cr
\url{http://dx.doi.org/10.1007/BF00117832}\cr
\url{http://statistics.berkeley.edu/sites/default/files/tech-reports/367.pdf}
}
\author{
Erin LeDell \email{ledell@berkeley.edu}
}
\note{
Using an h2o algorithm wrapper function as the metalearner is not yet producing good results. For now, it is recommended to use the \code{\link[SuperLearner:SL.glm]{SL.glm}} function as the metalearner.
}
\seealso{
\code{\link[SuperLearner:SuperLearner]{SuperLearner}}, \code{\link[subsemble:subsemble]{subsemble}}
}
\examples{
\dontrun{
# An example of binary classification using h2o.ensemble
library(h2oEnsemble)
library(SuperLearner) # For metalearner such as "SL.glm"
library(cvAUC) # Used to calculate test set AUC (requires version >=1.0.1 of cvAUC)
localH2O <- h2o.init(ip = "localhost", port = 54321, startH2O = TRUE, nthreads = -1)
# Import a sample binary outcome train/test set into R
train <- read.table("http://www.stat.berkeley.edu/~ledell/data/higgs_5k.csv", sep=",")
test <- read.table("http://www.stat.berkeley.edu/~ledell/data/higgs_test_5k.csv", sep=",")
# Convert R data.frames into H2O parsed data objects
data <- as.h2o(localH2O, train)
newdata <- as.h2o(localH2O, test)
y <- "V1"
x <- setdiff(names(data), y)
family <- "binomial"
# Create a custom base learner library & specify the metalearner
h2o.randomForest.1 <- function(..., ntrees = 1000, nbins = 100, seed = 1) h2o.randomForest.wrapper(..., ntrees = ntrees, nbins = nbins, seed = seed)
h2o.deeplearning.1 <- function(..., hidden = c(500,500), activation = "Rectifier", seed = 1) h2o.deeplearning.wrapper(..., hidden = hidden, activation = activation, seed = seed)
h2o.deeplearning.2 <- function(..., hidden = c(200,200,200), activation = "Tanh", seed = 1) h2o.deeplearning.wrapper(..., hidden = hidden, activation = activation, seed = seed)
learner <- c("h2o.randomForest.1", "h2o.deeplearning.1", "h2o.deeplearning.2")
metalearner <- c("SL.glm")
# Train the ensemble using 4-fold CV to generate level-one data
# More CV folds will take longer to train, but should increase performance
fit <- h2o.ensemble(x = x, y = y, data = data, family = family,
learner = learner, metalearner = metalearner,
cvControl = list(V=4))
# Generate predictions on the test set
pred <- predict(fit, newdata)
labels <- as.data.frame(newdata[,c(y)])[,1]
# Ensemble test AUC
AUC(predictions=as.data.frame(pred$pred)[,1], labels=labels)
# 0.7681649
# Base learner test AUC (for comparison)
L <- length(learner)
sapply(seq(L), function(l) AUC(predictions = as.data.frame(pred$basepred)[,l], labels = labels))
# 0.7583084 0.7145333 0.7123253
# Note that the ensemble results above are not reproducible since
# h2o.deeplearning is not reproducible when using multiple cores.
# For reproducible results, use h2o.init(nthreads = 1)
}
}
|
1cdd31557a73d69a032808c4df101ac2ad60c63d
|
e2b64116eab0d5a035a63e3edcf8ed0bebbe3c4d
|
/R/read-sod.R
|
fa3cd9ee8de54ac8ec1df42311b57dd88faf265c
|
[] |
no_license
|
kbrevoort/crar
|
b55bd3553a82903d5c58591af1c61364ca16bc10
|
ce3728d535a9d3ba1262654d0592797aa07835b5
|
refs/heads/master
| 2020-06-30T21:43:34.677236
| 2019-08-07T02:45:08
| 2019-08-07T02:45:08
| 200,959,343
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,315
|
r
|
read-sod.R
|
# Read one year of the FDIC Summary of Deposits (SOD) data from the local
# zipped archive (data/zipped/SOD/ALL_<year>.zip under the project root) and
# return it as a tibble with logical flags, labelled factors, and readable
# column names. The raw single-letter/abbreviated columns are dropped at the
# end once their recoded versions exist.
read_sod_file <- function(year) {
  zip_file <- file.path(here::here(),
                        sprintf('data/zipped/SOD/ALL_%d.zip',
                                year))
  csv_file <- sprintf('ALL_%d.csv', year)
  # read directly from inside the zip, no extraction to disk
  readr::read_csv(unz(zip_file, csv_file)) %>%
    # 0/1 indicator columns -> logicals; coded columns -> labelled factors
    mutate(mainoffice_fl = as.logical(BKMO),
           denovo_fl = as.logical(DENOVO),
           metroarea_fl = as.logical(METROBR),
           microarea_fl = as.logical(MICROBR),
           deposit_cd = factor(BRCENM,
                               levels = c('C', 'E', 'N', 'M'),
                               labels = c('Combined',
                                          'Estimated',
                                          'Non Deposit',
                                          'Main Office')),
           call_cd = factor(CALL, levels = c('CALL', 'TFR')),
           charter_cd = factor(CHARTER,
                               levels = c('FED', 'STATE'),
                               labels = c('Federal', 'State')),
           service_type = factor(BRSERTYPE,
                                 levels = c(11, 12, 13, 21:30),
                                 labels = c('Full service, brick and mortar',
                                            'Full service, retail office',
                                            'Full service, cyber office',
                                            'Limited service, administrative office',
                                            'Limited service, military facility',
                                            'Limited service, drive-through facility',
                                            'Limited service, loan production facility',
                                            'Limited service, consumer credit office',
                                            'Limited service, contractual office',
                                            'Limited service, messenger office',
                                            'Limited service, retail office',
                                            'Limited service, mobile/seasonal office',
                                            'Limited service, trust office')),
           # NOTE(review): 'No specializztion group' reproduces an apparent
           # typo; kept verbatim because downstream code may match on it.
           specialization = factor(SPECDESC,
                                   levels = c('0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.'),
                                   labels = c('No specializztion group',
                                              'International specialization',
                                              'Agricultural specialization',
                                              'Credit card specialization',
                                              'Commercial lending specialization',
                                              'Mortgage lending specialization',
                                              'Consumer lending specialization',
                                              'Other specialized under 1 billion',
                                              'All other under 1 billion',
                                              'All other over 1 billion',
                                              'Not available')),
           agency = factor(CHRTAGNN,
                           levels = c('Comptroller of the Currency',
                                      'Office of Thrift Supervision',
                                      'State Agency',
                                      'Foreign Country')),
           regulator = factor(REGAGNT,
                              levels = c('FED', 'FDIC', 'OCC', 'OTS', 'NCUA', 'STATE')),
           agency_abb = factor(CHRTAGNT,
                               levels = c('OCC', 'OTS', 'STATE', 'SOVER'))) %>%
    # raw names -> descriptive names (BR-suffixed columns are branch-level,
    # the others institution-level)
    rename(inst_address = ADDRESS,
           address = ADDRESBR,
           assets = ASSETS,
           branch_num = BRNUM,
           cbsa_name = CBSA_DIV_NAMB,
           cert_num = CERT,
           county_name = CNTYNAMB,
           county_fips = CNTYNUMB,
           country_name = CNTRYNAMB,
           country_inst = CNTRYNA,
           deposits = DEPSUMBR,
           deposits_inst = DEPSUM,
           deposits_inst_domestic = DEPDOM,
           id_rssd = RSSDID,
           top_holder = RSSDHCR) %>%
    # NOTE(review): mutate() above reads BRSERTYPE but select() drops
    # BRSERTYP (no trailing E) -- one of the two spellings is likely a typo;
    # verify against the actual SOD column names.
    select(-BKMO, -BRCENM, -SPECDESC, -CHRTAGNT, -REGAGNT,
           -METROBR, -MICROBR, -BRSERTYP)
}
|
ee8a55f3977a89c70b13d0de67773d9e3d6e7af7
|
d3756e689a59b564a7ba0aec7c6c7130cc811815
|
/R/plot.grouped.tags.r
|
bfbb27ca859903248661caf0420024b5a3335ee5
|
[] |
no_license
|
PacificCommunity/ofp-sam-r4mfcl
|
710824eb52bc02a604123ca6860d9271ddcd949b
|
6b8f9e6f452b6d203da65a1d8902e28fd2f5a175
|
refs/heads/master
| 2023-02-21T12:52:16.632352
| 2023-02-07T02:28:02
| 2023-02-07T02:28:02
| 23,813,801
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,325
|
r
|
plot.grouped.tags.r
|
#' Plot observed vs predicted tag returns
#'
#' Plots predicted tag returns (line) against observed returns (points) over
#' time, either aggregated over all fisheries or facetted by fishery group.
#'
#' @param tagfl Tag-report object with an \code{auxdat} component containing
#'   columns ctime, t2rec, orec, prec and fry.
#' @param mix.time Mixing period; recaptures with t2rec <= mix.time are
#'   excluded when \code{remove.mix} is TRUE.
#' @param remove.mix If FALSE, mix.time is forced to 0 (nothing excluded).
#' @param xaxe,yaxe Axis labels.
#' @param ln.sz,ln.col,pt.sz,pt.col Line/point size and colour.
#' @param all.fishies If TRUE, aggregate all fisheries into one panel;
#'   otherwise facet by fishery group.
#' @param Ncols Number of facet columns in the grouped plot.
#' @param grpnms Display names for the fishery groups.
#' @param keepGrps Fishery-group indices to keep in the grouped plot.
#' @param fsh.grps Mapping from fishery index to group index.
#' @param fac.levels Factor-level order for the facet panels.
#' @export plot.grouped.tags
plot.grouped.tags <- function(tagfl=readtagrep, mix.time=1, remove.mix=TRUE, xaxe="Year", yaxe="Number of tag returns",
                              ln.sz=0.7, ln.col=alpha("black", 0.7), pt.sz=2, pt.col=alpha("red", 0.7), all.fishies=TRUE,
                              Ncols=2, grpnms=paste("Grp", 1:19), keepGrps=c(1,2,4,5), fsh.grps=1:23, fac.levels=paste("Grp", 1:19))
{
  theme_set(theme_bw())
  require(scales)
  require(magrittr)
  require(dplyr)
  tmp <- tagfl$auxdat
  # disabling mixing removal is implemented by zeroing the mixing period
  if(!remove.mix) mix.time <- 0
  if(all.fishies)
  {
    # All recaptures
    # Sum observed/predicted recaptures per quarter, then pad with zero rows
    # so every quarter between the first and last appears on the plot.
    tmp.pl2 <- tmp %>% filter(t2rec > mix.time) %>% group_by(Year = ctime) %>%
                       summarise(obs = sum(orec), pre = sum(prec)) %>% as.data.frame()
    all.yr <- data.frame(Year = seq(min(tmp.pl2$Year), max(tmp.pl2$Year), 0.25))
    tmp.pl2 <- merge(tmp.pl2, all.yr, by = "Year", all.y = TRUE)
    tmp.pl2[is.na(tmp.pl2)] <- 0
    pl <- ggplot(tmp.pl2, aes(x = Year, y = pre)) + geom_line(size = ln.sz, colour = ln.col) +
                 geom_point(aes(x = Year, y = obs), size = pt.sz, colour = pt.col) #+ facet_wrap(~ pro, ncol = 2, scales = "free_y")
  } else {
    # Fisheries separated
    # Map fisheries onto groups, keep only the requested groups, aggregate per
    # quarter and group, then pad every group with the full quarter sequence.
    tmp.pl2 <- tmp %>% mutate(fry = fsh.grps[fry]) %>% filter(t2rec > mix.time, fry %in% keepGrps) %>% group_by(Year = ctime, fry) %>%
                       summarise(obs = sum(orec), pre = sum(prec)) %>% mutate(Fishery = grpnms[fry]) %>% as.data.frame()
    all.yr <- data.frame(Year = rep(seq(min(tmp.pl2$Year), max(tmp.pl2$Year), 0.25), length(unique(tmp.pl2$Fishery))))
    all.yr$Fishery <- rep(unique(tmp.pl2$Fishery), rep(length(unique(all.yr$Year)), length(unique(tmp.pl2$Fishery))))
    tmp.pl2 <- merge(tmp.pl2, all.yr, by = c("Year","Fishery"), all.y = TRUE)
    tmp.pl2[is.na(tmp.pl2)] <- 0
    tmp.pl2$Fishery <- factor(tmp.pl2$Fishery, levels = fac.levels)
    pl <- ggplot(tmp.pl2, aes(x = Year, y = pre)) + geom_line(size = ln.sz, colour = ln.col) +
                 geom_point(aes(x = Year, y = obs), size = pt.sz, colour = pt.col) + facet_wrap(~ Fishery, ncol = Ncols, scales = "free_y")
  }
  pl <- pl + xlab(xaxe) + ylab(yaxe) + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
  print(pl)
}
|
28e3280a5ed4d891dd56ac4fb4efda434df08c76
|
fc679433d67f9652a54515adb38554e14ae4608a
|
/clean.R
|
b836d662e8207f641cc48de95ce8220fbdac1ec8
|
[] |
no_license
|
chucheria/2019_SatRdaysParis
|
a50e159784fb0f3b27d3a6a5976014487487f655
|
a7b810ef4c9f3f10908667eafd9598352fd4f697
|
refs/heads/master
| 2021-07-02T12:09:12.647669
| 2020-09-24T13:37:53
| 2020-09-24T13:37:53
| 172,184,484
| 7
| 2
| null | 2019-03-01T19:21:10
| 2019-02-23T07:19:23
|
R
|
UTF-8
|
R
| false
| false
| 633
|
r
|
clean.R
|
library(neo4r)
# Connection to a local Neo4j instance (default HTTP port).
con <- neo4j_api$new(
url = 'http://localhost:7474',
user = 'neo4j',
password = 'root'
)
# Inspect the current state of the database before wiping it.
con$get_index()
con$get_relationships()
con$get_labels()
con$get_constraints()
###### CLEAN DATABASE
# Drop every uniqueness constraint this project creates, then delete all
# nodes and relationships. Order matters: the DETACH DELETE must come last.
clean <- c('DROP CONSTRAINT ON (p:Person) ASSERT p.name IS UNIQUE',
'DROP CONSTRAINT ON (t:Team) ASSERT t.name IS UNIQUE',
'DROP CONSTRAINT ON (c:Country) ASSERT c.name IS UNIQUE',
'DROP CONSTRAINT ON (g:Game) ASSERT g.name IS UNIQUE',
'DROP CONSTRAINT ON (l:League) ASSERT l.name IS UNIQUE',
'MATCH (n) DETACH DELETE n')
# run each Cypher statement against the connection, in order
purrr::map(clean, call_neo4j, con = con)
|
cc3a6fe2af32e923f3383aacaca7db320f2cb5f4
|
d1a92aac30642f388baa400b9552a036a3cb42b2
|
/week3/source/plot1.R
|
24f7349d8e86041fa4f76ab98abc11e9685270ee
|
[] |
no_license
|
giuseppe82/04_ExploratoryAnalysis
|
6eacd2c29be49f11f2391e99dbc039f4b7573e8e
|
12e9e9866e338e8cf87139afca72b93446b971fe
|
refs/heads/master
| 2021-01-01T03:49:11.680842
| 2016-05-07T19:39:17
| 2016-05-07T19:39:17
| 58,213,936
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,644
|
r
|
plot1.R
|
# Course Project 2.
# 1. Have total emissions from PM2.5 decreased in the United States
# from 1999 to 2008? Using the base plotting system, make a plot showing
# the total PM2.5 emission from all sources for each of the years
# 1999, 2002, 2005, and 2008.
# Script Name: plot1.R
# NOTE(review): hard-coded absolute path; the script only runs as-is on the
# original author's machine.
setwd(dir = "/Users/joedibernardo/Projects/DATASCIENCE/ExploratoryDataAnalysis/week3/exdata-data-NEI_data")
library(dplyr)
# Load the NEI/SCC data sets only once per session (they are large).
if(!exists("NEI")){
NEI <- readRDS("summarySCC_PM25.rds")
}
if(!exists("SCC")){
SCC <- readRDS("Source_Classification_Code.rds")
}
# NOTE(review): the NEI column is usually named "Emissions" (plural); this
# works only via `$` partial matching on data frames -- verify the column name.
EM <- NEI$Emission # numeric vector of emissions
YR <- NEI$year # numeric vector of years
# str(EM) have a quick look into the EM data frame.
# a convenient "new" data frame
Data <- data.frame(cbind(YR, EM), stringsAsFactors = FALSE)
# summary(Data), just a quick check
# let's apply some filtering.
# one subset per target year
PM1999 <- Data[Data$YR == 1999, ]
PM2002 <- Data[Data$YR == 2002, ]
PM2005 <- Data[Data$YR == 2005, ]
PM2008 <- Data[Data$YR == 2008, ]
# summary(PM1999$EM), just a quick check
years <- cbind(1999,2002,2005,2008)
# total emissions per year, in the same order as `years`
total_emissions <- cbind(sum(PM1999$EM), sum(PM2002$EM), sum(PM2005$EM), sum(PM2008$EM))
# total_emissions
# render the bar plot straight to a PNG in the working directory
png("./plot1.png", width = 640, height = 480)
par(mar=c(5,5,5,2)+0.1)
barplot(total_emissions, width = 1, col = "wheat",
cex = 1.5, cex.main = 1.5, cex.axis = 1.5,
cex.lab = 1.5,
ylim = c(0e+00, 7e+06),
axis.lty = 0,
main = expression('Total PM'[2.5]*' emissions in the USA, 1999 - 2008'),
xlab = "Years", ylab = expression('total PM'[2.5]*' emissions (tons)'),
horiz = FALSE, names.arg = years
)
dev.off()
|
2bf48a1651f64e2bc10ec3c236e27de4debcb98e
|
465c486d89fc671e9ebd499d9096ce7b4417b950
|
/R/json_tree.R
|
edaea7d46d7609b9970678d3818d775d63697c22
|
[] |
no_license
|
cran/rapsimng
|
7fa6879694ca512c212c780b35dc5a1f5f9920af
|
fadc3c478038c7e6bc85fdf199ab688e111ce9d0
|
refs/heads/master
| 2023-07-26T00:08:09.516036
| 2021-09-09T05:00:02
| 2021-09-09T05:00:02
| 305,099,779
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,014
|
r
|
json_tree.R
|
#' Get the parent node from a path
#'
#' @param l the list of apsimx file
#' @param path If numeric, the path returned by search_path or search_node. If character, the path supported by apsimx
#'
#' @return A list with two elements: \code{node}, the parent node, and
#'   \code{path}, the numeric path of that parent (starting at the root, 1).
#' @export
#'
#' @examples
#' wheat <- read_apsimx(system.file("Wheat.json", package = "rapsimng"))
#' a <- search_path(wheat, '[Structure].BranchingRate')
#' get_parent(wheat, a$path)
get_parent <- function(l, path) {
  path <- .check_path(l, path)
  if (length(path) == 1) {
    stop("Already in the root for ", path)
  }
  if (length(path) == 2) {
    # A depth-2 node's parent is the root itself.
    return(list(node = l, path = 1))
  }
  # Drop the leading root index and the trailing child index; what remains
  # addresses the parent node relative to the root.
  path <- path[-1]
  path <- path[-length(path)]
  # Walk down the tree directly with [[ ]] instead of building a subscript
  # string and running eval(parse(text = ...)) as the original did.
  for (i in seq_along(path)) {
    l <- l[["Children"]][[path[i]]]
  }
  list(node = l, path = c(1, path))
}
|
22698300fbfc16a81f6435d0051b29824eb8b3a3
|
2a2ab8c0dab2e1cb4aa300c9783b271c81ee15f4
|
/run_analysis.R
|
75170f08ded42722f31988629487de806abc4566
|
[] |
no_license
|
jafafe/getting-cleaning-data
|
1b8bdaf324402aff10b16ea799cd315ecd4d8b5b
|
5e109344bcfc479071f119ec47bd607e17df3e01
|
refs/heads/master
| 2021-01-09T20:40:45.350327
| 2016-06-28T16:17:41
| 2016-06-28T16:17:41
| 62,008,265
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,312
|
r
|
run_analysis.R
|
# Getting & Cleaning Data course project.
# Builds a tidy data set (tidy_average_data.txt) holding the average of each
# mean/std measurement for every (subject, activity) pair of the UCI HAR data.
setwd("~/workspace/coursera/cleandata/project/getting-cleaning-data")
source("constants.R")  # presumably defines data.directory, trainDir, testDir, featuresFile -- verify
library(data.table)
library(plyr)

# Path of a per-split data file, e.g. <data.directory>/train/subject_train.txt.
splitFile <- function(split, prefix) {
  file.path(data.directory, split, paste(prefix, split, ".txt", sep = ""))
}

# Read and stack the train and test subject ids into one table whose single
# column is named "subject".
loadSubjects <- function() {
  subjects <- rbind(fread(splitFile(trainDir, "subject_")),
                    fread(splitFile(testDir, "subject_")))
  setnames(subjects, "subject")
}

# Read and stack the train and test activity codes into one table whose
# single column is named "activity".
loadActivities <- function() {
  activities <- rbind(fread(splitFile(trainDir, "y_")),
                      fread(splitFile(testDir, "y_")))
  setnames(activities, "activity")
}

# Read and stack the train and test measurement matrices.
loadValues <- function() {
  rbind(read.table(splitFile(trainDir, "X_")),
        read.table(splitFile(testDir, "X_")))
}

# Replace numeric activity codes with their descriptive labels.
# Takes the activities table as an argument: the original version silently
# read the global `activities` object from the calling environment.
loadActivityLabels <- function(activities) {
  aLabels <- read.table(paste(data.directory, "activity_labels.txt", sep = "/"))
  activities[, 1] <- aLabels[activities[, 1], 2]
  activities
}

# 1 Merges the training and the test sets to create one data set
subjects <- loadSubjects()
activities <- loadActivities()
xData <- loadValues()

# 2 Extracts only the measurements on the mean and standard deviation for
#   each measurement (features whose name contains -mean() or -std()).
features <- read.table(paste(data.directory, featuresFile, sep = "/"))
featuresFilter <- grep("-(mean|std)\\(\\)", features[, 2])
xData <- xData[, featuresFilter]
names(xData) <- features[featuresFilter, 2]

# 3 Uses descriptive activity names to name the activities in the data set
activities <- loadActivityLabels(activities)

# 4 Appropriately labels the data set with descriptive variable names
#   (already done in step 2 when the feature names were attached).
fullData <- cbind(xData, activities, subjects)

# 5 From the data set in step 4, creates a second, independent tidy data set
#   with the average of each variable for each activity and each subject.
tidyData <- ddply(fullData, .(subject, activity), function(x) colMeans(x[, 1:66]))
write.table(tidyData, "tidy_average_data.txt", row.names = FALSE)
|
4f0714694a189c751e72b34d5002b70acae38834
|
abf15dd221160c57203a3a20b259686ed5959259
|
/SoilProj_summary_stats_2.R
|
272cc1900206ea92ce70e12e9f124887818fef46
|
[] |
no_license
|
mikeymid/Rcodes
|
8d91c5a5be7d0c45465f3d543fe03e5bbeb371db
|
d65529e6701b4b2c7c8f6a13fa1b6dadf7145240
|
refs/heads/master
| 2021-06-30T19:39:40.540742
| 2017-09-18T19:07:42
| 2017-09-18T19:07:42
| 103,958,299
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,413
|
r
|
SoilProj_summary_stats_2.R
|
# Mikey
# Summary statistics of soil variables: for each variable, draw a histogram
# and print its numeric summary, arranged three panels per figure.
# 08-18-17
#-------------------
FieldData <- read.csv(file = "C:/Users/MMOHA14/Desktop/Projects/Proj 1 - Soil Nutrient Classification/Part3/data/locations/IADV.Corn.2017_MON-GLOBAL BREEDING-619_e3759721-954b-49d1-875b-d6319f19d3b8.csv", header = TRUE, sep = ",")
#---------------------

# Plot a histogram of one FieldData column and print its summary.
# print() is used explicitly so the summary also appears when the script is
# run non-interactively (inside a function the value would not auto-print).
summariseVar <- function(var) {
  values <- FieldData[[var]]  # simpler than which(colnames(...) == var)
  hist(values, main = paste("Histogram of", var), xlab = var)
  print(summary(values))
}

# First figure: soil composition variables.
par(mfrow = c(1, 3))
for (var in c("organicMatter", "cec", "ph")) {
  summariseVar(var)
}

# Second figure: nutrient variables.
par(mfrow = c(1, 3))
for (var in c("p", "k", "s")) {
  summariseVar(var)
}
|
024044824a7ac12f15d9da08fc02b6282f65cdd6
|
9b2a0eefea05a4c19125dfa493e68efad51198ca
|
/man/ltdb-data.Rd
|
c1c28522b6691a6f278e558a42b8464104139b91
|
[
"MIT"
] |
permissive
|
tiernanmartin/NeighborhoodChangeTypology
|
9ba10bffd7b2554ec5fc9e2f6666ad143dc01028
|
047dde257cfcd1b1e66c9fedc9203475dc87307c
|
refs/heads/master
| 2020-03-24T21:33:06.166394
| 2019-03-13T15:51:35
| 2019-03-13T15:51:35
| 143,038,483
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 800
|
rd
|
ltdb-data.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ltdb-data.R
\name{prepare_ltdb_data}
\alias{prepare_ltdb_data}
\alias{make_ltdb_data}
\title{Make A Tibble of The Project's US Census Variables from LTDB}
\usage{
prepare_ltdb_data(data_template, acs_tables, path)
make_ltdb_data(path)
}
\arguments{
\item{data_template}{Tibble, the \code{data_template} object}
\item{acs_tables}{Tibble, the \code{acs_table} object}
\item{path}{Character, the path or connection to write to.}
}
\value{
a \code{tibble}
}
\description{
Return a \code{tibble} of all of the US Census data variables
that are obtained from the Brown University Longitudinal Tract Database (LTDB).
}
\note{
Data source: \url{https://s4.ad.brown.edu/projects/diversity/Researcher/LTBDDload/DataList.aspx}
}
|
a5593e73648bc7dd66d0d24d9aba0136673d04ba
|
01641d63cece8f48d985fed63b8c28b6e59e0c5a
|
/03-07_debugging-tools_basic-tools.R
|
2e4c4ce86d7f5c2041412f2f2c4f56968848356a
|
[] |
no_license
|
mareq/coursera_data-science_02-r-programming_lectures
|
8e9857c4f5e1fb459b75c89d333e30b75dbb6b58
|
fe099df9a3d7af7e2d4dba8c3cc930c0c6246e52
|
refs/heads/master
| 2021-01-01T06:40:15.762132
| 2015-01-18T21:27:30
| 2015-01-18T21:27:30
| 29,441,570
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 44
|
r
|
03-07_debugging-tools_basic-tools.R
|
# traceback, debug, browser, trace, recover
|
d71559935d20c5d97e950ca7fd09c71662be5810
|
602a36156f6bd708be469b340e3a5fdcc8c78339
|
/man/nrowHelper.Rd
|
062f3de181c44489b702eacd2fd443313fb6cd96
|
[
"BSD-3-Clause"
] |
permissive
|
FertigLab/CoGAPS
|
2290e0685be5a53617fb593183c3ab4773f616f5
|
e99d6eff85d81178f71ebc58e4c56c8f7eda4ce7
|
refs/heads/master
| 2023-04-05T14:39:32.838668
| 2023-04-03T23:12:39
| 2023-04-03T23:12:39
| 48,254,961
| 47
| 13
|
BSD-3-Clause
| 2023-09-11T19:44:09
| 2015-12-18T20:27:45
|
C++
|
UTF-8
|
R
| false
| true
| 394
|
rd
|
nrowHelper.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/HelperFunctions.R
\name{nrowHelper}
\alias{nrowHelper}
\title{get number of rows from supported file name or matrix}
\usage{
nrowHelper(data)
}
\arguments{
\item{data}{either a file name or a matrix}
}
\value{
number of rows
}
\description{
get number of rows from supported file name or matrix
}
\keyword{internal}
|
afad0c33dbad9572a96007a248b902db16821724
|
6a28ba69be875841ddc9e71ca6af5956110efcb2
|
/Probability_And_Statistics_For_Engineers_by_Richard_L._Scheaffer,_Madhuri_S._Mulekar,_James_T._Mcclave/CH8/EX8.16/ex_8_16.R
|
44bd25b6cce9d7a442eca9c8d14272338b29cd86
|
[] |
permissive
|
FOSSEE/R_TBC_Uploads
|
1ea929010b46babb1842b3efe0ed34be0deea3c0
|
8ab94daf80307aee399c246682cb79ccf6e9c282
|
refs/heads/master
| 2023-04-15T04:36:13.331525
| 2023-03-15T18:39:42
| 2023-03-15T18:39:42
| 212,745,783
| 0
| 3
|
MIT
| 2019-10-04T06:57:33
| 2019-10-04T05:57:19
| null |
UTF-8
|
R
| false
| false
| 378
|
r
|
ex_8_16.R
|
# Example 8.16: p-chart for the fraction of defectives in 24 samples of
# n = 50 items each.

# Remove a user object named "c" only if one exists in this environment;
# a bare rm("c") warns when no such object is defined.
if (exists("c", inherits = FALSE)) rm("c")

# Number of defectives observed in each sample.
x <- c(3,1,4,2,0,2,3,3,5,4,1,1,1,2,0,3,2,2,4,1,3,0,2,3)
sample <- 1:24
dat <- data.frame(sample, x)

n <- 50                                # sample size (was hard-coded throughout)
p <- mean(dat$x / n)                   # overall fraction defective
u <- p + 3 * sqrt(p * (1 - p) / n)     # upper control limit (UCL)
l <- p - 3 * sqrt(p * (1 - p) / n)     # lower control limit, may be negative

# A proportion cannot be negative, so a negative lower limit is truncated
# to 0 (the original printed a literal 0 with the same justification).
lcl <- max(0, l)
cat("The LCL and UCL are", lcl, "and", u, "respectively")

#install the package qcc
library(qcc)
qcc(dat$x, sizes = n, type = "p")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.