blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
β | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
β | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
β | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
53db15ef751b117f01db0ea87c40cbca54bf8ec2
|
79fd75167bfc21f7a75440ae2ee1a2f9561e7ae1
|
/inst/doc/swephR.R
|
0d224698b33cd440dc58e5d1a1b13d743a53b41e
|
[] |
no_license
|
cran/swephR
|
b970020d3fa5dbaee8f68416641639dc954786fa
|
81599261243cd9b8b2194e0dc05c1ecc047b116f
|
refs/heads/master
| 2023-05-11T13:39:54.331061
| 2023-05-08T08:50:05
| 2023-05-08T08:50:05
| 168,393,116
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,392
|
r
|
swephR.R
|
## R code purled from the swephR package vignette; the "## ----" lines are
## the original knitr chunk markers and are kept verbatim.
## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## -----------------------------------------------------------------------------
library(swephR)
## ---- eval = FALSE------------------------------------------------------------
# swe_set_ephe_path("C:\\sweph\\ephe")
## -----------------------------------------------------------------------------
# Julian day number (UT) for 2000-01-01 12:00 in the Gregorian calendar.
year <- 2000
month <- 1
day <- 1
hour <- 12
jdut <- swe_julday(year, month, day, hour, SE$GREG_CAL)
jdut
## -----------------------------------------------------------------------------
# Position and speed of the Sun using the built-in Moshier ephemeris.
ipl <- SE$SUN
iflag <- SE$FLG_MOSEPH + SE$FLG_SPEED
result <- swe_calc_ut(jdut, ipl, iflag)
result
## -----------------------------------------------------------------------------
# Fixed-star position for Sirius at the same instant.
starname = "sirius"
result <- swe_fixstar2_ut(starname, jdut, iflag)
result
## -----------------------------------------------------------------------------
# Heliacal rising of Sirius. The three numeric vectors are presumably
# geographic position, atmospheric conditions and observer data -- confirm
# against ?swe_heliacal_ut.
options(digits=15)
result <- swe_heliacal_ut(jdut,c(0,50,10),c(1013.25,15,50,0.25),c(25,1,1,1,5,0.8),starname,
SE$HELIACAL_RISING,SE$HELFLAG_HIGH_PRECISION+SE$FLG_MOSEPH)
result
## -----------------------------------------------------------------------------
# Print positions of all main bodies (Sun .. osculating apogee) for one date.
options(digits=6)
swe_set_ephe_path(NULL)
iflag = SE$FLG_SPEED + SE$FLG_MOSEPH
{
#get year
jyear <- 2000
#get month
jmon <- 1
#get day
jday <- 1
#get time
jhour <- 12
#determine julian day number (at 12:00 GMT)
tjd_ut <- swe_julday(jyear, jmon, jday, jhour, SE$GREG_CAL)
cat("Julian day number (UT) :", tjd_ut, "(",jyear,",",jmon,",",jday,"; proleptic Gregorian calendar)\n")
cat("planet :",
c("longitude", "latitude", "distance", "long. speed", "lat. speed"),
"\n")
cat("===========================================================\n")
# loop over all planets
for (p in SE$SUN:SE$OSCU_APOG) {
# get the name of the planet p
objectname = swe_get_planet_name(p)
# do the coordinate calculation for this planet p
i = swe_calc_ut(tjd_ut, p, iflag)
if (i$return < 0) {
cat("Error :", i$err, "(", objectname, ")\n")
}
else
{
# print data
# NOTE(review): 0:5 includes index 0, which R silently drops, so this
# prints elements 1-5 of xx (equivalent to 1:5).
cat (objectname, ":", i$xx[0:5], "\n")
}
}
}
## -----------------------------------------------------------------------------
swe_close()
|
26151b264eb5ecb525400afcfb4705a8543dada4
|
2a3f50853c6e3b404329e98076428f60629cca75
|
/R/Scratch/TestWeekPlots.r
|
79659fbc29181498b80a7f54ba33fbd5a47de5b6
|
[] |
no_license
|
wmchad/NetworkResearch
|
dc9d8f210325e58fa27c2bf52ad9c4dc4a917d0b
|
9826d2442f291238c4d89bdca4046e93ee05b77e
|
refs/heads/master
| 2021-01-17T06:34:14.150982
| 2014-09-08T19:15:36
| 2014-09-08T19:15:36
| 18,338,256
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,273
|
r
|
TestWeekPlots.r
|
# Scratch script: weekly call-volume plots for one tower, December 2011.
# Relies on helper functions sourced below (GetTowerAggregatedCalls,
# PlotMultipleDayVolume, multiplot).
setwd("C:/Code/NetworkResearch/R/Functions");
source("Data/TowerFunctions.r");
source("Plotting/TowerVolumePlots.r");
source("Plotting/PlotUtilities.r");
# Two-digit year/month convention used by GetTowerAggregatedCalls.
year <- 11;
month <- 12;
towerId <- 405;
calls <- GetTowerAggregatedCalls(towerId, year, month);
# Week 1 (days 1-7): raw volume, deviation from average, relative deviation.
subdata <- calls[calls$Day >= 1 & calls$Day <= 7,];
PlotMultipleDayVolume( subdata$TotalCalls, 2011, 12, 1, 7, plotTitle="My Title" );
PlotMultipleDayVolume( subdata$TotalCalls - subdata$TotalAvgCalls,
2011, 12, 1, 7, plotTitle="My Title" );
PlotMultipleDayVolume( (subdata$TotalCalls - subdata$TotalAvgCalls) / max(1, subdata$TotalAvgCalls),
2011, 12, 1, 7, plotTitle="My Title" );
# Week 2 (days 8-14).
subdata2 <- calls[calls$Day >= 8 & calls$Day <= 14,];
PlotMultipleDayVolume( subdata2$TotalCalls, 2011, 12, 8, 14, plotTitle="My Title" );
# NOTE(review): week 2 minus week 1 -- presumably an intentional
# week-over-week comparison in this scratch file; confirm.
PlotMultipleDayVolume( subdata2$TotalCalls - subdata$TotalCalls,
2011, 12, 8, 14, plotTitle="My Title" );
# Week 3 (days 15-21).
subdata3 <- calls[calls$Day >= 15 & calls$Day <= 21,];
PlotMultipleDayVolume( subdata3$TotalCalls - subdata2$TotalCalls,
2011, 12, 15, 21, plotTitle="My Title" );
PlotMultipleDayVolume( subdata3$TotalCalls - subdata3$TotalAvgCalls,
2011, 12, 15, 21, plotTitle="My Title" );
PlotMultipleDayVolume( subdata3$TotalCalls,
2011, 12, 15, 21, plotTitle="My Title" );
# Full month as five Sunday-aligned weeks; 288 is assumed to be the number
# of samples per day (5-minute bins) -- TODO confirm.
firstweekdata <- c(rep(0, 288*4), calls[calls$Day >=1 & calls$Day <= 3,]$TotalCalls);
secondweekdata <- calls[calls$Day >=4 & calls$Day <= 10,]$TotalCalls;
thirdweekdata <- calls[calls$Day >=11 & calls$Day <= 17,]$TotalCalls;
fourthweekdata <- calls[calls$Day >=18 & calls$Day <= 24,]$TotalCalls;
fifthweekdata <- calls[calls$Day >=25 & calls$Day <= 31,]$TotalCalls;
wk1Plot <- PlotMultipleDayVolume( firstweekdata, 2011, 12, -3, 3,
plotTitle="Month Call Volume, December 2011" );
wk2Plot <- PlotMultipleDayVolume( secondweekdata, 2011, 12, 4, 10 );
wk3Plot <- PlotMultipleDayVolume( thirdweekdata, 2011, 12, 11, 17 );
wk4Plot <- PlotMultipleDayVolume( fourthweekdata, 2011, 12, 18, 24 );
wk5Plot <- PlotMultipleDayVolume( fifthweekdata, 2011, 12, 25, 31 );
multiplot( wk1Plot, wk2Plot, wk3Plot, wk4Plot, wk5Plot );
# Plot one month of tower call volume as stacked weekly panels, with weeks
# aligned to start on Sunday.
#
# Args:
#   towerId:        tower identifier for GetTowerAggregatedCalls().
#   year:           two-digit year (11 -> 2011).
#   month:          month number (1-12).
#   volCol:         column of the aggregated calls to plot.
#   interestingDay: day of month to highlight, or -1 for none.
#
# Depends on helpers defined elsewhere: GetTowerAggregatedCalls(),
# Date.ymd(), Pad2(), PlotMultipleDayVolume(), multiplot().
# NOTE(review): 288 is assumed to be the number of samples per day
# (5-minute bins) -- confirm against GetTowerAggregatedCalls().
PlotMonthCallVolume <- function( towerId, year, month,
                                 volCol="TotalCalls",
                                 interestingDay=-1) {
  fullYear <- 2000 + year;
  calls <- GetTowerAggregatedCalls(towerId, year, month);
  callVol <- calls[,volCol];
  # Shared y-range so the weekly panels are directly comparable.
  yrng <- c(min(callVol), max(callVol));
  # First day of the following month, used to count the days in this month.
  nextYear <- fullYear;
  nextMonth <- month+1;
  if ( nextMonth > 12 ) {
    nextYear <- nextYear + 1;
    nextMonth <- 1;
  }
  nDays <- as.numeric(difftime(Date.ymd(nextYear, nextMonth, 1), Date.ymd(fullYear, month, 1)));
  # Locate the Sunday on or before the 1st; firstDay may be <= 0 when the
  # month does not start on a Sunday.
  wd <- weekdays(Date.ymd(fullYear, month, 1:7), TRUE);
  firstDay <- (1:7)[wd == "Sun"];
  if ( firstDay > 1 ) { firstDay <- firstDay - 7; }
  nWeeks <- ceiling((nDays - firstDay + 1)/7);
  startDays <- firstDay +(0:nWeeks)*7;
  endDays <- startDays + 6;
  # Week 1 is left-padded with zeros for days belonging to the previous month.
  w1data <- c(rep(0, 288*(1-firstDay)), callVol[calls$Day>=startDays[1] &
                                                  calls$Day<=endDays[1]]);
  p1 <- PlotMultipleDayVolume(w1data, fullYear, month, startDays[1], endDays[1],
                              plotTitle=paste("Nearest Tower Call Volume, ",
                                              fullYear, "-", Pad2(month), sep=""),
                              yrng=yrng, interestingDay=interestingDay);
  w2data <- callVol[calls$Day>=startDays[2] & calls$Day<=endDays[2]];
  p2 <- PlotMultipleDayVolume(w2data, fullYear, month, startDays[2], endDays[2],
                              yrng=yrng, interestingDay=interestingDay);
  w3data <- callVol[calls$Day>=startDays[3] & calls$Day<=endDays[3]];
  p3 <- PlotMultipleDayVolume(w3data, fullYear, month, startDays[3], endDays[3],
                              yrng=yrng, interestingDay=interestingDay);
  w4data <- callVol[calls$Day>=startDays[4] & calls$Day<=endDays[4]];
  p4 <- PlotMultipleDayVolume(w4data, fullYear, month, startDays[4], endDays[4],
                              yrng=yrng, interestingDay=interestingDay);
  if ( nWeeks > 4 ) {
    # Week 5, right-padded with zeros for days past the end of the month.
    # NOTE(review): the padding length 288*(endDays-nDays-1) looks one day
    # short (288*(endDays-nDays) would pad every missing day) -- kept as-is,
    # confirm intent.
    w5data <- callVol[calls$Day>=startDays[5] & calls$Day<=endDays[5]];
    if ( endDays[5] > nDays ) {
      w5data <- c(w5data, rep(0, 288*(endDays[5]-nDays-1)));
    }
    p5 <- PlotMultipleDayVolume(w5data, fullYear, month, startDays[5], endDays[5],
                                yrng=yrng, interestingDay=interestingDay);
    if ( nWeeks > 5 ) {
      w6data <- callVol[calls$Day>=startDays[6] & calls$Day<=endDays[6]];
      if ( endDays[6] > nDays ) {
        # BUG FIX: the original assigned this padded vector to w5data,
        # leaving week 6 unpadded (and silently re-padding week 5).
        w6data <- c(w6data, rep(0, 288*(endDays[6]-nDays-1)));
      }
      p6 <- PlotMultipleDayVolume(w6data, fullYear, month, startDays[6], endDays[6],
                                  yrng=yrng, interestingDay=interestingDay);
      multiplot(p1,p2,p3,p4,p5,p6);
    }
    else { multiplot(p1,p2,p3,p4,p5); }
  }
  else { multiplot(p1,p2,p3,p4); }
}
# Render the month-volume summary for tower 790 around the 2011-12-06 event
# to PDF, then preview the previous month on screen.
setwd("C:/Plots/NetworkResearch/Event/Summaries/Bombing-2011-12-06");
pdf( "Bombing-2011-12-06-NearestTowerMonthVolume.pdf", height=10, width=10 );
print(PlotMonthCallVolume( 790, 11, 12, interestingDay = 6 ));
dev.off();
PlotMonthCallVolume( 790, 11, 11, interestingDay = 7 )
|
6ebeaf4820dafe2c766666597420d849fd6370de
|
5ed42045b81d6070b79fbe7dcf16a5cd54a5add4
|
/tests/testthat/test_toZipCode.R
|
edffa9dd42b4883b6d7aca8c0dde632177fe6e12
|
[] |
no_license
|
atajti/data.cleaning.HU
|
f123d43745fecccce88323e4b267074aeb294963
|
f5b7cc567055d8260936719c0c213616ab194fc6
|
refs/heads/master
| 2021-01-24T03:07:10.520724
| 2018-11-17T20:10:37
| 2018-11-17T20:10:37
| 122,875,778
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,113
|
r
|
test_toZipCode.R
|
# testthat suite for toZipCode() from the data.cleaning.HU package.
# Test names are Hungarian runtime strings and must stay as-is; English
# translations are given in the comments above each test.
library(data.cleaning.HU)
context("toZipCode")
# "hosszok_egyeznek" = "lengths match": output length equals input length.
test_that("hosszok_egyeznek",{
expect_equal(length(toZipCode(character(0))),
0)
expect_equal(length(toZipCode("2011 Budakal\u00E1sz, Kinizsi utca 33.")),
1)
expect_equal(length(toZipCode(c("2011 Budakal\u00E1sz, Kinizsi utca 33.",
"2011 Budakal\u00E1sz, Kinizsi utca 33."))),
2)
})
# "NA-k a helyukon vannak" = "NAs stay in place": NA inputs map to NA
# outputs at the same positions.
test_that("NA-k a hely\u00FCk\u00F6n vannak", {
expect_equal(which(is.na(toZipCode(NA))),
1)
expect_equal(which(is.na(toZipCode(c("2011 Budakal\u00E1sz, Kinizsi utca 33.",
NA)))),
2)
expect_equal(which(is.na(toZipCode(c(NA,
"2011 Budakal\u00E1sz, Kinizsi utca 33.",
NA)))),
c(1, 3))
})
# "mukodik a pelda" = "the documented example works": zip codes are
# extracted even from a slightly misspelled address.
test_that("m\u0171k\u00F6dik a p\u00E9lda", {
expect_equal(toZipCode(c("8000 Si\u00F3fok, Ballag\u00F3 utca 14/a",
"8000 Siofk, Ballag\u00F3 utca 14/a 3. em 31.")),
c("8000", "8000"))
})
|
940ab0f1d7af9aa3c9d3c2fec1cda7e65d2c2169
|
e20533fc38ede71c197d20355315a0e41feb00d0
|
/man/predict.jtharm.Rd
|
41c557c62a4ecf57887e300512d9f01a49e8d827
|
[] |
no_license
|
cran/SemiSupervised
|
b533d5757b37a53ec4fc238ec1de3b5a9af32be5
|
1b0bf4d3ff6ef3b4d80e536ded4e8e69dc58750f
|
refs/heads/master
| 2020-03-16T22:01:04.233684
| 2018-05-11T08:59:25
| 2018-05-11T08:59:25
| 133,023,529
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,812
|
rd
|
predict.jtharm.Rd
|
\name{predict.jtharm}
\alias{predict.jtharm}
\alias{predict,jtharm-method}
\title{Out-of-Sample Predict Procedure for \code{jtharm}}
\description{
This implements the out-of-sample prediction for a
\sQuote{jtharm} object.
}
\usage{\S4method{predict}{jtharm}(object,xnew,gnew,type=c("vector","response","prob"),pow=1,\dots)}
\arguments{
\item{object}{ an existing \sQuote{jtharm} object.}
\item{xnew}{ an object of class \sQuote{data.frame}, \sQuote{vector}, or \sQuote{matrix}.
This is not always necessary and depends on call context (refer to details below).
}
\item{gnew}{ the \sQuote{matrix} of new graph links between the data to predict and
the data used for training. This is not always necessary and depends on call
context (refer to details below).}
\item{type}{ the type of prediction to return.}
\item{pow}{ tuning parameter for the weighted power in the
interpolation predictions.}
\item{\dots}{mop up additional arguments.}
}
\details{
The prediction inputs are dependent upon how one calls the original \code{\link{jtharm}} generic function.
The cases are discussed next:
1) y~.: This is the default and most common case. Set xnew to your new hold-out data
set and do not initialize gnew.
2) y~dG(G): The gnew argument will [most likely] be a non-symmetric
\sQuote{matrix} of adjacencies between some new set of observations and the original x data.
3) y~sG(G): The gnew argument will [most likely] be a non-symmetric \sQuote{matrix}
of similarity adjacencies [most likely] observed directly.
4) Non-formula call: gnew will have to be provided in this case but xnew is ignored.
}
\value{
If \code{type}(object) is \sQuote{r}, a vector of predicted values is
returned. If \code{type}(object) is \sQuote{c}, the object returned depends
on the type argument.
}
\author{Mark Vere Culp}
\examples{
## Prediction depends on the nature of the call. Consider some examples.
library(mlbench)
data(Sonar)
n=dim(Sonar)[1]
p=dim(Sonar)[2]
nu=0.2
set.seed(100)
L=sort(sample(1:n,ceiling(nu*n)))
U=setdiff(1:n,L)
U1=sample(U,ceiling(0.5*n))
y.true<-Sonar$Class
Sonar$Class[U]=NA
## Typical, call to jtharm and predict
g.jtharm1<-jtharm(Class~.,data=Sonar[c(L,U1),])
p.jtharm1<-predict(g.jtharm1,xnew=Sonar[U,-p])
tab=table(y.true[U],p.jtharm1)
1-sum(diag(tab))/sum(tab)
\donttest{
## Predict the graph only case Debug later
Dij<-x.scaleL(Sonar[,-p],L)
Dij<-as.matrix(cosineDist(Dij))
Dij1<-Dij[c(L,U1),c(L,U1)]
attr(Dij1,"metric")=attr(Dij,"metric")
attr(Dij1,"distance.graph")=attr(Dij,"distance.graph")
g.jtharm2<-jtharm(Class~dG(Dij1),data=Sonar[c(L,U1),])
p.jtharm2<-predict(g.jtharm2,gnew=Dij[U,c(L,U1)])
tab=table(y.true[U],p.jtharm2)
1-sum(diag(tab))/sum(tab)
}
}
\keyword{classes}
\keyword{models}
\keyword{methods}
|
dc667aef1c611d5b8a3e28eb0c184a8d46555198
|
8c2004d5b4e567fc71539e19911a4b4d37504b9a
|
/Data-Preprocessing.R
|
88571784df996ee8dc3e1cdb04f7670fd310518e
|
[] |
no_license
|
Teerapon789/Gene-Co-Expression-Network-of-Whole-Cancer-Cell-Lines
|
72957be6c55cd489d078071613c8a8c6cfeb49f1
|
9ea0479ea3891e259bad53c8bc5c37492b5f8921
|
refs/heads/main
| 2023-01-29T12:22:55.859724
| 2020-12-09T15:26:51
| 2020-12-09T15:26:51
| 308,499,856
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,866
|
r
|
Data-Preprocessing.R
|
### title: "GeneNetworkAnalysis"
###author: "Teerapon Sahwangarrom"
# Preprocess CCLE RNA-seq read counts (DESeq2 normalisation + variance
# stabilisation) ahead of co-expression network analysis. Helper functions
# come from the three sourced files below; paths are machine-specific.
source("/Users/teeraponsahwangarrom/Desktop/RNASeq/FunctionFiles/networkFunctions.R")
source("/Users/teeraponsahwangarrom/Desktop/RNASeq/FunctionFiles/preprocessing.R")
source("/Users/teeraponsahwangarrom/Desktop/RNASeq/FunctionFiles/outlierRemovalFunctions.R")
library(DESeq2)
# Columns 1-2 of the raw table are gene annotation; the rest are samples.
data = read.table("/Users/teeraponsahwangarrom/Desktop/CCLE_RNAseq_081117.reads.txt.bz2", sep="\t", header = TRUE)
GeneAnnot0 = data[, 1:2]
# Transpose so rows are samples and columns are genes.
counts = t(as.matrix(data[, -c(1:2)]))
colnames(counts) = GeneAnnot0$Name
# Cancer type is encoded in the sample name after the first underscore.
type = sub("[^_]*_", "", rownames(counts))
table(type)
typeDF = data.frame(type = type)
# One 0/1 indicator column per cancer type with at least 5 samples
# (binarizeCategoricalColumns is defined in the sourced helper files).
type.bin = binarizeCategoricalColumns(typeDF, val1 = 0, val2 = 1, includePrefix = FALSE, minCount = 5, includePairwise = FALSE, includeLevelVsAll = TRUE, dropFirstLevelVsAll = FALSE, dropUninformative = TRUE, nameSep = "")
sampleAnnot = cbind(typeDF, type.bin)
# DESeq2 size-factor normalisation (intercept-only design).
ds = DESeqDataSetFromMatrix.PL(t(counts), sampleAnnot, design = ~1)
ds = estimateSizeFactors(ds)
counts.norm = t(counts(ds, normalized = TRUE))
# Keep genes passing the expression filter (heColumns: presumably min
# value 8 in at least 10% of samples -- TODO confirm in helper source).
zz = heColumns(counts, 8, 0.1)
x = log2(counts.norm[, zz] + 1)
var = colVars(x)
means = colMeans(x)
av = approximateVST(counts.norm[, zz])
# Full preprocessing pipeline (normalisation, VST, outlier removal) with
# results written under the saveDir.base directory.
prepr = preprocessGeneral( xdata = counts, addToAnalysisManager = FALSE, organism = "human", normalizationPipeline = "DESeq2", idsAreSymbols = FALSE, analysisName = "CancerRNASeq", pheno = sampleAnnot, sampleAnnotation = sampleAnnot, plotPheno = sampleAnnot, geneAnnotation = GeneAnnot0, minValue = 10, minProportion = 0.1, useApproximateVST = TRUE, vst.design = "~1", bw.groupBy = "type", bw.minSamplesPerGroup = 1, outlierRemovalZ = 8, ior.replace = FALSE, ior.remove = FALSE, saveXData.entrez = FALSE, saveDir.base = "/Users/teeraponsahwangarrom/Desktop/GeneNetworkAnalysis", stp.mainBase = "Cancer RNA Seq", fileNamePrefix = "CancerRNASeq", verbose = 4)
|
005d22265bfcb3fd5806014b018beeb8ff410201
|
24325e9e235f53546d026a97dd6e2c3e037c0bfc
|
/Modeling_Accuracy_with_Survey_data.R
|
6e8da0c71483d7636c7c5d94d4aa8e89e25b6650
|
[] |
no_license
|
Andy-Powell/SciCast_Andy
|
de51b47971b2ea02eb40f5db19fb8c1769bc68fa
|
db1740000f25ea41be7bc5c1a23f62d86daacb75
|
refs/heads/master
| 2021-01-16T18:06:15.410377
| 2015-04-01T22:21:46
| 2015-04-01T22:21:46
| 30,318,812
| 1
| 1
| null | 2015-03-25T19:00:09
| 2015-02-04T20:13:47
|
R
|
UTF-8
|
R
| false
| false
| 1,775
|
r
|
Modeling_Accuracy_with_Survey_data.R
|
# Model forecaster accuracy against survey data.
# NOTE(review): this script reads globals defined elsewhere (un, pus, pip,
# pio, aci, acdi, aott, age, gndr, edu, see, crt) -- presumably loaded by a
# companion script; confirm before running standalone.
# Map each entry of `un` to its participant id in `pip` (keyed by `pus`).
pid <- rep(0,length(un))
for (i in 1:length(un)) {
if (length(pus[pus==un[i]])>0) {
pid[i] <- pip[pus==un[i]]
}
# else { pid[i] <- NA }
}
#pid <- pid[complete.cases(pid)==TRUE]
# Accuracy vectors (acis/acdis) and root-transformed versions (acisr/acdisr);
# the power transforms aim for approximate normality of residuals.
acis <- acdis <- acisr <- acdisr <- rep(-1,length(pid))
for (i in 1:length(pid)) {
if (length(aci[pio==pid[i]])==0) {
acis[i] <- acdis[i] <- acisr[i] <- acdisr[i] <- NA
}
else {
acis[i] <- aci[pio==pid[i]]; acdis[i] <- acdi[pio==pid[i]]
acisr[i] <- aci[pio==pid[i]]^(1/4); acdisr[i] <- acdi[pio==pid[i]]^(1/8) # Transformation of variables to achieve normality
}
}
# hist(acis); hist(acdis)
# Drop implausible accuracy values (> 2) as outliers.
acdisr[acdis>2] <- NA; acdis[acdis>2] <- NA
#g <-lm(acisr ~ age+gndr+edu+see*crt+aott)
#summary(g)
#anova(g)
#g <-lm(acisr ~ age+aott+see)
#summary(g)
#anova(g)
# Only aot seems to help at all.
# Final model: transformed accuracy explained by the aott score alone.
g <-lm(acisr ~ aott)
summary(g)
anova(g)
# Diagnostic plots: normal Q-Q of residuals, residuals vs fitted, fit overlay.
par(mfrow=c(3,1))
qqnorm(g$res)
plot(g$fitted,g$res,xlab="Fitted",ylab="Residuals",main="Log response")
plot(g$fitted,acisr[is.na(acisr)==F&is.na(aott)==F],xlab="Fitted",ylab="Residuals",main="Log response")
cor(aott,acisr,use="complete.obs")
plot(aott,acisr)
abline(g$coefficients[1],g$coefficients[2],lwd=2)
# Same relationship on the untransformed accuracy.
g <-lm(acis ~ aott)
plot(aott,acis)
abline(g$coefficients[1],g$coefficients[2],lwd=2)
#png("demoBS.png", width = 1728, height = 2160, pointsize = 10, res = 360)
#plot(crt[ge==1],BSid[ge==1],col="red",pch=16, xlab="Cognitive Reflection Score",ylab="Mean Change in Market Brier Score per Trade",ylim=c(-0.30,0.25))
#points(crt[ge==0],BSid[ge==0],pch=1)
#abline(g$coefficients[1],g$coefficients[3],lwd=2)
#abline(g$coefficients[1]+g$coefficients[2],g$coefficients[3]+g$coefficients[4],col="red",lwd=3,lty=3)
# legend(0.25,-0.15,c("No Graduate Degree","Graduate Degree"),lwd=c(2,3),lty=c(1,3),pch=c(1,16),col=c("black","red"))
#dev.off()
|
751be32103b0e6c3e610237a9da4bcd3fb439af8
|
2689f1610a38608c1bf69fd38a612d6e113b0673
|
/code/randomForest/randomForest.R
|
89cb7115c5c1bc3dff9687566de2554bd4e68d1c
|
[
"MIT"
] |
permissive
|
ctufts/PPFA
|
49acef5f643eb36106ed091afb17db9cf4e40105
|
48f5d837d9eecaaf4218ab97eb1790a196e2b0f1
|
refs/heads/master
| 2021-01-25T09:53:38.306185
| 2015-04-13T18:48:17
| 2015-04-13T18:48:17
| 32,410,627
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,731
|
r
|
randomForest.R
|
# Environment setup and shared data for the per-label random-forest script.
# NOTE(review): rm(list = ls()) wipes the global environment; kept for
# compatibility but discouraged in scripts.
rm(list = ls())
library(caret)
library(gbm)
library(doMC)
# BUG FIX: the modelling loop below calls randomForest(), but the package
# was never attached, so the script failed at the first fit.
library(randomForest)
# registerDoMC(2)
# Per-label feature importance summary; column X holds feature names.
att.importance <- read.csv("Data/attributeSummary.csv")
# Error metric: binary log-loss (cross-entropy) between actual 0/1 labels
# and predicted probabilities. Predictions are clamped away from 0 and 1
# so the logarithms stay finite.
llfun <- function(actual, prediction) {
  eps <- 1e-15
  clamped <- pmin(pmax(prediction, eps), 1 - eps)
  -mean(actual * log(clamped) + (1 - actual) * log(1 - clamped))
}
# Load the competition data: feature tables, multi-column labels, and the
# submission template whose column order defines the output.
train.x <- read.csv("Data/train_values.csv")
train.y <- read.csv("Data/train_labels.csv")
test.x <- read.csv("Data/test_values.csv")
submission <- read.csv("Data/SubmissionFormat.csv")
# impute numerical and ordinal data to zero
# NOTE(review): grep("n_", ...) matches "n_" anywhere in a name, not just a
# prefix -- confirm the column naming makes this safe.
numeric_col <- grep("n_", names(train.x))
categorical_col <- grep("c_", names(train.x))
ordinal_col <- grep("o_", names(train.x))
# impute missing ordinal and numeric values
train.x[, numeric_col][is.na(train.x[,numeric_col])] <- 0
train.x[, ordinal_col][is.na(train.x[,ordinal_col])] <- 0
# Re-level test-set factors to the training levels so predict() sees a
# consistent factor encoding (unseen levels become NA).
for( i in 1:length(categorical_col)){
test.x[,categorical_col[i]] <- factor(test.x[,categorical_col[i]],
levels = levels(train.x[,categorical_col[i]])
)
}
# One probability column per label; column 1 is reserved for the id.
result.matrix <- matrix(0,nrow = nrow(test.x), ncol = ncol(submission))
train.results <- rep(0, (ncol(submission)-1))
n.trees <- 5000
# Fit one model per label column of att.importance (column 1 = names).
for( i in 2:ncol(att.importance)){
print(i)
# identify valid features and create formula
# NOTE(review): q is computed but never used.
q <- quantile(att.importance[,i])
valid.features <- as.character(att.importance$X[att.importance[,i] > 0])
f <- formula(paste(
names(train.y)[i], " ~ ",
paste(valid.features, collapse = " + ")
))
# split training data into training and test sets
# NOTE(review): `train` is unused; the split actually used is inTraining.
train <- createDataPartition(factor(train.y[, i]), p = 0.75)
set.seed(998)
inTraining <- createDataPartition(train.y[,i], p = .75, list = F)
# fitControl <- trainControl(method = "repeatedcv",
# number = 10,
# repeats = 1)
train.ds <- cbind(factor(train.y[,i]), train.x[, valid.features])
names(train.ds)[1] <- names(train.y)[i]
print("train model")
#generate model
# NOTE(review): despite the gbmFit name, this fits a randomForest model;
# the randomForest package must be attached for this call to work.
set.seed(825)
gbmFit <- randomForest(f, data = train.ds[inTraining, ],
n.tree = n.trees)
# create test set after model
probs <- predict(gbmFit, newdata = train.ds[-inTraining, ],
type = "prob")
# Held-out log-loss for this label, kept for later inspection.
error <- llfun(train.y[-inTraining,i], probs[,2])
train.results[(i-1)] <- error
test.probs <- predict(gbmFit, newdata = test.x, type = "prob")
result.matrix[,i] <- test.probs[,2]
}
# Assemble the submission data frame in the template's column order.
result.matrix[,1] <- test.x$id
result.df <- as.data.frame(result.matrix)
names(result.df) <- names(submission)
# write.csv(result.df, "Data/results/gbm_5000trees.csv", quote = F,
# row.names = F)
|
67171a446b9f19d2707afb14786cec04da89c1cc
|
81354954be61599eae884b0fa9f6ce4bccdd06b0
|
/R/install_magma.R
|
2c2d2185a9b9ee33f772706d1f519d7da1210857
|
[] |
no_license
|
obada-alzoubi/MAGMA_Celltyping
|
272631794f31733c24c6384fd2f181a0972eca9c
|
f931877dedcde103723f14242d5242fdca7b3af6
|
refs/heads/master
| 2023-07-10T15:53:22.307531
| 2021-08-20T09:19:22
| 2021-08-20T09:19:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,708
|
r
|
install_magma.R
|
#' Install the MAGMA command line tool
#'
#' Downloads the MAGMA binary for the current OS, unzips it into
#' \code{dest_dir}, and symlinks the executable into /usr/local/bin.
#' The download is skipped when a working \code{magma} is already on the PATH.
#'
#' @param dest_dir Directory the MAGMA zip is downloaded and extracted into.
#' @source [MAGMA website](https://ctg.cncr.nl/software/magma)
#' @source [MAGMA.celltyping documentation](https://github.com/NathanSkene/MAGMA_Celltyping)
install_magma <- function(dest_dir="~"){
    # Identify the host OS so the matching binary is fetched.
    get_os <- function () {
        OS <- ""
        switch(Sys.info()[['sysname']],
               Windows= {OS <- "Windows"},
               Linux  = {OS <- "Linux"},
               Darwin = {OS <- "Mac"})
        return(OS)
    }
    # Probe for an existing installation: running `magma` with no arguments
    # prints a fixed usage line on stdout.
    check_magma <- ""
    try({
        check_magma <- system("magma", intern=TRUE)
    })
    if(check_magma[1]=="No arguments specified. Please consult manual for usage instructions."){
        message("MAGMA already installed.")
    }else {
        message("MAGMA not installed. Downloading...")
        # NOTE(review): there is no Windows entry here, so magma_url is NULL
        # on Windows and file.path() below fails -- confirm whether Windows
        # should be supported or rejected with a clear error.
        magma_url <- switch(get_os(),
                            Mac="https://ctg.cncr.nl/software/MAGMA/prog/magma_v1.08a_mac.zip",
                            Linux="https://ctg.cncr.nl/software/MAGMA/prog/magma_v1.08a.zip",
                            NULL="https://ctg.cncr.nl/software/MAGMA/prog/magma_v1.08a.zip")
        destfile <- file.path(dest_dir, basename(magma_url))
        # fixed=TRUE: treat ".zip" literally rather than as a regex where
        # "." matches any character.
        destpath <- gsub(".zip", "", destfile, fixed=TRUE)
        download.file(magma_url,
                      destfile = destfile)
        unzip(destfile, junkpaths = TRUE,
              exdir = destpath,
              overwrite = TRUE)
        file.remove(destfile)
        message("MAGMA installation complete.")
        message("MAGMA path:\n", destpath)
        # Create a symlink to the actual magma executable
        R.utils::createLink(link="/usr/local/bin/magma",
                            target=file.path(destpath, "magma"))
    }
}
|
b20f4143e684741615ff5405eeaf15bfc7ec63c5
|
8c72041186b9b7e233b13fcb677462b9b4d3dd70
|
/R/create_recurrence_heatmap.R
|
27a73c0d56e460ed41670de44519a1edc3f5e1ae
|
[] |
no_license
|
Bio-Core/BioCore.Annotation
|
920b3062b0a8d1fa5d15f0db8e699b704b16176f
|
859d12c35d8f5eb67779626a007bcbe272e077c0
|
refs/heads/master
| 2021-05-12T02:46:21.125052
| 2020-02-03T15:51:10
| 2020-02-03T15:51:10
| 117,597,906
| 0
| 0
| null | 2020-02-03T15:51:11
| 2018-01-15T21:36:07
|
R
|
UTF-8
|
R
| false
| false
| 494
|
r
|
create_recurrence_heatmap.R
|
# Build (but do not print) a lattice level-plot heatmap of the given matrix,
# transposed so samples and features swap axes, shaded white -> steelblue.
#
# Args:
#   data: a matrix or data.frame of values; must not be NULL.
# Returns: a lattice "trellis" object (caller is responsible for printing).
create_recurrence_heatmap <- function(data=NULL) {
if (is.null(data)) stop("Mandatory argument data is missing")
trellis_object <- lattice::levelplot(
x = as.matrix(t(data)),
xlab=NULL,
ylab=NULL,
col.regions=colorRampPalette(c('white', 'steelblue')),
# Tick at every row/column, rotated and shrunk so dense axes stay legible.
scales=list(
x = list(
at = seq(1, ncol(data), 1),
rot = 90,
cex = 0.5
),
y = list(
at=seq(1, nrow(data), 1),
cex = 0.5
)
)
)
return(trellis_object)
}
|
617dc43d53f76f76cb346c6223b7eb16229118da
|
f857705b2aa6f56e4c277c386effed7ee63acec7
|
/r/shiny.r
|
99cb971a6ecf9b59686d586644cecd9f6ae4ea40
|
[
"MIT"
] |
permissive
|
ballance/HackathonCLT2017
|
5fe36fb624a747836750e0c18e04b78b3dedaffa
|
e453afbf3288c6816dee321b4d0d6dacc8adda9f
|
refs/heads/master
| 2021-01-23T03:48:26.331675
| 2017-03-26T22:44:35
| 2017-03-26T22:44:35
| 86,126,104
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 613
|
r
|
shiny.r
|
# Minimal Shiny app: show the first N rows of match_details_new from a
# remote MySQL database.
library(shiny)
library(DBI)
# UI: a numeric input for the row count and a table output.
ui <- fluidPage(
numericInput("nrows", "Enter the number of rows to display:", 5),
tableOutput("tbl")
)
# Server: open a fresh connection per render, query, disconnect via on.exit.
server <- function(input, output, session) {
output$tbl <- renderTable({
# SECURITY(review): database credentials are hard-coded in source; move
# them to environment variables or a config file and rotate this password.
conn <- dbConnect(
drv = RMySQL::MySQL(),
user = 'droptables',
password = 'g6P3QV6Y#^8j',
host = 'hk-data.chtlgfr8b1iu.us-east-1.rds.amazonaws.com',
dbname='hkdatadatadata')
on.exit(dbDisconnect(conn), add = TRUE)
# NOTE(review): input$nrows is pasted straight into the SQL string;
# numericInput limits exposure, but coerce to integer before
# interpolating (or use a parameterised query).
dbGetQuery(conn, paste0(
"select * from match_details_new limit ", input$nrows, ";"))
})
}
shinyApp(ui, server)
|
fbb47809cfaba0e0c4a0e98437986ba7793e4ef5
|
cddaba3a09c2ec3e289998fad9572258fc746711
|
/inst/examples/loess-shiny/loess-shiny/server.R
|
b37b868815774cd5dfdd56c32896942f45492c0b
|
[] |
no_license
|
sjp/sjpMScThesis
|
0b8a51d50ccda0c40f6fffb4efae8172048b5259
|
cfe9418a2f0014466b7eb8aceb1dab00e359517b
|
refs/heads/master
| 2021-01-19T02:13:51.056403
| 2014-05-25T10:00:37
| 2014-05-25T10:00:37
| 10,752,889
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,413
|
r
|
server.R
|
# Shiny server for an interactive loess demo: a static gridSVG scatterplot
# is rendered once, and a loess smoother line is re-generated as SVG text
# whenever the span slider changes, to be spliced into the page client-side.
suppressPackageStartupMessages({
library(grid)
library(XML)
library(shiny)
library(selectr)
library(ggplot2)
library(gridSVG)
})
# Create randomly generated data
xs <- seq(-10, 10, length.out = 100)
ys <- xs^2 + 15 * rnorm(100)
# Create a ggplot2 plot that will be treated as a constant
# (rendered once to SVG text on a null device; coords/mappings are
# registered globally for later viewport lookup).
svgplot <- {
pdf(file = NULL)
print(qplot(xs, ys))
svgplot <- grid.export("")
dev.off()
gridSVGCoords(svgplot$coords)
gridSVGMappings(svgplot$mappings)
saveXML(svgplot$svg, file = NULL)
}
# Find the viewport that the data belongs in.
# This is useful because we want to insert our smoother within it.
# Also, we need to recreate this viewport so we need its name
panelvp <- getSVGMappings("layout::panel.3-4-3-4", "vp")
shinyServer(function(input, output) {
spanPar <- reactive({ as.numeric(input$spanalpha) })
# Generate loess smoother lines based on the span parameter
loessLine <- reactive({
# Opening a null device with a new page
pdf(file = NULL)
grid.newpage()
# Create a new viewport that is located at the same
# position and has the same size as the original vp
newvp <- viewportCreate(panelvp, newname = "newvp")
# Original vp does not have scales, introduce them
newvp$xscale <- extendrange(xs)
newvp$yscale <- extendrange(ys)
pushViewport(newvp)
# Creating the smoother
ll <- suppressWarnings(loess(ys ~ xs, span = spanPar()))
# Creating a line based on the smoother
grid.lines(x = xs, y = fitted(ll), default.units = "native",
gp = gpar(col = "red"), name = "smoother")
# Creating SVG output and reducing to only the smoother line
tmp <- grid.export("")
dev.off()
gridSVGMappings(tmp$mappings)
smootherID <- getSVGMappings("smoother", "grob", "selector")
loesssvg <- querySelectorNS(tmp$svg, smootherID,
c(svg = "http://www.w3.org/2000/svg"))
# Export XML node to text
# Wrap in SVG for easy parsing with DOMParser
paste('<svg xmlns="http://www.w3.org/2000/svg">',
saveXML(loesssvg, file = NULL, indent = FALSE),
"</svg>", sep = "")
})
# Text outputs consumed by client-side JS: the smoother fragment and the
# constant base plot.
output$smoothempty <- renderText({ loessLine() })
output$svggrid <- renderText({ svgplot })
})
|
c3bea0ee2195c9918ee5ddc41e0ff9f0323ebf4e
|
59df2cca07a082ea005dfdea161322ce0ac4d3de
|
/paste.R
|
5a4e10ac582128bfc1b7049a7f3dbfef162b682e
|
[] |
no_license
|
vishwajeet993511/datasciencecoursera
|
fdeff752311ca63f1ad3d3df632268920dc4b512
|
7f0b8d1319953dad4abd699c8b50cb3f4f892938
|
refs/heads/master
| 2021-01-11T07:43:13.094360
| 2017-03-18T18:10:17
| 2017-03-18T18:10:17
| 68,915,555
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 95
|
r
|
paste.R
|
# Build one unitedstateszipcodes.org URL per row of data.csv, from the
# values in the first column (presumably ZIP codes -- TODO confirm).
x <- read.csv("data.csv")
# paste0() is the idiomatic equivalent of paste(..., sep = "").
url <- paste0("http://www.unitedstateszipcodes.org/", x[, 1], "/")
|
0ae5e10f334afa4e668deb3fe752330a197339e2
|
4f59527ddd189517932e82cdf16e07e7a514cd45
|
/rdf/tp10/functions.R
|
cfe42c6d73f8f8ae59533c7fa385d026070a56af
|
[] |
no_license
|
HugoAlder/m1s2
|
b335270022bf27bff073cd65efd36fed6a3bea04
|
925238884594f785a3c67643ef1e7539be7e64ae
|
refs/heads/master
| 2021-05-11T23:32:47.656966
| 2018-04-16T15:09:42
| 2018-04-16T15:09:42
| 117,513,994
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 3,871
|
r
|
functions.R
|
library ("EBImage")
# Load an image as a single grey-level channel: an image that is already
# 2-D is returned as-is; a colour image is reduced to its red channel.
rdfReadGreyImage <- function(nom) {
  img <- readImage(nom)
  if (length(dim(img)) != 2) {
    channel(img, 'red')
  } else {
    img
  }
}
# Split one large mosaic of 400 faces (a 20 x 20 grid of 33-row x 40-column
# tiles) into a 40 x 33 x 400 stack, one slice per face.
# NOTE(review): each extracted tile allFaces[(1+j*33):((j+1)*33), ...] is
# 33 rows x 40 columns, but it is assigned into a 40 x 33 slice; R accepts
# this because the element counts match, silently refilling column-wise --
# confirm this orientation is intended.
stackedFaces <- function(allFaces) {
res = array(0, dim=c(40,33,400))
for (i in 0:19) {
for (j in 0:19) {
# Face index (i*20 + j + 1) runs 1..400 over the 20x20 grid.
res[,,(i*20+j+1)] = allFaces[(1+j*33) : ((j+1)*33), (1+i*40) : ((i+1)*40)]
}
}
res
}
# Probability that each pixel equals 1 across the images of the current
# subset. classCount is 40 classes x 10 samples; classCount[k, l] == 1
# selects image id (k-1)*10 + l in the 40 x 33 x 400 stack.
# Returns a 40 x 33 matrix of probabilities.
# (Replaces the original O(40*33*40*10) quadruple loop with an equivalent
# vectorised slice-sum.)
probPerPixel <- function(stackedFaces, classCount) {
  # Selection mask in image-id order: t(classCount) flattens column-wise so
  # that position (k-1)*10 + l holds classCount[k, l].
  selected <- as.vector(t(classCount)) == 1
  # Size of the current subset (same value nbImages() computes).
  total <- sum(classCount == 1)
  # Pixel-wise sum over the selected slices, normalised by subset size.
  apply(stackedFaces[, , selected, drop = FALSE], c(1, 2), sum) / total
}
# Shannon entropy (in nats) of each pixel's Bernoulli probability over the
# current subset. The log(p^p) form evaluates to 0 at p = 0 and p = 1
# (since 0^0 == 1 in R), avoiding the NaN that p * log(p) would produce.
getEntropy <- function(probPerPixel) {
  prob <- probPerPixel
  comp <- 1 - prob
  -(log(prob^prob) + log(comp^comp))
}
# Return the (row, col) position of the highest-entropy pixel; ties are
# broken by taking the first match in column-major order.
bestPixel <- function(entropy) {
  hits <- which(entropy == max(entropy), arr.ind = TRUE)
  hits[1, ]
}
# For each image in the current subset, read the value of the chosen
# representative pixel. Returns a 40 x 10 matrix where entry (i, j) is the
# pixel value of image id (i-1)*10 + j when that image is in the subset
# (classCount[i, j] == 1), and 0 otherwise.
getSelectedImages <- function(bestPixel, stackedFaces, classCount) {
  px <- bestPixel[1]
  py <- bestPixel[2]
  # All 400 pixel values, laid out so entry (i, j) matches image id
  # (i-1)*10 + j (class i, sample j).
  pixelByImage <- matrix(stackedFaces[px, py, ], nrow = 40, ncol = 10,
                         byrow = TRUE)
  # Keep the pixel value only for images belonging to the current subset.
  ifelse(classCount == 1, pixelByImage, 0)
}
# Number of images present in a subset, i.e. the count of entries equal to 1.
nbImages <- function(classCount) {
  sum(classCount == 1)
}
# Determine whether a subset contains images from only one class.
# Returns the class id (row index, 1-40) when exactly one class still has
# images, -1 when several classes remain, and 0 when the subset is empty.
# (Replaces the original loop, which also carried a dead `lim = 1`
# assignment that was never read.)
hasUniqueClass <- function(classCount) {
  present <- which(rowSums(classCount == 1) > 0)
  if (length(present) == 0) {
    0                 # empty subset (original behaviour)
  } else if (length(present) == 1) {
    present           # the single remaining class id
  } else {
    -1                # more than one class left
  }
}
# Complement of a subset relative to its parent: an image is kept (1) when
# it was present in the parent (oldClassCount != 0) but is not in the child
# subset (classCount != 1); every other entry is 0.
# (Vectorised replacement for the original element-by-element double loop;
# same truth table, same 40 x 10 numeric result.)
reverseClassCount <- function(oldClassCount, classCount) {
  array(as.numeric(classCount != 1 & oldClassCount != 0), dim = c(40, 10))
}
# Recursively build a decision tree over the current image subset by
# repeatedly splitting on the highest-entropy pixel.
# NOTE(review): the "tree" is only printed (branch/class messages via
# print), not returned as a data structure -- confirm that is the intent.
getTree <- function(stackedFaces, classCount) {
# Probability that each pixel is 1 within the current subset.
prob = probPerPixel(stackedFaces, classCount)
# Per-pixel entropy of those probabilities.
entropy = getEntropy(prob)
# Most informative pixel (highest entropy).
bestpixel = bestPixel(entropy)
# Images of the subset whose chosen pixel is 1.
leftClassCount = getSelectedImages(bestpixel, stackedFaces, classCount)
# Remaining images of the subset (chosen pixel not 1).
rightClassCount = reverseClassCount(classCount, leftClassCount)
# Left child: leaf if a single class remains, otherwise recurse.
left = hasUniqueClass(leftClassCount)
if (left != -1) {
print(paste("Class ok from left: ", left))
} else {
print("New branch from left")
getTree(stackedFaces, leftClassCount)
}
# Right child: same rule.
right = hasUniqueClass(rightClassCount)
if (right != -1) {
print(paste("Class ok from right : ", right))
} else {
print("New branch from right")
getTree(stackedFaces, rightClassCount)
}
print("END")
}
|
0f32daa4660450afb0e60077e22653aa7ad64c0d
|
1679bf45175566aec338b46eb0c968879cae2343
|
/R/pca_plots.R
|
084356a42f40b44d5cdd6bc22a5bf0f9b37f2163
|
[] |
no_license
|
UW-GAC/QCpipeline
|
e485044355e74dae3de324fc5985f82b9b41cb2b
|
f60319122d0842852acaa410eb977344be2bf4dc
|
refs/heads/master
| 2021-07-10T00:17:13.832045
| 2020-05-27T00:01:50
| 2020-05-27T00:01:50
| 97,990,389
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,855
|
r
|
pca_plots.R
|
##########
# PCA plots
# Usage: R --args config.file pca.type < pca_plots.R
##########
library(GWASTools)
library(QCpipeline)
library(RColorBrewer)
library(MASS)
library(ggplot2)
library(GGally)
library(ggExtra)
sessionInfo()
# read configuration
args <- commandArgs(trailingOnly=TRUE)
if (length(args) < 1) stop("missing configuration file")
config <- readConfig(args[1])
# check for type
if (length(args) < 2) stop("missing pca type (study or combined)")
type <- args[2]
theme_set(theme_bw())
# check config and set defaults
# Required/optional config keys and their defaults differ between a
# study-only PCA and a combined (study + external, e.g. HapMap) PCA;
# `default` is positionally aligned with c(required, optional).
if (type == "study") {
required <- c("annot_scan_file", "annot_scan_raceCol", "annot_snp_file")
optional <- c("annot_scan_ethnCol", "annot_snp_rsIDCol",
"num_evs_to_plot", "out_corr_file", "out_pca_file",
"out_corr_plot_prefix", "out_corr_pruned_plot_prefix",
"out_dens_plot", "out_ev12_plot", "out_pairs_plot", "out_scree_plot",
"out_parcoord_plot",
"parcoord_vars", "out_parcoord_var_prefix")
default <- c(NA, "rsID", 12, "pca_corr.RData", "pca.RData",
"pca_corr", NA, "pca_dens.pdf",
"pca_ev12.pdf", "pca_pairs.png", "pca_scree.pdf",
"pca_parcoord.png",
"", "pca_parcoord")
snpfile <- config["annot_snp_file"]
} else if (type == "combined"){
required <- c("annot_scan_file", "annot_scan_raceCol", "out_comb_prefix")
optional <- c("annot_scan_ethnCol", "annot_snp_rsIDCol", "ext_annot_scan_file",
"ext_annot_scan_raceCol",
"num_evs_to_plot", "out_corr_file", "out_pca_file",
"out_corr_plot_prefix", "out_corr_pruned_plot_prefix",
"out_dens_plot", "out_ev12_plot", "out_pairs_plot", "out_scree_plot",
"out_parcoord_plot",
"out_ev12_plot_hapmap", "out_ev12_plot_study",
"parcoord_vars", "out_parcoord_var_prefix")
default <- c(NA, "rsID", NA, "pop.group", 12, "pca_combined_corr.RData",
"pca_combined.RData", "pca_corr", NA, "pca_dens.pdf",
"pca_ev12.pdf", "pca_pairs.png", "pca_scree.pdf",
"pca_parcoord.png",
"pca_ev12_hapmap.pdf", "pca_ev12_study.pdf",
"", "pca_parcoord")
# Combined runs read the SNP annotation written by the combine step.
snpfile <- paste0(config["out_comb_prefix"], "_snpAnnot.RData")
}
config <- setConfigDefaults(config, required, optional, default)
print(config)
# functions for parallel coordinate plots later
# For each row of `samp`, return how many rows share its value of column
# `var` (a named count vector, one entry per row). Rows whose value is NA
# get the total number of NA rows instead of an NA count.
.getN <- function(samp, var) {
  values <- samp[[var]]
  counts <- table(values)
  per_row <- counts[as.character(values)]
  per_row[is.na(per_row)] <- sum(is.na(values))
  per_row
}
# transparency based on number of samples in a group
# Per-row plotting transparency (0-255 scale) based on the size of the
# group each row belongs to: smaller groups are drawn more opaque so they
# stay visible, larger groups are faded to reduce overplotting.
.getParcoordAlpha <- function(samp, var) {
  group_sizes <- .getN(samp, var)
  # Start everything at the faintest level, then raise smaller groups.
  base_alpha <- setNames(rep(0.1, length(group_sizes)), names(group_sizes))
  base_alpha[group_sizes < 1000] <- 0.3
  base_alpha[group_sizes < 100] <- 0.5
  base_alpha[group_sizes < 10] <- 1
  return(base_alpha * 255)
}
# parallel coordinates plot variables
vars <- unlist(strsplit(config["parcoord_vars"], " "))
# scan annotation
if (type == "study") {
scanAnnot <- getobj(config["annot_scan_file"])
samp <- getVariable(scanAnnot, c("scanID", c(config["annot_scan_raceCol"], vars)))
names(samp) <- c("scanID", "race", vars)
if (!is.na(config["annot_scan_ethnCol"])) {
samp$ethnicity <- getVariable(scanAnnot, config["annot_scan_ethnCol"])
} else samp$ethnicity <- NA
} else if (type == "combined") {
scanAnnot <- getobj(config["annot_scan_file"])
scan1 <- getVariable(scanAnnot, c("scanID", config["annot_scan_raceCol"], config["annot_scan_hapmapCol"]))
names(scan1) <- c("scanID", "race", "geno.cntl")
if (!is.na(config["annot_scan_ethnCol"])) {
scan1$ethnicity <- getVariable(scanAnnot, config["annot_scan_ethnCol"])
} else scan1$ethnicity <- NA
if (sum(is.na(scan1$race)) > 0 & hasVariable(scanAnnot, config["ext_annot_scan_raceCol"])) {
scan1$race2 <- getVariable(scanAnnot, config["ext_annot_scan_raceCol"])
scan1$race[is.na(scan1$race)] <- scan1$race2[is.na(scan1$race)]
scan1$race2 <- NULL
}
ext.scanAnnot <- getobj(config["ext_annot_scan_file"])
scan2 <- getVariable(ext.scanAnnot, c("scanID", config["ext_annot_scan_raceCol"]))
names(scan2) <- c("scanID", "race")
scan2$geno.cntl <- 1
scan2$ethnicity <- NA
samp <- rbind(scan1, scan2)
} else {
stop("pca type must be study or combined")
}
# get PCA results
pca <- getobj(config["out_pca_file"])
samp <- samp[match(pca$sample.id, samp$scanID),]
stopifnot(allequal(pca$sample.id, samp$scanID))
table(samp$race, samp$ethnicity, useNA="ifany")
# why are we doing this? (sort capital letters before lower case)
Sys.setlocale("LC_COLLATE", "C")
# color by race
race <- as.character(sort(unique(samp$race)))
if (length(race) > 0) {
#stopifnot(all(race %in% names(config)))
cmapRace <- setNames(config[race], race)
chk <- which(is.na(cmapRace))
if (length(chk) > 0) {
message(sprintf("Using default colors for %s races: %s", length(chk), paste(names(cmapRace[chk]), collapse=", ")))
defaultColors <- c(brewer.pal(8, "Dark2"), brewer.pal(8, "Set2"))
cmapRace[chk] <- defaultColors[1:length(chk)]
}
colorScale <- scale_color_manual("race", values=cmapRace, breaks=names(cmapRace), na.value="grey")
} else {
colorScale <- scale_color_manual("race", values="black", breaks="hack", na.value="black")
}
rm(race)
# plot symbol by ethnicity
ethn <- as.character(sort(unique(samp$ethnicity)))
if (length(ethn) > 0){
stopifnot(all(ethn %in% names(config)))
symbolMap <- config[ethn]
mode(symbolMap) <- "integer"
symbolScale <- scale_shape_manual("ethnicity", values=symbolMap, breaks=names(symbolMap), na.value=16)
} else {
symbolScale <- scale_shape_manual("ethnicity", values=1, breaks="hack", na.value=16)
}
rm(ethn)
# labels
## recent change in SNPRelate - pca$eigenval only returns first 32 values
#(x <- pca$eigenval[1:4]/sum(pca$eigenval))
x <- pca$varprop[1:4]
lbls <- paste("EV", 1:4, " (", format(100*x,digits=2), "%)", sep="")
pcs <- pca$eigenvect
colnames(pcs) <- paste0("EV", 1:ncol(pcs))
pcs <- as.data.frame(pcs)
pcs$scanID <- pca$sample.id
dat <- merge(pcs, samp)
# order by number of samples in race group
dat$nrace <- table(dat$race, useNA="ifany")[dat$race]
dat$nrace[is.na(dat$nrace)] <- sum(is.na(dat$nrace))
dat <- dat[order(-dat$nrace),]
# plot the first four pcs
nev <- 4
pairs <- ggpairs(dat,
mapping=aes(color=race, shape=ethnicity),
columns=which(names(dat) %in% sprintf("EV%s", 1:nev)),
upper=list(continuous=wrap("points", alpha=0.7)),
lower=list(continuous=wrap("points", alpha=0.7)),
columnLabels=lbls[1:nev],
axisLabels="internal")
for (i in 1:pairs$nrow){
for (j in 1:pairs$ncol){
subplot <- getPlot(pairs, i, j)
subplot <- subplot + colorScale + symbolScale
pairs <- putPlot(pairs, subplot, i, j)
}
}
png(config["out_pairs_plot"], width=1000, height=1000)
print(pairs)
dev.off()
# plot EV1 vs EV2 with density plots
p <- ggplot(dat, aes(x=EV1, y=EV2, color=race, shape=ethnicity)) +
geom_point(alpha=0.7) +
colorScale +
symbolScale +
theme(legend.position="none") +
xlab(lbls[1]) + ylab(lbls[2])
pdf(config["out_dens_plot"], width=6, height=6)
ggMarginal(p, type="density")
dev.off()
# plot EV1 vs EV2
p <- p + theme(legend.position="right") +
guides(colour = guide_legend(override.aes = list(alpha=1)))
ggsave(config["out_ev12_plot"], plot=p, width=6, height=6)
ggParcoordTheme <- theme(axis.title.x=element_blank(),
axis.ticks.y=element_blank(),
axis.text.y=element_blank(),
axis.title.y=element_blank(),
axis.text.x=element_text(colour="black"),
panel.grid.major.y=element_blank(),
panel.grid.minor.y=element_blank(),
legend.position="top")
# parallel coordinates plot
ev.ind <- which(names(dat) %in% sprintf("EV%s", 1:12))
p <- ggparcoord(dat, columns=ev.ind, groupColumn="race", scale="uniminmax", alphaLines=0.5) +
colorScale + ggParcoordTheme +
guides(colour = guide_legend(override.aes = list(alpha=1)))
ggsave(config["out_parcoord_plot"], plot=p, width=10, height=5)
## other variables for parallel coordinate, specified by user
if (type == "study" & length(vars) > 0){
for (var in vars){
stopifnot(var %in% names(samp))
# auto filename
fname <- paste(config["out_parcoord_var_prefix"], "_", var, ".png", sep="")
dat[["fvar"]] <- as.factor(dat[[var]])
p <- ggparcoord(dat, columns=ev.ind, groupColumn="fvar", scale="uniminmax", alphaLines=0.5) +
scale_color_brewer(var, palette="Set1", na.value="grey") + ggParcoordTheme +
guides(colour = guide_legend(override.aes = list(alpha=1)))
ggsave(fname, plot=p, width=10, height=5)
}
}
if (type == "combined"){
xlim <- range(dat$EV1)
ylim <- range(dat$EV2)
# hapmap plot
dat$plotcol <- dat$race
dat$plotcol[dat$geno.cntl %in% 0] <- NA
p <- ggplot(dat, aes(x=EV1, y=EV2, color=plotcol, shape=ethnicity)) +
geom_point() +
colorScale +
symbolScale +
xlab(lbls[1]) + ylab(lbls[2]) +
xlim(xlim)
ggsave(config["out_ev12_plot_hapmap"], plot=p, width=6, height=6)
p <- ggplot(dat[dat$geno.cntl %in% 0, ], aes(x=EV1, y=EV2, color=race, shape=ethnicity)) +
geom_point() +
colorScale +
symbolScale +
xlab(lbls[1]) + ylab(lbls[2]) +
xlim(xlim)
ggsave(config["out_ev12_plot_study"], plot=p, width=6, height=6)
}
#plot SNP-PC correlation
snpAnnot <- getobj(snpfile)
corr <- getobj(config["out_corr_file"])
snp <- snpAnnot[match(corr$snp.id, getSnpID(snpAnnot)),]
chrom <- getChromosome(snp, char=TRUE)
nev <- as.integer(config["num_evs_to_plot"])
png(paste(config["out_corr_plot_prefix"], "_%03d.png", sep=""), height=720, width=720)
par(mfrow=c(4,1), mar=c(5,5,4,2)+0.1, lwd=1.5, cex.lab=1.5, cex.main=1.5)
for(i in 1:nev){
snpCorrelationPlot(abs(corr$snpcorr[i,]), chrom,
main=paste("Eigenvector",i), ylim=c(0,1))
}
dev.off()
if (!is.na(config["out_corr_pruned_plot_prefix"])) {
snps.pruned <- getobj(config["out_pruned_file"])
ind <- getSnpID(snp) %in% snps.pruned
png(paste(config["out_corr_pruned_plot_prefix"], "_%03d.png", sep=""), height=720, width=720)
par(mfrow=c(4,1), mar=c(5,5,4,2)+0.1, lwd=1.5, cex.lab=1.5, cex.main=1.5)
for(i in 1:nev){
snpCorrelationPlot(abs(corr$snpcorr[i,ind]), chrom[ind],
main=paste("Eigenvector",i), ylim=c(0,1))
}
dev.off()
}
# scree plot
dat <- data.frame(ev=1:nev, varprop=pca$varprop[1:nev])
p <- ggplot(dat, aes(x=factor(ev), y=100*varprop)) +
geom_point() +
xlab("Eigenvector") + ylab("Percent of variance accounted for")
ggsave(config["out_scree_plot"], plot=p, width=6, height=6)
|
054f0b1374a7fdf07a0caf3b391ba839f53e3f28
|
c9bb58667be902f360232477246c3b218fe6eefa
|
/rpfister-quick-tree-classification-115ff25eaed8/script.R
|
6747e6a6f5d7129810030627b56513cf791ba2e4
|
[] |
no_license
|
JoyOfTech/Playtime_Projects
|
0cb3f49e30f90ca706a9d19872f37eecda4ae320
|
9ca4240c39e6a77bf7dd22c73dea87af2ceb11f5
|
refs/heads/master
| 2021-01-17T07:03:20.071032
| 2016-04-02T18:44:36
| 2016-04-02T18:44:36
| 24,542,640
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,372
|
r
|
script.R
|
#EDIT THIS TO YOUR WORKING DIRECTORY
setwd("C:/source/titanic/quick-tree-classification")
#import dataset - EDIT THIS FILENAME
data <- read.csv("train.csv")
#split the last 400 rows off into their own dataset;
#set the training data to the remaining rows
numberOfTrainingDataRows <- 400
train <- head(data, n = numberOfTrainingDataRows)
test <- tail(data, n = nrow(data) - numberOfTrainingDataRows)
#import tree classification library
library(rpart)
#Classification tree equation: EDIT THIS EQUATION
# The first variable is the taget to classify for; the others are parameters
# to consider for use in the tree.
fit <- rpart(
Survived ~ Pclass + Sex + Age + SibSp + Parch + Fare + Embarked, method="class",
data=train)
#Plot the classification tree - you can show this to your business users
# or programmers to implement
library(rpart.plot)
prp(fit, faclen=0)
#Add predictions to the test data
test$Prediction <- predict(fit, test, type="class")
#Compare the prediction against the true value - CHANGE "Survived"
# to whatever your target variable is named
test$RightOrWrong <- test$Prediction == test$Survived
#Baseline if we just marked everything as false - CHANGE "Survived"
prop.table(table(test$Survived))
table(test$RightOrWrong) #How many we got right vs. wrong
prop.table(table(test$RightOrWrong)) #Proportion we got right vs. wrong
|
a1f9cd226658cd2f8ace628adfce865706d335b1
|
fdca452f475e69d59291668e764dfeef7e9063ca
|
/run_analysis.R
|
be9df9efb6df4246aa747542b4a9085dbdb8f5cc
|
[] |
no_license
|
cmcsjc/GCD
|
2e093b2d56005ab558a0ffd3de7bbfbea95572d8
|
4485d40b6b499ac1352cb881e8d52bd2b50df29d
|
refs/heads/master
| 2016-08-11T16:13:59.929157
| 2016-01-28T18:28:39
| 2016-01-28T18:28:39
| 50,601,026
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,193
|
r
|
run_analysis.R
|
## run_analysis.R
## Builds a tidy summary of the UCI HAR (Samsung) data set: merges the test
## and train splits, keeps only mean/std measurements, attaches descriptive
## activity names, and writes the per-subject/per-activity averages to
## tidy_data.txt.

## Ensure "reshape2" is available, then load it.
## BUG FIX: the original called installed.packages("reshape2") -- a typo for
## install.packages() -- and, worse, only ran the analysis in the else
## branch, so a fresh machine installed nothing and produced no output.
## Install only when missing, then always run the analysis.
if (!("reshape2" %in% rownames(installed.packages()))) {
  install.packages("reshape2")
}
library(reshape2)

## Read all data files (paths are relative to the unzipped data set root)
activity_labels <- read.table("./activity_labels.txt",col.names=c("activity_id","activity_name"))
features <- read.table("features.txt")
testX <- read.table("./test/X_test.txt")
testY <- read.table("./test/y_test.txt")
testSub <- read.table("./test/subject_test.txt")
trainX <- read.table("./train/X_train.txt")
trainY <- read.table("./train/y_train.txt")
trainSub <- read.table("./train/subject_train.txt")

## Label the measurement columns with the feature names (second column of
## features.txt) and give the id columns descriptive names.
feature_names <- features[,2] # Extract only feature names
colnames(testX) <- feature_names
colnames(testSub) <- "subject_id"
colnames(testY) <- "activity_id"
colnames(trainX) <- feature_names
colnames(trainSub) <- "subject_id"
colnames(trainY) <- "activity_id"

##Combine the test subject id's, the test activity id's and measurements
test_data <- cbind(testSub, testY, testX)
##Combine the train subject id's, the train activity id's and measurements
train_data <- cbind(trainSub, trainY, trainX)
##Combine the test data and the train data into one dataframe
all_data <- rbind(train_data, test_data)

## Keep only the columns whose names mention "mean" or "std"
mean_idx <- grep("mean", names(all_data), ignore.case=TRUE)
mean_names <- names(all_data)[mean_idx]
std_idx <- grep("std", names(all_data), ignore.case=TRUE)
std_names <- names(all_data)[std_idx]
full_data <- all_data[,c("subject_id","activity_id",mean_names,std_names)]

##Merge the activities dataset with the mean/std values dataset
descr_data <- merge(activity_labels,full_data,by.x="activity_id",by.y="activity_id",all=TRUE)
##Melt everything according to each id variable (activity_id, activity_name, and subject_id)
melt_data <- melt(descr_data,id=c("activity_id","activity_name","subject_id"))
##Cast the melted dataset, taking the average of each variable per group
mean_data <- dcast(melt_data,activity_id + activity_name + subject_id ~ variable,mean)
## Create a file with the new tidy dataset
write.table(mean_data,"./tidy_data.txt")
|
f1d13ba3660b4666a035d94a3694b85ff34736af
|
c6fe5eba437a3887edd5547b0907cc3ea73ccd51
|
/Plot3.R
|
c611e896eb122218ba577d14a7f6e721d8378c24
|
[] |
no_license
|
heikehv/datasciencecoursera
|
80ae4579033ed8e0437adface02247acab96c59e
|
5c733c5dc6b924c8435f5ebe6ddd5a4dd1796425
|
refs/heads/master
| 2021-01-11T01:24:58.868863
| 2017-07-02T09:15:23
| 2017-07-02T09:15:23
| 70,709,684
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,217
|
r
|
Plot3.R
|
## Plot3.R
## Draws the energy sub-metering line graph (course project plot 3) for
## 2007-02-01 and 2007-02-02 from the UCI household power consumption data
## and saves it to plot3.png.

# Presuming the file has been downloaded and saved in the working directory - using file.info() to check the size.
file.info("household_power_consumption.txt")

# Reading file into R, setting header to TRUE and separator sep to ;
HouseholdPower <- read.table("household_power_consumption.txt", header = TRUE, sep= ";")

# check classes of columns ...
lapply(HouseholdPower, class)

# ... and convert Date (factor) into Date (Date)
HouseholdPower$Date <- as.Date(HouseholdPower$Date, format="%d/%m/%Y")

# Filter data for the given timeframe (the two days of interest)
FebruaryPower <- HouseholdPower[HouseholdPower$Date == "2007-02-01" | HouseholdPower$Date == "2007-02-02",]

# Measurement columns were read as factors (missing values are coded "?" in
# the raw file), so convert via character to numeric; "?" becomes NA.
FebruaryPower$Global_active_power <- as.numeric(as.character(FebruaryPower$Global_active_power))
FebruaryPower$Global_reactive_power <- as.numeric(as.character(FebruaryPower$Global_reactive_power))
FebruaryPower$Voltage <- as.numeric(as.character(FebruaryPower$Voltage))
FebruaryPower$Sub_metering_1 <- as.numeric(as.character(FebruaryPower$Sub_metering_1))
FebruaryPower$Sub_metering_2 <- as.numeric(as.character(FebruaryPower$Sub_metering_2))
FebruaryPower$Sub_metering_3 <- as.numeric(as.character(FebruaryPower$Sub_metering_3))

# Combine Date & Time into one POSIXct column to facilitate linegraph plotting.
# BUG FIX: the original passed "%d/%m/%Y %H:%M:%S" as a stray unnamed
# argument to transform(), which silently appended a junk column of that
# string instead of acting as a format. paste(Date, Time) already yields
# "YYYY-MM-DD HH:MM:SS", which as.POSIXct() parses by default.
FebruaryPower$DateTime <- as.POSIXct(paste(FebruaryPower$Date, FebruaryPower$Time))

# check classes again
lapply(FebruaryPower, class)

# create third plot - linegraph for all sub-meters
# First, create line for sub-metering 1, default in black and set axis labels (x axis by default, y axis = Energy sub metering)
plot(FebruaryPower$DateTime, FebruaryPower$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering")
# add second and third line
lines(FebruaryPower$DateTime, FebruaryPower$Sub_metering_2, col="red")
lines(FebruaryPower$DateTime, FebruaryPower$Sub_metering_3, col="blue")
# add a legend in the top-right corner
legend("topright", col=c("black","red","blue"), c("Sub_metering_1 ","Sub_metering_2 ", "Sub_metering_3 "), lty=c(1,1), lwd=c(1,1))

# copy plot to png with specific width & height, then close the png device
dev.copy(png, file="plot3.png", width=480, height=480)
dev.off()
|
7f82b27cb48365464cc9ceef883796ade21d3502
|
7d17e7958149ae4d9850863d3ff78f069c626382
|
/.Rprofile
|
31264fac5051eab60056aba1900613acb10054c9
|
[] |
no_license
|
raffdoc/dotfiles-1
|
53ab46626a76e899d4e7e939f105674d30f83015
|
902fbeadc026eb2a37e7567ea50a7146515fc612
|
refs/heads/master
| 2021-01-17T05:26:59.504807
| 2012-11-16T17:23:26
| 2012-11-16T17:23:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 406
|
rprofile
|
.Rprofile
|
# Run automatically at the start of every R session: set interactive defaults.
.First <- function() {
  options("device" = "quartz")  # macOS on-screen graphics device
  options("editor" = "mate -w")  # TextMate as editor, blocking until the file closes
  options("repos" = c(CRAN = "http://cran.r-project.org/"))  # default CRAN mirror
  # Append the MacTeX binary directory so Sweave/LaTeX tools are found.
  Sys.setenv("PATH" = paste(Sys.getenv("PATH"),"/usr/texbin",sep=":"))
}
# Run automatically at session exit: persist the command history, but only
# for interactive sessions that were started with readline support.
.Last <- function () {
  if (!any(commandArgs() == '--no-readline') && interactive()) {
    require(utils)  # savehistory() lives in utils; require() is best-effort
    # Best-effort save; R_HISTFILE is assumed to be set in the environment
    # (NOTE(review): if unset, this targets "" and the try() swallows the error).
    try(savehistory(Sys.getenv("R_HISTFILE")))
  }
}
|
bf4c3a6778d383e60f018f05a4d334c86272aa89
|
d0e22df57097ebe1431aba9a2f5ff89ad1d552a0
|
/Assignment 1 Resubmit Materials/map_template.R
|
8f1a3598ed5a24680f0564f9d5e75997260499db
|
[] |
no_license
|
ellenrjacobs/BIO-442
|
a0b82dbda2c09b89b79f1df2c0d844ecf457a8aa
|
7b520ef843d48a07a5406e19fdde742c872f71f5
|
refs/heads/main
| 2023-01-01T03:34:20.667245
| 2020-10-24T16:20:24
| 2020-10-24T16:20:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 617
|
r
|
map_template.R
|
##Assignment 1 Map Plotting Template
# Plots cleaned bird-sighting coordinates over a basemap of Washington, DC
# neighborhood clusters, highlighting one cluster of interest.
library(sp)
library(rgdal)
# Cleaned observations; must contain "Longitude" and "Latitude" columns.
clean_data <- read.csv("my_clean_data.csv")
# SpatialPoints expects coordinates ordered (x, y) = (Longitude, Latitude).
plotting_data <- SpatialPoints(clean_data[, c("Longitude", "Latitude")])
#Map of DC neighborhoods from maps2.dcgis.dc.gov
dc <- readOGR("Neighborhood_Clusters-shp", "Neighborhood_Clusters")
#Plot the map of DC (tight margins so the map fills the device)
par(mar = c(1, 1, 1, 1))
plot(
  dc,
  col = "darkgrey",
  border = "white",
  main = "District of Columbia Bird Sightings"
)
# Highlight one neighborhood cluster in translucent blue ("80" = alpha).
# NOTE(review): 46 is a magic row index -- confirm which cluster this
# selects in the shapefile's attribute table.
plot(dc[46, ],
     add = TRUE,
     col = "#718BAE80",
     border = "white")
#Add your data as small filled points on top of the basemap
plot(plotting_data,
     add = TRUE,
     pch = 16,
     cex = 0.25)
|
a4bc5b09bb047014283b74dc17cf6840a003ab64
|
0b37c7d2d4cdafe6ee3ed9773271d1978282ebc1
|
/src/grafica_iris.R
|
c6329c96edf142da60d7310a5bd5dac40701aa89
|
[] |
no_license
|
davidmacer/aprende_ggplot2
|
e26a336ceae9bd8d0ddaf62bac3a696f1857b761
|
f6b102f5c9ddf197759f86038c17ab2ba6e9d7fb
|
refs/heads/main
| 2023-04-08T07:58:35.319402
| 2021-04-20T22:14:03
| 2021-04-20T22:14:03
| 359,964,182
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 683
|
r
|
grafica_iris.R
|
## Load libraries
library(ggplot2)
## Load data
data(iris)
## Preprocessing / data cleaning: none needed for iris.
## Exploratory plot: sepal width vs. sepal length, coloured by species.
## FIXES vs. the original:
##  - the title/axis strings were mojibake ("GrΓ‘fica", "pΓ©talo"): UTF-8
##    Spanish text mis-decoded as Latin-1; restored to proper Spanish.
##  - the axes plot Sepal.Length / Sepal.Width but were labelled as petal
##    ("pétalo") measurements; corrected to sepal ("sépalo").
p <- ggplot(data = iris, aes(x = Sepal.Length, y = Sepal.Width))
p + geom_point(aes(colour = Species)) +
  scale_colour_brewer(palette = "Set1") +
  labs(title = "Gráfica de Iris",
       x = "Longitud del sépalo",
       y = "Ancho del sépalo"
  ) +
  theme(
    plot.title = element_text(colour = "green", size = 20),
    axis.title.y = element_text(colour = "yellow", size = 16),
    axis.text = element_text(colour = "blue", size = rel(1)),
    axis.title.x = element_text(colour = "yellow", size = 16)
  )
|
6c5d1f96d930c14c414786e34d1176e3ef246a8d
|
2a7655dc0c233967a41b99369eed3eb4a6be3371
|
/2-Get_PM25_Observations/process_PM25_UDEQ_data_source_functions.R
|
23afddc66b01741f197127dbaf06c64c7dfaa790
|
[
"MIT"
] |
permissive
|
earthlab/Western_states_daily_PM2.5
|
0977b40d883842d7114139ef041e13a63e1f9210
|
3f5121cee6659f5f5a5c14b0d3baec7bf454d4bb
|
refs/heads/master
| 2023-02-25T14:32:20.755570
| 2021-02-04T00:08:03
| 2021-02-04T00:08:03
| 117,896,754
| 2
| 1
| null | 2021-01-27T22:19:14
| 2018-01-17T21:48:29
|
R
|
UTF-8
|
R
| false
| false
| 25,122
|
r
|
process_PM25_UDEQ_data_source_functions.R
|
#process_PM25_UDEQ_data_source_functions.R
process_PM25_UDEQ_data_source.fn <- function(input_header, data_set_counter, this_plotting_color) {
# combine UDEQ PM2.5 data files into 1 dataframe
data_source_counter <- data_set_counter#data_source_counter + 1 # counter to distinguish between the various data sources
Data_Source_Name_Short <- "UtahDEQ"
Data_Source_Name_Display <- "Utah DEQ"
Datum_used = "NAD27" # see email from Ellen on May 29, 2018
UTDEQ_units <- "UG/M3"
# Create Sink output file and create its header
file_sub_label <- paste("PM25_",Data_Source_Name_Short,"_Step1_part_",processed_data_version,sep = "")
SinkFileName=file.path(define_file_paths.fn("ProcessedData.directory"),sub_folder,paste(file_sub_label,"_combining_sink.txt",sep = ""))
sink(file =SinkFileName, append = FALSE, type = c("output","message"), split = FALSE) # divert output from console to sink file
cat("Code and R output for process_PM25_UDEQ_data_source_function.R \n \n")
cat("Title: process_PM25_UDEQ_data_source_function.R \n")
cat("Author: Melissa May Maestas, PhD \n")
cat("Original Date: October 14, 2018 \n")
cat("Latest Update: September 2, 2019 \n")
cat(paste("Script ran and this text file created ",Sys.time()," \n",sep = ""))
cat("This program reads in and PM2.5 data from the UDEQ. \n")
# #### Pull in Utah DEQ PM2.5 data ####
# UTDEQ_units <- "UG/M3"
# # site location - see documentation for sources of this info
# UT_site_loc <- data.frame(matrix(NA,nrow=3,ncol=14)) # create data frame
# names(UT_site_loc) <- c("EPACode","Latitude","Longitude","StateCode","CountyCode","SiteNum","POC","County_Name","Parameter_Code","Parameter_Name","Sample_Duration","Address","City_Name","State_Abbrev")
# UT_site_loc[1,1:3] <- c(490490002,40.253611,-111.663056) # see documentation for source of lat/lon for this site
# UT_site_loc[1,c("POC","Parameter_Name","County_Name","Parameter_Code","Sample_Duration","Address","City_Name","State_Abbrev")] <- c(NA,"PM2.5 - Local Conditions","Utah",NA,"1 HOUR","1355 NORTH 200 WEST PROVO UT","Provo","UT")
#
#
# UT_site_loc[2,1:3] <- c(490530007,37.179125,-113.305096)
# UT_site_loc[2,c("POC","Parameter_Name","County_Name","Parameter_Code","Sample_Duration","Address","City_Name","State_Abbrev")] <- c(NA,"PM2.5 - Local Conditions","Washington",NA,"1 HOUR","147 N 870 W, Hurrricane, Utah","Hurricane","UT")
#
#
# UT_site_loc[3,1:3] <- c(490130002,40.2941780318,-110.00973229)
# UT_site_loc[3,c("POC","Parameter_Name","County_Name","Parameter_Code","Sample_Duration","Address","City_Name","State_Abbrev")] <- c(NA,"PM2.5 - Local Conditions","Duchesne",NA,"1 HOUR","290 S. 1000 W.","Roosevelt","UT")
#
# Datum_used = "NAD27" # see email from Ellen on May 29, 2018
#
# # fill in State Code, County Code, and Site Num
# for (this_row in 1:dim(UT_site_loc)[1]) { # cycle through each row in UT_site_loc data to determine state code, county code, and site num
# this_EPACode <- as.character((UT_site_loc[this_row,c("EPACode")])) # isolate the EPA code for this row of data
# if (is.na(this_EPACode)==TRUE) {
# UT_site_loc[this_row,c("StateCode")] <- NA
# UT_site_loc[this_row,c("CountyCode")] <- NA
# UT_site_loc[this_row,c("SiteNum")] <- NA
# } else if (nchar(this_EPACode)==8) { # determine how many characters are in EPACode (leading zeros are not in the data)
# UT_site_loc[this_row,c("StateCode")] <- substr(this_EPACode,1,1) # isolate state code
# UT_site_loc[this_row,c("CountyCode")] <- substr(this_EPACode,2,4) # isolate county code
# UT_site_loc[this_row,c("SiteNum")] <- substr(this_EPACode,5,8) # isolate site num
# } else if (nchar(this_EPACode)==9) {
# UT_site_loc[this_row,c("StateCode")] <- substr(this_EPACode,1,2) # isolate state code
# UT_site_loc[this_row,c("CountyCode")] <- substr(this_EPACode,3,5) # isolate county code
# UT_site_loc[this_row,c("SiteNum")] <- substr(this_EPACode,6,9) # isolate site num
# } else {# if (nchar(this_EPACode)==8) { # unexpected input
# stop("check data/code")
# } # for (this_row in 1:dim(UT_site_loc)[1]) { # cycle through each row in UT_site_loc data to determine state code, county code, and site num
# rm(this_EPACode)
# } # for (this_row in row_start:row_stop) { # cycle through each row in UT_site_loc data to determine state code, county code, and site num and put into input_mat1
# rm(this_row)
#
# this_source_file <- 'Utah_state-only_data.csv'
# print(this_source_file)
#
# UTDEQ_data_step<-read.csv(file.path(define_file_paths.fn("UTDEQ.directory"),this_source_file),header=TRUE,skip = 1) # load the UT DEQ file
# which_pos <- which(UTDEQ_data_step$UG.M3 >=0)
# N_neg <- dim(UTDEQ_data_step)[1] - length(which_pos)
# print(paste(N_neg," hourly observations with negative PM2.5 concentrations are removed from UTDEQ data prior to calculating daily values. NAs are also removed. Source file:",this_source_file))
# UTDEQ_data <- UTDEQ_data_step[which_pos, ] # data frame without any negative concentrations
# rm(UTDEQ_data_step,which_pos,N_neg)
# which_neg <- which(UTDEQ_data$UG.M3 < 0)
# if (length(which_neg) > 0) {
# stop("negative values should have all been removed by this point in the code, process_PM25_UDEQ_data_source.fn, line 74")
# }
# rm(which_neg)
#
# # create and fill in data frame for 24-hr data (originally hourly data)
# date_station <- data.frame(matrix(NA,nrow = dim(UTDEQ_data)[1], ncol = 2)) # create empty matrix
# all_date_times <- as.Date(UTDEQ_data$Date,"%m/%d/%Y") # get dates in UT DEQ data
# date_station[,1] <- all_date_times # fill in dates (with repeats) into date_station
# date_station[,2] <- UTDEQ_data$Station # fill in station names into date_station
# rm(all_date_times) # clear variables
#
# unique_date_station <- date_station[!duplicated(date_station[,c(1,2)]),] # figure out how many unique station-days are in the DEQ data
# rm(date_station)
#
# UTDEQ_data$X <- as.Date(UTDEQ_data$Date,format = "%m/%d/%Y") # fill in dates (without times) into an empty column in UTDEQ_data
#
# UTDEQ_24hr_ave <- data.frame(matrix(NA,nrow = dim(unique_date_station)[1],ncol = 20)) # create data frame
# names(UTDEQ_24hr_ave) <- c("Date","Station","PM25Conc","EPACode","Latitude","Longitude","StateCode","CountyCode","SiteNum","N_Obs","PercentObs","N_neg","POC","County_Name","Parameter_Code","Parameter_Name","Sample_Duration","Address","City_Name","State_Abbrev") # assign the header
#
# UTDEQ_24hr_ave$Date <- unique_date_station[,1] # Date
# UTDEQ_24hr_ave$Station <- unique_date_station[,2] # Station
# rm(unique_date_station)
# # fill in 24hr averages in UTDEQ_24hr_ave
# for (this_row in 1:dim(UTDEQ_24hr_ave)[1]) { # fill in 24hr averages in UTDEQ_24hr_ave
# # get Date for this row
# this_date <- UTDEQ_24hr_ave[this_row,c("Date")]
# #get Station for this row of data
# this_station <- UTDEQ_24hr_ave[this_row,c("Station")]
#
# # figure out which rows in UTDEQ_data correspond to this date and station
# which_this_date_station <- which(UTDEQ_data$X==this_date & UTDEQ_data$Station==this_station)
# rm(this_date,this_station)
# if (length(which_this_date_station)>24) {stop("too many rows of data picked up, check code and data")} # check on data/code
#
# #PM25Conc
# these_PM25 <- UTDEQ_data[which_this_date_station,c("UG.M3")] # isolate PM2.5 data from this date/location
# which_negative <- which(these_PM25<0)
# if (length(which_negative) > 0) {
# stop("negative values should have been removed by this point in the code, process_PM25_UDEQ_data_source.fn")
# }
# which_not_NA <- which(!is.na(these_PM25))
# UTDEQ_24hr_ave[this_row,c("PM25Conc")] <- mean(these_PM25[which_not_NA])
# UTDEQ_24hr_ave[this_row,c("N_Obs")] <- length(which_not_NA) #length(which_positive)+length(which_negative)
# UTDEQ_24hr_ave[this_row,c("PercentObs")] <- length(which_not_NA)/24*100 #length(which_positive)+length(which_negative)
# UTDEQ_24hr_ave[this_row,c("N_neg")] <- length(which_negative)
# rm(which_negative,which_not_NA,these_PM25)
#
# #Location: "StateCode"
# this_EPACode <- unique(UTDEQ_data[which_this_date_station,c("EPA.code")])
# UTDEQ_24hr_ave[this_row,c("EPACode")] <- this_EPACode
# this_state_code <- UT_site_loc[which(UT_site_loc$EPACode==this_EPACode),c("StateCode")]
# UTDEQ_24hr_ave[this_row,c("StateCode")] <- this_state_code
#
# which_UT_site_loc <- which(UT_site_loc$EPACode==this_EPACode)
# UTDEQ_24hr_ave[this_row,c("CountyCode")] <- UT_site_loc[which_UT_site_loc,c("CountyCode")]
# UTDEQ_24hr_ave[this_row,c("SiteNum")] <- UT_site_loc[which_UT_site_loc,c("SiteNum")]
# UTDEQ_24hr_ave[this_row,c("Latitude")] <- UT_site_loc[which_UT_site_loc,c("Latitude")]
# UTDEQ_24hr_ave[this_row,c("Longitude")] <- UT_site_loc[which_UT_site_loc,c("Longitude")]
#
# UTDEQ_24hr_ave[this_row,c("POC")] <- UT_site_loc[which_UT_site_loc,c("POC")]
# UTDEQ_24hr_ave[this_row,c("County_Name")] <- UT_site_loc[which_UT_site_loc,c("County_Name")]
# UTDEQ_24hr_ave[this_row,c("Parameter_Code")] <- UT_site_loc[which_UT_site_loc,c("Parameter_Code")]
# UTDEQ_24hr_ave[this_row,c("Parameter_Name")] <- UT_site_loc[which_UT_site_loc,c("Parameter_Name")]
# UTDEQ_24hr_ave[this_row,c("Sample_Duration")] <- UT_site_loc[which_UT_site_loc,c("Sample_Duration")]
# UTDEQ_24hr_ave[this_row,c("Address")] <- UT_site_loc[which_UT_site_loc,c("Address")]
# UTDEQ_24hr_ave[this_row,c("City_Name")] <- UT_site_loc[which_UT_site_loc,c("City_Name")]
# UTDEQ_24hr_ave[this_row,c("State_Abbrev")] <- UT_site_loc[which_UT_site_loc,c("State_Abbrev")]
#
# rm(which_this_date_station,which_UT_site_loc)
# rm(this_EPACode,this_state_code)
# } # for (this_row in 1:dim(UTDEQ_24hr_ave)[1]) { # fill in 24hr averages in UTDEQ_24hr_ave
# rm(UT_site_loc,this_row)
# which_neg <- which(UTDEQ_24hr_ave$PM25Conc < 0)
# print(paste(length(which_neg)," rows of data have negative concentration after processing ",this_source_file))
# if (length(which_neg) > 0 | max(UTDEQ_24hr_ave$N_neg > 0)) {
# stop("negative concentrations should have been removed by this point in the script,process_PM25_UDEQ_data_source.fn, line 149")
# }
# UTDEQ_24hr_ave$SourceFile <- this_source_file
#
# incorporate more recent UT DEQ files, which are already daily values
UDEQ_header_start <- c("Date","Station","PM25Conc","EPACode","Latitude","Longitude","StateCode","CountyCode","SiteNum","N_Obs","PercentObs","N_neg","POC","County_Name","Parameter_Code","Parameter_Name","Sample_Duration","Address","City_Name","State_Abbrev") # assign the header
recent_source_files <- c("UT-PM2.5-2008.csv","UT-PM2.5-2009.csv","UT-PM2.5-2010.csv","UT-PM2.5-2011.csv","UT-PM2.5-2012.csv","UT-PM2.5-2013.csv","UT-PM2.5-2014.csv","UT-PM2.5-2015.csv","UT-PM2.5-2016.csv","UT-PM2.5-2017.csv","UT-PM2.5-2018.csv")
#full_UTDEQ_data <- merge_recent_UTDEQ_files.fn(recent_source_files = recent_source_files, UTDEQ_data_in = UTDEQ_24hr_ave, UTDEQ.directory = define_file_paths.fn("UTDEQ.directory"))
UTDEQ_24hr_ave <- merge_recent_UTDEQ_files.fn(recent_source_files = recent_source_files, UDEQ_header_start = UDEQ_header_start, UTDEQ.directory = define_file_paths.fn("UTDEQ.directory"))
# UTDEQ_data_in = UTDEQ_24hr_ave,
#rm(UTDEQ_24hr_ave)
#UTDEQ_24hr_ave <- full_UTDEQ_data
#rm(full_UTDEQ_data)
which_neg <- which(UTDEQ_24hr_ave$PM25Conc < 0)
if (length(which_neg) > 0 | max(UTDEQ_24hr_ave$N_neg) >0) {
stop("negative concentrations should have been removed by this point in the script,process_PM25_UDEQ_data_source.fn, line 161")
}
# Create input_mat1 data frame
input_mat1 <- data.frame(matrix(NA,nrow=dim(UTDEQ_24hr_ave)[1],ncol=length(input_header))) # create data frame for input_mat1
names(input_mat1) <- input_header # assign the header to input_mat1
input_mat1 <- input_mat_change_data_classes.fn(input_mat1)
## fill in input_mat1
# input 'State_Code' into input_mat1
input_mat1$State_Code <- UTDEQ_24hr_ave$StateCode
# input 'County_Code' into input_mat1
input_mat1$County_Code <- UTDEQ_24hr_ave$CountyCode
# input 'Site_Num' into input_mat1
input_mat1$Site_Num <- UTDEQ_24hr_ave$SiteNum
# input 'Parameter_Code' into input_mat1
input_mat1$Parameter_Code <- UTDEQ_24hr_ave$Parameter_Code
# input 'POC' into input_mat1
input_mat1$POC <- UTDEQ_24hr_ave$POC
# input latitude and longitude ('PM2.5_Lat','PM2.5_Lon')
input_mat1$PM2.5_Lat <- UTDEQ_24hr_ave$Latitude
input_mat1$PM2.5_Lon <- UTDEQ_24hr_ave$Longitude
# input 'Datum' into input_mat1
input_mat1$Datum <- Datum_used
#this_col <- 'Datum'
#AQSVar <- UTDEQ_data[,c(this_col)]
#AQSVarChar <- as.character(AQSVar)
#input_mat1[row_start:row_stop,c(this_col)] <- # Not sure what to put for datum
#rm(this_col,AQSVar,AQSVarChar)
# input 'Parameter_Name' into input_mat1
#this_col_input_mat <- 'Parameter_Name'
#this_col_AQS <- 'Parameter.Name'
#AQSVar <- UTDEQ_data[,c(this_col_AQS)]
#AQSVarChar <- as.character(AQSVar)
#input_mat1[row_start:row_stop,c(this_col_input_mat)] <- AQSVarChar
input_mat1$Parameter_Name <- UTDEQ_24hr_ave$Parameter_Name
#rm(this_col_input_mat,this_col_AQS,AQSVar,AQSVarChar)
# input "Sample_Duration" into input_mat1
#this_col_input_mat <- "Sample_Duration"
#this_col_AQS <- 'Sample.Duration'
#AQSVar <- UTDEQ_data[,c(this_col_AQS)]
#AQSVarChar <- as.character(AQSVar)
input_mat1$Sample_Duration <- UTDEQ_24hr_ave$Sample_Duration # not sure if it's 24-hr data or hourly
#rm(this_col_input_mat,this_col_AQS,AQSVar,AQSVarChar)
# input 'Pollutant_Standard' into input_mat1
#this_col_input_mat <- 'Pollutant_Standard'
#this_col_AQS <- 'Pollutant.Standard'
#AQSVar <- UTDEQ_data[,c(this_col_AQS)]
#AQSVarChar <- as.character(AQSVar)
#input_mat1[row_start:row_stop,c(this_col_input_mat)] <- AQSVarChar # not sure what to put here
#rm(this_col_input_mat,this_col_AQS,AQSVar,AQSVarChar)
# input 'Date_Local' into input_mat1
#input_mat1$Date_Local <- as.Date(UTDEQ_24hr_ave[,c(this_col_source)], format = "%m/%d/%y HH:MM")
input_mat1$Date_Local <- as.Date(UTDEQ_24hr_ave[,c("Date")], format = "%m/%d/%Y")
#old: this_col_input_mat <- 'Date_Local'
#old: this_col_source <- 'Date'
#old: SourceVar <- as.Date(UTDEQ_24hr_ave[,c(this_col_source)],"%Y-%m-%d")
#old: SourceVarChar <- format(SourceVar,"%Y-%m-%d")
#old: input_mat1[ ,c(this_col_input_mat)] <- SourceVarChar
#old: rm(this_col_input_mat,this_col_source,SourceVar,SourceVarChar)
# input 'Units_of_Measure' into input_mat1
input_mat1$Units_of_Measure <- UTDEQ_units#"UG/M3"
# input 'Event_Type' into input_mat1
#this_col_input_mat <- 'Event_Type'
#this_col_AQS <- 'Event.Type'
#AQSVar <- UTDEQ_data[,c(this_col_AQS)]
#AQSVarChar <- as.character(AQSVar)
#input_mat1[row_start:row_stop,c(this_col_input_mat)] <- AQSVarChar # Not sure what to put for Event Type
#rm(this_col_input_mat,this_col_AQS,AQSVar,AQSVarChar)
# input 'Observation_Count' into input_mat1
input_mat1$Observation_Count <- UTDEQ_24hr_ave$N_Obs
# input 'Observation_Percent' into input_mat1
input_mat1$Observation_Percent <- UTDEQ_24hr_ave$PercentObs
# input PM2.5 concentration
input_mat1$PM2.5_Obs <- UTDEQ_24hr_ave$PM25Conc
# input '1st_Max_Value'
#input_mat1$1st_Max_Value <- # Not sure what to put UTDEQ_data[,c("X1st.Max.Value")]
# input '1st_Max_Hour'
#input_mat1$1st_Max_Hour')] <- # Not sure what to put#UTDEQ_data[,c("X1st.Max.Hour")]
# input 'AQI'
#input_mat1$AQI')] <- # not sure what to put # UTDEQ_data[,c('AQI')]
# input 'Method_Code'
#input_mat1$Method_Code')] <- # not sure what to put #UTDEQ_data[,c('Method.Code')]
# input 'Method_Name' into input_mat1
#this_col_input_mat <- 'Method_Name'
#this_col_AQS <- 'Method.Name'
#AQSVar <- UTDEQ_data[,c(this_col_AQS)]
#AQSVarChar <- as.character(AQSVar)
#input_mat1[row_start:row_stop,c(this_col_input_mat)] <- # not sure what to put # AQSVarChar
#rm(this_col_input_mat,this_col_AQS,AQSVar,AQSVarChar)
# input 'PM25_Station_Name' into input_mat1
input_mat1$PM25_Station_Name <- UTDEQ_24hr_ave$Station
# input 'Address' into input_mat1
input_mat1$Address <- UTDEQ_24hr_ave$Address
# input 'State_Name' into input_mat1
input_mat1$State_Name <- "Utah"
# input 'County_Name' into input_mat1
#this_col_input_mat <- 'County_Name'
#this_col_AQS <- 'County.Name'
#AQSVar <- UTDEQ_data[,c(this_col_AQS)]
#print(AQSVar)
#AQSVarChar <- as.character(AQSVar)
#print(AQSVarChar)
input_mat1$County_Name <- UTDEQ_24hr_ave$County_Name
#rm(this_col_input_mat,this_col_AQS,AQSVar,AQSVarChar)
# input 'City_Name' into input_mat1
input_mat1$City_Name <- UTDEQ_24hr_ave$City_Name
# input 'CBSA_Name' into input_mat1
#this_col_input_mat <- 'CBSA_Name'
#this_col_AQS <- 'CBSA.Name'
#AQSVar <- UTDEQ_data[,c(this_col_AQS)]
#print(AQSVar)
#AQSVarChar <- as.character(AQSVar)
#print(AQSVarChar)
#input_mat1[row_start:row_stop,c(this_col_input_mat)] <- # Not sure what to put here # AQSVarChar
#rm(this_col_input_mat,this_col_AQS,AQSVar,AQSVarChar)
# input 'Date_of_Last_Change' into input_mat1
#this_col_input_mat <- 'Date_of_Last_Change'
#this_col_AQS <- 'Date.of.Last.Change'
#AQSVar <- as.Date(UTDEQ_data[,c(this_col_AQS)],"%Y-%m-%d")
#print(AQSVar)
#AQSVarChar <- format(AQSVar,"%Y-%m-%d")
#print(AQSVarChar)
#input_mat1[row_start:row_stop,c(this_col_input_mat)] <- # Not sure what to put here # AQSVarChar
#rm(this_col_input_mat,this_col_AQS,AQSVar,AQSVarChar)
# input 'State_Abbrev' into input_mat1
input_mat1$State_Abbrev <- UTDEQ_24hr_ave$State_Abbrev
# Note: 'Winter' is filled in near the end of the script
# Note: 'Year' is filled in near the end of the script
# Note: 'Month' is filled in near the end of the script
# Note: 'Day' is filled in near the end of the script
# input 'Data_Source_Name_Display' into input_mat1
input_mat1$Data_Source_Name_Display <- Data_Source_Name_Display
# input 'Data_Source_Name_Short' into input_mat1
input_mat1$Data_Source_Name_Short <- Data_Source_Name_Short
# input data source counter - indicates if this is EPA data or field data, etc.
input_mat1$Data_Source_Counter <- data_source_counter
# input color for this data source for plots (totally arbitrary choice)
input_mat1$PlottingColor <- this_plotting_color
# input 'Source_File' name
input_mat1$Source_File <- UTDEQ_24hr_ave$SourceFile #this_source_file
# input the 'Composite_of_N_rows' - this variable indicates how many separate rows of
# data were composited to form this row of data. This will be relevant when getting rid of repeated data.
# For now, this is set to 1 because repeated rows of data will be consolidated in a later script.
input_mat1$Composite_of_N_rows <- UTDEQ_24hr_ave$N_Obs
# input 'N_Negative_Obs' into input_mat1 - this is to note negative concentrations
#which_negative <- which(UTDEQ_data$Ranchos..PM25LC.UG.M3.<0)
#neg_flag_vec <- c(1:dim(UTDEQ_data)[1])*0
#neg_flag_vec[which_negative] <- 1
#input_mat1[,c('N_Negative_Obs')] <- neg_flag_vec
#rm(which_negative,neg_flag_vec)
input_mat1$N_Negative_Obs <- UTDEQ_24hr_ave$N_neg
# input "InDayLatDiff","InDayLonDiff" - which will all be zero for AQS data since there is only one
# row of data for lat & lon on a given day
# (for the DRI data, there are multiple measurements of lat/lon in a day and sometimes they don't all match, these variables give max-min for lat & lon in a given day)
input_mat1$InDayLatDiff <- 0
input_mat1$InDayLonDiff <- 0
#"Year"
input_mat1[ ,c("Year")] <- input_mat_extract_year_from_date.fn(input_mat1$Date_Local)
#"Month"
input_mat1[ ,c("Month")] <- input_mat_extract_month_from_date.fn(input_mat1$Date_Local)
#"Day"
input_mat1[ ,c("Day")] <- input_mat_extract_day_from_date.fn(input_mat1$Date_Local)
# update row counter
#row_start=row_stop+1
# clear variables before moving on
#rm(this_source_file,UTDEQ_data)
#rm(Data_Source_Name_Display,Data_Source_Name_Short)
#rm(UTDEQ_24hr_ave,UTDEQ_units)
#rm(Datum_used)
print("summary of the data output:")
print(summary(input_mat1)) # give summary of current state of data
print(paste("This data has",dim(input_mat1)[1],"rows of PM2.5 observations.")) # how many rows of data?
print(paste("finished processing ", Data_Source_Name_Display))
sink() # stop outputting to sink file
# output to file #
write.csv(input_mat1,file = file.path(define_file_paths.fn("ProcessedData.directory"),sub_folder,paste(file_sub_label,'.csv',sep = "")),row.names = FALSE)
# output input_mat1 from function #
return(input_mat1) # output from function
} # end function
#merge_recent_UTDEQ_files.fn(recent_source_files = recent_source_files, UTDEQ_data_in = UTDEQ_24hr_ave, UTDEQ.directory = define_file_paths.fn("UTDEQ.directory"))
#merge_recent_UTDEQ_files.fn <- function(recent_source_files, UTDEQ_data_in,UTDEQ.directory) {
# Load every "recent" Utah DEQ CSV export, stack them, and map the raw export
# columns onto the canonical UDEQ header used by the downstream processing.
#
# Args:
#   recent_source_files: character vector of CSV file names to load.
#   UDEQ_header_start:   character vector of canonical column names for the
#                        returned data frame.
#   UTDEQ.directory:     directory containing the CSV files.
# Returns: a data frame with the canonical columns, one row per observation
#   with a non-missing PM2.5 concentration; columns with no source stay NA.
# Stops with an error if any negative concentration survived upstream cleaning.
merge_recent_UTDEQ_files.fn <- function(recent_source_files, UDEQ_header_start, UTDEQ.directory) {
  # Read each file and tag every row with the file it came from.
  loaded_files <- lapply(seq_along(recent_source_files), function(this_file_counter) {
    recent_source_file <- recent_source_files[this_file_counter]
    print(paste('this_file_counter = ', this_file_counter, "; ", recent_source_file, sep = ""))
    this_recent_UTDEQ_data <- read.csv(file.path(UTDEQ.directory, recent_source_file))
    this_recent_UTDEQ_data$SourceFile <- recent_source_file
    this_recent_UTDEQ_data
  })
  # All recent files share the same header, so a plain rbind is safe here.
  Merged_recent_UTDEQ_step1 <- do.call("rbind", loaded_files)
  rm(loaded_files)
  goal_header <- UDEQ_header_start
  # Empty frame with the canonical header; filled column-by-column below.
  Merged_recent_UTDEQ_step2 <- data.frame(matrix(NA, nrow = nrow(Merged_recent_UTDEQ_step1), ncol = length(goal_header)))
  names(Merged_recent_UTDEQ_step2) <- goal_header
  print(goal_header)
  # Map raw export columns onto the canonical names.
  Merged_recent_UTDEQ_step2$SourceFile <- Merged_recent_UTDEQ_step1$SourceFile
  Merged_recent_UTDEQ_step2$Date <- as.Date(Merged_recent_UTDEQ_step1$Date)
  Merged_recent_UTDEQ_step2$Station <- Merged_recent_UTDEQ_step1$Name
  Merged_recent_UTDEQ_step2$PM25Conc <- Merged_recent_UTDEQ_step1$PM2.5
  Merged_recent_UTDEQ_step2$EPACode <- Merged_recent_UTDEQ_step1$Station
  Merged_recent_UTDEQ_step2$Latitude <- Merged_recent_UTDEQ_step1$Latitude
  Merged_recent_UTDEQ_step2$Longitude <- Merged_recent_UTDEQ_step1$Longitude
  # PercentObs assumes 24 possible (hourly) observations per day -- TODO confirm
  # this holds for every file in recent_source_files.
  Merged_recent_UTDEQ_step2$N_Obs <- Merged_recent_UTDEQ_step1$Num_Obs
  Merged_recent_UTDEQ_step2$PercentObs <- Merged_recent_UTDEQ_step2$N_Obs / 24 * 100
  Merged_recent_UTDEQ_step2$N_neg <- 0
  # Negative concentrations must already have been removed upstream; treat any
  # survivor as fatal. (The old post-stop() flagging of negative rows was
  # unreachable dead code and has been removed.)
  which_neg <- which(Merged_recent_UTDEQ_step2$PM25Conc < 0)
  if (length(which_neg) > 0) {
    stop("all negative concentrations should have been removed by this point in the code, merge_recent_UTDEQ_files.fn")
  }
  # Drop rows with no concentration value.
  which_not_na <- which(!is.na(Merged_recent_UTDEQ_step2$PM25Conc))
  Merged_recent_UTDEQ_step3 <- Merged_recent_UTDEQ_step2[which_not_na, ]
  # These recent exports are hourly observations (older files were 24-hour).
  Merged_recent_UTDEQ_step3$Sample_Duration <- "1 HOUR"
  Merged_recent_UTDEQ_step3
} # end of merge_recent_UTDEQ_files.fn function
|
2541821c2882abf3f8efd74323042f161be6575f
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/gamlss.cens/examples/gamlss.cens-package.Rd.R
|
773e6bf0df216db61326edb22058adae10bf747d
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,459
|
r
|
gamlss.cens-package.Rd.R
|
# Extracted example script for the gamlss.cens package: fits censored-response
# models with gamlss and cross-checks them against survival::survreg on the
# ovarian data set. NOTE: requires the non-base packages gamlss.cens, survival,
# gamlss and gamlss.dist.
library(gamlss.cens)
### Name: gamlss.cens-package
### Title: Fitting an Interval Response Variable Using 'gamlss.family'
### Distributions
### Aliases: gamlss.cens-package gamlss.cens
### Keywords: package
### ** Examples
library(survival)
library(gamlss)
library(gamlss.dist)
# comparing results with package survival
# fitting the exponential distribution (survreg vs gamlss + cens(EXP));
# both fits model Surv(futime, fustat) on the same covariates
ms1<-survreg(Surv(futime, fustat) ~ ecog.ps + rx, ovarian,
dist='exponential')
mg1<-gamlss(Surv(futime, fustat) ~ ecog.ps + rx, data=ovarian,
family=cens(EXP),c.crit=0.00001)
# sanity checks: deviance and coefficients of the two fits should agree
# (the "descrepancies" typo is inside the runtime message, left as-is)
if(abs(-2*ms1$loglik[2]-deviance(mg1))>0.001) stop(paste("descrepancies in exp"))
if(sum(coef(ms1)-coef(mg1))>0.001) warning(paste("descrepancies in coef in exp"))
summary(ms1)
summary(mg1)
# fitting the Weibull distribution; delta sets numerical-derivative step sizes
ms2 <-survreg(Surv(futime, fustat) ~ ecog.ps + rx, ovarian, dist='weibull')
mg2 <-gamlss(Surv(futime, fustat) ~ ecog.ps + rx, data=ovarian,
family=cens(WEI, delta=c(0.001,0.001)), c.crit=0.00001)
if(abs(-2*ms2$loglik[2]-deviance(mg2))>0.005)
stop(paste("descrepancies in deviance in WEI"))
summary(ms2);summary(mg2)
# compare the scale parameter (survreg's scale = 1/exp(sigma) in gamlss terms)
1/exp(coef(mg2,"sigma"))
# now fit the Weibull in different parameterizations (WEI2, then WEI3)
# NOTE(review): the WEI2 fit below is immediately overwritten by the WEI3 fit,
# so only the WEI3 result survives in mg21 -- presumably intentional in an
# example, but verify if the WEI2 fit is meant to be kept.
mg21<-gamlss(Surv(futime, fustat) ~ ecog.ps + rx, data=ovarian,
family=cens(WEI2), method=mixed(2,30))
mg21<-gamlss(Surv(futime, fustat) ~ ecog.ps + rx, data=ovarian,
family=cens(WEI3))
|
a0717f316b9957f21c49ab3ceb4c7b9f6b7d94cc
|
854428a9085dfe1555c037e4e71a0bb0ba57a31a
|
/master_project_arima (1).r
|
482aa49ac4d261cc4f54d2304ca49a47cceb4298
|
[] |
no_license
|
singhals912/Time-series-forecasting
|
bb3df1910224bc404b9865d28de1630172f3f00a
|
61e7242a8f7bb2ae25312a45cdd23e5f2d80ffb2
|
refs/heads/main
| 2023-08-14T18:29:36.461216
| 2021-09-21T19:58:07
| 2021-09-21T19:58:07
| 408,939,482
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,214
|
r
|
master_project_arima (1).r
|
# Time-series forecasting of monthly superstore sales (Jan 2014 - Dec 2017):
# exploratory plots, classical decomposition, Holt-Winters, and seasonal ARIMA
# with a 6-month hold-out validation set.
# NOTE(review): the input path is an absolute Windows path -- this script only
# runs on the original author's machine as written.
library(forecast)
library(readxl)
library(dplyr)
salesdata <- read_excel("C://Users//geeti//Downloads//SalesData.xlsx", sheet=1, col_names = TRUE)
View(salesdata)
# keep only order date and sales amount
salest<-data.frame(salesdata$OrderDate,salesdata$Sales)
####salests<-ts(salest[,-1],frequency =24,start=c(2014, 01),end=c(2018, 01))
# monthly series (frequency = 12) from Jan 2014 through Jan 2018
salests<-ts(salest[,-1],frequency =12,start=c(2014, 01),end=c(2018, 01))
head(salests)
# exploratory plots: raw series, seasonal plot, autocorrelation, per-month boxplots
plot(salests,xlab="Year",ylab="Sales (USD)",main="Superstore Sales,Jan2014-December2017")
seasonplot(salests,ylab="Sales (USD)",col=c(1,2,3,4),main="Seasonalplot:Superstore Sales,Jan2014-December2017",year.labels=TRUE)
Acf(salests)
cycle(salests)
boxplot(salests~cycle(salests))
# aggregate to yearly totals
# NOTE(review): the plot title says "Beer sales" -- looks like a copy-paste
# leftover from another data set; confirm.
annual_sales = aggregate(salests)
plot.ts(annual_sales, col = "blue", main = "Yearly Beer sales time series data between 2014 and 2017")
# classical additive decomposition into seasonal / trend / remainder
decompose_salesdata <- decompose(salests,type = "additive")
seasonal_sales <- as.ts(decompose_salesdata$seasonal)
trend_sales <- as.ts(decompose_salesdata$trend)
random_sales <- as.ts(decompose_salesdata$random)
plot.ts(seasonal_sales, main = "Seasonal Component")
plot.ts(trend_sales, main = "Trend Component")
# ("Randon" typo is in the plot title string; left untouched here)
plot.ts(random_sales, main = "Randon Component")
plot (decompose(salests, type="additive"))
# seasonally adjusted series = raw minus seasonal component
salests_seasonallyadjusted <- salests - seasonal_sales
plot.ts(salests_seasonallyadjusted, main = "Seasonal Adjusted Plot of sales")
# Holt-Winters (additive seasonality), 24-month-ahead forecast
salests_hw <- hw(salests,seasonal="additive", h=24)
salests_hw$model
plot(salests_hw$model)
plot(salests_hw)
plot(salests)
accuracy(salests_hw)
# residual diagnostics: residuals should hover around zero, no strong ACF
plot(salests_hw$residuals)
abline(0, 0)
Acf(salests_hw$residuals)
####Acf(salests_hw$fitted)
####Data partition
####data partition
####Validation data set has been chosen to be 6 months because the last 1 year shows
####relatively very less variation in the sales. If sales for 1 year are taken to validate
####the model, difference in the forecast and actual values is very large owing to
####the large variation in the training data of the previous months.
nvalid <- 6
ntrain <- length(salests) - nvalid
# train/validation split: last 6 observations held out
salests_train <- window(salests,start=c(2014, 01),end=c(2014,ntrain))
salests_valid <- window(salests,start=c(2014,ntrain+1),end=c(2014,ntrain+nvalid))
# plot the series and its ACF, demonstrating trend and seasonality, indicating
# starting with d=0 and D=1
plot(salests_train, xlab = "Time", ylab = "Sales",bty = "l")
#salests_train %>% diff() %>% Acf()
# ACF of the seasonally differenced series -> choose q, Q
salests_train %>% diff(lag=12) %>% Acf()
#salests_train %>% diff(lag=12) %>% diff() %>% Acf()
####Pacf for p,P=(0,0)
#salests_train %>% diff() %>% Pacf()
# PACF of the seasonally differenced series -> choose p, P
salests_train %>% diff(lag=12) %>% Pacf()
#salests_train %>% diff(lag=12) %>% diff() %>% Pacf()
####Acf for q,Q=(0,0)
####plot ARIMA
# hand-picked ARIMA(0,0,0)(0,1,1)[12] vs auto.arima, compared on the hold-out
arima_model <- arima(salests_train,order=c(0,0,0),seasonal = c(0,1,1))
accuracy(arima_model)
auto_arima_model <- auto.arima(salests_train)
accuracy(auto.arima(salests_train))
auto.arima(salests_train)
arima_model
# 6-month-ahead forecasts from both models, scored against the validation set
forecast_data <- forecast(arima_model,h=6)
forecast_data_auto <- forecast(auto_arima_model,h=6)
accuracy(forecast_data,salests_valid)
accuracy(forecast_data_auto,salests_valid)
plot(forecast_data)
lines(salests_valid,col="green")
|
4cdf4cb7c925e406d98d806679f8de98a31f8b5c
|
b7978c1a62cb3c8b76a3597e0bb6f03dc1c2c703
|
/CodeFile.R
|
7ba70c57b1450b89b439900b776a8b984dfb92f0
|
[] |
no_license
|
sowmi546/Reproducible-Research
|
4a96e67792ebb4f69b46ab7f99045dfb50212886
|
ce06cbfd0c2435e30e8ba3408ddb17defa34f55c
|
refs/heads/master
| 2021-01-20T19:49:06.858670
| 2016-02-04T15:17:59
| 2016-02-04T15:17:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,591
|
r
|
CodeFile.R
|
# Reproducible Research assignment: analysis of the "activity" step-count data.
# Reads activity.csv from the working directory and produces the required
# histograms, daily-pattern plots, and summary statistics.
#
# Improvements over the original:
#  - The NA-imputation and weekday/weekend loops grew vectors with c() inside
#    for-loops (O(n^2)); both are replaced with equivalent vectorized code.
#  - Removed stray interactive-inspection lines (bare `tmyData`, repeated
#    names()/head() calls) and a par(mfrow=...) call that has no effect on the
#    lattice plot it preceded but did mutate global graphics state.

# 1. Loading and preprocessing of data
library(lattice)
myData <- read.csv("activity.csv")
summary(myData)
myData$date <- as.Date(myData$date, "%Y-%m-%d")

# 2. What is the mean total number of steps taken per day?
plot(steps ~ date, myData)
stepsByDay <- aggregate(steps ~ date, data = myData, sum, na.rm = TRUE)
# histogram of total steps taken per day
hist(stepsByDay$steps, xlab = "Day", ylab = "steps taken", main = "Mean num of steps taken by day", col = "red")
# mean and median of the daily totals
mean(stepsByDay$steps)
median(stepsByDay$steps)

# 3. What is the average daily activity pattern?
# average steps per 5-minute interval, across all days
tmyData <- tapply(myData$steps, myData$interval, mean, na.rm = TRUE)
plot(names(tmyData), tmyData, type = "l", xlab = "5 minute interval", ylab = "avg steps for each interval", main = "average daily activity pattern", col = "red")
# interval with the maximum average number of steps
max_interval <- which.max(tmyData)
names(max_interval)

# 4. Imputing missing values
count <- sum(is.na(myData))  # number of NA cells in the whole data set
count
# Replace each missing step count with the mean for its 5-minute interval.
# match() looks up every row's interval mean at once instead of a row-by-row
# loop; ifelse() then keeps observed values and fills only the NAs.
StepsAverage <- aggregate(steps ~ interval, data = myData, mean)
interval_means <- StepsAverage$steps[match(myData$interval, StepsAverage$interval)]
fillNA <- ifelse(is.na(myData$steps), interval_means, myData$steps)
newData <- myData
newData$steps <- fillNA
head(newData)
newTotalSteps <- aggregate(steps ~ date, data = newData, sum, na.rm = TRUE)
hist(newTotalSteps$steps, main = "Total steps by day", xlab = "day", col = "blue")
mean(newTotalSteps$steps)
median(newTotalSteps$steps)

# 5. Are there differences in activity patterns between weekdays and weekends?
# NOTE: weekdays() is locale-dependent; this assumes an English locale.
dayType <- weekdays(myData$date)
levelOfDay <- ifelse(dayType %in% c("Saturday", "Sunday"), "Weekend", "Weekday")
# add the day-type factor to the imputed data set
newData$levelOfDay <- factor(levelOfDay)
stepsByEachDay <- aggregate(steps ~ interval + levelOfDay, data = newData, mean)
xyplot(steps ~ interval | levelOfDay, type = "l", data = stepsByEachDay, xlab = "Day Type", ylab = "steps taken", main = "difference in pattern on weekdays and weekends", col = "red")
|
f1da2ab9b24d9cfdcce9a4769bccbf24ee8c6c2d
|
9ea6e8002beed284ebb0e7d33e788ce64812cd93
|
/scripts/exploration_dist.R
|
e91e182ba8975c17e6134468259d2266bd73b24a
|
[] |
no_license
|
AimeeRose/conflicts_analysis_project
|
97985d644a8d7689dfe09fa015665cbce00faa31
|
29a707fe482535b2eec6d89eacb79062dbb46108
|
refs/heads/master
| 2021-01-24T23:41:28.205850
| 2015-11-17T14:34:10
| 2015-11-17T14:34:10
| 46,353,040
| 0
| 0
| null | 2015-11-17T14:41:25
| 2015-11-17T14:41:25
| null |
UTF-8
|
R
| false
| false
| 2,863
|
r
|
exploration_dist.R
|
# Exploratory analysis of GDELT conflict events pulled from a local MySQL
# database: geographic scatter maps, pairwise distance distributions, and
# hierarchical / k-means clustering of event locations.
library(RMySQL)
library(ggmap)
library(fields)
# SECURITY NOTE(review): database credentials are hard-coded in source; move
# them to environment variables or a config file before sharing this script.
con <- dbConnect(MySQL(), user="root", password="3442", dbname = "gdelt")
res <- dbSendQuery(con, "select * from random_events")
data <- dbFetch(res, n = -1)
# keep event id, CAMEO root code, mention count, and action coordinates
geodata <- subset(data, select=c(GLOBALEVENTID, EventRootCode, NumMentions, ActionGeo_Lat, ActionGeo_Long))
# rough bounding box for the continental US
geodata_us <- subset(geodata, ActionGeo_Lat < 55 & ActionGeo_Lat > 0 & ActionGeo_Long < -45 & ActionGeo_Long > -140)
time <- subset(data, select=c(GLOBALEVENTID, EventRootCode, DATEADDED))
# DATEADDED is stored as YYYYMMDD; convert to Date, then to numeric day count
time[,3] <- as.Date(as.character(time[,3]), "%Y%m%d")
time[,3] <- as.numeric(time[,3])
# It is much better to use the logarithm of the number of mentions
# to measure relevance, rather than the absolute value
hist(geodata$NumMentions)
hist(log(geodata$NumMentions))
geodata$LogMentions = log(geodata$NumMentions)
map <- get_map(location = 'united states', zoom = 3, source = 'google')
#map <- get_map(location = c(left = 51.4978, bottom = -60.4462, right = 16.0290, top = -133.5712), source = 'google')
# map of events sized by log-mentions
firstlook_size <- ggmap(map) +
geom_point(aes(x = ActionGeo_Long, y = ActionGeo_Lat, size = LogMentions), data = geodata, colour="red", alpha=.5)
firstlook_size
# same map, colored by CAMEO event root code
firstlook_CAMEO <- ggmap(map) +
geom_point(aes(x = ActionGeo_Long, y = ActionGeo_Lat, size = LogMentions, colour=EventRootCode), data = geodata, alpha=.5)
firstlook_CAMEO
# I created a matrix of dimension NumEvents x NumEvents indicating the distance
# between any pair of events from a random subset of 2000 and plotted the distribution
# NOTE(review): round(runif(2000,1,20000)) samples row indices WITH possible
# repeats; sample(20000, 2000) would be the conventional way -- and no seed is
# set before these draws, so the subsets are not reproducible. Also the time
# subset below is an INDEPENDENT random draw, not the same 2000 events.
coordinates_matrix <- subset(geodata[round(runif(2000,1,20000),0),], select=c(ActionGeo_Lat, ActionGeo_Long))
distances = rdist(as.matrix(coordinates_matrix))
hist(as.vector(distances))
time_matrix <- subset(time[round(runif(2000,1,20000),0),], select=c(DATEADDED))
time_distance = rdist(as.matrix(time_matrix))
# either there is something wrong with the following graph or we have found something cool
hist(as.vector(time_distance))
# attempt of clustering analysis
# hierarchical
# too big to work?
# NOTE(review): rdist() returns a plain matrix; hclust() expects a "dist"
# object (e.g. as.dist(...)) -- confirm this call behaves as intended.
distances_k <- rdist(as.matrix(na.omit(coordinates_matrix)))
clusters <- hclust(distances_k, method="ward.D")
plot(clusters)
groups <- cutree(clusters, k=5)
rect.hclust(clusters, k=5, border="red")
# k-means on the US-only event coordinates, k = 5
k = 5
set.seed(1)
geodata_us <- na.omit(geodata_us)
geodata_us$LogMentions = log(geodata_us$NumMentions)
# columns 4:5 are ActionGeo_Lat and ActionGeo_Long
geodata_k <- geodata_us[,4:5]
KMC = kmeans(geodata_k, centers = k, iter.max = 1000)
str(KMC)
# Extract clusters
Clusters = KMC$cluster
clust_plot <- cbind(geodata_us, Clusters)
library(cluster)
clusplot(geodata_k, Clusters, color=TRUE, shade=TRUE,
labels=2, lines=0)
# NOTE(review): install.packages() inside an analysis script re-installs on
# every run; guard it or install interactively instead.
install.packages("fpc")
library(fpc)
plotcluster(geodata_k, Clusters)
# map of events colored by k-means cluster assignment
firstlook_clust <- ggmap(map) +
geom_point(aes(x = ActionGeo_Long, y = ActionGeo_Lat, size = LogMentions, colour=Clusters), data = geodata_us, alpha=.5)
firstlook_clust
|
1b0c0e13ba0b139c40d3551ffc98cf47a2af4f33
|
de911eb1f603ed40516b8a3827ce996062e21c07
|
/dataController/postgresController.R
|
9a60b3fcaf536b7e978c645af93dcf578c5a16f0
|
[] |
no_license
|
josegerar/examen-final-garcia-it
|
c2c8d56a828a7c82e286efee393884ae2970c9ab
|
fe4fa0e1fd2a5f319f116e708c07a55c569d3e42
|
refs/heads/main
| 2023-03-19T08:36:40.337188
| 2021-03-13T22:26:28
| 2021-03-13T22:26:28
| 346,424,570
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 336
|
r
|
postgresController.R
|
library(DBI)
library(odbc)

# Open an ODBC connection to a PostgreSQL database.
#
# The connection details, previously hard-coded, are now parameters whose
# defaults reproduce the original behavior exactly, so existing zero-argument
# callers are unaffected while new callers can target other databases.
#
# SECURITY NOTE: the default password lives in source code. In production,
# pass credentials explicitly (e.g. from Sys.getenv()) instead of relying on
# these defaults.
#
# Args:
#   driver:   ODBC driver name installed on the host.
#   server:   database host name.
#   database: database name to connect to.
#   uid, pwd: credentials.
#   port:     TCP port of the PostgreSQL server.
# Returns: a DBIConnection; the caller is responsible for DBI::dbDisconnect().
openConnectionPostgres <- function(driver = "PostgreSQL ODBC Driver(UNICODE)",
                                   server = "localhost",
                                   database = "radiodifusora",
                                   uid = "postgres",
                                   pwd = "123456",
                                   port = 5432) {
  DBI::dbConnect(
    odbc::odbc(),
    Driver = driver,
    Server = server,
    Database = database,
    UID = uid,
    PWD = pwd,
    Port = port
  )
}
|
f410cbc0eb56b66d6207aec4ece10f64140e635f
|
6e1001104bba35f0e9ba085c88e9c30da0694a77
|
/cohort_boot_functions/taxa_list_split_function.R
|
d698ab80a4931d2f50b1d66a9736cd33779afd62
|
[
"MIT"
] |
permissive
|
jimjunker1/bootstrap_growth
|
f888e0fec66aa90fc69006dbb614072844198577
|
b47ff739a16891e7a896ce496d82697bdd511093
|
refs/heads/master
| 2020-03-28T04:41:35.211391
| 2019-01-04T22:55:09
| 2019-01-04T22:55:09
| 147,730,771
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 266
|
r
|
taxa_list_split_function.R
|
# Split site data into one data frame per taxon.
#
# Args:
#   site_data_list: data frame with at least SITE and TAXON columns.
#   taxa_list:      list (or vector) of taxon names to split out.
#   envir:          unused in the body -- NOTE(review): candidate for removal;
#                   confirm no caller relies on it.
#   ...:            unused in the body.
# Returns: a list (via furrr::future_map2) of data frames, one per taxon,
#   each filtered to the matching SITE/TAXON rows.
#
# NOTE(review): site_list repeats unique(SITE) once per taxon, which is only
# well-formed when the input contains a single site -- verify upstream.
# NOTE(review): list(site_list) passes the WHOLE site vector as a single
# element of x, so the filter compares SITE against the full vector (relying
# on recycling) rather than iterating one site per taxon; if one-site-per-taxon
# pairing was intended, future_map2(site_list, taxa_list, ...) would be the
# fix -- confirm intent before changing.
# Requires dplyr (%>%, filter) and furrr (future_map2) to be attached.
taxa_list_split = function(site_data_list, taxa_list, envir = parent.frame(),...){
site_list = c(rep(unique(site_data_list$SITE),length(taxa_list)))
future_map2(list(site_list),taxa_list, function(x,y){
site_data_list %>% filter(SITE == x, TAXON == y)
})
}
|
efc877eab37cc7383f4b2dad27b5429deac5e005
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/roptim/vignettes/article.R
|
0225d957dd14a4dcce01530822e77f7387e18565
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 56
|
r
|
article.R
|
### This is an R script tangled from 'article.pdf.asis'
|
cb88fc439f1f673ec36c298e11a3be1c5647f626
|
00cff0ee84a3bed9b6309ff507c17fb1a769942f
|
/4. Detailed GAM Examples/day-3-exercise-solution/Ch-3-Day-3-exercises.R
|
eaa0a61d8f7b50ffaaeddb76a13d3ceb5e195422
|
[] |
no_license
|
perkygva/LM-GLM-GAM-Udemy-
|
b4fc0d78c12ee4ea8fe40be1e440be5f35e25a50
|
485d9711d96e29fcefa5ff824284c2467b5ec1f1
|
refs/heads/master
| 2021-01-22T11:00:03.323939
| 2017-02-15T13:46:25
| 2017-02-15T13:46:25
| 82,062,297
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,208
|
r
|
Ch-3-Day-3-exercises.R
|
# Chapter 3 (GAM course) exercise solutions: polynomial and cubic-spline basis
# regression on simulated monotone data, plus a manually-solved normal-equations
# fit to illustrate numerical instability of high-order bases.
############## CHAPTER 3 QUESTION #1
## polynomial fits
## create some data:
set.seed(1)
## uniform x from approx .xx to 3.14:
x <- sort(runif(40)*10)^.5
## uniform y from approx .68 to 1
y <-sort(runif(40))^0.1
## create a sequence of 200 equally spaced values from min(x) to max(x)
xx <- seq(min(x),max(x),length=200)
plot(x,y)
# degree-5 and degree-10 polynomial fits, drawn over the scatter plot
b <- lm(y~poly(x,5))
lines(xx,predict(b,data.frame(x=xx)))
b10 <- lm(y~poly(x,10))
lines(xx,predict(b10,data.frame(x=xx)))
## splines fits
# cubic spline basis function |x - xk|^3 for knot xk
sb <- function(x,xk) { abs(x-xk)^3}
q <- 11
# q - 2 interior knot locations
xk <- ((1:(q-2)/(q-1))*10)^.05
## lazy person's formula construction ...
# builds "y~x+sb(x,xk[1])+sb(x,xk[2])+..." as a text formula
form <- paste("sb(x,xk[",1:(q-2),"])",sep="",collapse="+")
form <- paste("y~x+",form)
bform <- lm(formula(form))
lines(xx,predict(bform,data.frame(x=xx)),col=3)
##################################################
## QUESTION #2
## x,y, and xx from previous question
b1 <- lm(form)
plot(x,y)
lines(xx,predict(b1,data.frame(x=xx)),col=4)
# refit the same model by solving the normal equations directly
# (tol=0 forces solve() to proceed even when X'X is near-singular)
X <- model.matrix(b1) # extract model matrix
beta <- solve(t(X)%*%X,t(X)%*%y,tol=0)
# overwrite lm coefficients so predict() uses the normal-equations estimates
b1$coefficients <- beta # trick for simple prediction
lines(xx,predict(b1,data.frame(x=xx)),col=5)
## Upping the basis dimension to 11 also makes the normal
## equations estimates perform very badly
|
2182c3c3ca4be9b95b20f63f8522ce21839e5562
|
aae7f08360ee40b6a5fb633f5f59f1bfe675eccd
|
/APB-ML-Feature importance-learn.R
|
ba4556d063acb0dd32e955fc9f620a64961d693b
|
[] |
no_license
|
IanShi1996/BCB410-DataScience
|
ed4a785ff5b5b6032fbd10ee7a7f3c1199305163
|
99e7d33023901fbc4de597e8b962f20b76a3d58d
|
refs/heads/master
| 2021-07-10T08:23:58.455016
| 2017-10-11T14:21:17
| 2017-10-11T14:21:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,337
|
r
|
APB-ML-Feature importance-learn.R
|
# ___ID___.R
#
# Purpose: A Bioinformatics Course:
# R code accompanying the Assessing Feature Importance unit.
#
# Version: 0.1
#
# Date: 2017 MM DD
# Author: yuqing zou (yuqing.zou@mail.utoronto.ca)
#
# Versions:
# 0.1 (Describe ...)
#
# TODO:
#
#
# == DO NOT SIMPLY source() THIS FILE! =======================================
#
# If there are portions you don't understand, use R's help system, Google for an
# answer, or ask your instructor. Don't continue if you don't understand what's
# going on. That's not how it works ...
#
# ==============================================================================
# = 1 Feature Selection with the Caret R Package
#Selecting the right features in your data can mean the difference between mediocre
#performance with long training times and great performance with short training times.
#The caret R package provides tools automatically report on the relevance and
#importance of attributes in your data and even select the most important features
# =1.1 install package first! you need mlbench and also caret
# Ensure the required packages are installed and attached.
# Bug fix: the caret branch previously installed only two of caret's
# dependencies (DEoptimR, pbkrtest) but never caret itself, so the subsequent
# library(caret) failed on a machine without caret pre-installed.
# (Style note: `if (!require(...)) install` is a common bootstrap pattern in
# tutorial scripts; requireNamespace() would be the stricter alternative.)
if (!require(mlbench, quietly = TRUE)) {
  install.packages("mlbench")
  library(mlbench)
}
if (!require(caret, quietly = TRUE)) {
  install.packages('DEoptimR')
  install.packages("pbkrtest", dependencies = TRUE)
  install.packages("caret", dependencies = TRUE)
  library(caret)
}
if (!require(e1071, quietly = TRUE)) {
  install.packages("e1071")
  install.packages("randomForest")
  library(e1071)
}
# = 1.2 Remove Redundant Features
#from original data, many features may be highly correlated with each other. Those
#redundant features add no relevant information to your other features, since they
#can be obtained from other features by some linear combination. Basically, having those
#features is not really helpful, and may increase the computational cost.
# ensure the results are repeatable
set.seed(7)
# load the library
library(mlbench)
library(caret)
# load the data (expects myGOExSet in ./data relative to the working directory)
load(file="./data/myGOExSet.RData")
#replace NA or just drop the row, otherwise cannot calculate the correlation matrix
# NOTE(review): replacing NAs with 0 biases correlations toward zero; consider
# cor(..., use = "pairwise.complete.obs") instead -- verify before relying on results.
myGOExSet[is.na(myGOExSet)] <- 0
head(myGOExSet)
# = 1.3 task1
#find the highly correlated features using the two functions recommended in the unit page:
#cor() : to find the correlation matrix
#findCorrelation() : to identify which features to drop
# = 1.4 RFE auto-features-selection
#Recursive Feature Elimination is regarded as a wrapper method, usually built on
#top of an SVM or regression model. RFE is based on the idea of repeatedly
#constructing a model (e.g. SVM) and choosing the best performing feature.
#All features in the data set are applied to this process, and ranked in the end.
# ensure the results are repeatable
set.seed(7)
# load the library
library(mlbench)
library(caret)
# load the data
load(file="./data/myGOExSet.RData")
#replace NA or just drop the row, otherwise cannot calculate the correlation matrix
myGOExSet[is.na(myGOExSet)] <- 0
head(myGOExSet)
# = 1.5 task2
#try
#cor() : to find the correlation matrix
#findCorrelation() : to identify which features to drop
# = 1.4 Task solutions for task1
# ensure the results are repeatable
set.seed(7)
# load the library
library(mlbench)
library(caret)
# load the data
load(file="./data/myGOExSet.RData")
#replace NA or just drop the row, otherwise cannot calculate the correlation matrix
myGOExSet[is.na(myGOExSet)] <- 0
head(myGOExSet)
# calculate correlation matrix
# NOTE(review): columns 5:16 are assumed to be the numeric expression
# features -- confirm against the myGOExSet layout.
correlationMatrix <- cor(myGOExSet[,5:16])
# summarize the correlation matrix
print(correlationMatrix)
# find attributes that are highly corrected (ideally >0.75)
# (cutoff=0.5 is used here, more aggressive than the 0.75 rule of thumb)
highlyCorrelated <- findCorrelation(correlationMatrix, cutoff=0.5)
# print indexes of highly correlated attributes
print(highlyCorrelated)
# = 1.4 Task solutions for task2
# ensure the results are repeatable
set.seed(7)
# load the library
library(mlbench)
library(caret)
# load the data
load(file="./data/myGOExSet.RData")
#replace NA or just drop the row, otherwise cannot calculate the correlation matrix
myGOExSet[is.na(myGOExSet)] <- 0
head(myGOExSet)
# define the control using a random forest selection function
# (10-fold cross-validation with random-forest ranking)
control <- rfeControl(functions=rfFuncs, method="cv", number=10)
# run the RFE algorithm: columns 5:15 are predictors, column 16 is the target
results <- rfe(myGOExSet[,5:15], myGOExSet[,16], sizes=c(1:12), rfeControl=control)
# summarize the results
print(results)
# list the chosen features
predictors(results)
# plot the results
plot(results, type=c("g", "o"))
#that is everything here
# [END]
|
1e352e5853e26faba9b55611a18c67f797e6bdcb
|
a344e881871bcc2e703f82b741de8e325efc70e3
|
/man/dot-checkContrastMatrix.Rd
|
642ebb6fa5501d283e4720e7cae0601010b249e0
|
[] |
no_license
|
Vitek-Lab/MSstatsTMT
|
b697e5337ea8953b180924cbd3aa4a1aa9130c91
|
6fda14b3be54a965cd7caff9d67136c153dfe532
|
refs/heads/master
| 2023-06-07T23:09:23.792869
| 2023-02-26T23:49:33
| 2023-02-26T23:49:33
| 106,858,130
| 22
| 18
| null | 2023-04-21T13:24:17
| 2017-10-13T18:12:43
|
R
|
UTF-8
|
R
| false
| true
| 431
|
rd
|
dot-checkContrastMatrix.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils_group_comparison.R
\name{.checkContrastMatrix}
\alias{.checkContrastMatrix}
\title{Check whether the comparison is pairwise; if so, generate a contrast matrix.}
\usage{
.checkContrastMatrix(contrast_matrix)
}
\value{
a contrast matrix
}
\description{
Check whether the comparison is pairwise; if so, generate a contrast matrix.
}
\keyword{internal}
|
1d59b7916790c8f85d15f462bfcaf18d2afdd705
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/gwfa/man/marie_galante.Rd
|
daee4a6900820ee63c70c69a5752e81bb8ad6b32
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,085
|
rd
|
marie_galante.Rd
|
\name{mariegalante}
\alias{mariegalante}
\docType{package}
\title{
Centroids of buildings of Marie-Galante (a French island in the Caribbean). Marie-Galante is a dependency of Guadeloupe.
}
\description{
The Marie-Galante centroids have been extracted from OpenStreetMap.
Map data copyrighted OpenStreetMap contributors and available from 'http://www.openstreetmap.org'
}
\value{
\item{x}{longitude}
\item{y}{latitude}
}
\examples{
library(gwfa)
data("mariegalante")
test=gwfa(points=mariegalante,q=0,radius=(20*2^((0:6)/2)),
bandwith=1600,sample_size=500,cell_size=2000)
test=test[test$count>100,]#select the cells with at least 100 points.
#estimate the fractal dimension on the 7 radius
X=cbind(rep(1,length(test@radius)),log2(test@radius))
fit_frac_dim=(do.call(cbind,test[,4:10]))\%*\%t(solve(t(X)\%*\%X)\%*\%t(X))
test$dimfrac=fit_frac_dim[,2]
#create spatial polygon dataframe
shp=grid_to_spdf(test,"2970")
\dontrun{
library(cartography)
choroLayer(spdf=shp,nclass=5,var="dimfrac",method="fisher-jenks")
}
}
|
c807c9f876fa97cdf0e054c2bea00f684fcdcc76
|
4eb5cda5f02f054d64745ce91923dd1fa4ea9095
|
/MAMU/eck1_MAMU_juvRatio.R
|
5d3da6a4535358f912eb81ba23888783290c469f
|
[] |
no_license
|
mczapanskiy-usgs/WERC-SC
|
e2e7cb68616ef0492144c0ef97629abb280103ae
|
caec92b844c9af737dcfc8d6dbb736de79c3e71c
|
refs/heads/master
| 2021-12-02T16:42:16.529960
| 2021-12-01T16:58:20
| 2021-12-01T16:58:20
| 36,815,737
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,074
|
r
|
eck1_MAMU_juvRatio.R
|
### JUVENILE RATIO
# this script calculates the ratio of HY (hatch-year) to AHY (after-hatch-year) MAMU on surveys
# last updated: Nov 14, 2018 by E Kelsey and J Yee
## SET WORKING DIRECTORY
setwd("~/WERC-SC/MAMU")
## LOAD LIBRARIES
library(data.table)
library(dplyr)
library(lubridate)
library(mosaic)
### READ IN OBSERVATIONS FILES
## all previous years (datafile that Jon got from Bill)
read.csv('~/WERC-SC/MAMU/JuvenileRatioData2013.csv', # MAMU_All_locs_1_03.20.2017_DRAFT.csv',
stringsAsFactors = FALSE) %>%
mutate(Surv_No = NA) -> obs_2013
## 2017
read.csv('~/WERC-SC/MAMU/JuvenileRatioData2017.csv',
stringsAsFactors = FALSE) %>%
# summarize by survey day:
group_by(Surv_No, Date, Year, Survey, Julian) %>%
summarise(AHY = sum(AHY), HY = sum(HY)) %>%
as.data.frame() -> obs_2017
## 2018
read.csv('~/WERC-SC/MAMU/JuvenileRatioData2018.csv',
stringsAsFactors = FALSE) %>%
# summarize by survey day:
group_by(Surv_No, Date, Year, Survey, Julian) %>%
summarise(AHY = sum(AHY), HY = sum(HY)) %>%
as.data.frame() -> obs_2018
## combine all years (assumes all three frames share the same columns — confirm)
obs_all <- rbind(obs_2013, obs_2017, obs_2018)
### SELECT ONLY MAMU DETECTIONS WITHIN JUVENILE TIMEFRAME (JULY 10 - AUGUST 24) AND CALCUALTE ADJUSTED HY AND AHY DETECTIONS
obs_mamu_all <- obs_all %>%
mutate(date = as_date(Date, format = "%m/%d/%Y"),
jdate = yday(date)) %>% # add column with Julian Date
filter(jdate <= 236 + leap_year(Year),
jdate >= 191 + leap_year(Year)) %>% # select date btwn July 10 & Aug 24 (JD 191 - 236, or JD 192 - 237 in leap years)
mutate(HYadj = HY/(-1.5433 + 0.0098 * jdate), # apply regression to account for HY birds that haven't fledged by survey date
AHYadj = ifelse(jdate <= 199, # apply regression for AHY birds that are still incubting by survey date
AHY/(1 - (18.7145545 - 0.18445455 * jdate + 0.00045455 * (jdate^2))), AHY),
Surv_No = as.numeric(as.factor(date)))
### CALUCLATE JUVENILE RATIO FROM ADJUSTED HY AND AHY DETECTION VALUES
## un-adjusted data, all survey designs combined
# juvRatVar below is the delta-method variance of the ratio HY/AHY
juvRatio_all <- obs_mamu_all %>%
group_by(Year) %>%
summarise(nSvy = n_distinct(Surv_No),
cov = cov(AHY, HY, use = "pairwise.complete.obs"),
AHYsum = sum(AHY, na.rm = TRUE), AHYvar = var(AHY, na.rm = TRUE), AHYave = mean(AHY, na.rm = TRUE),
HYsum = sum(HY, na.rm = TRUE), HYvar = var(HY, na.rm = TRUE), HYave = mean(HY, na.rm = TRUE)) %>%
mutate(juvRat = HYsum/AHYsum, # juvenile ratio per survey, adjusted
juvRatVar = (((HYvar/(AHYave^2))+((HYave^2)*AHYvar)/(AHYave^4))-(((2*HYave)*cov)/(AHYave^3)))/nSvy,
juvRatSE = juvRatVar^(0.5))
# zig survey design only
juvRatio_all_zig <- obs_mamu_all %>%
group_by(Year) %>%
filter(Survey == "zig") %>%
summarise(nSvy = n_distinct(Surv_No),
cov = cov(AHY, HY, use = "pairwise.complete.obs"),
AHYsum = sum(AHY, na.rm = TRUE), AHYvar = var(AHY, na.rm = TRUE), AHYave = mean(AHY, na.rm = TRUE),
HYsum = sum(HY, na.rm = TRUE), HYvar = var(HY, na.rm = TRUE), HYave = mean(HY, na.rm = TRUE)) %>%
mutate(juvRat = HYsum/AHYsum, # juvenile ratio per survey, adjusted
juvRatVar = (((HYvar/(AHYave^2))+((HYave^2)*AHYvar)/(AHYave^4))-(((2*HYave)*cov)/(AHYave^3)))/nSvy,
juvRatSE = juvRatVar^(0.5))
# 400 m survey design only
juvRatio_all_400 <- obs_mamu_all %>%
group_by(Year) %>%
filter(Survey == "400") %>%
summarise(nSvy = n_distinct(Surv_No),
cov = cov(AHY, HY, use = "pairwise.complete.obs"),
AHYsum = sum(AHY, na.rm = TRUE), AHYvar = var(AHY, na.rm = TRUE), AHYave = mean(AHY, na.rm = TRUE),
HYsum = sum(HY, na.rm = TRUE), HYvar = var(HY, na.rm = TRUE), HYave = mean(HY, na.rm = TRUE)) %>%
mutate(juvRat = HYsum/AHYsum, # juvenile ratio per survey, adjusted
juvRatVar = (((HYvar/(AHYave^2))+((HYave^2)*AHYvar)/(AHYave^4))-(((2*HYave)*cov)/(AHYave^3)))/nSvy,
juvRatSE = juvRatVar^(0.5))
## data adjusted for HY birds that haven't yet fledged and AHY birds still on the nest
juvRatio_all_cor <- obs_mamu_all %>%
group_by(Year) %>%
summarise(nSvy = n_distinct(Surv_No),
covAdj = cov(HYadj, AHYadj, use = "pairwise.complete.obs"),
AHYsumAdj = sum(AHYadj, na.rm = TRUE), AHYvarAdj = var(AHYadj, na.rm = TRUE), AHYaveAdj = mean(AHYadj, na.rm = TRUE),
HYsumAdj = sum(HYadj, na.rm = TRUE), HYvarAdj = var(HYadj, na.rm = TRUE), HYaveAdj = mean(HYadj, na.rm = TRUE)) %>% # annual covariance of adjusted HY and AHY
mutate(juvRatAdj = HYsumAdj/AHYsumAdj, # juvenile ratio per survey
juvRatVarAdj = (((HYvarAdj/(AHYaveAdj^2))+((HYaveAdj^2)*AHYvarAdj)/(AHYaveAdj^4))-(((2*HYaveAdj)*covAdj)/(AHYaveAdj^3)))/nSvy,
juvRatSEadj = juvRatVarAdj^(0.5))
# zig survey design only, adjusted
juvRatio_all_cor_zig <- obs_mamu_all %>%
group_by(Year) %>%
filter(Survey == "zig") %>%
summarise(nSvy = n_distinct(Surv_No),
covAdj = cov(HYadj, AHYadj, use = "pairwise.complete.obs"),
AHYsumAdj = sum(AHYadj, na.rm = TRUE), AHYvarAdj = var(AHYadj, na.rm = TRUE), AHYaveAdj = mean(AHYadj, na.rm = TRUE),
HYsumAdj = sum(HYadj, na.rm = TRUE), HYvarAdj = var(HYadj, na.rm = TRUE), HYaveAdj = mean(HYadj, na.rm = TRUE)) %>% # annual covariance of adjusted HY and AHY
mutate(juvRatAdj = HYsumAdj/AHYsumAdj, # juvenile ratio per survey
juvRatVarAdj = (((HYvarAdj/(AHYaveAdj^2))+((HYaveAdj^2)*AHYvarAdj)/(AHYaveAdj^4))-(((2*HYaveAdj)*covAdj)/(AHYaveAdj^3)))/nSvy,
juvRatSEadj = juvRatVarAdj^(0.5))
# 400 m survey design only, adjusted
juvRatio_all_cor_400 <- obs_mamu_all %>%
group_by(Year) %>%
filter(Survey == "400") %>%
summarise(nSvy = n_distinct(Surv_No),
covAdj = cov(HYadj, AHYadj, use = "pairwise.complete.obs"),
AHYsumAdj = sum(AHYadj, na.rm = TRUE), AHYvarAdj = var(AHYadj, na.rm = TRUE), AHYaveAdj = mean(AHYadj, na.rm = TRUE),
HYsumAdj = sum(HYadj, na.rm = TRUE), HYvarAdj = var(HYadj, na.rm = TRUE), HYaveAdj = mean(HYadj, na.rm = TRUE)) %>% # annual covariance of adjusted HY and AHY
mutate(juvRatAdj = HYsumAdj/AHYsumAdj, # juvenile ratio per survey
juvRatVarAdj = (((HYvarAdj/(AHYaveAdj^2))+((HYaveAdj^2)*AHYvarAdj)/(AHYaveAdj^4))-(((2*HYaveAdj)*covAdj)/(AHYaveAdj^3)))/nSvy,
juvRatSEadj = juvRatVarAdj^(0.5))
### SAVE juvenile ratio results and statistics
# NOTE(review): output filenames say "1018" where "2018" is probably intended — confirm before changing.
write.csv(juvRatio_all, file = '~/WERC-SC/MAMU/juvRatio_2013-2018.csv',
row.names = FALSE)
write.csv(juvRatio_all_cor, file = '~/WERC-SC/MAMU/juvRatio_2013-1018_adj.csv',
row.names = FALSE)
# zig survey design
write.csv(juvRatio_all_zig, file = '~/WERC-SC/MAMU/juvRatio_2013-2018_zig.csv',
row.names = FALSE)
write.csv(juvRatio_all_cor_zig, file = '~/WERC-SC/MAMU/juvRatio_2013-1018_adj_zig.csv',
row.names = FALSE)
# 400 m survey design
write.csv(juvRatio_all_400, file = '~/WERC-SC/MAMU/juvRatio_2013-2018_400.csv',
row.names = FALSE)
write.csv(juvRatio_all_cor_400, file = '~/WERC-SC/MAMU/juvRatio_2013-1018_adj_400.csv',
row.names = FALSE)
|
90ecd7cb8d8f1a0d310b96bfafb29a5fbb0f4668
|
50a50158132409e19368ec09446e3405fbdafff0
|
/R/quantize.R
|
4b6993fdbc18f0b1dafe01b79e5754695369e1bb
|
[
"MIT"
] |
permissive
|
leonawicz/imgpalr
|
f48a017bea258c1058e6ca777f31f54b468de180
|
8d0fb8fc50d790ed776b576d13d20aa0770c494b
|
refs/heads/master
| 2023-05-23T12:11:32.478857
| 2021-02-20T23:09:06
| 2021-02-20T23:09:06
| 195,685,492
| 45
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,220
|
r
|
quantize.R
|
#' Quantize an image using an existing color palette
#'
#' Quantize image colors by mapping all pixels to the nearest color in RGB space, respectively, given an arbitrary palette.
#'
#' The palette \code{pal} does not need to be related to the image colors.
#' Each pixel will be assigned to whichever color in \code{pal} that it is nearest to in RGB space.
#' You can use \code{pal2} to remap to arbitrary colors after quantizing.
#' This function returns the new RGB array. You can plot a preview just like with \code{image_pal} using \code{plot = TRUE}.
#' The number of k-means centers \code{k} is for binning image colors prior to mapping the palette \code{pal}.
#' It is limited by the number of unique colors in the image. Larger \code{k} provides more binned distances between image colors and palette colors,
#' but takes longer to run.
#'
#' @param file if character, file path or URL to an image. You can also provide an RGB array from an already loaded image file.
#' @param pal character, vector of hex colors, the color palette used to quantize the image colors.
#' @param pal2 character, optional vector of hex colors, same length as \code{pal}.
#' After quantizing image to \code{pal}, you can subsequently remap \code{pal} to \code{pal2}.
#' @param k integer, the number of k-means cluster centers to consider in the image. See details.
#' @param plot logical, plot the palette with quantized image reference thumbnail. If \code{FALSE}, only return the RGB array.
#' @param show_pal logical, show the palette like with \code{image_pal}. If \code{FALSE}, plot only the image; all subsequent arguments ignored.
#' @param labels logical, show hex color values in plot.
#' @param label_size numeric, label size in plot.
#' @param label_color text label color.
#' @param keep_asp logical, adjust rectangles in plot to use the image aspect ratio.
#'
#' @return an RGB array with values ranging from 0 to 1
#' @export
#' @seealso \code{\link{image_pal}}
#'
#' @examples
#' x <- system.file("blue-yellow.jpg", package = "imgpalr")
#' pal <- c("black", "navyblue", "dodgerblue", "yellow")
#' pal2 <- c("darkred", "darkgreen", "tomato", "orange")
#'
#' a <- image_quantmap(x, pal, k = 7, plot = TRUE)
#' str(a)
#'
#' a <- image_quantmap(x, pal, pal2, k = 7, plot = TRUE)
image_quantmap <- function(file, pal, pal2 = NULL, k = 100, plot = FALSE,
                           show_pal = TRUE, labels = TRUE, label_size = 1,
                           label_color = "#000000", keep_asp = TRUE){
  # Convert hex palettes to RGB matrices; pal2, if given, must pair 1:1 with pal.
  pal <- farver::decode_colour(pal)
  if(!is.null(pal2)){
    if(length(pal2) != nrow(pal)){
      stop("`pal2` must have same length as `pal`.", call. = FALSE)
    } else {
      pal2 <- farver::decode_colour(pal2)
    }
  }
  # Accept either a path/URL or an already-loaded RGB array.
  a <- if(is.character(file)) image_load(file) else file
  dm <- dim(a)
  # Flatten the three channels into one row-per-pixel data frame (values in [0, 1]).
  f <- function(x, i) as.numeric(x[, , i])
  d <- tibble::tibble(red = f(a, 1), green = f(a, 2), blue = f(a, 3))
  # Bin pixel colors with k-means; k cannot exceed the number of distinct colors.
  nmax <- nrow(dplyr::distinct(d))
  x <- suppressWarnings(kmeans(d, min(k, nmax), 30))
  # Replace each pixel with its cluster center (scaled to 0-255) and tag with cluster id.
  d <- tibble::as_tibble(255 * x$centers[x$cluster, ]) %>% dplyr::mutate(id = x$cluster)
  d2 <- dplyr::distinct(d)
  col_map <- tibble::as_tibble(pal)
  # For one RGB triple, find the index of the nearest palette color (Euclidean distance).
  f2 <- function(r, g, b){
    x <- matrix(c(r, g, b, as.numeric(t(col_map))), ncol = 3, byrow = TRUE)
    which.min(as.matrix(dist(x))[-1, 1])
  }
  # Map each distinct cluster center to its nearest palette entry, then
  # propagate that mapping back to every pixel via the join.
  d2 <- dplyr::rowwise(d2) %>%
    dplyr::mutate(idx = f2(.data[["red"]], .data[["green"]], .data[["blue"]]))
  idx <- dplyr::left_join(d, d2, by = c("red", "green", "blue", "id"))$idx
  d <- col_map[idx, ]
  pal <- farver::encode_colour(pal)
  if(is.null(pal2)){
    d <- as.matrix(d) / 255
  } else {
    # Optional second remap: substitute each pal color with its pal2 counterpart.
    d$hex <- farver::encode_colour(d)
    d <- pal2[match(d$hex, pal), ] / 255
    pal <- farver::encode_colour(pal2)
  }
  # Reassemble the flat channel vectors into an (rows x cols x 3) RGB array.
  a <- simplify2array(list(matrix(d[, 1], dm[1]), matrix(d[, 2], dm[1]), matrix(d[, 3], dm[1])))
  if(plot){
    if(show_pal){
      .view_image_pal(a, pal, labels, label_size, label_color, keep_asp)
    } else {
      # Image-only preview: fill device, restore par on exit.
      opar <- par(mar = rep(0, 4), xaxs = "i", yaxs = "i")
      on.exit(par(opar))
      plot(0, 0, xlim = c(1, dm[2]), ylim = c(1, dm[1]), asp = 1, type = "n",
           xlab = "", ylab = "", axes = FALSE)
      rasterImage(a, 1, 1, dm[2], dm[1], interpolate = TRUE)
    }
  }
  a
}
|
eeeaefa9d8c88a7b50b0517aa2e6357496bda26f
|
c739d6ead9a9521ed999935108cbfd68bdf14071
|
/R_kmooc2/3weeks/3-2.κ³Όμ .R
|
e613e11021e422259b9bf541869c025bf013b4a1
|
[] |
no_license
|
HeeSeok-Kwon/R_study
|
526489c31bcf6c3eddb1b6f9e483cd2fafe28ac0
|
796fa69d4653ccf877abcfe6e986efb80c12b741
|
refs/heads/main
| 2023-03-19T09:58:21.909280
| 2021-03-20T08:49:35
| 2021-03-20T08:49:35
| 349,654,245
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 247
|
r
|
3-2.κ³Όμ .R
|
# 1. Build a data frame of student ids, names and scores.
df <- data.frame(id=c(10,20,30,40,50),name=c("John","Tom","Paul","Jane","Grace"),
score=c(95,46,98,74,85))
df
# 2. Four equivalent ways to access the score column
#    (the first three return a vector; the last returns a one-column data frame).
df$score
df[,3]
df[,'score']
df['score']
# 3. Select single columns as one-column data frames.
df['id']
df['score']
# 4. Select rows 2 through 3 (all columns).
df[2:3,]
# 5. Single element: row 2, column 3 (Tom's score).
df[2,3]
|
b5c112b522ba89b5e21b110b1319a9db68035776
|
b5da81c1aab3e4b5731c515d4544eea9d5c3dd4b
|
/R/ggplot_layers.R
|
aa473a04d4f61ba3defe8a4c6e8356c9e33290a3
|
[] |
no_license
|
mikeod38/dauergut
|
7e71818ae6933bef8ced064f892d4e4533d99af5
|
aa4596a83ea21b0661d61e8dd44465bdaf886a88
|
refs/heads/master
| 2021-01-19T23:01:11.064417
| 2018-04-25T22:23:33
| 2018-04-25T22:23:33
| 88,909,918
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,176
|
r
|
ggplot_layers.R
|
#' ggplot_layers
#'
#' Helper functions that add reusable layers to ggplots.
#'
#' @section
#'
#' @param width optional width for median lines
#' @examples
#' # p <- ggplot(df, aes(x = genotype, y = pct))
#' # p + add.scatter()
#' @name ggplot_layers
NULL
#' @export
#' @rdname ggplot_layers
#'
# Jittered scatter of cell.norm values.
# NOTE(review): geom_quasirandom comes from the ggbeeswarm package — confirm it is imported.
add.scatter <- function() {
  geom_quasirandom(aes(y=cell.norm),colour = "#339900", cex=1,
                   width = 0.075,size=0.3,
                   method = 'smiley')}
#' @export
#' @rdname ggplot_layers
add.median <- function(width = 0.25) {
  # Crossbar layer marking the median of cell.norm.
  # The default argument replaces the original missing()/reassignment dance
  # and is backward compatible: add.median() still uses width 0.25.
  stat_summary(aes(y = cell.norm), fun.y = median,
               fun.ymin = median,
               fun.ymax = median,
               geom = "crossbar", width = width, lwd = 0.35)
}
#' @export
#' @rdname ggplot_layers
add.mean <- function(width = 0.25) {
  # White crossbar layer marking the mean of cell.norm.
  # Default argument replaces the original missing()-based fallback (same behavior).
  stat_summary(aes(y = cell.norm), fun.y = mean,
               fun.ymin = mean,
               fun.ymax = mean,
               geom = "crossbar", width = width, lwd = 0.35, colour = "white")
}
#' @export
#' @rdname ggplot_layers
add.median.dauer <- function(width = 0.25) {
  # Crossbar layer marking the median of pct (dauer assays use pct, not cell.norm).
  # Default argument replaces the original missing()-based fallback (same behavior).
  stat_summary(aes(y = pct), fun.y = median,
               fun.ymin = median,
               fun.ymax = median,
               geom = "crossbar", width = width, lwd = 0.35)
}
#' @export
#' @rdname ggplot_layers
add.quartiles <- function(width = 0.15) {
  # Error bars spanning the 25th to 75th percentile of cell.norm.
  # The original duplicated the whole stat_summary call for the missing-width
  # case; a default argument of 0.15 gives identical behavior in both cases.
  stat_summary(aes(y = cell.norm), fun.y = median,
               fun.ymin = function(z) {quantile(z, 0.25)},
               fun.ymax = function(z) {quantile(z, 0.75)},
               geom = "errorbar", width = width, lwd = 0.15)
}
#' @export
#' @rdname ggplot_layers
# Strip x-axis title/labels and facet strip text for publication figures.
figure.axes <- function() {
  list(theme(axis.title.x = ggplot2::element_blank(),
             axis.text.x = ggplot2::element_blank(),
             axis.text.y = ggplot2::element_text(size = 15),
             strip.text.x = ggplot2::element_blank()),
       labs( title = NULL,
             subtitle = NULL))
}
#' @export
#' @rdname ggplot_layers
# Annotate group sizes next to each categorical genotype level.
# NOTE(review): relies on a global fun_length() not defined in this file — confirm it is in scope at plot time.
add.n.categorical <- function() {
  stat_summary(aes(x=as.numeric(as.factor(genotype)) + 0.3, y=0),
               fun.data = fun_length, geom = "text", size = 3)
}
#' @export
#' @rdname ggplot_layers
# Annotate group sizes next to each temperature value (continuous x).
# NOTE(review): also relies on the global fun_length() — confirm.
add.n <- function() {
  stat_summary(aes(x= temp + 0.3, y=0),
               fun.data = fun_length, geom = "text", size = 3)
}
#' @export
#' @rdname ggplot_layers
# Overlay Bayesian credible intervals (95% and 50%) plus the posterior mean.
# NOTE(review): reads a data frame named `mixed` from the calling environment
# with columns x.pos, mean, lower.CL, upper.CL, lower.25, upper.75 — confirm it exists before plotting.
add.Bayes.CI <- function() {
  list(geom_errorbar(data=mixed, aes(x=x.pos,y=mean, ymin=lower.CL, ymax=upper.CL),
                     width=0,colour ="grey", lwd=0.15),
       geom_errorbar(data=mixed, aes(x=x.pos,y=mean, ymin = lower.25, ymax = upper.75),
                     width=0,colour = "darkgrey", lwd = 0.15+0.7),
       geom_segment(data = mixed, aes(x = x.pos-(0.009*nrow(mixed)),
                                      y = mean, xend = x.pos+(0.009*nrow(mixed)),
                                      yend = mean), colour = "darkgrey"))
}
#' @export
#' @rdname ggplot_layers
# Variant of theme_classic with angled x labels and no legend key background.
theme_my_classic <- ggplot2::theme_classic() +
  ggplot2::theme(axis.text.x=ggplot2::element_text(angle=45, hjust=1, size=12),legend.key = ggplot2::element_blank())
#' @export
#' @rdname ggplot_layers
# Default plotting theme: theme_bw stripped of grid lines, panel border and
# strip backgrounds, with angled x labels.
theme_my <- ggplot2::theme_bw() + ggplot2::theme(
  axis.line = ggplot2::element_line(colour = "black"),
  panel.grid.major = ggplot2::element_blank(),
  panel.grid.minor = ggplot2::element_blank(),
  panel.border = ggplot2::element_blank(),
  strip.background = ggplot2::element_blank(),
  legend.key = ggplot2::element_blank(),
  axis.text.x=ggplot2::element_text(angle=45, hjust=1, size=12)
)
# Dark theme: white text/lines on a black background, built on theme_classic.
# Not exported (no roxygen @export tag) — presumably used interactively.
theme_black = function(base_size = 12, base_family = "") {
  theme_classic(base_size = base_size, base_family = base_family) %+replace%
    theme(
      # Specify axis options
      axis.line = element_line(colour = "white"),
      axis.text.x = element_text(size = base_size*0.8, color = "white", lineheight = 0.9),
      axis.text.y = element_text(size = base_size*0.8, color = "white", lineheight = 0.9),
      axis.ticks = element_line(color = "white", size = 0.2),
      axis.title.x = element_text(size = base_size, color = "white", margin = margin(0, 10, 0, 0)),
      axis.title.y = element_text(size = base_size, color = "white", angle = 90, margin = margin(0, 10, 0, 0)),
      axis.ticks.length = unit(0.3, "lines"),
      # Specify legend options
      legend.background = element_rect(color = NA, fill = "black"),
      legend.key = element_rect(color = "white", fill = "black"),
      legend.key.size = unit(1.2, "lines"),
      legend.key.height = NULL,
      legend.key.width = NULL,
      legend.text = element_text(size = base_size*0.8, color = "white"),
      legend.title = element_text(size = base_size*0.8, face = "bold", hjust = 0, color = "white"),
      legend.position = "right",
      legend.text.align = NULL,
      legend.title.align = NULL,
      legend.direction = "vertical",
      legend.box = NULL,
      # Specify panel options
      panel.background = element_rect(fill = "black", color = NA),
      panel.border = element_blank(),
      panel.grid.major = element_blank(),
      panel.grid.minor = element_blank(),
      panel.spacing = unit(0.5, "lines"),
      # Specify facetting options
      strip.background = element_blank(), #(fill = "grey30", color = "grey10"),
      strip.text.x = element_text(size = base_size*0.8, color = "white", face = "italic"),
      strip.text.y = element_text(size = base_size*0.8, color = "white",angle = -90),
      # Specify plot options
      plot.background = element_rect(color = "black", fill = "black"),
      plot.title = element_text(size = base_size*1.2, color = "white"),
      plot.margin = unit(rep(1, 4), "lines")
    )
}
|
7e5242918931401b0f7678c637231a0d46d63094
|
33fa7ba1b68ea3401812fa958bb4122d81098826
|
/PyTorch_UNetPP_wvlcDL/point_based_eval.R
|
d9020c4774a9a278499788adbd44e502b12a227e
|
[] |
no_license
|
carlos-alberto-silva/wvview_geodl_examples
|
7a415e3e52155804fb7a6c494cbba772aae21e7e
|
b70eb4540105fac07f6be5091ef97047b961b237
|
refs/heads/master
| 2023-06-21T08:56:26.064154
| 2021-07-28T14:27:14
| 2021-07-28T14:27:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 970
|
r
|
point_based_eval.R
|
# Point-based accuracy assessment of a land-cover classification:
# extract predicted classes at validation points and compare to ground truth.
# Read in libraries ===========================
library(dplyr)
library(terra)
library(caret)
library(rfUtilities)
# Read in data ===================================
# Classified raster and the validation point shapefile (with a GrndTruth field).
lc <- rast("C:/Users/amaxwel6/Downloads/accuracy_assessment/accuracy_assessment/WV_Spectral_classes_NAIP_2016.tif")
val_pnts <- vect("C:/Users/amaxwel6/Downloads/accuracy_assessment/accuracy_assessment/validation_points.shp")
# Extract classification from raster at validation point locations ====================
lc_ext <- extract(lc, val_pnts, factor=FALSE)
# Column 1 of extract() is the point ID; column 2 holds the class value.
lc_ext2 <- as.factor(as.character(lc_ext[,2]))
# Make data frame with references and predictions
val_df <- as.data.frame(val_pnts)
val_df$predicted <- lc_ext2
val_df$GrndTruth <- as.factor(as.character(val_df$GrndTruth))
# Generate confusion matrix with caret
cfOut <- confusionMatrix(data=val_df$predicted, reference=val_df$GrndTruth)
# Generate assessment metrics with rfUtilities
accOut <- accuracy(val_df$predicted, val_df$GrndTruth)
|
330041a3b5f8512cd3d94fa93336ce67dd30459a
|
63e353ffd3539c69fab7d816ca4ac5e4d8f13e7a
|
/MakeFig1.R
|
848b973db350b4c931a1488ef81231fdd55b60e5
|
[] |
no_license
|
cduron1/ETV5
|
5c8deee4d5fbebd32ec47784e80d558282cd9c18
|
8b780a13aed4b415691d667aed54d0d81fa75ec3
|
refs/heads/master
| 2020-04-16T01:32:42.768954
| 2019-01-11T04:15:53
| 2019-01-11T04:15:53
| 165,177,805
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,472
|
r
|
MakeFig1.R
|
# Plotting script for Figure 1/4 of the ETV5 analysis: subgraphs of
# high-betweenness nodes and ETV5 target genes from the tumor network.
# Need to run CompleteETV5Analysis.Rnw through calculation of
# data_thresh (line 250) — this script reads miceDESeqComplex and data_thresh
# from that workspace.
I1=which(V(miceDESeqComplex$tumorg)$name %in% rownames(data_thresh))
HighBetween = induced_subgraph(miceDESeqComplex$tumorg,I1)
title1 = 'High Betweeness Subgraph'
# Alternative layouts kept for reference:
#L = layout.auto(normalmax)
#L = layout.reingold(normalmax)
L1 = layout.fruchterman.reingold(HighBetween,niter=500,area=vcount(HighBetween)^2.3, repulserad=vcount(HighBetween)^2.8)
dev.new()
graphname = paste('Graphs/',title1,'2','.png')
graphname=gsub(" ","",graphname,fixed=TRUE) # remove spaces from graph name
png(graphname,width = 1600, height = 900)
plot(HighBetween,layout = L1,vertex.size=30,vertex.label.dist = 0,
     vertex.label.cex = .7, vertex.color='yellow',main = c(title1))
# make size of vertex proportional to betweeness
# NOTE(review): this second plot uses layout `L`, which is only defined later /
# in commented-out code above — confirm which layout is intended.
plot(HighBetween,layout = L,vertex.size=.00001*V(HighBetween)$between,
     vertex.label.dist = 0,vertex.label.cex = .8,
     vertex.color='yellow',main = c(title1))
dev.off()
# For the next bit must have run CompleteETV5Analysis.Rnw through line 405
# (defines tarGenesP and siglevel).
Targs = tarGenesP$ETV5[c((tarGenesP$ETV5$padj < siglevel) & (!is.na(tarGenesP$ETV5$padj))),]
Tnames = Targs[,1]
I=which(V(miceDESeqComplex$tumorg)$name %in% Tnames)
E = which(V(miceDESeqComplex$tumorg)$name == "ETV5")
I = c(I,E)
colors = rep('yellow',length(I))
ETVtarg = induced_subgraph(miceDESeqComplex$tumorg,I)
colors[V(ETVtarg)$name == 'ETV5']='lavender'
L = layout.fruchterman.reingold(ETVtarg,niter=500,
                                area=vcount(ETVtarg)^2.3,
                                repulserad=vcount(ETVtarg)^2.8)
title = "ETV and targets subgraph"
dev.new()
graphname = paste('Graphs/',title,'2','.png')
graphname=gsub(" ","",graphname,fixed=TRUE)
png(graphname,width = 1600, height = 900)
plot(ETVtarg,layout = L,vertex.size=30,vertex.label.dist = 0,
     vertex.label.cex = .7, vertex.color=colors,main = c(title))
# make size of vertex proportional to betweeness
plot(ETVtarg,layout = L,vertex.size=.00001*V(ETVtarg)$between,
     vertex.label.dist = 0,vertex.label.cex = .8,
     vertex.color='yellow',main = c(title))
dev.off()
dev.new()
# Combined figure: high-betweenness nodes (pink) + ETV5 targets (yellow),
# ETV5 itself highlighted in lavender.
title = "High Betweeness Nodes and ETV5 Targets"
graphname = paste('Graphs/',title,'2.pdf')
pdf(graphname,width = 7, height = 7)
I2 = c(I,I1)
colors = rep('yellow',length(I2))
HiPlusETVtarg = induced_subgraph(miceDESeqComplex$tumorg,I2)
J = which(V(HiPlusETVtarg)$name %in% rownames(data_thresh))
colors[J]=rep('pink',length(rownames(data_thresh)))
colors[V(HiPlusETVtarg)$name == 'ETV5']='lavender'
L2 = layout.auto(HiPlusETVtarg)
L2 = layout.fruchterman.reingold(HiPlusETVtarg, weights = 2*E(HiPlusETVtarg)$weight)
# L2 = layout_nicely(HiPlusETVtarg)
# L2 = layout.fruchterman.reingold(HiPlusETVtarg,niter=500,
#                                  area=vcount(HiPlusETVtarg)^2.3,
#                                  repulserad=vcount(HiPlusETVtarg)^2.8)
plot(HiPlusETVtarg,layout = L2,vertex.size=22,vertex.label.dist = 0,
     vertex.label.cex = .7, vertex.color=colors,main = c(title),cex.main = 1.1)
dev.off()
# For a high-resolution tiff, used in PLosOne paper
#dev.new()
#tiff("Fig4.tiff", width = 6, height = 5, units = 'in', res = 500, compression = 'lzw')
L2 = layout.fruchterman.reingold(HiPlusETVtarg, weights = 3*E(HiPlusETVtarg)$weight)
tiff("Graphs/Fig4_3.tiff", width = 6, height = 5, units = 'in', res = 800)
plot(HiPlusETVtarg,layout = L2,vertex.size=16,vertex.label.dist = 0,
     vertex.label.cex = .35, vertex.color=colors)
dev.off()
|
f8f6fd8c8def78413c1be9bad6d2468f9906b913
|
4a0350925583ac1cec0a424ae06e0f35af75ac75
|
/misc/tcga_functions.R
|
7309cd71f9e45103e7889d876a83950b15098570
|
[] |
no_license
|
apatil1/MethylAnalyser
|
225b80acb33e993ec337c4b1c713d485043d798c
|
cd437bc37d91201a2a27a9225744203bc27216db
|
refs/heads/master
| 2021-01-19T21:28:03.022713
| 2015-07-06T14:42:44
| 2015-07-06T14:42:44
| 38,624,366
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,471
|
r
|
tcga_functions.R
|
# Read TCGA level-3 methylation data: one file per sample under `path`,
# each a tab-separated table with one junk line (skip=1), probe IDs in
# column 1 and beta values in column 2.
# Returns a matrix of beta values (probes x samples); rownames come from the
# first file (all files are assumed to share the same probe order), colnames
# are s1..sn in file-listing order. `path` must end with a path separator.
# `type` is only used by the optional commented-out file-listing export.
read_level3 <- function(path, type) {
  files <- list.files(path) # per-sample level-3 files
  #write.csv(files, paste(type, ".csv", sep=""))
  if (length(files) == 0) {
    stop("No files found in ", path, call. = FALSE)
  }
  # Read every file once, then bind columns in a single step instead of
  # growing the matrix with cbind inside a loop (O(n^2) copying).
  tables <- lapply(files, function(f) {
    read.table(paste(path, f, sep=""), sep="\t", header=TRUE, skip=1)
  })
  data <- do.call(cbind, lapply(tables, function(x) x[, 2]))
  rownames(data) <- tables[[1]][, 1]
  colnames(data) <- paste("s", seq_along(files), sep="")
  #data <- na.omit(data) # optional: drop probes with any NA
  data
}
# Build a vector of sample labels from a CSV of TCGA file names (column x):
# "0" = solid tissue normal (barcode sample code 11),
# "1" = primary (01) or recurrent (02) solid tumor.
# The TCGA barcode is the 6th dot-separated field of each file name; the
# sample-type code is characters 14-15 of that barcode. File names whose code
# is none of 01/02/11 contribute no element (same silent skip as the original),
# so the result can be shorter than the input.
sample_type <- function(file) {
  f <- read.csv(file)
  sample <- character(0)
  for (name in as.character(f$x)) {
    barcode <- strsplit(name, "[.]")[[1]][6]
    code <- substr(barcode, 14, 15)
    if (code == "11") {          # solid tissue normal
      sample <- c(sample, "0")
    } else if (code %in% c("01", "02")) { # primary or recurrent tumor
      sample <- c(sample, "1")
    }
  }
  # Always a character vector: the original returned a bare numeric when the
  # input had exactly one row, coercing to character otherwise.
  sample
}
# Heatmap helpers for TCGA beta-value matrices. All three write a JPEG to
# `plotName` and color the column sidebar with the global `sample` vector
# (NOTE(review): `sample` must be in scope when these are called — confirm).
# Default heatmap: Ward clustering on both axes, no row/column labels.
heat_tcga <- function(x, plotName, title) {
  library(gplots)
  jpeg(plotName, width = 800, height = 800, quality=100)
  heatmap.2(as.matrix(x), col=heat.colors(16), trace="none",labCol=NA,density.info="none", hclustfun = function(x)hclust(x, method = "ward"),labRow=NA, margins=c(10,10), cexRow=0.8, cexCol=1.2, key=T, ColSideColors=sample, main=title)
  dev.off()
}
# Variant that keeps the original row order (Rowv=FALSE) and shows column labels.
heat_tcga_col <- function(x, plotName, title) {
  library(gplots)
  jpeg(plotName, width = 800, height = 800, quality=100)
  heatmap.2(as.matrix(x), col=heat.colors(16),Rowv=FALSE, trace="none",density.info="none",labRow=NA, margins=c(10,10), cexRow=0.8, cexCol=1.2, key=T, ColSideColors=sample, main=title)
  dev.off()
}
# Row-scaled (z-score) variant with a green-black-red panel and explicit
# breaks: [-4, 4] in 0.2 steps plus two outlier bins at +/-10.
heat_tcga_scale <- function(x, plotName, title) {
  library(gplots)
  #breaks for the core of the distribution
  breaks=seq(-4, 4, by=0.2) #41 values
  #now add outliers
  breaks=append(breaks, 10)
  breaks=append(breaks, -10, 0)
  #create colour panel with length(breaks)-1 colours
  mycol <- colorpanel(n=length(breaks)-1,low="green",mid="black",high="red")
  jpeg(plotName, width = 800, height = 800, quality=100)
  #scale each row to zero mean / unit variance before plotting
  heatmap.2(as.matrix(x), col=mycol,density.info="histogram",labCol=NA, breaks=breaks, trace="none",labRow=NA, hclustfun = function(x)hclust(x, method = "ward"), margins=c(10,10), cexRow=0.8, cexCol=1.2, key=T, ColSideColors=sample, main=title, scale="row")
  dev.off()
}
# Venn-diagram helpers. Both plotting functions save a PNG and then dump the
# per-region gene lists with ff().
# NOTE(review): `venndiagram()` is not base R (likely the ABarray/limma-style
# helper) and `title` is read from the calling environment — confirm both.
# Two-set Venn diagram + gene lists.
venn2_all <- function(x,y, l, filename) {
  png(paste(filename,".jpg",sep=""),width=500,height=500)
  venn_hyper <- venndiagram(x, y, unique=T, title=title, labels=l, lines=1, lcol=1, tcol=1, diacol=1, plot=T, type="2", printsub=TRUE)
  dev.off()
  ff(venn_hyper, filename)
}
# Three-set Venn diagram + gene lists.
venn3_all <- function(x,y,z, l, filename) {
  png(paste(filename,".jpg",sep=""),width=1000,height=1000)
  venn_hyper <- venndiagram(x, y, z, unique=T, title=title, labels=l, lines=1, lcol=1, tcol=1, diacol=1, plot=T, type="3", printsub=TRUE)
  dev.off()
  ff(venn_hyper, filename)
}
# Write each Venn region (q1..q7) to its own text file under a new
# subdirectory named after `title`, created in the working directory.
ff <- function(venn, title) {
  Dir <- paste(getwd(),"/", sep="")
  subDir_file <- title
  #Create directory for plots and files generated from the analysis
  dir.create(file.path(Dir, subDir_file), showWarnings = FALSE)
  write(venn$q1,paste(Dir, subDir_file,"/","q1.txt", sep=""))
  write(venn$q2,paste(Dir, subDir_file,"/","q2.txt", sep=""))
  write(venn$q3,paste(Dir, subDir_file,"/","q3.txt", sep=""))
  write(venn$q4,paste(Dir, subDir_file,"/","q4.txt", sep=""))
  write(venn$q5,paste(Dir, subDir_file,"/","q5.txt", sep=""))
  write(venn$q6,paste(Dir, subDir_file,"/","q6.txt", sep=""))
  write(venn$q7,paste(Dir, subDir_file,"/","q7.txt", sep=""))
}
|
2a056e1eae6b5a2665d87c22db3d571cd1c670f2
|
0f530de60e70ff81f4d3d43a6f4bb5ebbabd0cb0
|
/R/pleiotropy-2traits.R
|
5ee0ba67bc1ee4132b47f0be1476f24316d9a1dd
|
[
"MIT"
] |
permissive
|
fboehm/QTLfigs
|
62b2b266b6593eadfb94f3cff697c29b6953b83f
|
f3a0a6843c784698c5f930c0210a2426220a9022
|
refs/heads/master
| 2020-04-17T13:21:46.684913
| 2019-04-15T19:48:06
| 2019-04-15T19:48:06
| 166,612,446
| 0
| 0
| null | 2019-01-20T01:44:04
| 2019-01-20T01:44:03
| null |
UTF-8
|
R
| false
| false
| 807
|
r
|
pleiotropy-2traits.R
|
# Draw a simple pleiotropy diagram: one QTL (Q) with arrows to two traits
# (Y1, Y2), on a dark background.
library(broman)
bgcolor <- brocolors("bg")
# NOTE(review): iArrows (igraph's internal curved-arrow helper) is assigned
# but never used below — confirm whether it can be removed.
iArrows <- igraph:::igraph.Arrows
par(fg="white",col="white",col.axis="white",col.lab="white",col.main="white",
    bg=bgcolor, mar=rep(0.1, 4), bty="n")
# Empty canvas with fixed user coordinates; all elements placed manually.
plot(0,0, xaxt="n", yaxt="n", xlab="", ylab="", type="n",
     xlim=c(-16-2/3, 150), ylim=c(0, 100), xaxs="i", yaxs="i")
x <- c(25, 75)
xmid <- 50
y <- seq(10, 90, len=5)
text(x[1], y[3], expression(Q))
text(x[2], y[2], expression(Y[2]))
#text(x[2], y[3], expression(Y[2]))
text(x[2], y[4], expression(Y[1]))
arrowcol <- brocolors("crayons")["Cerulean"]
xd <- 8    # horizontal gap between label and arrow endpoints
yd <- 3.5  # vertical offset so arrows don't overlap the labels
arrowlwd <- 5
arrowlen <- 0.3
arrows(x[1]+xd, y[3] - yd, x[2] - xd, y[2] + yd, lwd=arrowlwd,
       col=arrowcol, len=arrowlen)
arrows(x[1]+xd, y[3] + yd, x[2] - xd, y[4] - yd, lwd=arrowlwd,
       col=arrowcol, len=arrowlen)
dev.off()
|
dbc35cfc7acd6b4bbfcb22805eb485e3c9dea17f
|
68cc383997f56bc65b22ce7c0fd37409da054ce9
|
/plot2.R
|
59ebfd7a38db345f07d74ada0b338d923c30582e
|
[] |
no_license
|
andcos/ExData_Plotting1
|
75466ab732fea73361ed1f06c96b7d1692d2a4f5
|
78240a97da346031249fb8d39408a7341ab8a138
|
refs/heads/master
| 2020-12-25T13:33:53.056844
| 2015-02-08T08:05:06
| 2015-02-08T08:05:06
| 30,464,271
| 0
| 0
| null | 2015-02-07T17:52:56
| 2015-02-07T17:52:55
| null |
UTF-8
|
R
| false
| false
| 694
|
r
|
plot2.R
|
##========================================================
## FILE 3
##========================================================
##PLOT2
##========================================================
## NOTE
## My system environment is set to German, therefore the
## weekdays are written in German.
## Do = Thursday
## Fr = Friday
## Sa = Saturday
##========================================================
## NOTE(review): load() expects a serialized .RData file; the ".R" extension
## suggests this may actually be a script that should be source()d -- confirm.
load("load_energydata.R")
## Plot Global_active_power against Time and save the result as plot2.png
## (480x480).  Reads the `energydata` object loaded above from the global
## environment; expects columns Time and Global_active_power.
explore_plot2 <- function()
{
## save PNG format
png("plot2.png", width=480,height=480)
## create plot
plot(energydata$Time,energydata$Global_active_power,
type='l',xlab=" ",ylab="Global active Power (kilowatts)")
dev.off()
}
|
ba3a05b4a89bd7053022da39206716b9230b687f
|
fce46ef9ed1e6471e40bfaad9ba5ac5299c9ca5d
|
/man/show_ractive.Rd
|
4f2616446d89a921bb533b01eef49310761ca4ba
|
[] |
no_license
|
timelyportfolio/clickme
|
b73a630026006483428c4a1c304a2dd3260c2ff3
|
2781a88a4beb1ca0624bece95b1e5d1017d2970e
|
refs/heads/master
| 2021-01-25T00:29:02.888994
| 2019-04-20T22:54:04
| 2019-04-20T22:54:04
| 9,228,937
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 285
|
rd
|
show_ractive.Rd
|
\name{show_ractive}
\alias{show_ractive}
\title{Get information about a ractive}
\usage{
show_ractive(ractive, fields = NULL)
}
\arguments{
\item{ractive}{ractive name}
\item{fields}{any of the fields in template_config.yml}
}
\description{
Get information about a ractive
}
|
fc610174d5f8e28ae5dd239d1ef5d948f38b5e01
|
ec645e548b35202da0be095d6f1795ceea2d0c8b
|
/Plot6.R
|
fa7257e1e914f1a86382f1d96c7067fed1ce27c1
|
[] |
no_license
|
JahlaJazz/EDA_Proj2
|
3b25eb9e219fe012852e943abefee0ca9e197b08
|
fc046a94d4d00f3d13833c18ff51868bf8773771
|
refs/heads/master
| 2021-01-20T22:28:14.550157
| 2016-06-17T16:10:09
| 2016-06-17T16:10:09
| 61,383,394
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,214
|
r
|
Plot6.R
|
# Compare emissions from motor vehicle ("Onroad") sources in Baltimore City
# (fips == "24510") with emissions from motor vehicle sources in Los Angeles
# County, California (fips == "06037") over 1999-2008, and save the
# comparison as Plot6.png.
library(dplyr)
library(ggplot2)  # FIX: ggplot()/geom_point() are used below but ggplot2 was never attached
# read the relevant data for years 1999, 2002, 2005 and 2008
NEI <- readRDS("summarySCC_PM25.rds")
# keep only the two counties of interest and tag each row with a city label
Balt <- NEI[NEI$fips=="24510",]
Balt$City <- rep("Baltimore", nrow(Balt))
LA <- NEI[NEI$fips=="06037",]
LA$City <- rep("Los Angeles", nrow(LA))
NEI <- rbind(Balt,LA)
names(NEI)[4] <- "pm"   # shorten the emissions column name for plotting
NEI$year <- as.factor(NEI$year)
NEI$City <- as.factor(NEI$City)
# restrict the source classification table to on-road (motor vehicle) sources
SCC <- readRDS("Source_Classification_Code.rds")
SCC <- SCC[SCC$Data.Category == "Onroad" ,c(1:4,7:10)]
# Merge on the shared SCC code column; the merge itself keeps only on-road
# records.  FIX: the original call also passed `by = intersect(NEI$SCC,
# SCC$SCC)` -- a vector of SCC codes, not column names; merge() ignores `by`
# when by.x/by.y are supplied, so that argument was dead and misleading and
# has been dropped (along with the now-unused `both` variable).
NEI.SCC <- merge(NEI,SCC, by.x = "SCC", by.y = "SCC")
# open a png device and create graph
png(file = "Plot6.png", width = 600, height = 480)
tt <- ggplot(NEI.SCC, aes(year, pm, colour = factor(City))) + geom_point() + facet_grid(. ~City)
tt <- tt + ggtitle("Motor Vehicle Emission in Baltimore vs Los Angeles from 1999 - 2008")
tt <- tt + xlab("Year") + ylab("Emission")
print(tt)
# close the graphic device
dev.off()
|
291b1552bc4f9d4ec8a556ec64f8a8fe5594ed85
|
c1a50c3baaaa559c0ba49490e219371b5b614e0d
|
/02-basic stats.R
|
5099f8619137809d23f2c2df650be820aa309458
|
[] |
no_license
|
adambondarzewski/titanic
|
49c06b04c65734349baf57b1a4131b215fd6ca39
|
3f0471ebda5601cce7c72869785423cd5671add4
|
refs/heads/master
| 2021-01-20T20:18:46.888186
| 2016-07-10T15:10:42
| 2016-07-10T15:10:42
| 62,999,118
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 722
|
r
|
02-basic stats.R
|
# Quick exploratory statistics on the Titanic training/test sets.
# DT_train and DT_test are expected to already exist in the environment.
# Inspect the structure of both data sets
str(DT_train)
str(DT_test)
# Survival counts in absolute numbers
table(DT_train$Survived)
# Survival rates as proportions
prop.table(table(DT_train$Survived))
# Cross-tabulate sex against survival (absolute numbers)
table(DT_train$Sex, DT_train$Survived)
# Same cross-tabulation, normalised within each row
prop.table(table(DT_train$Sex, DT_train$Survived), 1)
# Age threshold below which a passenger counts as a child
is_child <- 15
# Flag children (1) vs adults (0); missing ages remain NA
DT_train$Child <- ifelse(DT_train$Age < is_child, 1, 0)
DT_test$Child <- ifelse(DT_test$Age < is_child, 1, 0)
# Joint distribution of child status and survival
prop.table(table(DT_train$Child, DT_train$Survived))
|
4626c32d6a523e471844722a3c56105bf94abf8f
|
a5a1dfa861d42495ea1013c42f8edd460ca89561
|
/160715/find_housekeeping_genes.2.R
|
b404caf2ea3a5409abd8e7b4343b130ce241f7bf
|
[] |
no_license
|
chanibravo/hcasmc_eqtl
|
4cca8abab75d63196d005294bf42d291128fac24
|
0c4f25b5a336349d8e590f2ac357ce0519e16032
|
refs/heads/master
| 2021-09-20T05:52:29.831159
| 2018-08-05T05:23:01
| 2018-08-05T05:23:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,422
|
r
|
find_housekeeping_genes.2.R
|
# find housekeeping genes with mean and variance analysis:
# a gene is called "housekeeping" when its log2 median RPKM is stable across
# HCASMC and all subsampled GTEx tissues (low sd, expressed in every tissue,
# small maximum deviation from the gene's average).
# libraries:
source('/srv/persistent/bliu2/HCASMC_eQTL/scripts/utils.R')
# read input: HCASMC RPKM matrix, subsampled to 10 samples
hcasmc_file='/srv/persistent/bliu2/HCASMC_eQTL/processed_data/160519_rpkm/combined.rpkm'
hcasmc=read.table(hcasmc_file,header=T,check.names=F)
hcasmc_sub=subsample_gct(hcasmc,10)
res=decompose_gct(hcasmc_sub)
# initialize the master tables with the HCASMC data; GTEx tissues appended below
master_count=res$count
master_col_data=data.frame(res$col_data,tissue='HCASMC')
master_row_data=res$row_data
# loop over all subsampled GTEx tissue files and append each to the master tables
gtex_files=list.files('/srv/persistent/bliu2/HCASMC_eQTL/data/gtex/v6p/subsampling',pattern='*.10.rpkm',recursive=T,full.name=T)
for (gtex_file in gtex_files){
# tissue name is encoded in the file name
tissue=str_replace(basename(gtex_file),'_subsample.10.rpkm','')
message(tissue)
count=read.table(gtex_file,header=T,check.names=F)
res=decompose_gct(count)
count=res$count
col_data=res$col_data
row_data=res$row_data
col_data$tissue=tissue
# sanity check: this tissue must contain exactly the same gene set as the master
stopifnot(setequal(row_data$Name,master_row_data$Name))
stopifnot(length(unique(row_data$Name))==length(master_row_data$Name))
# reorder the rows of count to match the master gene order:
idx=match(rownames(master_count),rownames(count))
count=count[idx,]
row_data=row_data[idx,]
stopifnot(row_data$Name==master_row_data$Name)
stopifnot(row.names(count)==row.names(master_count))
# append to master:
master_col_data=rbind(master_col_data,col_data)
master_count=data.frame(master_count,count,check.names=F)
}
# merge count table and col_data to facilitate melting:
master_count_t=data.frame(t(master_count))
master_count_t$sample=rownames(master_count_t)
merged=merge(master_col_data,master_count_t,by='sample')
# melt merged data to facilitate filtering; then collapse samples to the
# per-gene, per-tissue median RPKM:
melted=melt(merged,id=c('sample','tissue'),variable.name='gene_id',value.name='rpkm')
melted=melted%>%group_by(gene_id,tissue)%>%mutate(median=median(rpkm))
melted=melted%>%dplyr::select(tissue,gene_id,median)%>%unique()
melted=as.data.table(melted)
# calculate log median rpkm and the per-gene sd of it across tissues:
melted[,logmedian:=log2(median+1)]
melted[,sd:=sd(logmedian),by='gene_id']
# apply sd filter (stable across tissues), expression filter (nonzero median
# in every tissue), and log fold change filter (max deviation from the gene
# average below 1):
filtered=melted[sd<1,]
filtered[,min:=min(median),by='gene_id']
filtered=filtered[min>0,]
filtered[,avg:=mean(logmedian),by='gene_id']
filtered[,maxdiff:=max(logmedian)-avg,by='gene_id']
filtered=filtered[maxdiff<1,]
# get and save housekeeping gene names (with their descriptions):
hk_genes=unique(filtered$gene_id)
hk_genes=hcasmc[hcasmc$Name%in%hk_genes,c('Name','Description')]
write.table(hk_genes,'/srv/persistent/bliu2/HCASMC_eQTL/processed_data/160715/hk_genes.txt',quote=F,sep='\t',row.names=F)
# plot read counts of the first housekeeping gene across tissues:
hk_gene_exp=master_count[rownames(master_count)==as.character(hk_genes[1,1]),]
stopifnot(master_col_data$sample==colnames(hk_gene_exp))
pdf('/srv/persistent/bliu2/HCASMC_eQTL/figures/160715/read_counts_for_hk_gene.pdf')
to_plot=data.frame(tissue=master_col_data$tissue,rpkm=unlist(hk_gene_exp))
ggplot(to_plot,aes(tissue,rpkm))+geom_boxplot()+theme(axis.text.x=element_text(angle=90,hjust=1,vjust=0.5))
dev.off()
# plot a random non housekeeping gene for comparison:
non_hk_gene_exp=master_count[which(!rownames(master_count)%in%hk_genes[,1])[2],]
pdf('/srv/persistent/bliu2/HCASMC_eQTL/figures/160715/read_counts_for_non_hk_gene.pdf')
to_plot=data.frame(tissue=master_col_data$tissue,rpkm=unlist(non_hk_gene_exp))
ggplot(to_plot,aes(tissue,rpkm))+geom_boxplot()+theme(axis.text.x=element_text(angle=90,hjust=1,vjust=0.5))
dev.off()
|
d48e4aa4d58343b8b6e050bfc5f639e14778bc0f
|
64c71251ad5df390e5954c3919d7c1fc10a443bb
|
/1.Data_assembly.R
|
a81e2c6c6183b62e7e620ef695c7fca59a5b20d3
|
[] |
no_license
|
robwschlegel/Trend_Analysis
|
767631a11f88b2610f43a4e3a3a1f2f417422b53
|
e84da681b67e880d374e9ced12965207cd7f39ea
|
refs/heads/master
| 2021-06-11T13:05:51.885285
| 2017-01-29T12:57:44
| 2017-01-29T12:57:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,620
|
r
|
1.Data_assembly.R
|
# This script takes the SACTN data and assembles two expanded data files:
# 1. First the data are detrended.
# 2. No interpolation takes place to fill missing temperatures.
# 3. It grows the dataset to four levels of precision and five decadal trends.
# 4. Produces one set of only the 'natural' lengths (the lengths of the time series
# in the SACTN).
# 5. Produces another set fully 'grown' (each series greater than/equal to 5 years
# in length is replicated at lengths from 5 yrs to the natural length, at one
# year increments starting from 5 yrs).
# load all packages required for the analyses -----------------------------
# probably some are superfluous
library(zoo)
library(lubridate)
library(plyr)
library(dplyr)
library(tidyr)
library(tibble)
# load the 84 time series -------------------------------------------------
load("data/SACTN_sub2.Rdata")
colnames(SACTN_sub2)[8] <- "na_perc" # your woes started because you use "NA%" as a column name
SACTN_sub2 <- as.data.frame(SACTN_sub2)
# add necessary info for models: numeric time, year/month, and a running
# observation counter per series -------------------------------------------
SACTN_flat_no_interp <- SACTN_sub2 %>%
filter(complete.cases(temp)) %>%
group_by(index) %>%
mutate(time = as.numeric(date)) %>%
mutate(year = year(date)) %>%
mutate(month = month(date)) %>%
mutate(num = 1:length(date)) # to use as regressor
# detrend: replace temp with the residuals of a per-series linear fit ------
resids <- ldply(dlply(SACTN_flat_no_interp, .(site, src, index),
function(df) as.numeric(residuals(lm(temp ~ time, data = df, na.action = na.omit)))), data.frame)
colnames(resids)[4] <- "residuals"
SACTN_flat_no_interp$temp <- resids$residuals
save(SACTN_flat_no_interp, file = "data/SACTN_flat_no_interp.Rdata")
# load("data/SACTN_flat_no_interp.Rdata")
# "grow" the time series: for each series (after trimming incomplete first
# and last years, i.e. years with fewer than 12 months of data), replicate
# it at every length from 5 years up to its natural length; `year_index`
# records the replicate length ---------------------------------------------
SACTN_grown_no_interp <- data.frame()
for(i in 1:length(levels(SACTN_flat_no_interp$index))){
data1 <- data.frame(droplevels(subset(SACTN_flat_no_interp, index == levels(SACTN_flat_no_interp$index)[i])))
if(length(data1$year[data1$year == levels(as.factor(data1$year))[1]]) < 12){
data1 <- droplevels(subset(data1, year != levels(as.factor(data1$year))[1]))
}
if(length(data1$year[data1$year == levels(as.factor(data1$year))[length(levels(as.factor(data1$year)))]]) < 12){
data1 <- droplevels(subset(data1, year != levels(as.factor(data1$year))[length(levels(as.factor(data1$year)))]))
}
# series shorter than 5 full years are dropped entirely
if(length(levels(as.factor(data1$year))) < 5){
} else {
for(j in 5:length(levels(as.factor(data1$year)))){
data2 <- droplevels(subset(data1, year %in% levels(as.factor(data1$year))[1:j]))
data2$year_index <- j
SACTN_grown_no_interp <- rbind(SACTN_grown_no_interp, data2)
}
}
}
save(SACTN_grown_no_interp, file = "data/SACTN_grown_no_interp.Rdata")
# load("data/SACTN_grown_no_interp.Rdata")
# load("data/SACTN_grow_interp.Rdata")
# Expand data to show different DT (added decadal trend, degC/decade) and
# measurement precision (rounding granularity) ------------------------------
# Uncomment as follows:
# 1. SACTN_flat_no_interp to expand the flat, uninterpolated, natural data; or
# 2. SACTN_grown_no_interp to expand the flat, uninterpolated, grown data
SACTN_full_natural_no_interp <- SACTN_flat_no_interp %>% # 1.
# SACTN_full_grown_no_interp <- SACTN_grown_no_interp %>% # 2.
group_by(site, src, type) %>%
mutate(time = as.numeric(date)) %>%
ddply(.(site, src), mutate, DT000 = (seq(0, by = (0.00 / 120), length.out = length(date))) + temp) %>%
ddply(.(site, src), mutate, DT005 = (seq(0, by = (0.05 / 120), length.out = length(date))) + temp) %>%
ddply(.(site, src), mutate, DT010 = (seq(0, by = (0.10 / 120), length.out = length(date))) + temp) %>%
ddply(.(site, src), mutate, DT015 = (seq(0, by = (0.15 / 120), length.out = length(date))) + temp) %>%
ddply(.(site, src), mutate, DT020 = (seq(0, by = (0.20 / 120), length.out = length(date))) + temp) %>%
select(-temp) %>%
gather(DT, value = temp, DT000, DT005, DT010, DT015, DT020) %>%
ddply(.(site, src), mutate, prec0001 = round_any(temp, 0.001)) %>%
ddply(.(site, src), mutate, prec001 = round_any(temp, 0.01)) %>%
ddply(.(site, src), mutate, prec01 = round_any(temp, 0.1)) %>%
ddply(.(site, src), mutate, prec05 = round_any(temp, 0.5)) %>%
select(-temp) %>%
gather(prec, value = temp, prec0001, prec001, prec01, prec05) %>%
tbl_df()
save(SACTN_full_natural_no_interp, file = "data/SACTN_full_natural_no_interp.Rdata")
# NOTE(review): SACTN_full_grown_no_interp only exists when variant 2. above
# is the uncommented assignment; running variant 1. as-is makes the next
# save() fail with "object not found" -- confirm which save should be active.
save(SACTN_full_grown_no_interp, file = "data/SACTN_full_grown_no_interp.Rdata")
# load("data/SACTN_full_natural_no_interp.Rdata")
# load("data/SACTN_full_grown_no_interp.Rdata")
|
8d5eae2d2157afe8f86a02ac1f60836a66a0daba
|
ce4e30b45edb6ca97abfbc6ba50696e0fed3ed97
|
/cachematrix.R
|
f19fc1a96ebe01c72926a6cf382d36c027af8e4c
|
[] |
no_license
|
vasilBonev/ProgrammingAssignment2
|
2d27fc73b7aafc7f96cdd34c9f17a50ab424a4b5
|
9921bc8c3d690e10849245a48b9a6d0d1f7ce0ad
|
refs/heads/master
| 2021-01-20T16:28:44.234529
| 2015-01-24T10:11:53
| 2015-01-24T10:11:53
| 29,702,073
| 0
| 0
| null | 2015-01-22T21:49:05
| 2015-01-22T21:49:05
| null |
UTF-8
|
R
| false
| false
| 1,631
|
r
|
cachematrix.R
|
## Functions below are used to calculate the inverse of a given matrix x.
## Since matrix inversion is seen as costly computation there are benefits in caching the results
## instead of doing the calculation each and every time. The following two functions are used to calculate
## the inverse of a matrix
## makeCacheMatrix: build a cache-aware wrapper around a matrix.
## Call this first; pass the result to cacheSolve().
## The returned list exposes four closures sharing one environment:
##   set(y)         store a new matrix and invalidate any cached inverse
##   get()          return the stored matrix
##   setinverse(i)  cache the computed inverse
##   getinverse()   return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  cached_inv <- NULL
  replace <- function(y) {
    x <<- y
    cached_inv <<- NULL
  }
  # returns an R object which is a list of functions
  list(set = replace,
       get = function() x,
       setinverse = function(inverse) cached_inv <<- inverse,
       getinverse = function() cached_inv)
}
## This function returns the inverse of the matrix wrapped by `x` (a list
## created by makeCacheMatrix).  It first checks whether the inverse has
## already been calculated; if so, the cached result is returned, otherwise
## the inverse is computed with solve(), stored in the cache, and returned.
##   x   : cache object produced by makeCacheMatrix()
##   ... : further arguments passed on to solve()
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  m <- x$getinverse()
  if (!is.null(m)) {
    message("Cached matrix found.")
    ## FIX: previously `return(inv)` referenced an undefined name, so every
    ## cache hit raised an error; return the cached inverse instead.
    return(m)
  }
  message("No cached matrix found. Calculating inverse matrix...")
  data <- x$get()
  m <- solve(data, ...)  # also forward ... to solve(), as the signature advertises
  x$setinverse(m)
  m
}
|
ea3a5a63377287d26fb7b27a2748611d8377192f
|
4c50e336c95095ce3fac4e6333fc3a83db35dbc6
|
/man/create_Points.Rd
|
c790ac0d29bd8e14433e284452f3208d7c72f230
|
[] |
no_license
|
rsbivand/CCAMLRGIS
|
ee0a55cda86401d7904f86b03ee8c8b27f3c2006
|
8fd07db6efbab3983deeb5ebf260afb80be62782
|
refs/heads/master
| 2020-11-26T09:25:29.588205
| 2020-05-25T10:38:54
| 2020-05-25T10:38:54
| 229,028,616
| 0
| 0
| null | 2019-12-19T10:14:41
| 2019-12-19T10:14:40
| null |
UTF-8
|
R
| false
| true
| 3,059
|
rd
|
create_Points.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create.R
\name{create_Points}
\alias{create_Points}
\title{Create Points}
\usage{
create_Points(
Input,
OutputFormat = "ROBJECT",
OutputName = NULL,
Buffer = 0,
Clip = FALSE,
SeparateBuf = TRUE
)
}
\arguments{
\item{Input}{the name of the \code{Input} data as a .csv file or an R dataframe.
If a .csv file is used as input, this file must be in your working directory and its name given in quotes
e.g. "DataFile.csv".
\strong{The columns in the \code{Input} must be in the following order:
Latitude, Longitude, Variable 1, Variable 2, ... Variable x}}
\item{OutputFormat}{can be an R object or an ESRI Shapefile. if \code{OutputFormat} is specified as
"ROBJECT" (the default), a spatial object is created in your R environment.
if \code{OutputFormat} is specified as "SHAPEFILE", an ESRI Shapefile is exported in
your working directory.}
\item{OutputName}{if \code{OutputFormat} is specified as "SHAPEFILE", the name of the output
shapefile in quotes (e.g. "MyPoints") must be provided.}
\item{Buffer}{Radius in nautical miles by which to expand the points. Can be specified for
each point (as a numeric vector).}
\item{Clip}{if set to TRUE, polygon parts (from buffered points) that fall on land are removed (see \link{Clip2Coast}).}
\item{SeparateBuf}{If set to FALSE when adding a \code{Buffer},
all spatial objects are merged, resulting in a single spatial object.}
}
\value{
Spatial object in your environment or ESRI shapefile in your working directory.
Data within the resulting spatial object contains the data provided in the \code{Input} plus
additional "x" and "y" columns which correspond to the projected point locations
and may be used to label points (see examples).
To see the data contained in your spatial object, type: \code{View(MyPoints@data)}.
}
\description{
Create Points to display point locations. Buffering points may be used to produce bubble charts.
}
\examples{
\donttest{
#Example 1: Simple points with labels
MyPoints=create_Points(PointData)
plot(MyPoints)
text(MyPoints$x,MyPoints$y,MyPoints$name,adj=c(0.5,-0.5),xpd=TRUE)
#Example 2: Simple points with labels, highlighting one group of points with the same name
MyPoints=create_Points(PointData)
plot(MyPoints)
text(MyPoints$x,MyPoints$y,MyPoints$name,adj=c(0.5,-0.5),xpd=TRUE)
plot(MyPoints[MyPoints$name=='four',],bg='red',pch=21,cex=1.5,add=TRUE)
#Example 3: Buffered points with radius proportional to catch
MyPoints=create_Points(PointData,Buffer=0.5*PointData$Catch)
plot(MyPoints,col='green')
text(MyPoints$x,MyPoints$y,MyPoints$name,adj=c(0.5,0.5),xpd=TRUE)
#Example 4: Buffered points with radius proportional to catch and clipped to the Coast
MyPoints=create_Points(PointData,Buffer=2*PointData$Catch,Clip=TRUE)
plot(MyPoints,col='cyan')
plot(Coast[Coast$ID=='All',],add=TRUE,col='grey')
}
}
\seealso{
\code{\link{create_Lines}}, \code{\link{create_Polys}}, \code{\link{create_PolyGrids}},
\code{\link{create_Stations}}, \code{\link{add_RefGrid}}.
}
|
989de5369248c6d46e6a20a6548b265d9a851522
|
fbf1d063ad668c576f6f098df532869d15f33369
|
/scripts/summarize_rnaseqreads_byexon.R
|
7cb8de12268ae270ad70e03b274ac5722bc89093
|
[] |
no_license
|
jrflab/modules
|
b2b061d6a50e06ebf7a03fca18e28bbc8de27908
|
dfc6133817ae26b035bf742fc840868249196489
|
refs/heads/master
| 2023-07-19T11:29:50.802907
| 2023-07-11T16:03:43
| 2023-07-11T16:03:43
| 10,337,846
| 17
| 10
| null | 2021-08-13T22:47:06
| 2013-05-28T14:45:57
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 3,632
|
r
|
summarize_rnaseqreads_byexon.R
|
#!/usr/bin/env Rscript
# Summarize RNA-seq reads per exon: count non-duplicate reads overlapping
# each UCSC hg19 exon, compute RPM/RPKM, and write a per-exon table with
# gene symbols to the requested output file.
suppressPackageStartupMessages(library("optparse"))
suppressPackageStartupMessages(library("GenomicFeatures"))
suppressPackageStartupMessages(library("Rsamtools"))
suppressPackageStartupMessages(library("GenomicAlignments"))
suppressPackageStartupMessages(library("TxDb.Hsapiens.UCSC.hg19.knownGene"))
suppressPackageStartupMessages(library("org.Hs.eg.db"))
# command-line interface: --genome, -o/--outFile, plus one positional BAM file
optionList <- list(
make_option('--genome', action='store', default = 'b37', help = 'genome to use [%default]'),
make_option(c('-o', '--outFile'), action='store', default = NULL, help = 'output file'))
posArgs <- c('bamFile')
parser <- OptionParser(usage = paste('%prog [options]', paste(posArgs, collapse=' ')), option_list=optionList)
arguments <- parse_args(parser, positional_arguments = TRUE)
opt <- arguments$options
if (length(arguments$args) != length(posArgs)) {
print_help(parser)
print(arguments$args)
stop('Incorrect number of required positional arguments')
} else if (is.null(opt$outFile)) {
cat("Need output file\n");
print_help(parser);
stop();
} else {
# bind each positional argument to a variable of the same name (here: bamFile)
cmdArgs <- arguments$args
for (i in 1:length(cmdArgs)){
assign(posArgs[i], cmdArgs[i])
}
outFile <- opt$outFile
}
# dead block kept for interactive debugging: set example inputs by hand
if (FALSE) {
opt <- list('addChr' = TRUE, 'geneListFile' = NULL)
txdbFile <- '~/ensg69.biomart.13012013.sqlite'
bamFile <- '~/share/data/DLBCL/WTSS/bam/HS0653.bam'
outFile <- 'tmp.txt'
}
print("Loading txdb ")
# only hg19-equivalent genome builds are supported (all map to the UCSC hg19 TxDb)
if (opt$genome == "b37" || opt$genome == "hg19" || opt$genome == "GRCh37") {
txdb <- TxDb.Hsapiens.UCSC.hg19.knownGene
} else {
cat("Unsupported genome\n")
print_help(parser);
stop();
}
print('... Done')
allExons <- exons(txdb, columns = c('gene_id', 'exon_id', 'exon_name'))
# strip the "chr" prefix so exon seqnames match the BAM's b37-style naming
print('Removing chr from chromosome names')
newSeqNames <- sub('chr', '', seqlevels(allExons))
names(newSeqNames) <- seqlevels(allExons)
allExons <- renameSeqlevels( allExons, newSeqNames )
cat("Reading", bamFile, " ... ")
si <- seqinfo(BamFile(bamFile))
# restrict to 100bp inside each chromosome end -- presumably to avoid
# boundary artifacts; confirm intent.  Duplicate reads are excluded.
gr <- GRanges(seqnames(si), IRanges(100, seqlengths(si)-100))
scf <- scanBamFlag( isDuplicate = FALSE )
reads <- readGappedReads( bamFile, param = ScanBamParam( which = gr, flag = scf ) )
cat('Finished\n')
print('Count raw exon read counts ...')
summarizedExpt <- summarizeOverlaps(allExons, reads)
countsForExons <- as.numeric( assays(summarizedExpt)$counts )
names(countsForExons) <- rownames(summarizedExpt)
print('... Done')
# RPM/RPKM are normalised by the total of exon-assigned (not all mapped)
# reads -- NOTE(review): confirm this is the intended denominator.
print('Generating expression values ...')
numBases <- width(allExons)
numKBases <- numBases / 1000
millionsMapped <- sum(countsForExons) / 10^6
rpm <- countsForExons / millionsMapped
rpkm <- rpm / numKBases
print('... Done')
print('Retrieving annotation data ...')
annotDf <- values(allExons)
print('...Done')
# replace empty annotation list entries with NA so columns unlist cleanly
index = unlist(lapply(as.vector(annotDf[, 'gene_id']), length)==0)
annotDf[index,"gene_id"] = NA
index = unlist(lapply(as.vector(annotDf[, 'exon_id']), length)==0)
annotDf[index,"exon_id"] = NA
index = unlist(lapply(as.vector(annotDf[, 'exon_name']), length)==0)
annotDf[index,"exon_name"] = NA
# assemble the per-exon output table; for multi-gene exons only the first
# Entrez gene id is kept
exonsReadDf <- data.frame(
geneID = unlist(lapply(as.vector(annotDf[, 'gene_id']), function(x) {x[1]})),
exonID = unlist(as.vector(annotDf[, 'exon_id'])),
exonName = unlist(as.vector(annotDf[, 'exon_name'])),
exonCount = countsForExons,
exonRPM = rpm,
exonRPKM = rpkm,
stringsAsFactors = FALSE)
# drop exons with no gene assignment, then map Entrez ids to gene symbols
exonsReadDf <- subset(exonsReadDf, !is.na(exonsReadDf[,"geneID"]))
exonsReadDf[,"geneID"] <- as.vector(sapply(mget(exonsReadDf[,"geneID"], org.Hs.egSYMBOL, ifnotfound = NA), function (x) x[1]))
print(paste('Writing data to', outFile))
write.table(exonsReadDf, file = outFile, sep = '\t', quote = F, row.names=F)
print('...Done')
|
d5246bc0088f193c16c2811b550d4483dcb629e0
|
f4379ce632a4f110028c8a26a43565b5f2d28585
|
/plot.R
|
bff91eca7d18c68a1b43140f15d0555410502b45
|
[] |
no_license
|
yu2peng/ExData_Plotting1
|
1e1dc5df29a73b285539a9c98c0ea322c87ce090
|
0ef69d2db636b30b3388001814c3de87d05d2957
|
refs/heads/master
| 2021-01-18T03:12:03.853318
| 2014-07-13T10:58:12
| 2014-07-13T10:58:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,593
|
r
|
plot.R
|
# Exploratory Data Analysis: household electric power consumption on
# 2007-02-01 and 2007-02-02.  Produces plot1.png through plot4.png (480x480).
# read data and keep only the two target days
power <- read.csv("household_power_consumption.txt", sep=";", stringsAsFactors = FALSE)
power <- power[power$Date=="1/2/2007" | power$Date=="2/2/2007",]
head(power)
str(power)
summary(power)
# convert measurement columns to numeric; "?" missing-value markers become
# NA (with a coercion warning)
power$Global_active_power <- as.numeric(power$Global_active_power)
power$Global_reactive_power <- as.numeric(power$Global_reactive_power)
power$Voltage <- as.numeric(power$Voltage)
power$Global_intensity <- as.numeric(power$Global_intensity)
power$Sub_metering_1 <- as.numeric(power$Sub_metering_1)
power$Sub_metering_2 <- as.numeric(power$Sub_metering_2)
power$Sub_metering_3 <- as.numeric(power$Sub_metering_3)
# plot1: histogram of global active power
png(filename = "plot1.png",width = 480, height = 480)
hist(power$Global_active_power, col="red", xlab="Global Active Power (kilowatts)", main="Global Active Power")
dev.off()
# plot2: global active power over time
Sys.setlocale("LC_TIME", "C")  # English weekday names
# FIX: weekdays() has no method for character input, so the original
# weekdays(power$Date) errored (the as.Date conversion was commented out).
# Parse the date explicitly here; power$Date itself is left as character
# because strptime() below still needs the raw "%d/%m/%Y" form.  The
# redundant character-paste assignment to datetime (immediately overwritten)
# has been dropped.
power$weekday <- weekdays(as.Date(power$Date, "%d/%m/%Y"))
power$datetime <- strptime(paste(power$Date, power$Time), "%d/%m/%Y %H:%M:%S")
png(filename = "plot2.png",width = 480, height = 480)
with(power, plot(datetime, Global_active_power,type='l', xlab="", ylab="Global Active Power (kilowatts)"))
dev.off()
# plot3: the three energy sub-metering series on one set of axes
png(filename = "plot3.png",width = 480, height = 480)
with(power, plot(datetime, Global_intensity, type="n", xlab="", ylab="Energy sub metering", ylim=c(0, 38)))
with(subset(power, Sub_metering_1>0), lines(datetime, Sub_metering_1))
with(subset(power, Sub_metering_2>0), lines(datetime, Sub_metering_2, col="red"))
with(subset(power, Sub_metering_3>0), lines(datetime, Sub_metering_3, col="blue"))
legend("topright", legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), col=c("black","red", "blue"), lty=1)
dev.off()
# plot4: 2x2 panel combining the four views
png(filename = "plot4.png",width = 480, height = 480)
par(mfrow=c(2,2))
with(power, {
plot(datetime, Global_active_power,type="l", xlab="", ylab="Global Active Power")
plot(datetime, Voltage, type="l", ylab="Voltage")
plot(datetime, Global_intensity, type="n", xlab="", ylab="Energy sub metering", ylim=c(0, 38))
with(subset(power, Sub_metering_1>0), lines(datetime, Sub_metering_1))
with(subset(power, Sub_metering_2>0), lines(datetime, Sub_metering_2, col="red"))
with(subset(power, Sub_metering_3>0), lines(datetime, Sub_metering_3, col="blue"))
legend("topright", legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), col=c("black","red", "blue"), lty=1, bty="n")
plot(datetime, Global_reactive_power, type="l")
})
dev.off()
|
ff0ebca963e29d134cc7a311858b2441f319e6ca
|
89ef1d10dbcf03e7ac7a1dcf1ae04643de662cc4
|
/R/outbreaker_data.R
|
c9b9df691b507ee963ed8e496ecdbdae8f33695d
|
[] |
no_license
|
cran/outbreaker2
|
b13ddc66d650b1ffaf9de46dcdf4776a5fc41c1c
|
45597077c99506c0415bd280a0b6788f3ee23e2e
|
refs/heads/master
| 2022-06-08T12:11:11.054415
| 2022-05-23T11:20:02
| 2022-05-23T11:20:02
| 112,343,503
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,418
|
r
|
outbreaker_data.R
|
#' Process input data for outbreaker
#'
#' This function performs various checks on input data given to outbreaker. It
#' takes a list of named items as input, performs various checks, set defaults
#' where arguments are missing, and return a correct list of data input. If no
#' input is given, it returns the default settings.
#'
#' Acceptable arguments for ... are:
#' \describe{
#' \item{dates}{dates a vector indicating the collection dates, provided either as
#' integer numbers or in a usual date format such as \code{Date} or
#' \code{POSIXct} format. By convention, zero will indicate the oldest date. If
#' the vector is named, the vector names will be used for matching cases to
#' contact tracing data and labelled DNA sequences.}
#'
#' \item{dna}{the DNA sequences in \code{DNAbin} format (see
#' \code{\link[ape]{read.dna}} in the ape package); this can be imported from a
#' fasta file (extension .fa, .fas, or .fasta) using \code{adegenet}'s function
#' \link[adegenet]{fasta2DNAbin}.}
#'
#' \item{ctd}{the contact tracing data provided as a matrix/dataframe of two
#' columns, indicating a reported contact between the two individuals whose ids
#' are provided in a given row of the data, or an epicontacts object. In the case
#' of the latter, linelist IDs will be used for matching dates and DNA
#' sequences}
#'
#' \item{w_dens}{a vector of numeric values indicating the generation time
#' distribution, reflecting the infectious potential of a case t = 1, 2, ...
#' time steps after infection. By convention, it is assumed that
#' newly infected patients cannot see new infections on the same time step. If not
#' standardized, this distribution is rescaled to sum to 1.}
#'
#' \item{f_dens}{similar to \code{w_dens}, except that this is the distribution
#' of the colonization time, i.e. the time interval during which the pathogen can
#' be sampled from the patient.}}
#'
#' @param ... a list of data items to be processed (see description)
#'
#' @param data optionally, an existing list of data item as returned by \code{outbreaker_data}.
#'
#' @author Thibaut Jombart (\email{thibautjombart@@gmail.com})
#'
#' @export
#'
#' @examples
#'
#' x <- fake_outbreak
#' outbreaker_data(dates = x$sample, dna = x$dna, w_dens = x$w)
#'
outbreaker_data <- function(..., data = list(...)) {
## SET DEFAULTS ##
defaults <- list(dates = NULL,
w_dens = NULL,
f_dens = NULL,
dna = NULL,
ctd = NULL,
N = 0L,
L = 0L,
D = NULL,
max_range = NA,
can_be_ances = NULL,
log_w_dens = NULL,
log_f_dens = NULL,
contacts = NULL,
C_combn = NULL,
C_nrow = NULL,
ids = NULL,
has_dna = logical(0),
id_in_dna = integer(0))
## MODIFY DATA WITH ARGUMENTS ##
data <- modify_defaults(defaults, data, FALSE)
## Set up case ids
if(is.null(data$ids)) {
if(!is.null(names(data$dates))) {
data$ids <- names(data$dates)
} else if(!is.null(data$ctd) & inherits(data$ctd, "epicontacts")){
data$ids <- as.character(data$ctd$linelist$id)
} else {
data$ids <- as.character(seq_along(data$dates))
}
}
## CHECK DATA ##
## CHECK DATES
if (!is.null(data$dates)) {
if (inherits(data$dates, "Date")) {
data$dates <- data$dates-min(data$dates)
}
if (inherits(data$dates, "POSIXct")) {
data$dates <- difftime(data$dates, min(data$dates), units="days")
}
if (inherits(data$dates, "numeric") && any(data$dates %% 1 != 0)) {
warning("Rounding non-integer dates to nearest integer")
}
data$dates <- as.integer(round(data$dates))
data$N <- length(data$dates)
data$max_range <- diff(range(data$dates))
}
## CHECK W_DENS
if (!is.null(data$w_dens)) {
if (any(data$w_dens<0)) {
stop("w_dens has negative entries (these should be probabilities!)")
}
if (any(!is.finite(data$w_dens))) {
stop("non-finite values detected in w_dens")
}
## Remove trailing zeroes to prevent starting with -Inf temporal loglike
if(data$w_dens[length(data$w_dens)] < 1e-15) {
final_index <- max(which(data$w_dens > 1e-15))
data$w_dens <- data$w_dens[1:final_index]
}
## add an exponential tail summing to 1e-4 to 'w'
## to cover the span of the outbreak
## (avoids starting with -Inf temporal loglike)
if (length(data$w_dens) < data$max_range) {
length_to_add <- (data$max_range-length(data$w_dens)) + 10 # +10 to be on the safe side
val_to_add <- stats::dexp(seq_len(length_to_add), 1)
val_to_add <- 1e-4*(val_to_add/sum(val_to_add))
data$w_dens <- c(data$w_dens, val_to_add)
}
## standardize the mass function
data$w_dens <- data$w_dens / sum(data$w_dens)
data$log_w_dens <- matrix(log(data$w_dens), nrow = 1)
}
## CHECK F_DENS
if (!is.null(data$w_dens) && is.null(data$f_dens)) {
data$f_dens <- data$w_dens
}
if (!is.null(data$f_dens)) {
if (any(data$f_dens<0)) {
stop("f_dens has negative entries (these should be probabilities!)")
}
if (any(!is.finite(data$f_dens))) {
stop("non-finite values detected in f_dens")
}
data$f_dens <- data$f_dens / sum(data$f_dens)
data$log_f_dens <- log(data$f_dens)
}
## CHECK POTENTIAL ANCESTRIES
if(!is.null(data$dates)) {
## get temporal ordering constraint:
## canBeAnces[i,j] is 'i' can be ancestor of 'j'
## Calculate the serial interval from w_dens and f_dens
.get_SI <- function(w_dens, f_dens) {
wf <- stats::convolve(w_dens, rev(f_dens), type = 'open')
conv <- stats::convolve(rev(f_dens), rev(wf), type = 'open')
lf <- length(f_dens)
lw <- length(w_dens)
return(data.frame(x = (-lf + 2):(lw + lf - 1), d = conv))
}
## Check if difference in sampling dates falls within serial interval
## This allows for i to infect j even if it sampled after (SI < 0)
.can_be_ances <- function(date1, date2, SI) {
tdiff <- date2 - date1
out <- sapply(tdiff, function(i) return(i %in% SI$x))
return(out)
}
SI <- .get_SI(data$w_dens, data$f_dens)
data$can_be_ances <- outer(data$dates,
data$dates,
FUN=.can_be_ances,
SI = SI) # strict < is needed as we impose w(0)=0
diag(data$can_be_ances) <- FALSE
}
## CHECK DNA
if (!is.null(data$dna)) {
if (!inherits(data$dna, "DNAbin")) stop("dna is not a DNAbin object.")
if (!is.matrix(data$dna)) data$dna <- as.matrix(data$dna)
## get matrix of distances
data$L <- ncol(data$dna) # (genome length)
data$D <- as.matrix(ape::dist.dna(data$dna, model="N")) # distance matrix
storage.mode(data$D) <- "integer" # essential for C/C++ interface
## get matching between sequences and cases
if (is.null(rownames(data$dna))) {
if (nrow(data$dna) != data$N) {
msg <- sprintf(paste("numbers of sequences and cases differ (%d vs %d):",
"please label sequences"),
nrow(data$dna), data$N)
stop(msg)
}
## These need to be indices
rownames(data$D) <- colnames(data$D) <- seq_len(data$N)
## These need to match dates/ctd ids
rownames(data$dna) <- data$ids
}
data$id_in_dna <- match(data$ids, rownames(data$dna))
if(any(is.na(match(rownames(data$dna), data$ids)))) {
stop("DNA sequence labels don't match case ids")
}
} else {
data$L <- 0L
data$D <- matrix(integer(0), ncol = 0, nrow = 0)
data$id_in_dna <- rep(NA_integer_, data$N)
}
data$has_dna <- !is.na(data$id_in_dna)
## CHECK CTD
if (!is.null(data$ctd)) {
ctd <- data$ctd
if (inherits(ctd, c("matrix", "data.frame"))) {
## prevent factor -> integer conversion
ctd <- apply(ctd, 2, as.character)
if (!is.matrix(ctd)) {
ctd <- as.matrix(ctd)
}
if(ncol(ctd) != 2) {
stop("ctd must contain two columns")
}
} else if(inherits(ctd, "epicontacts")) {
## prevent factor -> integer conversion
ctd <- apply(ctd$contacts[c("from", "to")], 2, as.character)
} else {
stop("ctd is not a matrix, data.frame or epicontacts object")
}
unq <- unique(as.vector(ctd[,1:2]))
not_found <- unq[!unq %in% data$ids]
if (length(not_found) != 0) {
not_found <- sort(unique(not_found))
stop(paste("Individual(s)", paste(not_found, collapse = ", "),
"in ctd are unknown cases (idx < 1 or > N")
)
}
contacts <- matrix(0, data$N, data$N)
mtch_1 <- match(ctd[,1], data$ids)
mtch_2 <- match(ctd[,2], data$ids)
contacts[cbind(mtch_2, mtch_1)] <- 1
data$contacts <- contacts
data$C_combn <- data$N*(data$N - 1)
data$C_nrow <- nrow(ctd)
} else {
data$contacts <- matrix(integer(0), ncol = 0, nrow = 0)
}
## output is a list of checked data
return(data)
}
|
8f0004b743244688cca22f4ce0a4bf7578875382
|
ffe7b55a975d6ea398e2a10a39ee3e3d41273ced
|
/prediction_file.R
|
4891d3e0c722ba2ce3ccffe01a85dfb0af145711
|
[] |
no_license
|
NikhilsCodeHub/PredMacLearn-035
|
25e7514d748f02f8e60cb5a4b1de805969ac4df4
|
23ca7fc7d00177ca76fcfc38283fd302e10d132c
|
refs/heads/master
| 2021-01-10T15:03:47.050214
| 2015-12-26T18:53:05
| 2015-12-26T18:53:05
| 48,502,843
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 550
|
r
|
prediction_file.R
|
#### Prediction over the held-out test data
# NOTE(review): `d_fit3` (a fitted classification model) and `pml_test` (the
# 20-row test set) are assumed to exist in the workspace from an earlier
# training script -- confirm before running.
pred_pml_test <- predict(d_fit3, pml_test, type="class")
## Observed output: predicted class for each of the 20 test cases
## 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
## B A B A A E D B A A B C B A E E A B B B
## 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
## B A B A A E D B A A B C B A E E A B B B
# Write each predicted answer to its own file "problem_id_<i>.txt" in the
# current working directory, as required by the course submission format.
#
# x: character (or coercible) vector of answers; element i goes to file i.
# Returns: invisible(NULL); called for its file-writing side effect.
pml_write_files <- function(x) {
  n <- length(x)
  # seq_len() (not 1:n) so an empty input writes no files instead of
  # iterating over the bogus 1:0 sequence and emitting NA files.
  for (i in seq_len(n)) {
    filename <- paste0("problem_id_", i, ".txt")
    write.table(x[i], file = filename, quote = FALSE,
                row.names = FALSE, col.names = FALSE)
  }
  invisible(NULL)
}
|
5387b40a363ba0fd9d6b43fa83c6fdcb6b6998cd
|
20c4d95568d991915590ce548020d089cfb1b955
|
/man/gmm_replication.Rd
|
55c1601d8c62c0ce92aa68fe4c806e60a17497aa
|
[] |
no_license
|
t-sager/pubias
|
e690a2c6987b874e2fe493f226bbf66e35b5a669
|
b31c49fc199153f63ae4b05595a2b3371f9ae014
|
refs/heads/master
| 2023-05-13T13:51:15.342523
| 2021-05-06T07:36:03
| 2021-05-06T07:36:03
| 355,796,698
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,599
|
rd
|
gmm_replication.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gmm_replication.R
\name{gmm_replication}
\alias{gmm_replication}
\title{Computing the publication probability in replication studies}
\usage{
gmm_replication(Z, sigmaZ2, symmetric, cluster_ID, cutoffs, studynames)
}
\arguments{
\item{Z}{A \verb{n x 2} matrix where the first (second) column contains the standardized original estimates (replication estimates), where \code{n} is the number of estimates.}
\item{sigmaZ2}{A \verb{n x 1} matrix containing the standard errors (se_replication divided by se_original) of the estimates, where \code{n} is the number of estimates.}
\item{symmetric}{If set to \code{1}, the publication probability is assumed to be symmetric around zero. If set to \code{0}, asymmetry is allowed.}
\item{cluster_ID}{A \verb{n x 1} matrix containing IDs going from 1 to \code{n}, where \code{n} is the number of estimates.}
\item{cutoffs}{A matrix containing the thresholds for the steps of the publication probability. Should be strictly increasing column
vector of size \verb{k x 1} where \code{k} is the number of cutoffs.}
\item{studynames}{A vector of type \code{character} of length \code{n} containing the study names, in the same order as the rows of \code{Z}.}
}
\value{
Returns a list object with the publication probability (\code{Psihat}), its variance (\code{Varhat}) and robust standard errors (\code{se_robust}).
}
\description{
\code{gmm_replication()} calculates the publication probability, its variance and robust standard errors
of meta-analyses by a GMM approach.
}
|
afa9a1ad1cdbfba988254cf8b8b3a66c38fd39fc
|
4ce79b9b9d9d6b30576b3a05898d65f1c455cd25
|
/examples/ex1.R
|
0e6025a524274256bd577f8d3d3f576ac106caee
|
[] |
no_license
|
beartell/R2Time
|
7e00035619c13bc6f439e723e5bea762aca75366
|
19f4faf770c5a568b19d555293c828d56097e091
|
refs/heads/master
| 2021-05-31T05:45:22.565033
| 2016-05-03T09:04:32
| 2016-05-03T09:04:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,958
|
r
|
ex1.R
|
#########################################################################################
# Author: Bikash Agrawal
# Date: 06-07-2013
# Description: Example script that reads OpenTSDB time-series data and runs a
# MapReduce job through the R2Time/RHIPE stack, collecting (timestamp, value)
# pairs per cell.
# NOTE(review): despite the original header mentioning a "mean", the map
# expression below only collects converted values; no mean is computed here.
#########################################################################################
## Load all the necessary libraries
library(r2time)
library(Rhipe)
rhinit() ## Initialize the RHIPE framework.
library(rJava)
.jinit() ## Initialize rJava
r2t.init() ## Initialize the R2Time framework.
library(bitops) ## Bit operations; used for float <-> integer conversions.
tagk = c("host") ## Tag keys. May be a list.
tagv = c("*") ## Tag values. May be a list; multiple values can be separated by a pipe.
metric = 'r2time.load.test' ## Metric name(s) to query.
startdate ='2011/01/16-00:00:00' ## Start date and time of the time series.
enddate ='2014/09/21-04:00:00' ## End date and time of the time series.
output = "/home/bikash/tmp/ex1.1" ## Output path; must be in the HDFS file system.
jobname= "MapReduce job example 1.1" ## Descriptive job name.
mapred <- list(mapred.reduce.tasks=0) ## MapReduce configuration; 0 reduce tasks = map-only job.
rhput("/home/bikash/jar/r2time.jar", "/home/bikash/tmp/r2time.jar")
#rhput("/home/bikash/jar/asynchbase.jar", "/home/bikash/tmp/asynchbase.jar")
jars=c("/home/bikash/tmp/r2time.jar","/home/bikash/tmp/zookeeper.jar", "/home/bikash/tmp/hbase.jar", "/home/bikash/tmp/asynchbase.jar")
# These jars must already be in the HDFS file system. Copy a jar into HDFS
# with the RHIPE rhput command, e.g.:
# rhput("/home/bikash/jar/r2time.jar", "/home/bikash/tmp/r2time.jar")
# rhput("/home/bikash/jar/hbase.jar" , "/home/bikash/tmp/hbase.jar")
# rhput("/home/bikash/jar/zookeeper.jar" , "/home/bikash/tmp/zookeeper.jar")
## Zookeeper configuration: HBase needs the zookeeper quorum defined to read data.
zooinfo=list(zookeeper.znode.parent='/hbase',hbase.zookeeper.quorum='localhost')
## Map expression: converts each raw byte-array cell to a float and recovers
## the real timestamp from the row base time plus the per-cell offset.
map <- expression({
library(bitops)
library(r2time)
library(gtools)
x <-lapply(seq_along(map.values), function(r) {
v <- r2t.toFloat(map.values[[r]][[1]])
k1<-r2t.getRowBaseTimestamp(map.keys[[r]])
k <-r2t.getRealTimestamp(k1,map.values[[r]])
a <- list(k,v)
#rhcollect(k,v)
#rhcollect(k1,map.values[[r]])
})
rhcollect(1,x)
})
## Run the job in R2Time (reduce=0: map-only).
r2t.job(table='tsdb',sdate=startdate, edate=enddate, metrics=metric, tagk=tagk, tagv=tagv, jars=jars, zooinfo=zooinfo,
output=output, jobname=jobname, mapred=mapred, map=map, reduce=0)
## Read the output back from HDFS.
out1 <- rhread(output)
out1
#########################################################################################
|
9faaac01b63cc3beeebd59ebc062c080438548a3
|
0bcecf297a8bc807a02666cf835d734b2bb8c8c7
|
/Toyota Corolla.R
|
5ebfcf180e2faeb1d3ed8e607d1a98268b0edc99
|
[] |
no_license
|
Bharathyramakrishnan/Machine-Learning-Algorithms
|
4e4d0aa79d157fc5a28e3939e57f3b882fb081de
|
41c732757c264df764faffd665998f14d7f89ca5
|
refs/heads/master
| 2020-04-29T03:18:40.055082
| 2019-03-27T13:47:55
| 2019-03-27T13:47:55
| 175,804,279
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,304
|
r
|
Toyota Corolla.R
|
#3. Consider only the below columns and prepare a prediction model for predicting Price.
#Corolla<-Corolla[c("Price","Age_08_04","KM","HP","cc","Doors","Gears","Quarterly_Tax","Weight")]
# Output -> Continuous: Price; Inputs -> multiple: Age_08_04,KM,HP,cc,Doors,Gears,Quarterly_Tax,Weight
# Continuous output + multiple inputs -> multiple linear regression model.
# Read the CSV file for Corolla (interactive file picker).
Corolla <- read.csv(file.choose())
attach(Corolla) # to manipulate the specific dataset.
# There are 38 variables but we consider only 9, so bind the required variables.
Corolla1 <- cbind(Price,Age_08_04,KM,HP,cc,Doors,Gears,Quarterly_Tax,Weight)
Corolla_1 <- as.data.frame(Corolla1)
class(Corolla_1)
View(Corolla_1)
# Exploratory data analysis:
# 1. Measures of central tendency
# 2. Measures of dispersion
# 3. Third moment business decision
# 4. Fourth moment business decision
# 5. Probability distributions of variables
# 6. Graphical representations (Histogram, Box plot, Dot plot, Stem & Leaf plot, Bar plot, etc.)
summary(Corolla_1)
attach(Corolla_1)# To avoid repeating the dataset reference
windows()
# Correlation between output (Price) & inputs (Age_08_04,KM,HP,cc,Doors,Gears,Quarterly_Tax,Weight) - SCATTER DIAGRAM
pairs(Corolla_1)
# Correlation coefficient - strength & direction of correlation
cor(Corolla_1)
# Quarterly_Tax and Weight are moderately correlated
### Partial correlation matrix - pure correlation between the variables
#install.packages("corpcor")
library(corpcor)
cor2pcor(cor(Corolla_1))
library(psych)
pairs.panels(Corolla_1)
# The linear model of interest with all the variables
cp <- lm(Price~Age_08_04+KM+HP+cc+Doors+Gears+Quarterly_Tax+Weight,data = Corolla_1)
summary(cp)
#Multiple R-squared: 0.8638, Adjusted R-squared: 0.863 , p-value: < 2.2e-16
# But individual p-values for cc and Doors are insignificant,
# so fit a reduced model without cc and Doors.
cpr <- lm(Price~Age_08_04+KM+HP+Gears+Quarterly_Tax+Weight,data = Corolla_1)
summary(cpr)
# Check whether any influential observation affects the R-squared
library(mvinfluence)
library(car)
influence.measures(cp)
influenceIndexPlot(cp, id.n=3) # Index plots of the influence measures
influencePlot(cp, id.n=3)
# Observation 81 is influential, so refit without it and check the R-squared value.
cp1 <- lm(Price~Age_08_04+KM+HP+cc+Doors+Gears+Quarterly_Tax+Weight,data=Corolla_1[-81,])
summary(cp1)
#Multiple R-squared: 0.8694, Adjusted R-squared: 0.8686
#p-value: < 2.2e-16
vif(cp1) # VIF > 10 => collinearity
avPlots(cp1, id.n=2, id.cex=0.7) # Added-variable plots
finalmodel <- lm(Price~Age_08_04+KM+HP+cc+Doors+Gears+Quarterly_Tax+Weight,data = Corolla_1)
summary(finalmodel)
#Multiple R-squared: 0.8636, Adjusted R-squared: 0.863 , p-value: < 2.2e-16
# Since model cp gives a high R-squared value we keep that model and predict.
confint(cp,level=0.95)
Price_Predict <- predict(cp,interval="predict")
Pred_final <- predict(cp)
# Evaluate the model's LINE assumptions (Linearity, Independence, Normality, Equal variance)
plot(cp)
#View(Final)
hist(residuals(cp)) # close to normal distribution
# Residual plots, QQ-plots, std. residuals vs fitted, Cook's distance
qqPlot((cp),id.n=5) # QQ plots of studentized residuals; helps identify outliers
# Observation 81 is an outlier.
library("MASS")
stepAIC(cp)
|
d8f5811b67973616df2455796d3f905f0ef8ebbb
|
209601cd3a949606041ea1902cbfc7d42fa35b0d
|
/bikestations.R
|
651354d1b92f41189e083d36e8965ddea2bbd6ff
|
[] |
no_license
|
tts/mapchallenge
|
11be7ee807b80300d4a187b38da6ec11c8797c75
|
84c61cc71061a21abba39afa21c5f473ebcbd9fb
|
refs/heads/master
| 2020-09-02T06:25:04.996462
| 2020-04-12T13:01:15
| 2020-04-12T13:01:15
| 219,154,769
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,808
|
r
|
bikestations.R
|
# Plot Helsinki city bike stations, highlighting station "Baana" and labelling
# a random sample of 30 other stations on a dark-themed map-like scatter plot.
library(tidyverse)
library(ggrepel)
# Open-data CSV of HSL city bike stations (downloaded to a temp file).
bikestations <- "https://opendata.arcgis.com/datasets/1b492e1a580e4563907a6c7214698720_0.csv"
temp <- tempfile()
download.file(bikestations, temp)
stations <- read.csv(temp, encoding = "UTF-8")
unlink(temp)
stations_cleaned <- stations %>%
# "X.U.FEFF.X" is the first column's name mangled by a UTF-8 byte-order mark.
rename(x = X.U.FEFF.X,
y = Y) %>%
select(x, y, name, id)
# The one station to highlight in red.
baana <- stations_cleaned %>%
filter(name == "Baana")
therest <- stations_cleaned %>%
filter(name != "Baana")
# NOTE(review): sample_n() is random -- add set.seed() for a reproducible plot.
sample <- therest %>%
sample_n(30)
p <- ggplot(data = therest,
aes(x = x, y = y)) +
geom_point(color = "#F5F008") +
geom_point(data = baana, aes(x = x, y = y), colour = "red")
# Label only the sampled stations; blank out all axes/grid for a map look.
p +
geom_text_repel(data = sample,
aes(label = name),
point.padding = 1,
color = "#666658") +
theme(axis.title = element_blank(),
axis.text = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
plot.background = element_rect(color = "black", fill = "black"),
plot.title = element_text(size = 12*1.2, color = "#EAE9CD"),
plot.subtitle = element_text(size = 12*0.8, color = "#EAE9CD"),
plot.caption = element_text(size = 12*0.5, color = "#EAE9CD"),
panel.background = element_rect(fill = "black", color = NA)) +
labs(x = NULL, y = NULL,
title = "Helsinki city bike stations",
subtitle = "The red one is station Baana",
caption = "Source: Helsinki Region Transportβs (HSL) city bicycle stations.\nhttps://hri.fi/data/fi/dataset/hsl-n-kaupunkipyoraasemat\nCreative Commons Attribution 4.0.\n")
# Save the last plot to disk.
ggsave(
"st.png",
width = 35,
height = 25,
dpi = 72,
units = "cm",
device='png'
)
|
0e7a05055e103a8904e7c6b8121ca1fe3763810b
|
abbe9809ce60ee3dc9a1db74b0ec41da9475cd0f
|
/tests/testthat/test_edges_nodes.R
|
d725834bff12afa2c32520b6fc17dea8ae72e886
|
[
"Apache-2.0"
] |
permissive
|
luukvdmeer/sfnetworks
|
e2bf953487038b4cf5c8b625579a15b1c7b27058
|
23a4125030178f5c42ebb48d2547052a31a3cdea
|
refs/heads/main
| 2023-05-10T18:55:53.430210
| 2023-03-22T16:08:27
| 2023-03-22T16:08:27
| 164,439,774
| 305
| 25
|
NOASSERTION
| 2023-08-11T10:43:19
| 2019-01-07T13:52:56
|
R
|
UTF-8
|
R
| false
| false
| 5,723
|
r
|
test_edges_nodes.R
|
library(sf)
library(dplyr)
library(igraph)
# Toy network fixture: 13 points on a small integer grid, connected by 7
# linestrings. l7 is a closed loop (starts and ends at p10), which is what
# produces the Inf/NaN circuity value tested below.
p1 = st_point(c(0, 1))
p2 = st_point(c(1, 1))
p3 = st_point(c(2, 1))
p4 = st_point(c(3, 1))
p5 = st_point(c(4, 1))
p6 = st_point(c(3, 2))
p7 = st_point(c(3, 0))
p8 = st_point(c(4, 3))
p9 = st_point(c(4, 2))
p10 = st_point(c(4, 0))
p11 = st_point(c(5, 2))
p12 = st_point(c(5, 0))
p13 = st_point(c(5, -1))
l1 = st_sfc(st_linestring(c(p1, p2, p3)))
l2 = st_sfc(st_linestring(c(p3, p4, p5)))
l3 = st_sfc(st_linestring(c(p6, p4, p7)))
l4 = st_sfc(st_linestring(c(p8, p11, p9)))
l5 = st_sfc(st_linestring(c(p9, p5, p10)))
l6 = st_sfc(st_linestring(c(p8, p9)))
l7 = st_sfc(st_linestring(c(p10, p12, p13, p10)))
lines = c(l1, l2, l3, l4, l5, l6, l7)
# Reference geometries used by the spatial predicate tests below.
square = st_sfc(st_cast(st_multipoint(c(p6, p7, p12, p11)), "POLYGON"))
point = st_sfc(st_point(c(2, 0)))
net = as_sfnetwork(lines)
## Edge measures: circuity (with both Inf_as_NaN settings), length and
## straight-line displacement, computed once and asserted against known values.
circuity_with_nan = net %>%
activate("edges") %>%
mutate(circuity = edge_circuity(Inf_as_NaN = TRUE)) %>%
pull(circuity)
circuity_with_inf = net %>%
activate("edges") %>%
mutate(circuity = edge_circuity(Inf_as_NaN = FALSE)) %>%
pull(circuity)
# NOTE(review): `length` shadows base::length within this script.
length = net %>%
activate("edges") %>%
mutate(length = edge_length()) %>%
pull(length)
displacement = net %>%
activate("edges") %>%
mutate(disp = edge_displacement()) %>%
pull(disp)
# With spatially implicit edges (edges_as_lines = F), edge_length should fall
# back to the node-to-node straight-line distance.
implicit_length = lines %>%
as_sfnetwork(edges_as_lines = F) %>%
activate("edges") %>%
mutate(length = edge_length()) %>%
pull(length)
test_that("spatial_edge_measures return correct (known) values", {
# The NaN/Inf entry corresponds to the closed-loop edge l7 (zero displacement).
expect_setequal(
round(circuity_with_nan, 6),
c(1.000000, 1.000000, 1.000000, 2.414214, 1.000000, 1.000000, NaN)
)
expect_setequal(
round(circuity_with_inf, 6),
c(1.000000, 1.000000, 1.000000, 2.414214, 1.000000, 1.000000, Inf)
)
expect_setequal(
round(length, 6),
c(2.000000, 2.000000, 2.000000, 2.414214, 2.000000, 1.000000, 3.414214)
)
expect_setequal(
displacement,
c(2, 2, 2, 1, 2, 1, 0)
)
})
test_that("edge_length returns same output as edge_displacement with
spatially implicit edges", {
expect_setequal(
as.vector(displacement),
as.vector(implicit_length)
)
})
## Spatial predicates
# Edge predicates: filter edges of the toy network against the reference
# square/point geometries and check exactly which linestrings survive.
net = net %>%
activate("edges")
edgeint = net %>%
filter(edge_intersects(square))
edgecross = net %>%
filter(edge_crosses(square))
edgecov = net %>%
filter(edge_is_covered_by(square))
edgedisj = net %>%
filter(edge_is_disjoint(square))
edgetouch = net %>%
filter(edge_touches(square))
edgewithin = net %>%
filter(edge_is_within(square))
edgewithindist = net %>%
filter(edge_is_within_distance(point, 1))
test_that("spatial edge predicates return correct edges", {
# Each assertion checks the filtered edge geometries equal the expected
# subset of l1..l7 (elementwise, hence all(diag(...)) on the st_equals matrix).
expect_true(
all(diag(
st_geometry(st_as_sf(edgeint, "edges")) %>%
st_equals(c(l2, l3, l4, l5, l6, l7), sparse = FALSE)
))
)
expect_true(
st_geometry(st_as_sf(edgecross, "edges")) %>%
st_equals(l2, sparse = FALSE)
)
expect_true(
all(diag(
st_geometry(st_as_sf(edgecov, "edges")) %>%
st_equals(c(l3, l5), sparse = FALSE)
))
)
expect_true(
st_geometry(st_as_sf(edgedisj, "edges")) %>%
st_equals(l1, sparse = FALSE)
)
expect_true(
all(diag(
st_geometry(st_as_sf(edgetouch, "edges")) %>%
st_equals(c(l3, l4, l6, l7), sparse = FALSE)
))
)
expect_true(
st_geometry(st_as_sf(edgewithin, "edges")) %>%
st_equals(l5, sparse = FALSE)
)
expect_true(
all(diag(
st_geometry(st_as_sf(edgewithindist, "edges")) %>%
st_equals(c(l1, l2, l3), sparse = FALSE)
))
)
})
test_that("spatial edge predicates always return the total number of nodes", {
# Filtering edges must never drop nodes.
expect_equal(vcount(edgeint), vcount(net))
expect_equal(vcount(edgecross), vcount(net))
expect_equal(vcount(edgecov), vcount(net))
expect_equal(vcount(edgedisj), vcount(net))
expect_equal(vcount(edgetouch), vcount(net))
})
# Node predicates: filtering nodes also drops the edges incident to removed
# nodes, so both the surviving nodes and the surviving edges are asserted.
net = net %>%
activate("nodes")
nodeint = net %>%
filter(node_intersects(square))
nodewithin = net %>%
filter(node_is_within(square))
nodecov = net %>%
filter(node_is_covered_by(square))
nodedisj = net %>%
filter(node_is_disjoint(square))
nodetouch = net %>%
filter(node_touches(square))
nodewithindist = net %>%
filter(node_is_within_distance(point, 1))
test_that("spatial node predicates return correct nodes and edges", {
expect_true(
all(diag(
st_geometry(st_as_sf(nodeint, "nodes")) %>%
st_equals(st_sfc(p5, p6, p7, p9, p10), sparse = FALSE)
))
)
expect_true(
all(diag(
st_geometry(st_as_sf(nodeint, "edges")) %>%
st_equals(c(l3, l5, l7), sparse = FALSE)
))
)
expect_true(
st_geometry(st_as_sf(nodewithin, "nodes")) %>%
st_equals(p5, sparse = FALSE)
)
expect_true(
all(diag(
st_geometry(st_as_sf(nodecov, "nodes")) %>%
st_equals(st_sfc(p5, p6, p7, p9, p10), sparse = FALSE)
))
)
expect_true(
all(diag(
st_geometry(st_as_sf(nodecov, "edges")) %>%
st_equals(c(l3, l5, l7), sparse = FALSE)
))
)
expect_true(
all(diag(
st_geometry(st_as_sf(nodedisj, "nodes")) %>%
st_equals(st_sfc(p1, p3, p8), sparse = FALSE)
))
)
expect_true(
st_geometry(st_as_sf(nodedisj, "edges")) %>%
st_equals(l1, sparse = FALSE)
)
expect_true(
all(diag(
st_geometry(st_as_sf(nodetouch, "nodes")) %>%
st_equals(st_sfc(p6, p7, p9, p10), sparse = FALSE)
))
)
expect_true(
all(diag(
st_geometry(st_as_sf(nodetouch, "edges")) %>%
st_equals(c(l3, l5, l7), sparse = FALSE)
))
)
expect_true(
all(diag(
st_geometry(st_as_sf(nodewithindist, "nodes")) %>%
st_equals(st_sfc(p3, p7), sparse = FALSE)
))
)
})
|
672d022521e820fef3430a53b01043f544f9b078
|
ee6e4d67425b493fe9f67fce795ee058a26cfadd
|
/Problem 7.R
|
55e80f06dcee169711bb389af2e2254162ae1e6e
|
[] |
no_license
|
wkfunk/project-euler
|
d282513c0544a9428f711a9ba22e7867ce091591
|
8f9399bf5fe3d0522b25932fa51e9e65d793f798
|
refs/heads/master
| 2021-01-25T10:06:35.702630
| 2011-12-01T23:25:29
| 2011-12-01T23:25:29
| 2,884,658
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 441
|
r
|
Problem 7.R
|
##############################Problem 7:##############################
# Find the 10001st prime.
findPrimeNumbers <- function(num) {
  # Return the first `num` prime numbers, in ascending order.
  #
  # num: how many primes to generate (positive integer).
  # Returns: numeric vector of length `num`.
  #
  # Fixes two defects in the original version:
  #  - the seed vector was c(2,3,4,7): 4 is not prime and 5 was missing;
  #  - requests for num <= 4 returned the whole 4-element seed unchanged.
  primes <- c(2, 3, 5, 7)
  if (num <= length(primes)) {
    return(primes[seq_len(num)])
  }
  candidate <- 11
  while (length(primes) < num) {
    # Trial division by odd numbers up to sqrt(candidate); even candidates
    # never occur because we start at 11 and step by 2.
    divisors <- seq(3, sqrt(candidate), 2)
    if (all(candidate %% divisors != 0)) {
      primes <- c(primes, candidate)
    }
    candidate <- candidate + 2
  }
  primes
}
# Driver: generate the first 10001 primes and report the 10001st
# (the Project Euler problem 7 answer).
nah = findPrimeNumbers(10001)
nah[10001]
# [1] 104743
|
ecc7301058c533fd0ed41f8bf17585a9e60b5297
|
492be04a3393fcbcf80b2f867d9cda9b7ed16430
|
/10-Fall2020/Projects_StarterCodes/Project3-ImbalancedClassification/lib/test.R
|
4c3d33e85d75c40f53170a675cb61cf833dad8e6
|
[] |
no_license
|
TZstatsADS/ADS_Teaching
|
84276d8c7817718c410f466b749f0d5742348d73
|
2755c04a8fc501d9b285ba59e2e6d341906f220d
|
refs/heads/master
| 2023-09-04T19:33:41.673025
| 2023-09-04T15:51:01
| 2023-09-04T15:51:01
| 49,824,541
| 205
| 415
| null | 2020-10-14T02:55:16
| 2016-01-17T16:08:51
|
HTML
|
UTF-8
|
R
| false
| false
| 365
|
r
|
test.R
|
###########################################################
### Make predictions with test features ###
###########################################################
# Score a fitted model on held-out features.
#
# Args:
#   model:     fitted model object, dispatched through the generic predict()
#   features:  feature matrix for scoring, forwarded as `newx`
#              (glmnet-style predict interface)
#   pred.type: value forwarded to predict()'s `type` argument
#
# Returns: whatever predict() returns for the given model and type.
test <- function(model, features, pred.type) {
  predict(model, newx = features, type = pred.type)
}
# This function is not necessary.
# We put it here just to show the structure.
|
34dd0c964ee385741f3c55378aef95748c3ffccc
|
86f9af514b1cd5f1393da9e07d33fd974cfe0223
|
/R/main_splash_point.R
|
e0e9290b32a3647397905f0fc12f450d1d58bf3f
|
[] |
no_license
|
prenticelab/SPLASH_R_DSandoval
|
dc15abbe0f9b1ded4cbd47677b20592d3ad489be
|
6eba75cf5475924b7bb3279857b31806f665d6aa
|
refs/heads/master
| 2020-07-17T13:16:23.941519
| 2019-09-01T20:17:48
| 2019-09-01T20:17:48
| 206,026,816
| 0
| 0
| null | null | null | null |
WINDOWS-1252
|
R
| false
| false
| 16,405
|
r
|
main_splash_point.R
|
#' splash.point
#'
#' Run the SPLASH water-balance algorithm for a single point: spin up the
#' soil-moisture and snow state, then simulate every calendar year present in
#' the daily forcing series, chaining each year's end state into the next.
#'
#' @param sw_in xts series of daily shortwave/sunshine forcing (per the
#'   rspin_up header below this is a fraction of sunshine hours -- TODO confirm)
#' @param tc xts series of mean daily air temperature (deg C)
#' @param pn xts series of daily precipitation; its time index drives the run
#' @param lat latitude (degrees)
#' @param elev elevation (m)
#' @param slop slope, passed through to rspin_up/run_one_year
#' @param asp aspect, passed through to rspin_up/run_one_year
#' @param soil_data soil physical data, passed through
#' @param Au passed through (presumably upslope contributing area -- confirm)
#' @param resolution passed through (presumably spatial resolution -- confirm)
#' @return an xts object of daily results indexed like \code{pn}
#' @import Rcpp
#' @import xts
#' @keywords splash
#' @export
splash.point<-function(sw_in, tc, pn, lat,elev,slop,asp,soil_data,Au,resolution){
# require(xts)
# Extract time info from data
# year(s) covered by the precipitation series
y<-as.numeric(unique(format(time(pn),'%Y')))
# number of days in each year (leap-aware via Julian day arithmetic)
ny <- julian_day(y + 1, 1, 1) - julian_day(y, 1, 1)
# time index
ztime<-time(pn)
# time frequency (days between consecutive observations)
time.freq<-abs(as.numeric(ztime[1]-ztime[2], units = "days"))
# NOTE(review): if time.freq >= 2 (i.e. non-daily input) no branch below
# assigns `result`, and xts(result, ztime) at the end will error -- confirm
# that only daily forcing is supported.
if (time.freq<2){
if (length(y)==1){
# Single year: spin up on the year itself, then run it once.
initial<-rspin_up(lat,elev, sw_in, tc, pn, slop,asp, y[1],soil_data,Au,resolution)
result<-run_one_year(lat,elev,slop,asp,sw_in, tc, pn,initial$sm, y[1], initial$snow,soil_data,Au,resolution)
# result<-xts(result,ztime)
result<-do.call(cbind,result)
}
else if(length(y)>1){
# Multiple years: per-year start/end offsets into the daily series.
end<-cumsum(ny)
start<-end+1
result<-list()
# Spin up on the day-of-year climatology (mean across all years).
sw_av<-tapply(sw_in,format(time(sw_in),"%j"),mean, na.rm=TRUE)
tc_av<-tapply(tc,format(time(sw_in),"%j"),mean, na.rm=TRUE)
pn_av<-tapply(pn,format(time(sw_in),"%j"),mean, na.rm=TRUE)
# initial<-rspin_up(lat,elev, sw_in[1:ny[1]], tc[1:ny[1]], pn[1:ny[1]], slop,asp, y[1],soil_data,Au,resolution)
initial<-rspin_up(lat,elev, sw_av, tc_av, pn_av, slop,asp, y[1],soil_data,Au,resolution)
result[[1]]<-run_one_year(lat,elev,slop,asp,sw_in[1:ny[1]], tc[1:ny[1]], pn[1:ny[1]],initial$sm, y[1], initial$snow,
soil_data,Au,resolution)
# Each subsequent year starts from the previous year's final soil
# moisture (wn) and snowpack.
for (i in 2:length(y)){
stidx<-i-1
# correct for leap years
result[[i]]<-run_one_year(lat,elev,slop,asp, sw_in[start[stidx]:end[i]], tc[start[stidx]:end[i]], pn[start[stidx]:end[i]],
result[[stidx]]$wn,y[i],result[[stidx]]$snow,soil_data,Au,resolution)
}
result<-lapply(result,FUN=as.data.frame)
result<-do.call(rbind,result)
}
}
# order results as a time series on the original daily index
result<-xts(result,ztime)
return(result)
}
# require(Rcpp)
# Load the compiled C++ SPLASH routines exposed via an Rcpp module.
Rcpp::loadModule("splash_module", TRUE)
soil_hydro<-function(sand, clay, OM, fgravel=0,bd=NA, ...) {
# Hydrophysics V2
# ************************************************************************
# Name: soil_hydro
# Input: - float, sand, (percent)
# - float, clay, (percent)
# - float, OM Organic Matter (percent)
# - float,fgravel, (percent-volumetric)
# - float, bd, bulk density (g/cm3); estimated from texture when NA
# Output: list:
# - float, FC, (volumetric fraction)
# - float, WP (volumetric fraction)
# - float,SAT, (volumetric fraction)
# - float, AWC (volumetric fraction)
# - float,Ksat, Saturated hydraulic conductivity/infiltration capacity(mm/hr)
# - float, A (Coefficient)
# - float, B (Clapp and Hornberger (1978) pore-size distribution index)
# - float, RES, residual water content; bubbling_p, bubbling pressure (mmH2O)
# - float, VG_alpha, VG_n, van Genuchten parameters
# Features: calculate some soil hydrophysic characteristics
# Ref: Saxton, K.E., Rawls, W.J., 2006. Soil Water Characteristic Estimates
# by Texture and Organic Matter for Hydrologic Solutions.
# Soil Sci. Soc. Am. J. 70, 1569. doi:10.2136/sssaj2005.0117
# ************************************************************************
results<-list()
# testing
# sand<-60
# clay<-30
# silt<-100-sand-clay
# OM<-10
# end test
# get fractions
sand<-sand/100
clay<-clay/100
OM<-OM/100
fgravel<-fgravel/100
# reference depth (cm) used by the bulk-density pedotransfer below
depth<-30
# particle density from organic/mineral mixture
dp<-1/((OM/1.3)+((1-OM)/2.65))
if(is.na(bd)){
bd<-(1.5 + (dp-1.5-1.10*(1 - clay))*(1-exp(-0.022*depth)))/(1+6.27*OM)
}
sat<-1-(bd/dp)
# fc<-(sat/bd)*(0.565 + (0.991 - 0.565)*clay^0.5)*exp(-(0.103*sand - 0.785* OM)/(sat/bd))
# NOTE(review): these coefficients differ from the commented-out published
# form above -- presumably a recalibration; confirm against the source.
fc<-(sat/bd)*(0.3366685 + (1.417544 - 0.3366685)*clay^0.5)*exp(-(0.03320495*sand - 0.2755312* OM)/(sat/bd))
# fc<-
# clamp field capacity into a plausible range (negative -> 0.1, cap at 1)
fc[fc<0]<-0.1
fc[fc>1]<-1
wp<- fc*(0.1437904 + (0.8398534 - 0.1437904)*clay^0.5)
L_10_Ksat<- -2.793574+3.12048*log10(dp-bd)+4.358185*sand
ksat<-10^L_10_Ksat
# to mm/h
ksat<-ksat*10
moist_fvol33init<-0.278*sand+0.034*clay+0.022*OM-0.018*(sand*OM)-0.027*(clay*OM)-0.584*(sand*clay)+0.078
moist_fvol33<-moist_fvol33init+(0.636*moist_fvol33init-0.107)
# get parameters for the Brooks-Corey eqn from Saxton 2006
coef_B<-(log(1500)-log(33))/(log(fc)-log(wp))
coef_A<-exp(log(33)+coef_B*log(fc))
coef_lambda<-1/coef_B
# Ksat<-1930*(SAT_fvol-FC_fvol)^(3-coef_lambda)
bub_init<--21.6*sand-27.93*clay-81.97*moist_fvol33+71.12*(sand*moist_fvol33)+8.29*(clay*moist_fvol33)+14.05*(sand*clay)+27.16
bubbling_p<-bub_init+(0.02*bub_init^2-0.113*bub_init-0.7)
# 101.97162129779 converts from KPa to mmH2O
bubbling_p<-bubbling_p*-101.97162129779
# error in empirical fitting: a positive matric potential is not possible
# bubbling_p<-ifelse(bubbling_p>0,bubbling_p*-1,bubbling_p)
bubbling_p[bubbling_p>0]<-bubbling_p[bubbling_p>0]*-1
# residual water content for the BC eqn, Rawls, 1985 (back to percent units)
sand<-sand*100
clay<-clay*100
silt<-100-sand-clay
OM<-OM*100
# Ksat<-10*2.54*10^(-0.6+0.012*sand-0.0064*clay)
RES<--0.018+0.0009*sand+0.005*clay+0.029*sat -0.0002*clay^2-0.001*sand*sat-0.0002*clay^2*sat^2+0.0003*clay^2*sat -0.002*sat^2*clay
# parameters for the van Genuchten eqn (Woesten-style pedotransfer)
topsoil<-1
alpha<-exp(-14.96 + 0.03135*clay + 0.0351*silt + 0.646*OM +15.29*dp - 0.192*topsoil -4.671*dp^2- 0.000781*clay^2 - 0.00687*OM^2 + 0.0449/OM + 0.0663*log(silt) + 0.1482*log(OM) - 0.04546*dp *silt - 0.4852*dp*OM + 0.00673*topsoil*clay)
n<-1.0+exp(-25.23 - 0.02195*clay + 0.0074*silt - 0.1940*OM + 45.5*dp - 7.24*dp^2 +0.0003658*clay^2 + 0.002885*OM^2 -12.81/dp - 0.1524/silt - 0.01958/OM - 0.2876*log(silt) - 0.0709*log(OM) -44.6*log(dp) - 0.02264*dp*clay + 0.0896*dp*OM +0.00718*topsoil*clay)
m<-1-(1/n)
# volumetric outputs are corrected for the gravel fraction
results$SAT<-sat*(1-fgravel)
results$FC<-fc*(1-fgravel)
results$WP<-wp*(1-fgravel)
results$bd<-bd
results$AWC<-(fc-wp)
results$Ksat<-ksat
results$A<-coef_A
results$B<-coef_B
results$RES<-RES*(1-fgravel)
results$bubbling_p<-bubbling_p
results$VG_alpha<-alpha
results$VG_n<-n
# results$VG_m<-m
return(results)
}
julian_day <- function(y, m, i) {
  # ************************************************************************
  # Name: julian_day
  # Inputs: - double, year (y)
  #         - double, month (m)
  #         - double, day of month, may be fractional (i)
  # Returns: double, Julian day number
  # Features: Converts a Gregorian calendar date to a Julian day number
  #           (a consecutive count of days; unrelated to the Julian
  #           calendar). Valid for dates after -4712 January 1 (jde >= 0).
  # Ref: Eq. 7.1, J. Meeus (1991), Chapter 7 "Julian Day",
  #      Astronomical Algorithms
  # ************************************************************************
  # Treat January/February as months 13/14 of the preceding year so the
  # month-length term below works across the leap day.
  if (m <= 2) {
    y <- y - 1
    m <- m + 12
  }
  century <- floor(y / 100)
  # Gregorian calendar correction for the skipped centurial leap years.
  greg_corr <- 2 - century + floor(century / 4)
  floor(365.25 * (y + 4716)) + floor(30.6001 * (m + 1)) + i + greg_corr - 1524.5
}
dsin <- function(d) {
  # ************************************************************************
  # Name: dsin
  # Inputs: double (d), angle in degrees (vectorized)
  # Returns: double, sine of the angle
  # Features: Degree-argument wrapper around sin(): converts degrees to
  #           radians, then applies the base trigonometric function.
  # ************************************************************************
  deg_to_rad <- pi / 180
  sin(d * deg_to_rad)
}
cum.interp<-function(x.months,y){
# ************************************************************************
# Name: cum.interp
# Inputs:
# x.months ..... double, monthly totals of a cumulative variable
# y ....... year
# Returns: double, daily values (length = number of days in year y)
# Features: Converts monthly totals to mean daily rates (total / days in
#           month) and linearly interpolates them to a daily series.
#           Returns all-NA when fewer than 2 months are non-missing.
# Depends: julian_day
# ************************************************************************
# number of days in year y (leap-aware)
ny <- julian_day(y + 1, 1, 1) - julian_day(y, 1, 1)
# days in each calendar month of year y
ndaysmonth<-rep(NA,12)
for(i in 1: 12){ndaysmonth[i]<-julian_day(y,i+1,1)-julian_day(y,i,1)}
# monthly total -> mean daily rate
x.days<-x.months/ndaysmonth
# month-start and daily date grids; interpolation anchors rates at day 1
# of each month and extrapolates flat at the ends (rule=2)
ind.month<-seq(as.Date(paste(y,1,sep="-"),format="%Y-%j"),as.Date(paste(y,ny,sep="-"),format="%Y-%j"), by="month")
ind.day<-seq(as.Date(paste(y,1,sep="-"),format="%Y-%j"),as.Date(paste(y,ny,sep="-"),format="%Y-%j"), by="day")
if (sum(!is.na(x.months)) < 2) {return(rep(NA, ny))}
else {approx(ind.month, x.days, ind.day, method = "linear", rule=2)$y}
}
# Like cum.interp, but for state (average) variables: interpolates the
# monthly values themselves to daily resolution, without dividing by the
# number of days in the month. Returns all-NA when fewer than 2 months
# are non-missing. Depends: julian_day.
avg.interp<-function(x.months,y){
# number of days in year y (leap-aware)
ny <- julian_day(y + 1, 1, 1) - julian_day(y, 1, 1)
ind.month<-seq(as.Date(paste(y,1,sep="-"),format="%Y-%j"),as.Date(paste(y,ny,sep="-"),format="%Y-%j"), by="month")
ind.day<-seq(as.Date(paste(y,1,sep="-"),format="%Y-%j"),as.Date(paste(y,ny,sep="-"),format="%Y-%j"), by="day")
if (sum(!is.na(x.months)) < 2) {return(rep(NA, ny))}
else {approx(ind.month, x.months, ind.day, method = "linear", rule=2)$y}
}
frain_func<-function(tc,Tt,Tr,y,ny=NULL){
# ************************************************************************
# Name: frain_func
# Inputs:
# tc ..... double, daily air temperature (deg C); a full-year vector, or a
#          scalar together with day-of-year `ny`
# Tt ..... double, threshold temperature where 50% of precipitation falls
#          as rain
# Tr ..... double, range of temperatures where both rainfall and snowfall
#          can occur, in deg C (typically around 13 deg C)
# y ...... year
# ny ..... day of year; only used when tc is a scalar
# Returns: list(frain, Ttm): fraction of precipitation falling as rain
#          (clamped to [0,1]) and the monthly-adjusted threshold temperature
# Features: calculates the rain fraction of precipitation, accounting for
#           monthly variation of the threshold and range parameters
# Depends: julian_day, dsin
# Ref: Kienzle, 2008
# ************************************************************************
# Build a month index, one entry per tc value.
if(length(tc)>1){
ny <- julian_day(y + 1, 1, 1) - julian_day(y, 1, 1)
ndaysmonth<-rep(NA,12)
for(i in 1: 12){ndaysmonth[i]<-julian_day(y,i+1,1)-julian_day(y,i,1)}
m_ind<-rep(1,ndaysmonth[1])
for(i in 2:12){m_ind<-c(m_ind,rep(i,ndaysmonth[i]))}
}else{
m_ind<-as.Date(paste(y,ny,sep="-"),format="%Y-%j")
m_ind<-format(m_ind,"%m")
m_ind<-as.numeric(m_ind)
}
# Monthly-varying threshold and range (sinusoidal seasonal adjustment).
Ttm<-Tt+(Tt*dsin((m_ind+2)/1.91))
Trm<-Tr*(0.55+dsin(m_ind+4))*0.6
# Cubic S-curve in (tc - Ttm)/(1.4*Trm); the two branches differ only in the
# sign of the quadratic term. NOTE(review): coefficients presumably follow
# Kienzle (2008) -- confirm against the published equations.
frain<-ifelse(tc<=Ttm,5*((tc-Ttm)/(1.4*Trm))^3+6.76*((tc-Ttm)/(1.4*Trm))^2+3.19*((tc-Ttm)/(1.4*Trm))+0.5, 5*((tc-Ttm)/(1.4*Trm))^3-6.76*((tc-Ttm)/(1.4*Trm))^2+3.19*((tc-Ttm)/(1.4*Trm))+0.5)
# clamp to a valid fraction
frain[frain<0]<-0
frain[frain>1]<-1
result<-list(frain,Ttm)
return(result)
}
# ************************************************************************
snowfall_prob <- function(tc, lat, elev) {
  # ************************************************************************
  # Name:     snowfall_prob
  # Inputs:   tc ..... air temperature, deg C
  #           lat .... latitude, degrees (absolute value is used)
  #           elev ... elevation, m
  # Returns:  probability of snowfall occurrence; values > 0.5 are treated
  #           by callers as "snowfall occurs"
  # Features: Logistic (binomial) model of snowfall occurrence probability.
  # Ref:      Jennings, K.S., Winchell, T.S., Livneh, B., Molotch, N.P.,
  #           2018. Spatial variation of the rain-snow temperature threshold
  #           across the Northern Hemisphere. Nat. Commun. 9, 1-9.
  #           doi:10.1038/s41467-018-03629-7
  # ************************************************************************
  linear.term <- -0.5827 +
    1.319 * as.numeric(tc) -
    4.18E-4 * as.numeric(elev) -
    1.140E-2 * abs(as.numeric(lat))
  # Inverse logit: warmer temperatures push the probability toward 0.
  1 / (1 + exp(linear.term))
}
rspin_up <- function(lat, elev, sw_in, tc, pn, slop, asp, y, soil_data, Au, resolution) {
# ************************************************************************
# Name:     rspin_up
# Purpose:  Spin up the SPLASH model at a single location/pixel: repeat
#           year y until soil moisture reaches equilibrium.
# Inputs:   lat ......... latitude, degrees
#           elev ........ elevation, m
#           sw_in ....... daily (length = days in year) or monthly
#                         (length 12) sunshine/shortwave input; monthly
#                         values are interpolated to daily
#           tc .......... daily or monthly mean air temperature, deg C
#           pn .......... daily or monthly precipitation, mm
#           slop, asp ... slope and aspect of the pixel
#           y ........... year
#           soil_data ... c(sand, clay, OM, fgravel, bd, depth)
#           Au .......... upslope contributing area
#           resolution .. pixel size (squared below to get pixel area)
# Returns:  list of daily totals from SPLASH$spin_up()
# Depends:  julian_day, avg.interp, cum.interp, soil_hydro, snowfall_prob,
#           frain_func, SPLASH (Rcpp module)
# ************************************************************************
# number of days in year y
ny <- julian_day(y + 1, 1, 1) - julian_day(y, 1, 1)
# interpolate monthly inputs to daily series where needed
if(length(sw_in)==12){sw_in<-avg.interp(sw_in,y)}
if(length(tc)==12){tc<-avg.interp(tc,y)}
if(length(pn)==12){pn<-cum.interp(pn,y)}
# get soil hydrophysical characteristics; water contents are scaled by the
# non-gravel fraction and converted to mm over the full soil depth
soil_info<-soil_hydro(sand=soil_data[1],clay=soil_data[2],OM=soil_data[3],fgravel =soil_data[4] ,bd = soil_data[5])
depth <- soil_data[6]
SAT<-soil_info$SAT*(1-soil_data[4]/100)*depth*1000
WP<-soil_info$WP*(1-soil_data[4]/100)*depth*1000
FC<-soil_info$FC*(1-soil_data[4]/100)*depth*1000
RES<-soil_info$RES*(1-soil_data[4]/100)*depth*1000
lambda<-1/soil_info$B
bub_press<-soil_info$bubbling_p
soil_info<-c(SAT,WP,FC,soil_info$Ksat,lambda,depth,bub_press,RES,Au,resolution^2)
# define snowfall occurrence:
# 1. get snowfall probability of occurrence (Jennings et al., 2018)
p_snow<-snowfall_prob(tc,lat,elev)
# 2. get the threshold temperature for snowfall occurrence.
#    Guard against an empty selection: max() of an empty vector is -Inf
#    with a warning. This mirrors the handling in run_one_year(); when no
#    day has p_snow >= 0.5, f_rain below is 1 everywhere, so Tt is unused
#    and 0 is a harmless placeholder.
if(length(tc[p_snow>=0.5])>=1){
Tt<-max(tc[p_snow>=0.5])
}else{
Tt<-0
}
# 3. get the fraction of precipitation falling as rain
f_rain<-ifelse(p_snow>=0.5,frain_func(tc,Tt,13.3,y)[[1]],1)
# partition precipitation into snowfall and rainfall
snowfall<-pn*(1-f_rain)
pn<-pn*f_rain
# initialize the C++ SPLASH object
my_splash = new(SPLASH, lat, elev)
# run spin up
result<-my_splash$spin_up(as.integer(ny), as.integer(y), as.numeric(sw_in), as.numeric(tc),as.numeric(pn),slop,asp,as.numeric(snowfall),soil_info)
return(result)
}
run_one_year <- function(lat,elev,slop,asp,sw_in, tc, pn, wn, y, snow,soil_data,Au,resolution) {
# ************************************************************************
# Name:     run_one_year
# Purpose:  Runs SPLASH at a single location/pixel for one year.
# Inputs:   lat ......... latitude, degrees
#           elev ........ elevation, m
#           slop, asp ... slope and aspect of the pixel
#           sw_in ....... daily or monthly (length 12) sunshine/shortwave
#                         input; monthly values are interpolated to daily
#           tc .......... daily or monthly mean air temperature, deg C
#           pn .......... daily or monthly precipitation, mm
#           wn .......... daily or monthly soil moisture, mm (initial state)
#           y ........... year
#           snow ........ daily snowpack state, mm
#           soil_data ... c(sand, clay, OM, fgravel, bd, depth)
#           Au .......... upslope contributing area
#           resolution .. pixel size (squared below to get pixel area)
# Returns:  list of daily totals from SPLASH$run_one_year(), including
#           radiation, condensation, ET terms, soil moisture and runoff.
# Depends:  julian_day, avg.interp, cum.interp, soil_hydro, snowfall_prob,
#           frain_func, SPLASH (Rcpp module)
# ************************************************************************
# number of days in year y
ny <- julian_day(y + 1, 1, 1) - julian_day(y, 1, 1)
# interpolate monthly inputs to daily series where needed
if(length(sw_in)==12){sw_in<-avg.interp(sw_in,y)}
if(length(tc)==12){tc<-avg.interp(tc,y)}
if(length(pn)==12){pn<-cum.interp(pn,y)}
if(length(wn)==12){wn<-avg.interp(wn,y)}
# reconcile the lengths of the state vectors (wn, snow) with pn, e.g.
# across leap-year boundaries: truncate, or pad by repeating the last value
if(length(wn)>length(pn)){
wn<-wn[1:length(pn)]
snow<-snow[1:length(pn)]
}else if(length(wn)<length(pn)){
wn<-c(wn,wn[length(wn)])
snow<-c(snow,snow[length(snow)])
}
# get soil hydrophysical characteristics; water contents are scaled by the
# non-gravel fraction and converted to mm over the full soil depth
soil_info<-soil_hydro(sand=soil_data[1],clay=soil_data[2],OM=soil_data[3],fgravel =soil_data[4] ,bd = soil_data[5])
depth <- soil_data[6]
SAT<-soil_info$SAT*(1-soil_data[4]/100)*depth*1000
WP<-soil_info$WP*(1-soil_data[4]/100)*depth*1000
FC<-soil_info$FC*(1-soil_data[4]/100)*depth*1000
RES<-soil_info$RES*(1-soil_data[4]/100)*depth*1000
lambda<-1/soil_info$B
bub_press<-soil_info$bubbling_p
soil_info<-c(SAT,WP,FC,soil_info$Ksat,lambda,depth,bub_press,RES,Au,resolution^2)
# define snowfall occurrence:
# 1. get snowfall probability of occurrence (Jennings et al., 2018)
p_snow<-snowfall_prob(tc,lat,elev)
# 2. get the threshold temperature for snowfall occurrence; guard against
#    an empty selection (max() of an empty vector is -Inf with a warning)
if(length(tc[p_snow>=0.5])>=1){
Tt<-max(tc[p_snow>=0.5])
}else{
Tt<-0
}
# 3. get the fraction of precipitation falling as rain
f_rain<-ifelse(p_snow>=0.5,frain_func(tc,Tt,13.3,y)[[1]],1)
# partition precipitation into snowfall and rainfall
snowfall<-pn*(1-f_rain)
pn<-pn*f_rain
# initialize the C++ SPLASH object
my_splash = new(SPLASH, lat, elev)
# run one year of the water balance
daily_totals<-my_splash$run_one_year(as.integer(ny), as.integer(y),as.numeric(sw_in),as.numeric(tc),as.numeric(pn),as.numeric(wn),slop,asp,as.numeric(snow),as.numeric(snowfall),soil_info)
return(daily_totals)
}
|
dda1b72482ddcc368856ab8d1fa7773e4c71a6ba
|
20036893929f5078bf99a066775d5ebcd37cac49
|
/code/Expression_Matrix_Compile.R
|
249d680231bf7b1525d1f244fdc905f0d404dab5
|
[] |
no_license
|
zehualilab/RNAseq_singlecellfetal
|
f88da23b8a1b2ba1d1c47d60f51d28f9cd237d21
|
fb71ea75c29ea114861105e6f52c81a38df038d4
|
refs/heads/master
| 2020-09-11T00:45:28.481841
| 2019-01-17T03:50:49
| 2019-01-17T03:50:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,895
|
r
|
Expression_Matrix_Compile.R
|
# Damon Polioudakis
# 2016-10-12
# Clustering of Drop-seq cells by digital gene expression
# Must load modules:
#  module load gcc/4.9.3
#  module load R/3.3.0
################################################################################
# NOTE(review): rm(list = ls()) wipes the caller's workspace; avoid in
# shared/sourced scripts.
rm(list = ls())
sessionInfo()
### Digital gene expression - merged lanes
# Input directory paths: one digital-gene-expression directory per
# sequencing run, keeping only the lane-merged outputs for Nextera indices
# N700-N729.
inSeqDir <- c("DS-002-011", "DS-003-004", "DS-005-006-007-008", "DS-008-011", "DS-009")
inDirs <- list.dirs(
paste0("../", inSeqDir, "/data/digital_gene_expression/GRCh38_Gencode25"))
inDirs <- inDirs[c(grep("merged", inDirs, perl = TRUE)
)]
inDirs <- inDirs[grep("N7[0-2][0-9]$", inDirs, perl = TRUE)]
# Don't use DS-002 N705
# inDirs <- inDirs[grep("SxaQSEQsXbp083L1/N705", inDirs, perl = TRUE, invert = TRUE)]
# Read in each lane as list of data frames.
# NOTE(review): the lapply function parameter shadows the outer `inDirs`
# name; inside the function it is a single directory path.
# DS-008-011 and DS-009 outputs are gzipped and read with header = TRUE;
# the older runs are read with the default header = FALSE -- presumably the
# file formats differ between pipeline versions; verify against the files.
exLDF <- lapply(inDirs, function(inDirs) {
if (grepl("DS-008-011|DS-009", inDirs)){
read.table(paste0(inDirs, "/out_gene_exon_tagged_dge.txt.gz"), header = TRUE)
}
else {
read.table(paste0(inDirs, "/out_gene_exon_tagged_dge_FtMm250.txt"))
}
})
## Combine samples into one data frame: full outer join on GENE.
# Reduce() left-folds pairwise merges in the same order as an index loop
# would, without growing exDF inside an explicit loop.
exDF <- Reduce(function(acc, df) merge(acc, df, by = "GENE", all = TRUE), exLDF)
str(exDF)
# 40k cells fails with 48G of RAM, works with 64G
# Genes absent from a given lane come out of the outer join as NA;
# treat them as zero counts.
exDF[is.na(exDF)] <- 0
# Move GENE into the rownames and drop the column.
row.names(exDF) <- exDF$GENE
exDF <- exDF[ ,-1]
print("Number of cells input:")
print(ncol(exDF))
## Metadata: one row per cell (column of exDF), recording Nextera index,
## sequencing run, brain of origin, region, and library-prep lab.
# Nextera index: repeat each directory's basename once per cell in that
# lane (ncol - 1 because the GENE column is not a cell).
nexIdx <- c()
for (i in 1:length(inDirs)) {
nexIdx <- c(nexIdx, rep(basename(inDirs)[i], ncol(exLDF[[i]])-1))
}
# Sequencing run: the 5th directory level up from the DGE directory.
seq_run <- c()
for (i in 1:length(inDirs)) {
seq_run <- c(seq_run, rep(basename(dirname(dirname(dirname(dirname(dirname(inDirs))))))[i], ncol(exLDF[[i]])-1))
}
# Metadata data frame; cells are assumed to be in the same order as the
# merged exDF columns -- TODO confirm merge() preserved column order.
metDF <- data.frame(CELL = colnames(exDF)
, NEXTERA = nexIdx
, SEQ_RUN = seq_run, stringsAsFactors = FALSE)
# SeqRun: split the combined DS-005-006-007-008 flowcell into its two runs
# by Nextera index.
metDF$SEQ_RUN[metDF$SEQ_RUN == "DS-005-006-007-008" & metDF$NEXTERA == "N710"] <- "DS-007-008"
metDF$SEQ_RUN[metDF$SEQ_RUN == "DS-005-006-007-008" & metDF$NEXTERA == "N711"] <- "DS-007-008"
metDF$SEQ_RUN[metDF$SEQ_RUN == "DS-005-006-007-008"] <- "DS-005-006"
# Brain of origin, assigned by run and Nextera index (default 2).
metDF$BRAIN <- 2
metDF$BRAIN[metDF$SEQ_RUN == "DS-005-006" | metDF$SEQ_RUN == "DS-007-008"] <- 3
metDF$BRAIN[metDF$SEQ_RUN == "DS-008-011" & metDF$NEXTERA == "N712"] <- 4
metDF$BRAIN[metDF$SEQ_RUN == "DS-008-011" & metDF$NEXTERA == "N714"] <- 4
metDF$BRAIN[metDF$SEQ_RUN == "DS-008-011" & metDF$NEXTERA == "N715"] <- 5
metDF$BRAIN[metDF$SEQ_RUN == "DS-008-011" & metDF$NEXTERA == "N716"] <- 5
metDF$BRAIN[metDF$SEQ_RUN == "DS-009" & metDF$NEXTERA == "N722"] <- 4
metDF$BRAIN[metDF$SEQ_RUN == "DS-009" & metDF$NEXTERA == "N723"] <- 4
metDF$BRAIN[metDF$SEQ_RUN == "DS-009" & metDF$NEXTERA == "N724"] <- 5
metDF$BRAIN[metDF$SEQ_RUN == "DS-009" & metDF$NEXTERA == "N726"] <- 5
# Region: GZ (germinal zone) by default, CP (cortical plate) for the
# listed run/index combinations.
metDF$REGION <- "GZ"
metDF$REGION[metDF$SEQ_RUN == "DS-007-008" & metDF$NEXTERA == "N711"] <- "CP"
metDF$REGION[metDF$SEQ_RUN == "DS-005-006" & metDF$NEXTERA == "N705"] <- "CP"
metDF$REGION[metDF$SEQ_RUN == "DS-005-006" & metDF$NEXTERA == "N702"] <- "CP"
metDF$REGION[metDF$SEQ_RUN == "DS-002-011" & metDF$NEXTERA == "N701"] <- "CP"
metDF$REGION[metDF$SEQ_RUN == "DS-002-011" & metDF$NEXTERA == "N704"] <- "CP"
metDF$REGION[metDF$SEQ_RUN == "DS-003-004" & metDF$NEXTERA == "N706"] <- "CP"
metDF$REGION[metDF$SEQ_RUN == "DS-003-004" & metDF$NEXTERA == "N709"] <- "CP"
metDF$REGION[metDF$SEQ_RUN == "DS-008-011" & metDF$NEXTERA == "N712"] <- "CP"
metDF$REGION[metDF$SEQ_RUN == "DS-008-011" & metDF$NEXTERA == "N715"] <- "CP"
metDF$REGION[metDF$SEQ_RUN == "DS-009" & metDF$NEXTERA == "N722"] <- "CP"
metDF$REGION[metDF$SEQ_RUN == "DS-009" & metDF$NEXTERA == "N724"] <- "CP"
# Library prep lab (default Plath).
metDF$LIBRARY <- "Plath"
metDF$LIBRARY[metDF$SEQ_RUN == "DS-005-006" & metDF$NEXTERA == "N702"] <- "Geschwind"
metDF$LIBRARY[metDF$SEQ_RUN == "DS-005-006" & metDF$NEXTERA == "N701"] <- "Geschwind"
metDF$LIBRARY[metDF$SEQ_RUN == "DS-007-008" & metDF$NEXTERA == "N710"] <- "Geschwind"
metDF$LIBRARY[metDF$SEQ_RUN == "DS-007-008" & metDF$NEXTERA == "N711"] <- "Geschwind"
metDF$LIBRARY[metDF$SEQ_RUN == "DS-008-011"] <- "Geschwind"
metDF$LIBRARY[metDF$SEQ_RUN == "DS-009"] <- "Geschwind"
# Persist the combined expression matrix and metadata together.
save(exDF, metDF
, file = "../analysis/Expression_Matrix_Compile_dge_FtMm250_DS-2-3-4-5-6-7-8-9-11.Rdata")
################################################################################
# ### Digital gene expression - each lane separately
#
# # Input directory paths
# laneDirs <- list.files("../data/digital_gene_expression/GRCh37_75_assembly_NoERCC/")
# laneDirs <- laneDirs[grep("Sxa", laneDirs)]
# inDirs <- list.dirs(paste0("../data/digital_gene_expression/GRCh37_75_assembly_NoERCC/", laneDirs))
# inDirs <- inDirs[grep("N70[0-9]$", inDirs, perl = TRUE)]
#
# # Read in each lane as list of dataframes
# exLDF <- lapply(inDirs, function(inDirs) {
# read.table(paste0(inDirs, "/out_gene_exon_tagged_dge_FtMm250.txt"))
# })
#
# ## Combine samples into 1 dataframe
# for (i in 1:length(exLDF)) {
# df <- exLDF[[i]]
# if (i == 1) {
# exDF <- df}
# else {
# exDF <- merge(exDF, df, by.x = "GENE", by.y = "GENE", all = TRUE)
# }
# }
# str(exDF)
# exDF[is.na(exDF)] <- 0
# row.names(exDF) <- exDF$GENE
# exDF <- exDF[ ,-1]
# print("Number of cells input:")
# print(ncol(exDF))
#
# ## Metadata
# # Samples
# samples <- c()
# for (i in 1:length(inDirs)) {
# samples <- c(samples, rep(basename(inDirs)[i], ncol(exLDF[[i]])-1))
# }
# # Lanes
# lanes <- c()
# for (i in 1:length(inDirs)) {
# lanes <- c(lanes, rep(basename(dirname(inDirs))[i], ncol(exLDF[[i]])-1))
# }
# # Metadata
# metDF <- data.frame(CELL = colnames(exDF), LANE = lanes, SAMPLE = samples)
#
# save(exDF, metDF, file = "../analysis/Expression_Matrix_Compile_dge_FtMm250_LanesSeparate.Rdata")
# ################################################################################
|
2ac999abc55d03570767b61278d2835dc1fe7db8
|
67d04319bc2ecb0ab773f44e15888cd154585cc7
|
/plot3.R
|
e6e173c80cb7633719b7c8e868564762b6ffaf93
|
[] |
no_license
|
Ellaqyzeng/ExDataAnalysis_Project_2
|
1d72880f55545c72c6549f64656ddfe306bfc5d7
|
cccd3d266d588a8850c6056ddbb7918176af96a2
|
refs/heads/master
| 2022-12-16T23:58:13.120554
| 2020-09-26T20:11:27
| 2020-09-26T20:11:27
| 298,799,248
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 512
|
r
|
plot3.R
|
# Plot 3: Baltimore City PM2.5 emissions 1999-2008 by source type.
# Load the NEI/SCC data only if not already in the workspace (large files).
if (!exists("NEI")) {
  NEI <- readRDS("summarySCC_PM25.rds")
}
if (!exists("SCC")) {
  SCC <- readRDS("Source_Classification_Code.rds")
}
library(ggplot2)
# Baltimore City is fips == "24510"; total emissions per year and type.
bal <- subset(NEI, fips == "24510")
dbal <- aggregate(Emissions ~ year + type, bal, sum)
# Build the plot: one facet per source type, points plus a connecting line.
g <- ggplot(dbal, aes(x = year, y = Emissions))
png(filename = "plot3.png")
# print() is required: ggplot objects are not auto-printed when the script
# is run via source(), which would otherwise leave plot3.png empty.
# (Also fixes the mojibake en-dash in the original title.)
print(
  g + geom_point() + facet_grid(. ~ type) + geom_line(color = "blue") +
    ggtitle("Emissions from 1999-2008 in Baltimore City by source")
)
dev.off()
|
ad7d9b645393a784fafec09c1819c11a7a57bd0a
|
0d31fc55c8391cd45e38a7e1a40c00853176b536
|
/run_analysis.R
|
516a96e42d7fb7660084f59e288b03288bfbda5e
|
[] |
no_license
|
yuliangzhang/Getting-and-Cleaning-Data-Course-Project
|
75178b92b9373d04e3f545197388e5cbae373035
|
e43e2e8108b1f1026293e7f376a318843bad6650
|
refs/heads/master
| 2020-07-06T07:33:27.729493
| 2016-09-12T07:06:08
| 2016-09-12T07:06:08
| 67,977,720
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,922
|
r
|
run_analysis.R
|
##Set workspace and create special file to store data
setwd("D:\\Rworkspace");
if(!file.exists("./data")){
dir.create("./data")
}
## download project data set and unzip it
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip";
download.file(fileUrl, destfile = "./data/Dataset.zip");
unzip("./data/Dataset.zip", exdir = "./data")
##load data into R
trainX <- read.table("./data/UCI HAR Dataset/train/X_train.txt")
trainY <- read.table("./data/UCI HAR Dataset/train/y_train.txt")
testX <- read.table("./data/UCI HAR Dataset/test/X_test.txt")
testY <- read.table("./data/UCI HAR Dataset/test/y_test.txt")
subjectTrain <- read.table("./data/UCI HAR Dataset/train/subject_train.txt")
subjectTest <- read.table("./data/UCI HAR Dataset/test/subject_test.txt")
feature <-read.table("./data/UCI HAR Dataset/features.txt")
activityNames <- read.table("./data/UCI HAR Dataset/activity_labels.txt")
##1. Merges the training and the test sets to create one data set.
## 1)Merge training and testing data together
mDataX <- rbind(trainX,testX);
mDataY <- rbind(trainY, testY);
mDataSub <- rbind(subjectTrain, subjectTest);
#mDataSet <- cbind(cbind(mDataSub, mDataX), mDataY);
## 2) Set the feature name of mDataX
names(mDataX) <- feature[,2];
## 2. Extracts only the measurements on the mean and standard deviation for each measurement.
temp <- feature[,2];
selectCol <- grepl("(*mean*)|(*std*)", temp)
selMDataX <- mDataX[, selectCol]
## 3. Uses descriptive activity names to name the activities in the data set
mDataY$V1 = as.factor(mDataY$V1)
levels(mDataY$V1)
levels(mDataY$V1) <- activityNames[,2]
head(mDataY)
## 4. Appropriately labels the data set with descriptive variable names.
names(mDataY) <- "activity"
names(mDataSub) <- "subject"
## 5. From the data set in step 4, creates a second, independent tidy data
## set with the average of each variable for each activity and each subject.
## step 1: combine all the data together
mData <- cbind(mDataSub,selMDataX, mDataY);
stat <- by(mData[,2:80], mData[, c("subject", "activity")], colMeans)
arrayData <- as.data.frame.array(stat)
res <- array(,dim = c(180,81))
arrayNames <- names(arrayData)
for (i in c(1:6)){
for(j in c(1:30)){
tmp <- arrayData[[i]][[j]]
res[(i-1) * 30 + j, 1] = j;
res[(i-1) * 30 + j, 81] = arrayNames[i];
res[(i-1) * 30 + j, 2:80] = tmp
}
}
cleanData <- as.data.frame(res)
variableNames <- names(arrayData[[1]][[1]])
## Set the names of data frame
cleanDataNames <- names(cleanData)
cleanDataNames[1] <- "subject-no";
cleanDataNames[81] <- "activity"
cleanDataNames[2:80] <- variableNames;
names(cleanData) <- cleanDataNames
## write data to file
write.table(cleanData, "./data/UCI HAR Dataset/average_group_by_activity_and_subject.txt", row.name=FALSE)
|
710a20bd6367c6bb91a181c81cc8136910ac8638
|
9c222adf00fbc29ec24cc620fcf789680957ab2c
|
/app.R
|
e45c6cb9a688182fe968eb809970ac633388b643
|
[] |
no_license
|
yeqiaoling/Web-Based-Email
|
3877f910a526c48522fe6e866942880ff44d92d9
|
9c60ddebc07617d153cb3c28f5f185013bb3ffec
|
refs/heads/master
| 2022-12-07T15:16:11.256995
| 2020-08-22T05:59:42
| 2020-08-22T05:59:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,066
|
r
|
app.R
|
library(shiny)
library(shinydashboard)
library(DT)
library(shinyjs)
# library(sodium)
# pages
# Sign-in form: username + password, with two initially-hidden error
# banners ("nomatch" for a wrong password, "no_existing" for an unknown
# username) that the server toggles on failure.
loginpage <- div(id = "loginpage", style = "width: 500px; max-width: 100%; margin: 0 auto; padding: 20px;",
wellPanel(
tags$h2("SIGN IN", class = "text-center", style = "padding-top: 0;color:#333; font-weight:600;"),
textInput("userName", placeholder="Username", label = tagList(icon("user"), "Username")),
passwordInput("passwd", placeholder="Password", label = tagList(icon("unlock-alt"), "Password")),
br(),
div(
style = "text-align: center;",
actionButton("loggedIn", "SIGN IN", style = "color: white; background-color:#3c8dbc;
                             padding: 10px 15px; width: 150px; cursor: pointer;
                             font-size: 18px; font-weight: 600;"),
shinyjs::hidden(
div(id = "nomatch",
tags$p("Oops! Incorrect username or password!",
style = "color: red; font-weight: 600;
                            padding-top: 5px;font-size:16px;",
class = "text-center"))),
br(),
shinyjs::hidden(div(id = "no_existing",
tags$p("Oops! This email is not existing!",
style = "color: red; font-weight: 600;
                            padding-top: 5px;font-size:16px;",
class = "text-center")))
)
)
)
# Sign-up form: account/profile fields plus three hidden error banners
# (existing username, existing email, incomplete form).
# NOTE(review): the passwd_su field is a plain textInput, so the password
# is shown while typing; the commented-out passwordInput was presumably
# the intended widget.
# NOTE(review): the "incomplete_info" hidden div is nested inside the
# hidden() wrapper of "existing_email" rather than being a sibling --
# toggling by id still targets the inner div, but confirm the layout
# renders as intended.
signuppage <- div(id = "signuppage", style = "width: 500px; max-width: 100%; margin: 0 auto; padding: 20px;",
wellPanel(
tags$h2("SIGN UP", class = "text-center", style = "padding-top: 0;color:#333; font-weight:600;"),
textInput("email", label = "Email"),
textInput("phonNum", label = "Phone Number"),
textInput("firstName", label = "First Name"),
textInput("midName", label = "Middle Name"),
textInput("lastName", label = "Last Name"),
textInput("mailAdd", label = "Mailing Address"),
textInput("occupation", label = "Occupation"),
textInput("userName_su", label = "Username"),
textInput("passwd_su", label = "Password"),
# passwordInput("passwd", label = "Password"),
br(),
div(
style = "text-align: center;",
actionButton("submitted", "SUBMIT", style = "color: white; background-color:#3c8dbc;
                             padding: 10px 15px; width: 150px; cursor: pointer;
                             font-size: 18px; font-weight: 600;"),
shinyjs::hidden(
div(id = "existing_username",
tags$p("Oops! This username has been used!",
style = "color: red; font-weight: 600;
                            padding-top: 5px;font-size:16px;",
class = "text-center"))),
br(),
shinyjs::hidden(div(id = "existing_email",
tags$p("Oops! This email address has been used!",
style = "color: red; font-weight: 600;
                            padding-top: 5px;font-size:16px;",
class = "text-center")),
br(),
shinyjs::hidden(div(id = "incomplete_info",
tags$p("Oops! Please fill in all fields!",
style = "color: red; font-weight: 600;
                            padding-top: 5px;font-size:16px;",
class = "text-center")))
)
)
)
)
# Post-login landing page (placeholder content).
emailpage <- div(id = "emailpage", style = "width: 500px; max-width: 100%; margin: 0 auto; padding: 20px;",
wellPanel(
tags$h2("Welcome! This is your email.", class = "text-center", style = "padding-top: 0;color:#333; font-weight:600;")
)
)
# Credential-recovery form: look up username/password by email, with a
# hidden "noexist" banner for unknown addresses.
retrievepage <- div(id = "retrievepage", style = "width: 500px; max-width: 100%; margin: 0 auto; padding: 20px;",
wellPanel(
tags$h2("RETRIEVE USERNAME AND/OR PASSWORD", class = "text-center", style = "padding-top: 0;color:#333; font-weight:600;"),
textInput("email_rtr", label = "What is your email address?"),
br(),
div(
style = "text-align: center;",
actionButton("retrieved", "RETRIEVE", style = "color: white; background-color:#3c8dbc;
                             padding: 10px 15px; width: 150px; cursor: pointer;
                             font-size: 18px; font-weight: 600;"),
shinyjs::hidden(
div(id = "noexist",
tags$p("Oops! This email address does not exist!",
style = "color: red; font-weight: 600;
                            padding-top: 5px;font-size:16px;",
class = "text-center")))
))
)
# Page shown after a successful credential retrieval: the server renders
# the looked-up username/password into these outputs, plus a button back
# to the sign-in page.
infopage <- fluidPage(
  textOutput('userinfo'),
  textOutput('passwdinfo'),
  # Trailing comma after this argument removed: base R treats an empty
  # trailing argument as an error ("argument is empty"), and tolerating it
  # depends on the htmltools/rlang version.
  actionButton("login_from_info", "LOG IN", style = "color: white; background-color:#3c8dbc;
                             padding: 6px 10px; width: 130px; cursor: pointer;
                             font-size: 12px; font-weight: 400;")
)
# ui: shinydashboard layout -- header with a logout button rendered by the
# server, sidebar with the navigation action buttons, body filled
# dynamically via uiOutput("body").
header <- dashboardHeader( title = "Singularity Inc", uiOutput("logoutbtn"))
sidebar <- dashboardSidebar(uiOutput("sidebarpanel"),
actionButton(inputId = 'sign_in', label = 'Sign in'),
actionButton(inputId = 'sign_up', label = 'Sign up'),
actionButton(inputId = 'retrieve', label = 'Forgot username/password')
#, actionButton(inputId = 'upload', label = 'Upload file')
)
# useShinyjs() must be called in the UI for the shinyjs::toggle/delay
# calls in the server to work.
body <- dashboardBody(shinyjs::useShinyjs(), uiOutput("body"))
ui <- dashboardPage(header, sidebar, body, skin = "blue")
# server: per-session state machine driven by USER$intension
# ('want_sign_in' / 'want_sign_up' / 'want_retrieve' / 'display_info' /
# 'logged_in') and USER$login. Credentials are persisted in an RDS file
# with columns email_id, username_id, passod (sic -- the misspelling is
# used consistently throughout).
# NOTE(review): passwords are stored and compared in plain text; the
# commented-out sodium::password_verify() call suggests hashing was
# intended.
server <- function(input, output) {
login <- FALSE
intension <- ""
email <- ""
# User store, read once per session; a missing file starts an empty store.
fn <- "data/user_info.rds"
if (file.exists(fn)) {
credentials <- readRDS(fn)
} else {
credentials <- data.frame()
}
USER <- reactiveValues(login = login, intension = intension, email = email)
# Sidebar navigation: each button switches the body page and logs out.
observeEvent(input$sign_in, {
USER$intension <- 'want_sign_in'
USER$login <- F
})
observeEvent(input$sign_up, {
USER$intension <- 'want_sign_up'
USER$login <- F
})
observeEvent(input$retrieve, {
USER$intension <- 'want_retrieve'
USER$login <- F
})
# Sign-in button: set login = TRUE and intension = 'logged_in' on success,
# otherwise flash the matching error banner for 3 seconds.
observe({
if (USER$login == FALSE) {
if (!is.null(input$loggedIn)) {
if (input$loggedIn > 0) {
Username <- isolate(input$userName)
Password <- isolate(input$passwd)
indx <- which(tolower(credentials$username_id) ==
tolower(Username))
if(length(indx) == 1) {
pasmatch <- credentials["passod"][indx,]
# pasverify <- password_verify(pasmatch, Password)
pasverify <- pasmatch == Password
if(pasverify) {
USER$login <- TRUE
USER$intension <- 'logged_in'
} else {
shinyjs::toggle(id = "nomatch", anim = TRUE, time = 1, animType = "fade")
shinyjs::delay(3000, shinyjs::toggle(id = "nomatch", anim = TRUE, time = 1, animType = "fade"))
}
} else {
shinyjs::toggle(id = "no_existing", anim = TRUE, time = 1, animType = "fade")
shinyjs::delay(3000, shinyjs::toggle(id = "no_existing", anim = TRUE, time = 1, animType = "fade"))
}
}
}
}
})
# Sign-up submit button: validate, then save the new account and log in.
observe({
if (USER$login == FALSE) {
if ( !is.null(input$submitted)) {
if ( input$submitted > 0 ){
# extract the form fields (isolate: react only to the button)
Email <- isolate(input$email)
Username <- isolate(input$userName_su)
Password <- isolate(input$passwd_su)
phonNum <- isolate(input$phonNum)
firstName <- isolate(input$firstName)
midName <- isolate(input$midName)
lastName <- isolate(input$lastName)
mailAdd <- isolate(input$mailAdd)
occupation <- isolate(input$occupation)
# check for an existing username/email and for incomplete input
existing <- length(which(tolower(credentials$username_id) ==
tolower(Username))) == 1
existing_email <- length(which(tolower(credentials$email_id) ==
tolower(Email))) == 1
incomplete <- Email == '' | phonNum == '' | firstName == '' |
midName == '' | lastName == '' | mailAdd == '' |
occupation == '' | Username == '' | Password == ''
if (existing_email) {
shinyjs::toggle(id = "existing_email", anim = TRUE, time = 1, animType = "fade")
shinyjs::delay(3000, shinyjs::toggle(id = "existing_email", anim = TRUE, time = 1, animType = "fade"))
} else if (existing) {
shinyjs::toggle(id = "existing_username", anim = TRUE, time = 1, animType = "fade")
shinyjs::delay(3000, shinyjs::toggle(id = "existing_username", anim = TRUE, time = 1, animType = "fade"))
} else if (incomplete) {
shinyjs::toggle(id = "incomplete_info", anim = TRUE, time = 1, animType = "fade")
shinyjs::delay(3000, shinyjs::toggle(id = "incomplete_info", anim = TRUE, time = 1, animType = "fade"))
} else {
# validation passed: persist the new user and log them in.
# NOTE(review): only email/username/password are saved; the other
# profile fields are collected but discarded -- confirm intended.
newUser <- data.frame(
email_id = Email,
username_id = Username,
passod = Password,
stringsAsFactors = F)
# NOTE(review): this `<-` binds a new `credentials` local to this
# observer only; the server-scope copy used by the sign-in and
# retrieve observers is NOT updated, so the new account is only
# visible to sessions started after this point (via the saved RDS).
# A `<<-` (or a reactiveVal) would update it in-session.
credentials <- rbind(credentials, newUser)
saveRDS(credentials, file = fn)
USER$login <- TRUE
USER$intension <- 'logged_in'
}
}
}
}
})
# Retrieve button: on a known email, switch to the credential display
# page; otherwise flash the "noexist" banner.
observe({
if ( !is.null(input$retrieved)) {
if ( input$retrieved > 0 ){
Email <- isolate(input$email_rtr)
existing <- length(which(tolower(credentials$email_id) ==
tolower(Email))) == 1
if (existing) {
USER$email <- Email
USER$intension <- 'display_info'
} else {
shinyjs::toggle(id = "noexist", anim = TRUE, time = 1, animType = "fade")
shinyjs::delay(3000, shinyjs::toggle(id = "noexist", anim = TRUE, time = 1, animType = "fade"))
}
}
}
})
# Back-to-login button on the credential display page.
observe({
if (USER$login == FALSE) {
if (!is.null(input$login_from_info)) {
if (input$login_from_info > 0) {
USER$intension <- 'want_sign_in'
}
}
}
})
# Logout link: rendered only when logged in; a full page reload resets
# the session.
output$logoutbtn <- renderUI({
req(USER$login)
tags$li(a(icon("fa fa-sign-out"), "Logout",
href="javascript:window.location.reload(true)"),
class = "dropdown",
style = "background-color: #eee !important; border: 0;
                font-weight: bold; margin:5px; padding: 10px;")
})
# Body router: pick the page matching the current intension.
output$body <- renderUI({
if (USER$intension == 'want_sign_in') {
loginpage
} else if (USER$intension == 'want_sign_up') {
signuppage
} else if (USER$intension == 'want_retrieve') {
retrievepage
} else if (USER$intension == 'display_info') {
# look up the credentials for the verified email and render them
indx <- which(tolower(credentials$email_id) == tolower(USER$email))
username <- credentials$username_id[indx]
passwd <- credentials$passod[indx]
output$userinfo <- renderText({paste0('Username: ', username)})
output$passwdinfo <- renderText({paste0('Password: ', passwd)})
# display
infopage
} else if (USER$intension == 'logged_in') {
emailpage
}
})
}
# Launch the application.
shinyApp(server = server, ui = ui)
|
6584815892014a29d42282b0255d846d31b8a6c1
|
8d4b2f0ec29916d0e9c0b72aa6d3d37d6afa1dcd
|
/man/get_posterior.Rd
|
68dd739007a0b0eefe9be46a227d5677cc6b9e7b
|
[] |
no_license
|
EpiModel/EpiABC
|
fbffee553b2d384990dba43c18fac790bb56f5bf
|
ad80ccb31734e24d42d282232796716afbd00383
|
refs/heads/master
| 2021-06-24T00:03:20.664401
| 2020-12-05T14:09:47
| 2020-12-05T14:09:47
| 164,778,789
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 698
|
rd
|
get_posterior.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/slurm.R
\name{get_posterior}
\alias{get_posterior}
\title{Extracts Posterior Distribution for Parameters and Summary Statistics}
\usage{
get_posterior(wave, input = "data/")
}
\arguments{
\item{wave}{If \code{input} is a character string, the wave file that should
be read from that directory.}
\item{input}{Either a character string with the directory to read the wave
files created with \code{\link{abc_smc_wave}} from, or the direct object
itself.}
}
\description{
Once a simulation wave is complete, this function processes the output and
stores it in a format that is useful for numerical analysis and plotting.
}
|
b9b682ac4e992939da3218c1b7bc639580557f6f
|
ebe187244f13f3d118bd24ad75f9d80ea89de00d
|
/R/hypothesis_test_equal_tailed.R
|
3877ad2ad3def912028a0b9cb42a0d696bf97148
|
[
"MIT"
] |
permissive
|
adviksh/winference
|
d12c811daaa91580aec4c12ea9f77a2ed31813ce
|
765229f2bdc251c8fc127f4cccbbcba6ec95f03e
|
refs/heads/master
| 2022-09-05T14:48:00.150154
| 2020-05-31T03:44:28
| 2020-05-31T03:44:28
| 268,204,277
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,109
|
r
|
hypothesis_test_equal_tailed.R
|
equal_tailed_test_rejects <- function(mu, y, sigma, alpha = 0.05,
                                      trunc_lo = -Inf, trunc_hi = Inf) {
  # Equal-tailed truncated-normal test: reject mu at level alpha iff either
  # one-sided test rejects at level alpha/2.
  # Reject outright when the observation falls outside the truncation set.
  if (!is_contained(y, c(trunc_lo, trunc_hi))) {
    return(TRUE)
  }
  half_alpha <- alpha / 2
  # Run the one-sided test for a given tail at level alpha/2.
  rejects_on <- function(side) {
    one_sided_test_rejects(mu = mu,
                           sigma = sigma,
                           y = y,
                           side = side,
                           trunc_lo = trunc_lo,
                           trunc_hi = trunc_hi,
                           alpha = half_alpha)
  }
  rejects_on("left") | rejects_on("right")
}
|
de24f6543f9b66a64649ea42eb6e68acdcbeb2a4
|
0dcce8658bd17f8e896120c13eafcf82264d1e73
|
/hpv.icc.correllation/hpv.icc.correlation.R
|
0b584c3ac4253b5a1186df5595d6085251b3d263
|
[] |
no_license
|
rojjsf/hpv.icc.correllation.hub
|
bd506106ef7784d118dcac721b393ae3c867632d
|
0e99f2990d06fb8150740ec0f95e88fbea0554cf
|
refs/heads/master
| 2020-03-25T18:31:11.620650
| 2018-08-08T15:56:22
| 2018-08-08T15:56:22
| 144,034,645
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,234
|
r
|
hpv.icc.correlation.R
|
#### age-stratified correlation of hpv in several countries ####
#### read in data ####
library(haven)
library(Epi)
library(dplyr)
library(xlsx)
library(writexl)
alg.data <- read_dta("C:/Users/schultefrohlinder/Documents/HPV_Prevalence/Data/Algeria/pooled_db/algeriapool.dta")
str(alg.data)
# organize the data
# sel_paper0 == 0 means that the woman has NOT been selected.
sel.paper0.rm <- which(alg.data$sel_paper0 == 0)
# reason for exclusion of the removed women (inspect before dropping)
exclu.reason <- alg.data[sel.paper0.rm, "exclur0"]
exclu.reason
# Drop the non-selected women. Guard the negative index: when no one is
# excluded, alg.data[-integer(0), ] would drop ALL rows.
if (length(sel.paper0.rm) > 0) {
  alg.data <- alg.data[-sel.paper0.rm, ]
}
nrow(alg.data)
# select high risk types:
Hrisk <- c("ahpv16", "ahpv18", "ahpv31","ahpv33","ahpv35","ahpv39","ahpv45","ahpv51","ahpv52","ahpv56","ahpv58","ahpv59","ahpv68", "ahpv73", "ahpv82", "ahpvhrx")
# age groups [15 - 90) in 5-year bins; keep id, age and high-risk types
alg.hrisk <- alg.data[, c("sgid", Hrisk, "sga3")]
alg.hrisk$age.grp <- cut(alg.hrisk$sga3, seq(10, 90, 5))
str(alg.hrisk) # hpv pos/neg coded as numeric 1/0
alg.hrisk <- alg.hrisk %>%
  mutate(hpvpos = rowSums(alg.hrisk[, Hrisk])) %>% # number of different hpv infections
  # BUG FIX: refer to the just-created columns by their bare names.
  # The original used alg.hrisk$hpvpos / alg.hrisk$hpvsino, but inside the
  # pipeline alg.hrisk is still the pre-pipeline object, which has no such
  # columns (NULL -> zero-length result -> error).
  # NOTE(review): a woman with any NA among the Hrisk columns gets
  # hpvpos = NA and therefore hpvsino = NA (not 0) -- confirm intended.
  mutate(hpvsino = ifelse(hpvpos > 0, 1, 0)) %>%
  mutate(hpvsino = factor(hpvsino, levels = c(0, 1), labels = c("neg", "pos")))
alg.table <- table(alg.hrisk$age.grp, alg.hrisk$hpvsino)
alg.table <- as.data.frame.matrix(alg.table) # as.data.frame would treat pos/neg as variables and stack them into one long column
# other option: spread() with tidyr, but then column names have to be redefined
alg.table <- alg.table %>%
  mutate("age.grp" = levels(alg.hrisk$age.grp)) %>%
  mutate("prev" = round((pos * 100/ (neg + pos)), 1)) %>%
  # 1.96 * binomial s.e., i.e. the half-width of a 95% Wald CI in percent
  mutate("se" = round(1.96 * sqrt(prev * (100 - prev) / ((neg + pos)*100)), 1)) %>%
  mutate("ci" = paste(prev - se, prev + se, sep = "-")) # as characters
alg.table
write.xlsx(alg.table, file = "hpv.prevalence.xlsx", sheetName="algeria")
0dec1ed7e7d34d8f896b650cb7c657004550d2e9
|
0ba08455f27022f23296c06f57a6da24fdd1168d
|
/RSPCMA/R/rsplus5.r
|
be214679b250977e30de3ddc4421dcb782ea6790
|
[] |
no_license
|
lbraun/applied_mathematics
|
ba44c1f1f15387c9c5b80be99639d4cac2b0f8d8
|
a077b676e34c90d2f3e5858efc15d1d5e57aef79
|
refs/heads/master
| 2021-05-11T18:13:24.544412
| 2018-02-05T08:39:27
| 2018-02-05T08:39:27
| 117,818,125
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,113
|
r
|
rsplus5.r
|
# Classical (metric) multidimensional scaling of airline distances.
airline.dist<-source("c:\\allwork\\rsplus\\chap5airdist.dat")$value
#
# cmdscale with k = 9 coordinates; eig = T returns the eigenvalues.
# NOTE(review): T/F instead of TRUE/FALSE is fragile (T is reassignable).
airline.mds<-cmdscale(as.matrix(airline.dist),k=9,eig=T)
airline.mds$eig
#
# Goodness-of-fit of the 2-d solution: share of the first two eigenvalues
# (absolute values, and squared).
sum(abs(airline.mds$eig[1:2]))/sum(abs(airline.mds$eig))
sum(airline.mds$eig[1:2]^2)/sum(airline.mds$eig^2)
#
# Plot the 2-d configuration (first axis reflected), labelling cities.
par(pty="s")
plot(-airline.mds$points[,1],airline.mds$points[,2],type="n",xlab="Coordinate 1",ylab="Coordinate 2",
xlim=c(-2000,1500),ylim=c(-2000,1500))
text(-airline.mds$points[,1],airline.mds$points[,2],labels=row.names(airline.dist))
#
#
# Correspondence analysis "by hand" on a 3 x 5 contingency table:
# compute chi-squared distances between row profiles and between column
# profiles, then apply classical MDS to each.
sex<-matrix(c(21,21,14,13,8,8,9,6,8,2,2,3,4,10,10),ncol=5,byrow=TRUE)
#
# NOTE(review): ncol/nrow shadow the base functions of the same name
# within this script.
ncol<-5
nrow<-3
n<-sum(sex)
rtot<-apply(sex,1,sum)
ctot<-apply(sex,2,sum)
#
xrtot<-cbind(rtot,rtot,rtot,rtot,rtot)
xctot<-rbind(ctot,ctot,ctot)
#
# Row profiles (each row divided by its total) and column profiles.
xrtot<-sex/xrtot
xctot<-sex/xctot
#
# Row/column masses.
rdot<-rtot/n
cdot<-ctot/n
# Chi-squared distances between pairs of column profiles, weighted by the
# inverse row masses.
dcols<-matrix(0,ncol,ncol)
for(i in 1:ncol){
for(j in 1:ncol){d<-0
for(k in 1:nrow) d<-d+(xctot[k,i]-xctot[k,j])^2/rdot[k]
dcols[i,j]<-sqrt(d)}}
#
# Chi-squared distances between pairs of row profiles, weighted by the
# inverse column masses.
drows<-matrix(0,nrow,nrow)
for(i in 1:nrow){
for(j in 1:nrow){d<-0
for(k in 1:ncol) d<-d+(xrtot[i,k]-xrtot[j,k])^2/cdot[k]
drows[i,j]<-sqrt(d)}}
#
#
# Classical MDS of each distance matrix.
r1<-cmdscale(dcols,eig=TRUE)
r1$points
r1$eig
c1<-cmdscale(drows,eig=TRUE)
c1$points
c1$eig
xrtot
# Joint plot: age-group (column) points and breast-feeding-category (row)
# points in the same coordinate system.
par(pty="s")
plot(r1$points,xlim=range(r1$points[,1],c1$points[,1]),ylim=range(r1$points[,1],c1$points[,1]),type="n",
xlab="Coordinate 1",ylab="Coordinate 2",lwd=2)
text(r1$points,labels=c("AG1","AG2","AG3","AG4","AG5"),lwd=2)
text(c1$points,labels=c("nobf","bfns","bfs"),lwd=4)
#
abline(h=0,lty=2)
abline(v=0,lty=2)
#
#
#
# Egyptian skulls data: Mahalanobis distances between the five epoch means,
# followed by classical MDS of the distance matrix.
skulls<-source("c:\\allwork\\rsplus\\chap5skulls.dat")$value
#
# Mahalanobis distances
#
# 5 epochs of 30 skulls each, 4 measurements per skull.
labs<-rep(1:5,rep(30,5))
#
centres<-matrix(0,5,4)
S<-matrix(0,4,4)
#
# Group means and pooled within-group covariance: each group contributes
# 29 * its own sample covariance.
# BUG FIX: the original accumulated 29*var(skulls[,-1]) (the WHOLE data)
# on every iteration, which reduces S/145 to the total covariance instead
# of the pooled within-group covariance; the per-group subset is required
# for the 5 * 29 / 145 degrees-of-freedom structure to make sense.
for(i in 1:5) {
centres[i,]<-apply(skulls[labs==i,-1],2,mean)
S<-S+29*var(skulls[labs==i,-1])
}
#
# Divide by the pooled degrees of freedom: 5 groups * 29 = 145.
S<-S/145
#
# Pairwise squared Mahalanobis distances between the group means.
mahal<-matrix(0,5,5)
#
for(i in 1:5) {
mahal[i,]<-mahalanobis(centres,centres[i,],S)
}
#
# 2-d classical MDS of the distance matrix, labelled by epoch.
par(pty="s")
coords<-cmdscale(mahal)
xlim<-c(-1.5,1.5)
plot(coords,xlab="C1",ylab="C2",type="n",xlim=xlim,ylim=xlim,lwd=2)
text(coords,labels=c("c4000BC","c3300BC","c1850BC","c200BC","cAD150"),lwd=3)
#
#
cb3e77c941c4237701e792016eda760ba82d5d4d
|
0abb72f6b1db11a876ee9a8342139c6c48cec587
|
/_clutter/week1_code.r
|
59d8c97245e4d42f5b000a0cef6b53abf35249b4
|
[] |
no_license
|
lefft/r_norc_minicourse
|
9d0cac94f18c72e9dedf94c250182641c4dc5508
|
9ef70a95710d4e942e52919c1a3cdbd7f6ac946f
|
refs/heads/master
| 2021-01-18T23:32:43.427790
| 2017-06-09T18:44:48
| 2017-06-09T18:44:48
| 87,112,614
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,461
|
r
|
week1_code.r
|
#'---
#'title: "week 1 notes etc."
#'author: "timothy leffel may19/2017"
#'date: ""
#'css: css/notes.css
#'---
#'<hr>
# THIS IS THE DOC TO CANNIBALIZE
# THIS IS THE DOC TO CANNIBALIZE
# THIS IS THE DOC TO CANNIBALIZE
# THIS IS THE DOC TO CANNIBALIZE
# THIS IS THE DOC TO CANNIBALIZE
#'<hr>
#'
#'<hr style="height:2px; background-color: gray; color:gray;"><br>
#'This is a version of the class code that you can look at alongside its output, chunk by chunk. Each week of the course will have a document in this format.
#'<br><br><br>
#'
#'### 1. Hello, R!
#'
#'<hr style="height:1px; background-color:lightgray; color:lightgray;">
# welcome to the R mini-course. in keeping with tradition...
print("...an obligatory 'hello, world!'")
# this line is a comment, so R will always ignore it.
# this is a comment too, since it also starts with "#".
# but the next one is a line of real R code, which does some arithmetic:
5 * 3
# we can do all kinds of familiar math operations:
5 * 3 + 1
# 'member "PEMDAS"?? applies here too -- compare the last line to this one:
5 * (3 + 1)
# spaces usually don't matter, but should be used for legibility
5 * 3+1
5*(3+ 1)
#'<br><br>
#'
#'### 2. Variables and Assignments
#'
#'<hr style="height:1px; background-color:lightgray; color:lightgray;">
# usually when we do some math, we want to save the result for future use.
# we can do this by **assigning** a computation to a **variable**
firstvar <- 5 * (3 + 1)
# now 'firstvar' is an **object**. we can see its value by printing it.
# sending `firstvar` to the interpreter is equivalent to `print(firstvar)`
firstvar
# the word "variable" is unfortunately overloaded with meanings.
# here a variable is something like a container, which we can put stuff in.
# this is importantly different from the more common "pointer" semantics
# for variables found in python and most other programming languages.
# the difference between "container" semantics and "pointer" semantics is
# illustrated in the week 1 notes.
# we can put basically anything into a variable, and we can call a variable
# pretty much whatever we want (but do avoid special characters besides "_")
myvar <- "boosh!"
myvar
myVar <- 5.5
myVar
# including other variables or computations involving them:
my_var <- myvar
my_var
myvar0 <- myVar / (myVar * 1.5)
myvar0
# when you introduce variables, they'll appear in the environment tab of the
# top-right pane in R Studio. you can remove variables you're no longer
# using with `rm()`. (this isn't necessary, but it saves space in both
# your brain and your computer's
rm(myvar)
rm(my_var)
rm(myVar)
rm(myvar0)
#'<br><br>
#'
#'### 3. Vectors
#'
#'<hr style="height:1px; background-color:lightgray; color:lightgray;">
# R was designed with statistical applications in mind, so naturally there's
# lots of ways to represent collections or sequences of values (e.g. numbers).
# in R, a "vector" is the simplest list-like data structure.
# you can create a vector with the `c()` function (for "concatenate")
myvec <- c(1, 2, 3, 4, 5)
myvec
anothervec <- c(4.5, 4.12, 1.0, 7.99)
anothervec
# vectors can hold elements of any type, but they must all be of the same type.
# to keep things straight in your head, maybe include the data type in the name
myvec_char <- c("a","b","c","d","e")
myvec_char
# if we try the following, R will coerce the numbers into characters:
myvec2 <- c("a","b","c",1,2,3)
myvec2
rm(myvec2)
# you can put vectors or values together with `c()`
longvec <- c(0, myvec, 9, 80, anothervec, 0, NA)
# suppose the only reason we created `myvec` and `anothervec` was to put
# them together with some other stuff, and save that to `longvec`.
# in this case, we can just remove `myvec` and `anothervec`, and use `longvec`
rm(myvec)
rm(anothervec)
longvec
# note also that the whole numbers (**integers**) now have decimals in them.
# this is because R coerced them into "floating-point" numbers, which are a
# computer's decimal-based representation of the real numbers
# (**doubles** -- you'll learn some cool stuff if you google:
# "why are floats called 'doubles' in R?", including what a "float" is).
# to see how many elements a vector has, get its `length()`
length(longvec)
# to see what the unique values are, use `unique()` (you'll get a vector back)
unique(longvec)
# a very common operation is to see how many unique values there are:
length(unique(longvec))
# to see a frequency table over a vector, use `table()`
table(longvec)
# note that this works for all kinds of vectors
table(c("a","b","c","b","b","b","a"))
table(c(TRUE, FALSE, FALSE, FALSE, TRUE, FALSE))
# an important but not obvious thing:
# R has a special value called `NA`, which represents missing data.
# by default, `table()` won't tell you about NA's (annoying, ik!).
# so get in the habit of specifying the `useNA` **argument** of `table()`
table(c(1,2,3,2,2,NA,3,NA,NA,1,1))
table(c(1,2,3,2,2,NA,3,NA,NA,1,1), useNA="ifany") # or "always" or "no"
# notice that the structure of this command is:
# table(VECTOR, useNA=CHARACTERSTRING)
# some terminology:
# - `table()` is a **function**
# - `table()` has **argument positions** for a vector and for a string
# - we provided `table()` with two **arguments**:
# * a vector
# * a character string
# - the second argument position was **named** `useNA`
# - we used the **argument binding** syntax `useNA="ifany"`
# argument-binding is kind of like variable assignment, but `useNA` doesn't
# become directly available for use after we give it a value.
# this might feel kinda abstract, but i promise the intuition
# will become clearer the further along we get. some arguments -- like `useNA`
# here -- can be thought of as "options" of the function they belong to.
# here's an example that might clarify the concept of argument binding:
round(3.141592653, digits=4)
# `round()` is a commonly used function. it's additionally relevant here
# because it illustrates an important concept called **vectorization**.
# many functions in R are vectorized by default, which means that they can
# take an individual value (like the `round()` call above), or they can take
# a vector of values. in the latter case, the function will apply pointwise
# to each element of the vector, and then return the resulting values as a
# vector, with the same length and order as the input:
round(longvec, digits=4)
# as we saw with `table()`, some functions have optional argument positions.
# this is true of `round()` too -- if we don't tell it how many digits to
# round to, it uses the default of 0. we'll see how this works later.
round(longvec)
# INSERT CONTENT FROM SLIDES + THEN ANNOTATE, STARTING HERE
# INSERT CONTENT FROM SLIDES + THEN ANNOTATE, STARTING HERE
# INSERT CONTENT FROM SLIDES + THEN ANNOTATE, STARTING HERE
"Table Header | Second Header
------------- | -------------
Table Cell | Cell 2
Cell 3 | Cell 4
Cell 5 | Cell 6"
# Mathjax is supported, so with latex syntax you can get inline \(A = \pi
# \times r^2 \) or separate maths:
#
# \[
# \begin{aligned}
# I_{1} &= \int_{0}^{2*\pi} \sin(x)
# \\
# I_{2} &= \int_{0}^{2*\pi} \cos(x)
# \end{aligned}
# \]
# `rep()` is a simple example of the "DRY" principle in programming
# ("don't repeat yourself")
#'<br><br>
#'
#'### 4. Data Frames
#'
#'<hr style="height:1px; background-color:lightgray; color:lightgray;">
#'<br><br>
#'
#'### 5. INSERT TITLE/CONTENT
#'
#'<hr style="height:1px; background-color:lightgray; color:lightgray;">
#'<br><br>
#'
#'### 6. INSERT TITLE/CONTENT
#'
#'<hr style="height:1px; background-color:lightgray; color:lightgray;">
#'
#'<link rel="stylesheet" type="text/css"
#'href="https://fonts.googleapis.com/css?family=Open+Sans:300,400,400i,700">
#'
#'<link href="https://fonts.googleapis.com/css?family=Roboto+Mono:300,400,500" rel="stylesheet">
#'
#' <style>
#'body {
#' padding: 10px;
#' font-size: 12pt;
#' font-family: 'Open Sans', sans-serif;
#'}
#'
#'h1 {
#' font-size: 20px;
#' color: DarkGreen;
#' font-weight: bold;
#'}
#'
#'h2 {
#' font-size: 16px;
#' color: green;
#'}
#'
#'h3 {
#' font-size: 24px;
#' color: green;
#' font-weight: bold;
#'}
#'
#'code {
#' font-family: 'Roboto Mono', monospace;
#' font-size: 14px;
#'}
#'
#'pre {
#' font-family: 'Roboto Mono', monospace;
#' font-size: 14px;
#'}
#'
#'</style>
#'
|
bf72b6c8d4f0e96c07c18bf9798125a501a30b1c
|
a5ee8b73b277c3bc2c1c8f9898b6e9da88b308f5
|
/R/vimpPlot.R
|
10c133a98e56f13356200d2360a383c3f4455414
|
[] |
no_license
|
cran/boostmtree
|
341d83fdf088d162e6dfedea09508ca55939f5e3
|
b402ee8976f8846c45ec996fdb435cf7bb311e88
|
refs/heads/master
| 2022-05-15T01:10:15.088762
| 2022-03-10T08:40:05
| 2022-03-10T08:40:05
| 54,140,236
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,965
|
r
|
vimpPlot.R
|
####**********************************************************************
####**********************************************************************
####
#### BOOSTED MULTIVARIATE TREES FOR LONGITUDINAL DATA (BOOSTMTREE)
#### Version 1.5.1 (_PROJECT_BUILD_ID_)
####
#### Copyright 2016, University of Miami
####
#### This program is free software; you can redistribute it and/or
#### modify it under the terms of the GNU General Public License
#### as published by the Free Software Foundation; either version 3
#### of the License, or (at your option) any later version.
####
#### This program is distributed in the hope that it will be useful,
#### but WITHOUT ANY WARRANTY; without even the implied warranty of
#### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#### GNU General Public License for more details.
####
#### You should have received a copy of the GNU General Public
#### License along with this program; if not, write to the Free
#### Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
#### Boston, MA 02110-1301, USA.
####
#### ----------------------------------------------------------------
#### Project Partially Funded By:
#### ----------------------------------------------------------------
#### Dr. Ishwaran's work was funded in part by grant R01 CA163739 from
#### the National Cancer Institute.
####
#### Dr. Kogalur's work was funded in part by grant R01 CA163739 from
#### the National Cancer Institute.
#### ----------------------------------------------------------------
#### Written by:
#### ----------------------------------------------------------------
#### Hemant Ishwaran, Ph.D.
#### Professor, Division of Biostatistics
#### Clinical Research Building, Room 1058
#### 1120 NW 14th Street
#### University of Miami, Miami FL 33136
####
#### email: hemant.ishwaran@gmail.com
#### URL: http://web.ccs.miami.edu/~hishwaran
#### --------------------------------------------------------------
#### Amol Pande, Ph.D.
#### Assistant Staff,
#### Thoracic and Cardiovascular Surgery
#### Heart and Vascular Institute
#### JJ4, Room 508B,
#### 9500 Euclid Ave,
#### Cleveland Clinic, Cleveland, Ohio, 44195
####
#### email: amoljpande@gmail.com
#### --------------------------------------------------------------
#### Udaya B. Kogalur, Ph.D.
#### Kogalur & Company, Inc.
#### 5425 Nestleway Drive, Suite L1
#### Clemmons, NC 27012
####
#### email: ubk@kogalur.com
#### URL: http://www.kogalur.com
#### --------------------------------------------------------------
####
####**********************************************************************
####**********************************************************************
# Plot variable importance (VIMP) values, expressed as percentages, as a
# barplot and save the result to a PDF file (one file per outcome column).
#
# Arguments:
#   vimp             list of VIMP matrices (one row per x variable, one column
#                    per outcome): vimp[[1]] = main effects and, when
#                    Time_Interaction = TRUE, vimp[[2]] = time-interaction
#                    effects
#   Q_set            labels for the outcome columns; defaults to "V1","V2",...
#   Time_Interaction if TRUE, main effects are drawn upward and
#                    time-interaction effects downward in a mirrored barplot
#   xvar.names       x-variable labels; defaults to "x1","x2",...
#   path_saveplot    output directory; defaults to tempdir()
#   Verbose          if TRUE, print where the plot was saved
# The remaining arguments tune the plot (axis limits, label sizes, colors,
# bar width, sub-headings, etc.). Called for its side effect (PDF output).
vimpPlot <- function(vimp,
Q_set = NULL,
Time_Interaction = TRUE,
xvar.names = NULL,
cex.xlab = NULL,
ymaxlim = 0,
ymaxtimelim = 0,
subhead.cexval = 1,
yaxishead = NULL,
xaxishead = NULL,
main = "Variable Importance (%)",
col = grey(.80),
cex.lab = 1.5,
subhead.labels = c("Time-Interactions Effects","Main Effects"),
ylbl = FALSE,
seplim = NULL,
eps = 0.1,
Width_Bar = 1,
path_saveplot = NULL,
Verbose = TRUE
)
{
if(is.null(vimp) ){
stop("vimp is not present in the object")
}
# number of x variables (rows of the first VIMP matrix)
p <- nrow(vimp[[1]])
if(is.null(xvar.names)){
xvar.names <- paste("x",1:p,sep="")
}
# convert VIMP proportions to percentages
vimp <- lapply(vimp,function(v){
v*100
})
# one plot per outcome column
n.Q <- ncol(vimp[[1]])
if(is.null(Q_set)){
Q_set <- paste("V",seq(n.Q),sep="")
}
for(q in 1:n.Q){
if(is.null(path_saveplot)){
path_saveplot <- tempdir()
}
Plot_Name <- if(n.Q == 1) "VIMPplot.pdf" else paste("VIMPplot_Prob(y = ",Q_set[q],")",".pdf",sep="")
pdf(file = paste(path_saveplot,"/",Plot_Name,sep=""),width = 10,height = 10)
if(!Time_Interaction){
# simple one-sided barplot of main-effect VIMP only
ylim <- range(vimp[[1]][,q]) + c(-0,ymaxlim)
yaxs <- pretty(ylim)
yat <- abs(yaxs)
bp <- barplot(pmax(as.matrix(vimp[[1]][,q]),0),beside=T,width = Width_Bar,col=col,ylim=ylim,yaxt="n",main = main,cex.lab=cex.lab)
text(c(bp), pmax(as.matrix(vimp[[1]][,q]),0) + eps, rep(xvar.names, 3),srt=90,adj= 0.0,cex=if(!is.null(cex.xlab)) cex.xlab else 1)
axis(2,yaxs,yat)
}else
{
# mirrored barplot: main effects upward, time interactions downward
vimp.x <- vimp[[1]][,q]
vimp.time <- vimp[[2]][,q]
# symmetric y range around zero, extended by the user-supplied paddings
ylim <- max(c(vimp.x,vimp.time)) * c(-1, 1) + c(-ymaxtimelim,ymaxlim)
if(ylbl){
ylabel <- paste("Time-Interactions", "Main Effects", sep = if(!is.null(seplim)) seplim else " " )
}else
{
ylabel <- ""
}
yaxs <- pretty(ylim)
yat <- abs(yaxs)
# default positions for the two sub-heading labels
if(is.null(yaxishead)){
yaxishead <- c(-ylim[1],ylim[2])
}
if(is.null(xaxishead)){
xaxishead <- c(floor(p/4),floor(p/4))
}
bp1 <- barplot(pmax(as.matrix(vimp.x),0),width = Width_Bar,horiz = FALSE,beside=T,col=col,ylim=ylim,yaxt="n",ylab = ylabel,cex.lab=cex.lab,
main = main)
text(c(bp1), pmax(as.matrix(vimp.x),0) + eps, rep(xvar.names, 3),srt=90,adj= 0.0,cex=if(!is.null(cex.xlab)) cex.xlab else 1)
text(xaxishead[2],yaxishead[2],labels = subhead.labels[2],cex = subhead.cexval)
# negative bars for the time-interaction effects, added to the same plot
bp2 <- barplot(-pmax(as.matrix(vimp.time),0) - eps,width = Width_Bar,horiz = FALSE,beside=T,col=col,add=TRUE,yaxt="n")
#text(c(bp2), -4, rep(xvar.names, 3),srt=270,adj= 0,yaxt="n",cex=if(!is.null(cex.xlab)) cex.xlab else 1)
text(xaxishead[1],-yaxishead[1],labels = subhead.labels[1],cex = subhead.cexval)
axis(2,yaxs,yat)
}
dev.off()
if(Verbose){
cat("Plot will be saved at:",path_saveplot,sep = "","\n")
}
}
}
|
6b10611dbadc467f15a435c0fcbf9833d09a2996
|
e6708b6ab96e58cd359419563c3da4448f080cbc
|
/plot3.R
|
8816f929aa2381c37f755d7789c6ca361e656a35
|
[] |
no_license
|
eudeseduR/Exploratory-Data-Analysis
|
e0ce6afd23482b502b43144999188e6f068945fb
|
5378ed93d293ebf1b8521afb5307231bddbf625c
|
refs/heads/master
| 2021-01-21T02:21:16.248752
| 2015-12-13T20:47:49
| 2015-12-13T20:47:49
| 47,934,395
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 845
|
r
|
plot3.R
|
# Plot 3: energy sub-metering over time for 1-2 Feb 2007.
# NOTE(review): the hard-coded setwd() ties this script to one machine;
# consider using relative paths instead.
setwd("~/Documents/Coursera/Data Science/04 - Exploratory Data Analysis/Project 1")
# Read the full power-consumption file; "?" marks missing values in the raw data.
dataset <- read.table("./household_power_consumption.txt", header=T, sep=";", na.strings="?", stringsAsFactors=F)
View(dataset)
# Keep only the two days of interest (dates are stored as d/m/Y strings).
datefilter <- dataset[dataset$Date %in% c("1/2/2007","2/2/2007") ,]
View(datefilter)
# Combine the Date and Time columns into a single POSIXlt timestamp for the x axis.
newdatetime <- strptime(paste(datefilter$Date, datefilter$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
# Draw the three sub-metering series on one pair of axes.
plot(newdatetime, as.numeric(datefilter$Sub_metering_1), type="l", xlab="", ylab="Energy Submetering")
lines(newdatetime, as.numeric(datefilter$Sub_metering_2), type="l", col="red")
lines(newdatetime, as.numeric(datefilter$Sub_metering_3), type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2, col=c("black", "red", "blue"))
# Copy the on-screen plot to a 480x480 PNG and close the PNG device.
dev.copy(png, file="plot3.png", height=480, width=480)
dev.off()
|
5f72cbde444ff4cc9ada2081802428fe94670a3f
|
2e075a0fe5b46c8a2643cff3032c370f5c4e8c9a
|
/R_scripts/diamond_princess_tidied.R
|
50601cbe92fa063b105975e0cd62c30029c84585
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
laasousa/COVID19_CFR_submission
|
17f8032e93f50326466779e93d52f89aa103d899
|
af183448e4d58ddf9b68ccb7f8e48e134a91498d
|
refs/heads/master
| 2021-04-08T22:10:32.071065
| 2020-03-18T15:35:28
| 2020-03-18T15:35:28
| 248,814,057
| 1
| 0
|
MIT
| 2020-03-20T17:26:52
| 2020-03-20T17:26:51
| null |
UTF-8
|
R
| false
| false
| 2,552
|
r
|
diamond_princess_tidied.R
|
##########################################################
# ESTIMATE IFR ON THE DIAMOND PRINCESS
##########################################################
library(binom)
# logit / inverse-logit helpers used for the cumulative-positive regression
logit<-function(x) log(x/(1-x))
invlogit<-function(x) exp(x)/(1+exp(x))
# daily testing data from the ship; date_report arrives as d/m/Y strings
tests<-read.csv("data/test_dates_dp.csv")
date_format <- "%d/%m/%Y"
tests$date_report<-as.numeric(as.Date(as.character(tests$date_report),date_format))
tests<-tests[which(!is.na(tests$date_report)),]
# cumulative positives as a proportion of the final total
tot_cases<-max(tests$cum_positive,na.rm=T)
tests$cum_pc_pos<-tests$cum_positive/tot_cases
# logit of the cumulative proportion; the 0.5/tot_cases offset keeps the
# final value (proportion = 1) finite on the logit scale
tests$logit_cum_pos<-logit(tests$cum_pc_pos-0.5/tot_cases)
tests$time_elapsed<-tests$date_report-min(tests$date_report)
### FITTED ONSET TO DEATH PARAMETERS FROM MAIN ANALYSIS
mean_OD<-17.8324 #18.8
s_OD<-0.4226 #0.45
# gamma shape/scale for the onset-to-death distribution
alpha <- 1/(s_OD^2)
beta <- 1/(1/(mean_OD*s_OD^2))
### ANALYSIS
today<-as.Date("2020-03-05")
## generate weight data points from each day.
tests$p<-(tests$num_positive+1e-10)/tests$num_tested ## ## sums to 657, not the total 706 cases but the most we have.
tests$wgt<- 1/(tests$p*(1-tests$p)/tests$num_tested) ## 1/variance of proportion.
# weighted linear fit of logit(cumulative proportion positive) against time
cx<-lm(tests$logit_cum_pos ~tests$time_elapsed, weights=tests$wgt)
a<-cx$coefficients[1]
b<-cx$coefficients[2]
# back-transform the fitted line and rescale so it ends at 1
tests$predict_logit_pos<-a +tests$time_elapsed*b
tests$predict_pc_pos<-invlogit(tests$predict_logit_pos)
tests$predict_pc_pos<-tests$predict_pc_pos/max(tests$predict_pc_pos)
x<-seq(0,30,0.1)
predict_pos<-invlogit(a +x*b)
# observed cumulative proportions with the fitted curve overlaid
plot(tests$time_elapsed,tests$cum_pc_pos,xlab="days since 5th Feb",ylab="proportion positive",pch=19)
lines(x,predict_pos)
# convert the fitted cumulative curve into daily increments (new infections)
tests$predict_new<-tests$predict_pc_pos
tests$predict_new[2:nrow(tests)]<-tests$predict_pc_pos[2:nrow(tests)]-tests$predict_pc_pos[1:(nrow(tests)-1)]
### estimate proportion of deaths we expect to have occurred by now allowing for onset times
tests$elapsed<-as.numeric(as.Date("2020-03-05"))-tests$date_report
tests$prop_deaths_by_now<-pgamma(tests$elapsed,shape=alpha,scale=beta,lower.tail = T)
# weight each day's death-completion probability by the share of infections
# predicted to have occurred that day
tests$prop_deaths_by_now_weighted<-tests$predict_new*tests$prop_deaths_by_now
prop_deaths_by_now<-sum(tests$prop_deaths_by_now_weighted)
print(prop_deaths_by_now)
### 20.6 total deaths are predicted, obtained by applying age-specific IFR estimates from China to the age distribution
# of passengers on the diamond princess
prop_deaths_by_now*20.6 ## number of deaths expected by 5th March using the age specific IFR.
## 95% CI around deaths currently observed among cases on the ship.
binom.confint(x=7,n=706 , method='exact')
|
ac28fc92dcd558f429e5058e6cf680394fe1d45c
|
fa782fbc6cef7e575fcd137a41eecbd6886812d1
|
/man/rowCounts.Rd
|
353839b9d03aa407fcba3b9482435dcef405a2b3
|
[] |
no_license
|
kaushikg06/matrixStats
|
6f5ae3032cfeb30ade1d88d6b53e5effae9770bd
|
87d3a6eeefcd37571214af7f1700ca98be7b9703
|
refs/heads/master
| 2021-01-21T06:47:28.960648
| 2016-10-09T15:35:30
| 2016-10-09T15:35:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,996
|
rd
|
rowCounts.Rd
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% rowCounts.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{rowCounts}
\alias{rowCounts}
\alias{colCounts}
\alias{count}
\alias{allValue}
\alias{anyValue}
\alias{rowAnys}
\alias{colAnys}
\alias{rowAlls}
\alias{colAlls}
\title{Counts the number of TRUE values in each row (column) of a matrix}
\description{
Counts the number of TRUE values in each row (column) of a matrix.
}
\usage{
count(x, idxs=NULL, value=TRUE, na.rm=FALSE, ...)
rowCounts(x, rows=NULL, cols=NULL, value=TRUE, na.rm=FALSE, dim.=dim(x), ...)
colCounts(x, rows=NULL, cols=NULL, value=TRUE, na.rm=FALSE, dim.=dim(x), ...)
rowAlls(x, rows=NULL, cols=NULL, value=TRUE, na.rm=FALSE, dim.=dim(x), ...)
colAlls(x, rows=NULL, cols=NULL, value=TRUE, na.rm=FALSE, dim.=dim(x), ...)
allValue(x, idxs=NULL, value=TRUE, na.rm=FALSE, ...)
rowAnys(x, rows=NULL, cols=NULL, value=TRUE, na.rm=FALSE, dim.=dim(x), ...)
colAnys(x, rows=NULL, cols=NULL, value=TRUE, na.rm=FALSE, dim.=dim(x), ...)
anyValue(x, idxs=NULL, value=TRUE, na.rm=FALSE, ...)
}
\arguments{
\item{x}{An NxK \code{\link[base]{matrix}} or an N*K \code{\link[base]{vector}}.}
\item{idxs, rows, cols}{A \code{\link[base]{vector}} indicating subset of elements (or rows and/or columns)
to operate over. If \code{\link[base]{NULL}}, no subsetting is done.}
\item{value}{A value to search for.}
\item{na.rm}{If \code{\link[base:logical]{TRUE}}, \code{\link[base]{NA}}s are excluded first, otherwise not.}
\item{dim.}{An \code{\link[base]{integer}} \code{\link[base]{vector}} of length two specifying the
dimension of \code{x}, also when not a \code{\link[base]{matrix}}.}
\item{...}{Not used.}
}
\value{
\code{rowCounts()} (\code{colCounts()}) returns an \code{\link[base]{integer}} \code{\link[base]{vector}}
of length N (K).
The other methods returns a \code{\link[base]{logical}} \code{\link[base]{vector}} of length N (K).
}
\details{
  These functions take either a \code{\link[base]{matrix}} or a
  \code{\link[base]{vector}} as input. If a \code{\link[base]{vector}}, then
  argument \code{dim.} must be specified and fulfill
  \code{prod(dim.) == length(x)}. The result will be identical to the result
  obtained when passing \code{matrix(x, nrow=dim.[1L], ncol=dim.[2L])}, but
  avoids having to temporarily create/allocate a \code{\link[base]{matrix}},
  if such is needed only for these calculations.
}
\examples{
x <- matrix(FALSE, nrow=10, ncol=5)
x[3:7,c(2,4)] <- TRUE
x[2:4,] <- TRUE
x[,1] <- TRUE
x[5,] <- FALSE
x[,5] <- FALSE
print(x)
print(rowCounts(x)) # 1 4 4 4 0 3 3 1 1 1
print(colCounts(x)) # 9 5 3 5 0
print(rowAnys(x))
print(which(rowAnys(x))) # 1 2 3 4 6 7 8 9 10
print(colAnys(x))
print(which(colAnys(x))) # 1 2 3 4
}
\author{Henrik Bengtsson}
\keyword{array}
\keyword{logic}
\keyword{iteration}
\keyword{univar}
|
9a8d9281a160bf507281e3910cc35286f8bafa7c
|
10b73702a67647732753a696da393b2ac1d7f78d
|
/Raster_afstande.R
|
861068a982eb4bc1fd22cfa6967ff7adf3ff52de
|
[] |
no_license
|
JohanFunder/Kirkeugle
|
929a1f72e63f3309c20a6429f11932ec1d102cd8
|
b82052c00594770b501acfa71d9f984dd2532e73
|
refs/heads/main
| 2023-02-02T02:06:31.815939
| 2020-12-23T04:51:44
| 2020-12-23T04:51:44
| 317,076,340
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,777
|
r
|
Raster_afstande.R
|
# Map the last recorded GPS positions of the tagged individuals.
#install.packages('raster')
#install.packages('sf')
library(sf)
library(raster)
library(plotly)
library(lattice)
library(plot3D)
library(rgl)
# NOTE(review): prefer library(); require() only returns FALSE on failure
require(rgdal)
# Load shapefile point data (last positions) for each tagged individual
points_45067 <- readOGR(dsn="D:/GIS_Kirkeugle/shp_filer_ugledata/Sidste_positioner", layer="45067_sidste_positioner") # read .shp
points_45068 <- readOGR(dsn="D:/GIS_Kirkeugle/shp_filer_ugledata/Sidste_positioner", layer="45068_sidste_positioner") # read .shp
points_45069 <- readOGR(dsn="D:/GIS_Kirkeugle/shp_filer_ugledata/Sidste_positioner", layer="45069_sidste_positioner") # read .shp
points_45072 <- readOGR(dsn="D:/GIS_Kirkeugle/shp_filer_ugledata/Sidste_positioner", layer="45072_sidste_positioner") # read .shp
points_45074 <- readOGR(dsn="D:/GIS_Kirkeugle/shp_filer_ugledata/Sidste_positioner", layer="45074_sidste_positioner") # read .shp
points_45076 <- readOGR(dsn="D:/GIS_Kirkeugle/shp_filer_ugledata/Sidste_positioner", layer="45076_sidste_positioner") # read .shp
# Build one combined data frame of the points, tagged with the individual's id
points_samlet <- NULL
points_samlet <- rbind.data.frame(c(data.frame(points_45067,id='45067')),
c(data.frame(points_45068,id='45068')),
c(data.frame(points_45069,id='45069')),
c(data.frame(points_45072,id='45072')),
c(data.frame(points_45074,id='45074')),
c(data.frame(points_45076,id='45076')))
# Remove zero-geometry rows (currently disabled)
#points_samlet <- points_samlet[points_samlet$coords.x1 != 0, ]
# Define the factor levels for id (set by reference via data.table::setattr)
library('data.table')
setattr(points_samlet$id,"levels",c('45067','45068',
'45069','45072',
'45074','45076'))
###### Plot data (first version of the map)
library(ggplot2)
ggplot() + coord_fixed(ratio = 1,expand = TRUE, clip = "on") +
geom_point(data = points_samlet, aes(x = coords.x1, y = coords.x2, color = id),
size=2.5, alpha=0.6) +
geom_path(data = points_samlet, aes(x = coords.x1, y = coords.x2, color = id),
size=.5, alpha=0.6) +
scale_colour_manual(values=c("45067"="black","45068"="navyblue",
"45069"="grey50","45072"="green4",
"45074"="grey30","45076"="red4"),
name="Activity") +
theme_bw() +
theme(text = element_text(size=15),
axis.text.y = element_text(face="bold"),
axis.text.y.right = element_text(face="bold",color='navyblue'),
axis.title.y.right = element_text(color = "navyblue"),
axis.text.x = element_text(face = "bold",angle=-45, vjust=0.3, hjust=0),
panel.grid.minor = element_blank(),
legend.position = "right") +
scale_y_continuous(name = "Latitude",
breaks = c(6270000,6280000,6290000,6300000,6310000),
limits = c(6279600,6310000)) +
scale_x_continuous(name = "Longitude",
breaks = c(540000,550000,560000,570000,580000),
limits = c(545000,570400))
###### Plot data (updated version: larger text, relabelled legend)
library(ggplot2)
ggplot() + coord_fixed(ratio = 1,expand = TRUE, clip = "on") +
geom_point(data = points_samlet, aes(x = coords.x1, y = coords.x2, color = id),
size=2.5, alpha=0.6) +
geom_path(data = points_samlet, aes(x = coords.x1, y = coords.x2, color = id),
size=.5, alpha=0.6) +
scale_colour_manual(values=c("45067"="black","45068"="navyblue",
"45069"="grey50","45072"="green4",
"45074"="grey30","45076"="red4"),
name="GPS-tag number") +
guides(color = guide_legend(override.aes = list(alpha = 1,size = 1.6))) +
theme_bw() +
theme(text = element_text(size=20),
axis.text.y = element_text(face="bold"),
axis.text.y.right = element_text(face="bold",color='navyblue'),
axis.title.y.right = element_text(color = "navyblue"),
axis.text.x = element_text(face = "bold"),
panel.grid.minor = element_blank(),
legend.key.size = unit(2,"line"),
legend.title = element_text(size = 25),
legend.text = element_text(size = 20),
legend.justification = "top") +
scale_y_continuous(name = "Latitude",
breaks = c(6270000,6280000,6290000,6300000,6310000),
limits = c(6279600,6310000)) +
scale_x_continuous(name = "Longitude",
breaks = c(540000,550000,560000,570000,580000),
limits = c(545000,570400))
# Enlarge the legend key points after drawing (note: unit below is 4 mm)
grid::grid.gedit("key-[-0-9]-1-1", size = unit(4, "mm"))
|
f2bf1b0f3afb77d241a379928ee629ea192b3f5c
|
9578bd1bf4ebe7fd4cf2779449214379d5399fb2
|
/R/condition_fun.R
|
6052f195ebe5ff94f1eac4ce97cc96e74edca26d
|
[
"MIT"
] |
permissive
|
yanheluke/scorecard
|
b6c44f6bed5fa19b8624eed5981447fa2c626e20
|
81bddfd43831a909e12f9cc9d190574da5c61ed8
|
refs/heads/master
| 2020-03-17T05:34:01.256856
| 2018-05-12T07:46:33
| 2018-05-12T07:46:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,163
|
r
|
condition_fun.R
|
# conditions # https://adv-r.hadley.nz/debugging
# remove date time # rm_datetime_col
# remove columns if len(x.unique()) == 1
# Remove non-informative columns from a data.table:
#  - columns with exactly one unique value (constants), and
#  - date/time columns (Date, POSIXlt, POSIXct, POSIXt).
# A warning names every column that is dropped.
#
# @param dt a data.frame/data.table; converted in place with setDT().
# @return the input as a data.table with the offending columns removed.
rmcol_datetime_unique1 = function(dt) {
  dt = setDT(dt)
  # columns whose values are all identical carry no information
  unique1_cols = names(which(dt[,sapply(.SD, function(x) length(unique(x))==1)]))
  # bugfix: was `length(unique1_cols > 0)` (the length of a logical vector),
  # which only evaluated correctly by accident; test the length itself.
  if (length(unique1_cols) > 0) {
    warning(paste0("There are ", length(unique1_cols), " columns have only one unique values, which are removed from input dataset. \n (ColumnNames: ", paste0(unique1_cols, collapse=', '), ")" ))
    dt = copy(dt)[, (unique1_cols) := NULL]
  }
  # inherits() checks the whole class vector, unlike class(x)[1] == ...
  isdatetime = function(x) inherits(x, c("Date","POSIXlt","POSIXct","POSIXt"))
  datetime_col = names(which(dt[,sapply(.SD, isdatetime)]))
  if (length(datetime_col) > 0) {
    warning(paste0("The date/times columns (",paste0(datetime_col,collapse = ","),") are removed from input dataset."))
    dt = copy(dt)[,(datetime_col) := NULL]
  }
  return(dt)
}
# replace blank by NA
#' @import data.table
#'
# Replace empty-string cells with NA across the whole table, warning about
# every column in which blanks were found. Returns the data.table.
rep_blank_na = function(dt) {
  dt = setDT(dt)
  # flag each column that contains at least one "" entry
  blank_flags = dt[, sapply(.SD, function(col) any(col == "", na.rm = TRUE))]
  if (any(blank_flags)) {
    warning("There are blank characters in the columns of \"", paste0(names(which(blank_flags)), collapse = ",") ,"\", which were replaced by NAs.")
    dt[dt == ""] = NA
  }
  return(dt)
}
# check y
#' @import data.table
#'
# Validate and normalise the response column `y` of dt.
# - dt must have >= 2 columns and contain a column named `y`.
# - Rows with NA in y are dropped (with a warning).
# - y must have exactly two unique values; if they are not already {0, 1},
#   values matching the regex `positive` are recoded to 1, the rest to 0.
# Returns the (possibly filtered/recoded) data.table; stops on invalid input.
check_y = function(dt, y, positive){
dt = setDT(dt)
# dt must contain at least one x column besides y
if (ncol(dt) <=1 & !is.null(ncol(dt))) stop("Incorrect inputs; dt should have at least two columns.")
# the named y column must exist
if (!(y %in% names(dt))) stop(paste0("Incorrect inputs; there is no \"", y, "\" column in dt."))
# y must be a single column name
if (length(y) != 1) stop("Incorrect inputs; the length of \"",y,"\" != 1.")
# drop rows where the response is missing
if ( anyNA(dt[[y]]) ) {
warning(paste0("There are NAs in ", y, ". The rows with NA in \"", y, "\" were removed from input data."))
y_sel = !is.na(dt[[y]]); dt = dt[y_sel]
}
# the response must be binary
if (length(unique(dt[[y]])) == 2) {
# %in% binds tighter than ==, so this reads
# any((c(0,1) %in% unique(dt[[y]])) == FALSE), i.e. "0 and 1 not both present"
if ( any(c(0,1) %in% unique(dt[[y]]) == FALSE) ) {
if (any(grepl(positive, dt[[y]])==TRUE)) {
# recode: values matching the `positive` pattern -> 1, all others -> 0
warning(paste0("The positive value in \"", y,"\" was replaced by 1 and negative value by 0."))
dt[[y]] = ifelse(grepl(positive, dt[[y]]), 1, 0)
} else {
stop(paste0("Incorrect inputs; the positive value in \"", y, "\" is not specified"))
}
}
} else {
stop(paste0("Incorrect inputs; the length of unique values in \"",y , "\" != 2."))
}
return(dt)
}
# check print_step
#' @import data.table
#'
# Validate the `print_step` argument: it must be a non-negative number.
# Invalid values trigger a warning and fall back to the default 1L.
check_print_step = function(print_step) {
  is_valid = is.numeric(print_step) && print_step >= 0
  if (!is_valid) {
    warning("Incorrect inputs; print_step should be a non-negative integer. It was set to 1L.")
    print_step = 1L
  }
  return(print_step)
}
# x variable
# Resolve the set of x (predictor) variable names for dt.
# NULL means "every column except y"; requested names that are not columns
# of dt are dropped with a warning.
x_variable = function(dt, y, x) {
  candidate_xs = setdiff(names(dt), y)
  if (is.null(x)) {
    return(candidate_xs)
  }
  unknown_xs = setdiff(x, candidate_xs)
  if (length(unknown_xs) > 0) {
    warning(paste0("Incorrect inputs; the variables \n\"", paste0(unknown_xs, collapse = ","), "\"\n are not exist in input data, which are removed."))
    x = intersect(x, candidate_xs)
  }
  return(x)
}
# check break_list
# Validate the user-supplied `breaks_list` (manual binning breaks keyed by
# x-variable name). A character value is parsed and evaluated into the list
# it describes. The list's names are compared against `xs`; mismatches warn
# but do not stop, and x variables without breaks use optimal binning.
# NOTE(review): eval(parse(...)) executes arbitrary code from the argument --
# only acceptable because breaks_list is supplied by the package user.
check_breaks_list = function(breaks_list, xs) {
if (!is.null(breaks_list)) {
# allow the list to be given as a character string, e.g. "list(x1=c(1,2))"
if (is.character(breaks_list)) {
breaks_list = eval(parse(text = breaks_list))
}
if (!is.list(breaks_list)) {
stop("Incorrect inputs; breaks_list should be a list.")
} else {
xs_breakslist = names(breaks_list)
if (!identical(xs_breakslist, xs)) {
# names in breaks_list that are not x variables
names_bl_x = setdiff(xs_breakslist, xs)
if (length(names_bl_x) > 0) {
warning(paste0("Incorrect inputs; the variables \n", paste0(names_bl_x, collapse = ","), "\n specified in breaks_list donot exist in x."))
}
# x variables for which no breaks were supplied
names_x_bl = setdiff(xs, xs_breakslist)
if (length(names_x_bl) >0) {
warning("There are ",length(names_x_bl)," x variables that donot specified in breaks_list are using optimal binning.")
}
}
}
}
return(breaks_list)
}
# check special_values
# Normalise the `special_values` argument into a named list keyed by the
# x-variable names in `xs`.
#  - NULL passes through unchanged.
#  - A plain vector is replicated: every variable in `xs` gets the same values.
#  - A list must be named consistently with `xs`; mismatches only warn.
# Anything else is an error.
check_special_values = function(special_values, xs) {
  if (is.null(special_values)) {
    return(special_values)
  }
  if (is.vector(special_values) && !is.list(special_values)) {
    # one shared vector of special values -> replicate it per x variable
    special_values = setNames(rep(list(special_values), length(xs)), xs)
  } else if (is.list(special_values)) {
    xs_sv = names(special_values)
    if (!identical(xs_sv, xs)) {
      # names in special_values that are not x variables
      extra_names = setdiff(xs_sv, xs)
      if (length(extra_names) > 0) {
        warning(paste0("Incorrect inputs; the variables \n", paste0(extra_names, collapse = ","), "\n specified in special_values donot exist in x."))
      }
      # x variables for which no special values were supplied
      missing_names = setdiff(xs, xs_sv)
      if (length(missing_names) > 0) {
        warning("There are ",length(missing_names)," x variables that donot specified in special_values donot have special values.")
      }
    }
  } else {
    stop("Incorrect inputs; special_values should be a vector or list.")
  }
  return(special_values)
}
|
52aa752d4dca62e534d5b0b1dfe159af9fca2848
|
e2ecbae6733ba2c9246b0f6f1d889340d3460137
|
/GeneBook/man/f.dir.create.Rd
|
8efe685b209152cb78920211227679f243c1db20
|
[] |
no_license
|
siyanc/GeneBook-Package
|
628e95ed226359dac9a8f41688514dc97fb77f27
|
e2f4ec6246f417bf1cacd90b45981da156302275
|
refs/heads/master
| 2020-06-15T15:19:34.300338
| 2020-06-05T20:11:55
| 2020-06-05T20:11:55
| 195,330,228
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 156
|
rd
|
f.dir.create.Rd
|
\name{f.dir.create}
\alias{f.dir.create}
\title{
Create Output Path for a folder
}
\description{
Creates the output path for a folder.
}
\usage{
f.dir.create()
}
|
d6beff12b9dfdd626b1e67fdb30bb45aaec3b34a
|
4f77baba507831ee4e171727796de6b3326da785
|
/weatherApp/man/get_forecast.Rd
|
e90b17b66974bdb46654a0fd95a424cd72ecf929
|
[
"MIT"
] |
permissive
|
Programming-The-Next-Step/weather_app
|
4032fef148452522bef035bf3d8c920a766dca98
|
5daac9e5eacaee15cdfba6a19c679a593cf9e91e
|
refs/heads/master
| 2022-10-06T04:22:22.651798
| 2020-05-29T09:03:00
| 2020-05-29T09:03:00
| 260,908,608
| 0
| 0
| null | 2020-05-29T09:03:01
| 2020-05-03T12:18:02
|
R
|
UTF-8
|
R
| false
| true
| 781
|
rd
|
get_forecast.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/weather_app_functions.R
\name{get_forecast}
\alias{get_forecast}
\title{get_forecast gets the current/hourly/daily weather forecast for a location.}
\usage{
get_forecast(cur_hour_day = "current", location, api_key)
}
\arguments{
\item{cur_hour_day}{String, either "current","hourly" or "daily".}
\item{location}{String that describes a geographic location.}
\item{api_key}{String that represents your personal api Key from www.openweathermap.org}
}
\value{
A complex list with hourly/daily/current weather data.
}
\description{
The function \emph{get_forecast} retrieves weather data from openweathermap.org.
}
\examples{
my_forecast <- get_forecast("hourly", "Amsterdam, Niederlande", my_api_key)
}
|
9191f1d817c274b9731ab262d8085c6fb61cfb28
|
fe34071285a5367525db81f26d459ed64af2fbe0
|
/src/armodel.r
|
a93efe5711e344e7262fb4a4412bd5b5190718e5
|
[] |
no_license
|
xiangzhu/fmclst
|
12f6f047f0bb28af06c613a28a8febb69d66471d
|
504a44260ade90a31876cbe79a407bcbaa6a2082
|
refs/heads/master
| 2021-03-12T21:24:05.897200
| 2016-01-24T22:08:20
| 2016-01-24T22:08:20
| 25,955,011
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,970
|
r
|
armodel.r
|
# The six models that are used to define the factor process in the simulation.
# Model 1: AR
# Model 1: AR(1) with coefficient 0.6.
# A 100-step burn-in is run first; the series is then restarted from the
# last burn-in value and simulated for L steps.
# L     : number of steps to simulate after burn-in
# sigma : standard deviation of the Gaussian innovations
aur <- function(L, sigma) {
  traj <- NULL
  traj[1] <- 0
  # Burn-in phase: iterate the recursion for 100 steps.
  for (t in 2:100) {
    shock <- rnorm(1, mean = 0, sd = sigma)
    traj[t] <- traj[t - 1] * 0.6 + shock
  }
  # Seed the real run with the final burn-in value.
  traj[1] <- traj[100]
  for (t in 2:L) {
    shock <- rnorm(1, mean = 0, sd = sigma)
    traj[t] <- traj[t - 1] * 0.6 + shock
  }
  return(traj)
}
# Model 2: Bilinear
# Model 2: bilinear process with a random AR coefficient.
# Same 100-step burn-in scheme as the other models; two innovations are
# drawn per step (coefficient noise first, then level noise).
bl <- function(L, sigma) {
  traj <- NULL
  traj[1] <- 0
  for (t in 2:100) {
    coef_noise <- rnorm(1, mean = 0, sd = sigma)
    level_noise <- rnorm(1, mean = 0, sd = sigma)
    traj[t] <- (0.3 - 0.2 * coef_noise) * traj[t - 1] + 1 + level_noise
  }
  traj[1] <- traj[100]
  for (t in 2:L) {
    coef_noise <- rnorm(1, mean = 0, sd = sigma)
    level_noise <- rnorm(1, mean = 0, sd = sigma)
    traj[t] <- (0.3 - 0.2 * coef_noise) * traj[t - 1] + 1 + level_noise
  }
  return(traj)
}
# Model 3: EXPAR
# Model 3: EXPAR (exponential autoregressive) process.
# Same burn-in/restart scheme as the other simulators.
ex <- function(L, sigma) {
  traj <- NULL
  traj[1] <- 0
  for (t in 2:100) {
    prev <- traj[t - 1]
    traj[t] <- (0.9 * exp(-(prev)^2) - 0.6) * prev + 1 + rnorm(1, mean = 0, sd = sigma)
  }
  traj[1] <- traj[100]
  for (t in 2:L) {
    prev <- traj[t - 1]
    traj[t] <- (0.9 * exp(-(prev)^2) - 0.6) * prev + 1 + rnorm(1, mean = 0, sd = sigma)
  }
  return(traj)
}
# Model 4: SETAR
# Model 4: SETAR (self-exciting threshold AR) with threshold 0.2.
# Same burn-in/restart scheme as the other simulators.
se <- function(L, sigma) {
  traj <- NULL
  traj[1] <- 0
  for (t in 2:100) {
    prev <- traj[t - 1]
    # Regime switch on the previous value.
    drift <- if (prev >= 0.2) 0.3 * prev + 1 else -(0.3 * prev - 1)
    traj[t] <- drift + rnorm(1, mean = 0, sd = sigma)
  }
  traj[1] <- traj[100]
  for (t in 2:L) {
    prev <- traj[t - 1]
    drift <- if (prev >= 0.2) 0.3 * prev + 1 else -(0.3 * prev - 1)
    traj[t] <- drift + rnorm(1, mean = 0, sd = sigma)
  }
  return(traj)
}
# Model 5: NLAR
# Model 5: NLAR (nonlinear AR) process.
# Same burn-in/restart scheme as the other simulators.
nl <- function(L, sigma) {
  traj <- NULL
  traj[1] <- 0
  for (t in 2:100) {
    prev <- traj[t - 1]
    traj[t] <- 0.7 * abs(prev) / (2 + abs(prev)) + rnorm(1, mean = 0, sd = sigma)
  }
  traj[1] <- traj[100]
  for (t in 2:L) {
    prev <- traj[t - 1]
    traj[t] <- 0.7 * abs(prev) / (2 + abs(prev)) + rnorm(1, mean = 0, sd = sigma)
  }
  return(traj)
}
# Model 6: STAR
# Model 6: STAR (smooth transition AR) with a logistic transition.
# Same burn-in/restart scheme as the other simulators.
st <- function(L, sigma) {
  traj <- NULL
  traj[1] <- 0
  for (t in 2:100) {
    prev <- traj[t - 1]
    traj[t] <- 0.8 * prev - 0.8 * prev / (1 + exp(-10 * prev)) + rnorm(1, mean = 0, sd = sigma)
  }
  traj[1] <- traj[100]
  for (t in 2:L) {
    prev <- traj[t - 1]
    traj[t] <- 0.8 * prev - 0.8 * prev / (1 + exp(-10 * prev)) + rnorm(1, mean = 0, sd = sigma)
  }
  return(traj)
}
|
171ac2fe7251b730d6e6d56b96500f25ce7f268f
|
1249a147bbd4c426633ccf6ee2e150acae374291
|
/R/art.R
|
05bd5e569ff4f9e69972a507319b09240201e49f
|
[] |
no_license
|
analythium/assets
|
28e004931e606dae6b6742aabc68b3fa4ff491ab
|
0cdf4f0d5721e84439d34ce3b11dc03b8e20a16b
|
refs/heads/master
| 2023-07-09T09:00:25.471993
| 2023-07-08T18:36:15
| 2023-07-08T18:36:15
| 253,088,391
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,635
|
r
|
art.R
|
library(tidyverse)
library(magick)
# functions from https://github.com/djnavarro/ashtree
# helper functions --------------------------------------------------------
# Convert an angle (or vector of angles) from degrees to radians.
radians <- function(degree) {
  full_turn <- 2 * pi
  full_turn * degree / 360
}
# Shrink/stretch each segment length by a multiplier drawn (with
# replacement) from the candidate `scales`.
adjust_scale <- function(s, scales) {
  multipliers <- sample(x = scales, size = length(s), replace = TRUE)
  s * multipliers
}
# Perturb each segment heading by an offset drawn (with replacement)
# from the candidate `angles`.
adjust_angle <- function(a, angles) {
  offsets <- sample(x = angles, size = length(a), replace = TRUE)
  a + offsets
}
# End-point x coordinate of a segment of length `scale` leaving `x`
# at heading `angle` (degrees from horizontal).
adjust_x <- function(x, scale, angle) {
  step <- scale * cos(radians(angle))
  x + step
}
# End-point y coordinate of a segment of length `scale` leaving `y`
# at heading `angle` (degrees from horizontal).
adjust_y <- function(y, scale, angle) {
  step <- scale * sin(radians(angle))
  y + step
}
# ashtree functions -------------------------------------------------------
# The seed of the tree: a single vertical unit segment rooted at the
# origin (column order matters downstream, so it is kept as-is).
grow_sapling <- function() {
  tibble(
    old_x = 0, old_y = 0,  # segment starts at the origin
    new_x = 0, new_y = 1,  # and grows straight up one unit
    angle = 90,            # heading, degrees from horizontal
    scale = 1              # segment length
  )
}
# Grow one new segment out of each current tip.
# Column updates run in the same order as the original mutate() call so
# the random draws in adjust_scale()/adjust_angle() consume the RNG
# stream identically, and later columns see the already-updated values.
grow_from <- function(tips, settings) {
  all_scales <- settings$scales
  all_angles <- settings$angles
  tips$scale <- adjust_scale(tips$scale, all_scales)  # change segment length
  tips$angle <- adjust_angle(tips$angle, all_angles)  # change segment angle
  tips$old_x <- tips$new_x                            # begin where last seg ended
  tips$old_y <- tips$new_y
  tips$new_x <- adjust_x(tips$old_x, tips$scale, tips$angle)
  tips$new_y <- adjust_y(tips$old_y, tips$scale, tips$angle)
  tips
}
# Branch `settings$splits` times from the same set of tips and stack
# the resulting generations into one table.
# (1:splits kept as in the original; splits is assumed to be >= 1.)
grow_many <- function(tips, settings) {
  splits <- settings$splits
  map_dfr(1:splits, function(branch) grow_from(tips, settings))
}
# Grow a full tree: start from the sapling and apply `cycles - 1` rounds
# of branching, keeping every intermediate generation in the result.
grow_tree <- function(settings) {
  cycles <- settings$cycles
  sapling <- grow_sapling()
  generations <- accumulate(
    2:cycles,
    function(tree_so_far, cycle) grow_many(tree_so_far, settings),
    .init = sapling
  )
  bind_rows(generations)
}
# Render every segment of the tree with ggplot2 on a blank,
# equal-aspect canvas.
draw_tree <- function(tree) {
  ggplot(tree, aes(x = old_x, y = old_y, xend = new_x, yend = new_y)) +
    geom_segment(show.legend = FALSE) +
    theme_void() +
    coord_equal()
}
# generate a tree ------------------------------------
settings <- list(
  seed = 1,     # RNG seed so the artwork is reproducible
  cycles = 14,  # number of branching generations
  splits = 2,   # branches grown from each tip
  scales = c(.5, .8, .9),                  # candidate length multipliers
  angles = c(-25, -10, -5, 5, 10, 20, 35)  # candidate heading offsets (degrees)
)
set.seed(settings$seed)
tree <- grow_tree(settings)
# simple base plot to draw the tree ----------------------
# Draw the tree's segments with base graphics.
# When `add = FALSE` a fresh blank, equal-aspect canvas is opened first,
# sized to the tree extent unless xlim/ylim are given. Extra arguments
# (`...`) are forwarded to segments(). Returns `tree` invisibly so the
# call can be chained. (`subset` is accepted for interface
# compatibility but is not used.)
base_plot <- function(tree, subset = NULL, xlim = NULL, ylim = NULL, add = FALSE, ...) {
  if (is.null(xlim)) {
    xlim <- c(range(tree$old_x, tree$new_x))
  }
  if (is.null(ylim)) {
    ylim <- c(range(tree$old_y, tree$new_y))
  }
  if (!add) {
    plot(0, type = "n", axes = FALSE, ann = FALSE, xaxs = "i", yaxs = "i", asp = 1,
         xlim = xlim, ylim = ylim)
  }
  segments(tree$old_x, tree$old_y, tree$new_x, tree$new_y, ...)
  invisible(tree)
}
# save images ------------------------------
k <- 20 # number of images to save
files <- sprintf("image-%s.png", 0:(2*k))
# background color
bg <- 1
# color palette to display along the branches
col <- colorRampPalette(c("darkgrey", "grey", "lightgrey", "orange"))(k)
lwd <- rep(0.5, k) # line widths
# Exponentially spaced breaks over the row count, so later (denser)
# generations are grouped into finer stages.
br <- exp(seq(0, log(nrow(tree)), length.out = k+2)) # breaks
br <- round(br[-1])
br[1] <- 0
# Stage index per segment row: which of the k growth stages it belongs to.
ii <- as.integer(cut(seq_len(nrow(tree)), br))
# 1st image is background only, i.e. no tree
png(files[1])
op <- par(mar=c(0,0,0,0), bg=bg)
base_plot(tree, col=bg)
par(op)
dev.off()
# save images for the growing tree:
# draw branches with orange color at the tips
for (j in 1:k) {
  png(files[j+1])
  op <- par(mar=c(0,0,0,0), bg=bg)
  # Paint the whole tree in the background colour first so the canvas
  # extent stays constant across frames.
  base_plot(tree, col=bg)
  for (i in 1:j) {
    # Most recent stages get the warm end of the palette.
    .col <- rev(rev(col)[1:j])
    .lwd <- rev(rev(lwd)[1:j])
    base_plot(tree[ii==i,], col=.col[i], add=TRUE, lwd=.lwd[i])
  }
  par(op)
  dev.off()
}
# save images for fade-out:
# all the orange fades away to leave a grey tree behind
for (j in 1:k) {
  png(files[j+k+1])
  op <- par(mar=c(0,0,0,0), bg=bg)
  base_plot(tree, col=bg)
  # First j stages collapse to the base colour; the rest keep fading.
  .col <- c(rep(col[1], j), rev(rev(col)[-(1:j)]))
  for (i in 1:k) {
    base_plot(tree[ii==i,], col=.col[i], add=TRUE, lwd=lwd[i])
  }
  par(op)
  dev.off()
}
img <- image_read(files) # read in images
img3 <- image_animate(img, 10, loop=1, optimize=TRUE) # animate
image_write(img3, "tree.gif") # save gif
unlink(files) # clean up png files
|
295d5aa71f042ccad5202c3da5185a1c29c4826b
|
42dedcc81d5dc9a61a79dbcea9bdd7363cad97be
|
/simulations/archive/10_gather_subjects.R
|
d79fd043e7064d3a11393f07ae773a8e49d12c04
|
[] |
no_license
|
vishalmeeni/cwas-paper
|
31f4bf36919bba6caf287eca2abd7b57f03d2c99
|
7d8fe59e68bc7c242f9b3cfcd1ebe6fe6918225c
|
refs/heads/master
| 2020-04-05T18:32:46.641314
| 2015-09-02T18:45:10
| 2015-09-02T18:45:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,427
|
r
|
10_gather_subjects.R
|
#!/usr/bin/env Rscript
#' # Overview
#'
#' For these, simulations we have decided to make use of real data. We will
#' take the residuals from real resting-state data and then add varying amounts
#' of group differences to simulate group effects. The data I will be using will
#' be from 5 different sites (found on FCON1000 and INDI) including
#'
#' * Beijing
#' * Cambridge
#' * Rockland
#' * NYU (A...not sure what that means)
#' * Berlin
#'
#' These subjects were already preprocessing and what not in the age+gender
#' folder. So my job here will be to compile info from the other folder.
#' # Setup
#+ setup
# Needed libraries
library(plyr)
# Base Paths
basedir <- "/home/data/Projects/CWAS"
sdir <- file.path(basedir, "share/age+gender/subinfo")
# Subject info and paths
df <- read.csv(file.path(sdir, "04_all_df.csv"))
#' # Filter
#' We will only be looking at subject data for the desired sites.
#+ filter
sites.to_use <- c("Beijing A", "Cambridge", "Rockland", "New York A", "Berlin")
filt.df <- subset(df, site %in% sites.to_use)
# Per-subject ROI time-series paths; fail fast if any are missing.
roipaths <- file.path(filt.df$outdir, "func/bandpass_freqs_0.01.0.1/rois_random_k0400.nii.gz")
if (!all(file.exists(roipaths))) stop("not all roipaths exist")
#can't use compcor since didn't extract ts for it
#strat0 <- "linear1.wm1.motion1.csf1_CSF_0.98_GM_0.7_WM_0.98"
#strat1 <- "_compcor_ncomponents_5_linear1.motion1.compcor1.CSF_0.98_GM_0.7_WM_0.98"
#roipaths <- sub(strat0, strat1, roipaths)
#' # Summary
#' Let's see the Ns for each site
#+ site-ns
ddply(filt.df, .(site), nrow)
# site V1
# 1 Beijing A 188
# 2 Berlin 74
# 3 Cambridge 184
# 4 New York A 78
# 5 Rockland 132
#' # Save
#+ save
write.csv(filt.df, file="subinfo/10_subject_info.csv")
write.table(roipaths, row.names=F, col.names=F, file="subinfo/12_rois0400_paths.txt")
# TODO
# 1. Read in subject data
# 2. Compute connectivity for all possible connections
# a. then vary selection of sample size
# 3. Add in the group effect randomly
# a. only positive or half pos / half neg group differences
# b. select N nodes and vary number of connections with that node with group difference
# c. vary group difference (effect size) that added
# 4. Determine the mean global connectivity
# 5. Regress out the main effects including or not including the mean global connectivity
# 6. Run ANOVA, global, and MDMR
|
2fabb55d176d61b1baa903caf340de3859400223
|
77da83540db02cfa5c0caf0e470c2d50e9201e63
|
/DomacaNaloga1/arhiv/brilej_3naloga.r
|
8f1c0d4e768ce3cd80b7d74cba266833711107cf
|
[] |
no_license
|
tinarazic/machine_learning
|
1d42a8ee5d209a988407e73385a03f4ddd898379
|
9e8f9c73079ae8f8b2fd8e2b4ef35ab0737b2bf2
|
refs/heads/master
| 2021-01-26T03:41:57.676366
| 2020-06-16T21:11:04
| 2020-06-16T21:11:04
| 243,295,247
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,247
|
r
|
brilej_3naloga.r
|
#####################################
# nalozi knjiznice, ki jih potrebujes
# load the libraries you need
#####################################
library(caret)
library(dplyr)
library(pROC)
library(traineR)
library(matlib)
# nalozi jih tukaj, ne po klicu RNGkind spodaj
# load them here and not after the call of the RNGkind method below
#########################################################################
# Ignoriraj opozorilo (ignore the warning)
# RNGkind(sample.kind = "Rounding") : non-uniform 'Rounding' sampler used
#########################################################################
# Reproduce the pre-3.6 sample() behaviour the course materials were
# generated with (this call emits an ignorable warning on R >= 3.6).
RNGkind(sample.kind = "Rounding")
#####################################
# Sanity tests of the RNG setup
#####################################
test_runif()
test_sample()
#####################################
# Load the necessary helper functions
#####################################
# setwd("path to directory")
naloga_problem = 1
source(sprintf("funkcije%d.R", naloga_problem))
####################################
# Solve the problem
####################################
source("funkcije3.R"); seme = 288; podatki3 = naloziPodatke(seme);
# 3.1
# First approach - with the poly() function.
# NOTE: the vector RMSE shadows caret::RMSE, but R's function-call lookup
# skips non-function bindings, so RMSE(...) below still finds the function.
RMSE <- c()
for (i in 1:10){
  # Fit a degree-i polynomial of x and record its training RMSE.
  f <- bquote(y ~ poly(x, .(i), raw = TRUE))
  model<- train(as.formula(f), data = podatki3 ,method = "lm")
  rmse <- RMSE(podatki3$y, predict(model, podatki3))
  RMSE[[i]] <- rmse
}
min(RMSE)
which.min(RMSE)
# Second approach:
# add the different powers of x to the data as extra columns.
data <- podatki3
for (i in 2:10){
  potenca <- '^'(podatki3$x,i)
  #assign(paste0("x_",i),potenca)
  data <- cbind(data, potenca )
}
stolpci <- c("x","y",paste0("x_", 2:10))
colnames(data) <- stolpci
potence <- data
napake <- c()
for (i in 1:10){
  # Use the first i powers (columns 2..i+1) as predictors.
  data <- potence[,1:(i+1)]
  model <- train(y ~., data = data ,method = "lm")
  rmse <- RMSE(potence$y, predict(model, potence))
  napake[[i]] <- rmse
}
min(napake)
# NOTE(review): probably meant which.min(napake) here (see the next line).
which.min(RMSE)
s_0 <- which.min(napake)
# 3.2
# Partition the row indices 1..nrow(podatki) into k folds by residue
# modulo k: fold j contains the indices j, j + k, j + 2k, ...
#
# podatki : data frame whose rows are being partitioned
# k       : number of folds
# Returns a list of k index vectors (fold j in element j).
razbij <- function(podatki, k) {
  n <- nrow(podatki)
  folds <- list()
  for (j in 1:k) {
    members <- c()
    step <- 0
    while (step * k + j <= n) {
      members <- c(members, step * k + j)
      step <- step + 1
    }
    folds[[j]] <- members
  }
  return(folds)
}
# k = 4
# Split the data into 4 folds and inspect the mean of x in fold 1.
P <- razbij(podatki3,4)
P1 <- P[[1]]
podatki_p1 <- podatki3[P1,]
mean(podatki_p1$x)
# 3.3
k <- 4
# Build the cross-validation split: for each fold j, TT[[j]] holds the
# test indices (fold j itself) and U[[j]] the training indices (the
# union of all other folds, sorted).
P <- razbij(podatki3,k)
U <- list()
TT <- list()
for (j in 1:k){
  T_j <- P[[j]]
  U_j <- c()
  for (i in 1:k){
    if (i != j){
      U_j <- append(U_j , P[[i]])
    }
  }
  U_j <- sort(U_j)
  U[[j]] <- U_j
  TT[[j]] <- T_j
}
# RMSE
# k-fold cross-validated RMSE for polynomial degrees s = 1..10:
# fit on U[[j]], evaluate on TT[[j]], and average the fold errors.
napake_s <- c()
for (s in 1:10){
  # napake <- c()
  e_s <- 0
  for (j in 1:k){
    U_j <- U[[j]]
    T_j <- TT[[j]]
    # First s powers of x (columns 2..s+1) as predictors.
    data <- potence[,1:(s+1)]
    model <- train(y ~., data[U_j,] ,method = "lm")
    e_s_j <- RMSE(data[T_j,]$y, predict(model, data[T_j,]))
    e_s <- e_s + e_s_j / k
  }
  napake_s[[s]] <- e_s
  #sum(napake)/k # e_s
}
min(napake_s)
which.min(napake_s)
# 3.4
# Prediction variance sigma^2 * x* (X'X)^-1 x*' at the all-ones point x*
# for the full degree-10 design (intercept + x + x^2..x^10).
# NOTE: `var` shadows stats::var below; harmless in this scratch script.
# s0 = 10
sigma <- 1.0
X_zvezdica <- matrix(1,1,11)
X <- potence
X$y <- NULL
X <- as.matrix(X)
n <- nrow(potence)
# Prepend the intercept column of ones.
prosti_clen <- matrix(1,n,1)
X <- cbind(prosti_clen, X)
X_t <- t(X)
produkt <- X_t %*% X
inverz <- solve(produkt)
var <- sigma^2 * (X_zvezdica %*% inverz %*% t(X_zvezdica))
var
# 3.5
# Same prediction variance for the degree-2 design (intercept + x + x^2).
# s1 = 2
sigma <- 1.0
X_zvezdica <- matrix(1,1,3)
X <- potence
X <- X[,c("x","x_2")]
X <- as.matrix(X)
n <- nrow(potence)
prosti_clen <- matrix(1,n,1)
X <- cbind(prosti_clen, X)
X_t <- t(X)
produkt <- X_t %*% X
inverz <- solve(produkt)
var <- sigma^2 * (X_zvezdica %*% inverz %*% t(X_zvezdica))
var
###############################################
# Kode pod tem ne spreminjaj
# Do not change the code below
###############################################
# Check that runif() reproduces the course's reference draws for
# seed 1234; stops with an error on any mismatch (tolerance 1e-10).
# (Left byte-identical: the file header forbids changing this code.)
test_runif = function(){
  set.seed(1234)
  x = runif(5);
  x1 = c(0.1137034113053232, 0.6222994048148394, 0.6092747328802943, 0.6233794416766614, 0.8609153835568577)
  if (sum(abs(x - x1)) > 10^-10){
    stop("Test runif ni ok (has failed)")
  }
}
# Check that sample() reproduces the course's reference permutation for
# seed 1234. This only passes when RNGkind(sample.kind = "Rounding")
# has been set (done at the top of this script), since R >= 3.6 changed
# the default sampling algorithm.
# (Left byte-identical: the file header forbids changing this code.)
test_sample = function(){
  set.seed(1234)
  x = sample(20);
  x1 = c(3, 12, 11, 18, 14, 10, 1, 4, 8, 6, 7, 5, 20, 15, 2, 9, 17, 16, 19, 13)
  if (sum(abs(x - x1)) > 0){
    stop("Test sample ni ok (has failed)")
  }
}
|
e9bd6a67c0ce9c6e0049e07a93b073084268c48e
|
ce7b5f60b1628f0d6b4f1b1fc03b092a13cf185a
|
/quiz_3.R
|
c52553bae79c2d3b4d175e5bed35e90466505599
|
[] |
no_license
|
jjanzen/statistical_inference
|
29e767e1b6d3bf66236411038c10b7c92af75a10
|
cb96da702dca580d52d1284f6bca44436eed2142
|
refs/heads/master
| 2020-06-29T03:29:07.244980
| 2015-05-17T13:45:58
| 2015-05-17T13:45:58
| 35,767,750
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,231
|
r
|
quiz_3.R
|
# 1
# In a population of interest, a sample of 9 men yielded a sample average
# brain volume of 1,100cc and a standard deviation of 30cc.
# What is a 95% Student's T confidence interval for the mean brain volume
# in this new population?
# (The question text above was bare prose before, which made the file
# unparseable; it is now commented out.)
# http://www.cyclismo.org/tutorial/R/confidence.html
# Normal-quantile interval. NB: `mean`, `sd` and `n` shadow base/stats
# functions of the same names -- scratch-work style, kept as-is.
mean <- 1100
n <- 9
sd <- 30
alpha <- 0.05
error <- qnorm(1-alpha/2)*sd/sqrt(n)
left <- mean-error
left   # 1080.4
right <- mean+error
right  # 1119.6
# Student-t interval (wider, since qt(0.975, 8) > qnorm(0.975))
mean <- 1100
n <- 9
sd <- 30
alpha <- 0.05
error <- qt(1-alpha/2,df=n-1)*sd/sqrt(n)
left <- mean-error
left   # 1076.94 (the original inline comment said 1099, which was wrong)
right <- mean+error
right  # 1123.06
mean + c(-error,error)
# 1076.94 1123.06
# 2
# A diet pill is given to 9 subjects over six weeks. The average difference in weight (follow up - baseline)
# is -2 pounds. What would the standard deviation of the difference in weight have to be for the upper
# endpoint of the 95% T confidence interval to touch 0?
mean <- 2
n <- 9
alpha <- 0.05 # for 95% CI
tvalue <- qt(1-alpha/2,n-1) # ~2.306
sd <- mean*sqrt(n)/ tvalue
sd # ~2.60
# 3
# In an effort to improve running performance, 5 runners were either given a protein supplement or placebo.
# (The line above was bare prose before, which made the file unparseable;
# it is now commented out.)
# Then, after a suitable washout period, they were given the opposite treatment. Their mile times were
# recorded under both the treatment and placebo, yielding 10 measurements with 2 per subject.
# The researchers intend to use a T test and interval to investigate the treatment.
# Should they use a paired or independent group T test and interval?
# answer - paired (two measurements per subject)
# 4
# In a study of emergency room waiting times, investigators consider a new and the standard triage systems.
# To test the systems, administrators selected 20 nights and randomly assigned the new triage system to be
# used on 10 nights and the standard system on the remaining 10 nights. They calculated the nightly median
# waiting time (MWT) to see a physician. The average MWT for the new system was 3 hours with a variance
# of 0.60 while the average MWT for the old system was 5 hours with a variance of 0.68. Consider the 95%
# confidence interval estimate for the differences of the mean MWT associated with the new system. Assume a
# constant variance. What is the interval? Subtract in this order (New System - Old System).
n <- 10
mean_new <- 3
var_new <- 0.6
sd_new <- (var_new)   # NOTE(review): stores the variance, not sqrt(var_new)
mean_old <- 5
var_old <- 0.68
sd_old <- (var_old)   # NOTE(review): same issue; these per-group CIs are off
alpha <- 0.05
error_new <- qt(1-alpha/2,df=n-1)*sd_new/sqrt(n)
mean_new + c(-error_new,error_new)
error_old <- qt(1-alpha/2,df=n-1)*sd_old/sqrt(n)
mean_old + c(-error_old,error_old)
2.57-4.51
3.43-5.48
# Correct pooled-variance approach:
n_x <- 10
n_y <- 10
x_bar <- 5 # old_system
y_bar <- 3 # new_system
var_x <- 0.6
var_y <- 0.68
alpha <- 0.05
sp_2 <- ((n_x - 1)*var_x + (n_y - 1)*var_y) / (n_x + n_y - 2)
sp <- sqrt(sp_2)
ts <- qt(1 - (alpha/2), n_x + n_y - 2)
round((y_bar - x_bar) + c(-1, 1) * ts * sp * (sqrt(1/n_x + 1/n_y)), 2)
# Problem 5.
# 90% confidence interval gives a lower t-value than a 95% confidence interval.
# => The interval will be narrower.
# Problem 6.
# Independent-group pooled-variance t interval, old - new.
n_x <- 100
n_y <- 100
x_bar <- 6
y_bar <- 4
s_x <- 2
s_y <- 0.5
alpha <- 0.05
sp_2 <- ((n_x - 1)*s_x^2 + (n_y - 1)*s_y^2) / (n_x + n_y - 2)
sp <- sqrt(sp_2)
ts <- qt(1 - (alpha/2), n_x + n_y - 2)
round((x_bar - y_bar) + c(-1, 1) * ts * sp * (sqrt(1/n_x + 1/n_y)), 2)
# 1.59 2.41 => The new system appears to be effective.
# Equivalent z-based check (large n):
n1 <- n2 <- 100
xbar1 <- 4
xbar2 <- 6
s1 <- 0.5
s2 <- 2
xbar2 - xbar1 + c(-1, 1) * qnorm(0.975) * sqrt(s1^2/n1 + s2^2/n2)
# 7
n_treated <- 9
mean_treated_diff <- -3
n_untreated <-
mean_untreated_diff <- 1
# NOTE(review): the dangling `<-` above chains into the next line, so
# n_untreated ends up 1 (it was presumably meant to be 9); it is never
# used below, so nothing downstream is affected.
sd_treated <- 1.5
sd_untreated <- 1.8
alpha <- 0.1
difference <- mean_treated_diff - mean_untreated_diff
mn <- mean(difference);
mn + c(-1,1) * qt(1-alpha/2, n_treated-1) * sd_treated/sqrt(n_treated)
# Pooled-variance 90% interval, treated - placebo:
n_x <- 9
n_y <- 9
x_bar <- -3
y_bar <- 1
s_x <- 1.5
s_y <- 1.8
alpha <- 0.1
sp_2 <- ((n_x - 1)*s_x^2 + (n_y - 1)*s_y^2) / (n_x + n_y - 2)
sp <- sqrt(sp_2)
ts <- qt(1 - (alpha/2), n_x + n_y - 2)
round((x_bar - y_bar) + c(-1, 1) * ts * sp * (sqrt(1/n_x + 1/n_y)), 3)
# -5.364 -2.636
# Same computation written out once more:
n1 <- n2 <- 9
x1 <- -3 ##treated
x2 <- 1 ##placebo
s1 <- 1.5 ##treated
s2 <- 1.8 ##placebo
s <- sqrt(((n1 - 1) * s1^2 + (n2 - 1) * s2^2)/(n1 + n2 - 2))
(x1 - x2) + c(-1, 1) * qt(0.95, n1 + n2 - 2) * s * sqrt(1/n1 + 1/n2)
|
ed8c57c495b087b090db4d5559afd92a15dc59f6
|
1e2580d2b2771ea6f5d5c07827d9679507af1011
|
/R/new_project.R
|
ccc3dbcf4f6f6f55b1d407e5eca53e66afec9dcf
|
[] |
no_license
|
louisahsmith/louisahstuff
|
5500e7476f62903059d357927e43550df0d5ce43
|
75ee938ce59551999e17145580de2aec33add646
|
refs/heads/master
| 2022-08-29T00:53:32.687843
| 2022-08-03T15:46:16
| 2022-08-03T15:46:16
| 170,620,522
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 777
|
r
|
new_project.R
|
#' Create a new project
#'
#'
#' @name new_project
#' Create a new project
#'
#' Creates the project directory (recursively), makes it the working
#' directory, and optionally scaffolds targets/renv/git support.
#'
#' @param path Directory to create for the new project.
#' @param use_targets Copy this package's `_targets.R` template?
#' @param use_renv Initialise an renv library (and, together with
#'   targets, write the renv helper file via `targets::tar_renv()`)?
#' @param use_git Check that usethis is available (the git/GitHub setup
#'   calls are currently commented out).
#' @param packages Extra packages passed to `targets::tar_renv(extras = )`.
#' @param ... Currently unused.
#' @note Side effect: the working directory is changed to `path` and
#'   intentionally left there.
#' @name new_project
new_project <- function(path, use_targets = TRUE, use_renv = TRUE, use_git = TRUE,
                        packages = c("tidyverse"), ...) {
  dir.create(path, recursive = TRUE, showWarnings = FALSE)
  setwd(path)
  if (use_targets) {
    # requireNamespace() checks availability without attaching the
    # package to the search path (preferred over require()).
    if (!requireNamespace("targets", quietly = TRUE)) stop("targets package not installed")
    file.copy(system.file("extdata", "_targets.R",
                          package = "louisahstuff"), "_targets.R")
    if (use_renv) {
      targets::tar_renv(extras = packages)
    }
  }
  if (use_git) {
    if (!requireNamespace("usethis", quietly = TRUE)) stop("usethis package not installed")
    # use_git()
    # use_github(private = TRUE)
  }
  if (use_renv) {
    if (!requireNamespace("renv", quietly = TRUE)) stop("renv package not installed")
    renv::init(".")
  }
}
|
07a5be416a9e206bcfc83d456ba70a7836ecb488
|
32eee8dc776f6f0e9a40a23a1791a756e227889f
|
/man/RunDESeq2.Rd
|
9da1c08c7f0d75eb4c227991361d0dddd0239a43
|
[] |
no_license
|
elqumsan/RNAseqMVA
|
52508368d17404270c099c93b59c2ec338f8d2a3
|
7490110c7866a69346a2e440ee5e3b53562f343d
|
refs/heads/master
| 2021-06-02T21:36:20.956517
| 2021-02-24T14:59:12
| 2021-02-24T14:59:12
| 104,486,547
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 584
|
rd
|
RunDESeq2.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/study_case.R
\name{RunDESeq2}
\alias{RunDESeq2}
\title{run DESeq2 to test differential expression on each feature of a data table.}
\usage{
RunDESeq2(self)
}
\arguments{
\item{self}{object belong to StudyCase class}
}
\value{
an object of the same class as the input object
}
\description{
run DESeq2 on an object of class StudyCase to test differential expression on each feature of a data table, and order variables by increasing adjusted p-value.
}
\author{
Mustafa AbuElQumsan and Jacques van Helden
}
|
eab9e36331a543cf91c259fff57c88b543a2b37f
|
d466cef62ea4433a3ecd4c2a24a95a86dcf4eb94
|
/tests/testthat/test-modes.R
|
ed8e1a8eb1baa78dc50c1e0c280564fe35fe9b25
|
[
"MIT"
] |
permissive
|
cran/tabr
|
8b79b13871d715a864bd5e60c6ecfad3abda016c
|
baf94c11dc91af8406612f9e26252a2c60e66c6c
|
refs/heads/master
| 2021-06-15T18:22:23.832218
| 2021-02-20T21:20:06
| 2021-02-20T21:20:06
| 130,035,441
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,365
|
r
|
test-modes.R
|
context("modes")
# Mode helpers should agree with mode_modern() for every mode, with and
# without collapsing; plus checks of is_mode() and mode_rotate().
test_that("mode helpers return as expected", {
  expect_equal(mode_ionian(), mode_modern())
  expect_equal(mode_dorian(), mode_modern("dorian"))
  expect_equal(mode_phrygian(), mode_modern("phrygian"))
  expect_equal(mode_lydian(), mode_modern("lydian"))
  expect_equal(mode_mixolydian("b_"), mode_modern("mixolydian", "b_"))
  expect_equal(mode_aeolian("c#"), mode_modern("aeolian", "c#"))
  expect_equal(mode_locrian(), mode_modern("locrian"))
  expect_equal(mode_ionian(collapse = TRUE),
               mode_modern(collapse = TRUE))
  expect_equal(mode_dorian(collapse = TRUE),
               mode_modern("dorian", collapse = TRUE))
  expect_equal(mode_phrygian(collapse = TRUE),
               mode_modern("phrygian", collapse = TRUE))
  expect_equal(mode_lydian(collapse = TRUE),
               mode_modern("lydian", collapse = TRUE))
  expect_equal(mode_mixolydian("b_", collapse = TRUE),
               mode_modern("mixolydian", "b_", collapse = TRUE))
  expect_equal(mode_aeolian("c#", collapse = TRUE),
               mode_modern("aeolian", "c#", collapse = TRUE))
  # Fixed: a stray `collapse = TRUE` was previously passed to
  # expect_equal() itself rather than to the mode helpers.
  expect_equal(mode_locrian(collapse = TRUE),
               mode_modern("locrian", collapse = TRUE))
  expect_false(is_mode("c d"))
  expect_false(is_mode(c("c", "d")))
  expect_true(is_mode(mode_aeolian("b_")))
  # Transposed scale is only a mode when octave markup is ignored.
  x <- gsub("[0-9,'~]", "", transpose(mode_ionian(collapse = TRUE), 17, key = "f"))
  expect_true(is_mode(x, ignore_octave = TRUE))
  x <- gsub("[0-9,'~]", "", transpose(mode_ionian(collapse = TRUE), 17, key = "f"))
  expect_false(is_mode(x))
  expect_error(mode_rotate("a"), "`notes` does not define a valid mode.")
  expect_identical(mode_rotate(mode_ionian(), 0), mode_ionian())
  expect_identical(mode_rotate(mode_ionian("c"), 1), mode_dorian("d"))
  expect_equal(mode_modern("dorian", "e", TRUE, TRUE),
               as_noteworthy("e f# g a b c# d"))
  expect_identical(mode_rotate(mode_ionian("c"), 1), mode_dorian("d"))
  expect_identical(mode_rotate(mode_ionian("c", ignore_octave = TRUE), 1),
                   mode_dorian("d", ignore_octave = TRUE))
  # All 7 modern modes laid out as a 7x7 table of scale degrees.
  x <- setNames(
    data.frame(t(sapply(modes(), mode_modern, ignore_octave = TRUE))),
    as.roman(1:7))
  expect_equal(dim(x), c(7, 7))
  expect_equal(rownames(x), modes())
  expect_equal(names(x), as.character(as.roman(1:7)))
})
|
112bfa0085b9cca88c05c8851a508f95af313ea0
|
68a91a962f9990052a896f6737f9385062b2336a
|
/simuOtherMethod.R
|
4c69d78d2b2b30b549fdd7651d974d1b18463501
|
[] |
no_license
|
Alterock13/ProbaP3
|
922b6268d303f0b73fc46d00385f380af3d1f01e
|
f138d25144ae1359ba5cf39249252d7f73eacfb6
|
refs/heads/master
| 2020-03-18T04:05:10.474557
| 2018-06-05T16:24:06
| 2018-06-05T16:24:06
| 134,268,608
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,226
|
r
|
simuOtherMethod.R
|
## Simulation of a single bounded queue with three request priorities
## served by `nbServeurs` servers (exponential arrivals and services).
##
## lambda     : arrival intensity, lambda > 0
## mu         : 1/mu is the mean processing time, mu > 0
## N          : maximum storage capacity per server (in service + queued);
##              scaled below by the number of servers
## p1, p2, p3 : priority proportions (p1 + p2 + p3 = 1)
## duration   : simulated time horizon
## nbServeurs : number of servers
##
## Side effects only: prints received/served/lost counts per priority
## (report labels kept in French, as in the original output).
simulationServeur2 <- function(lambda, mu, N, p1, p2, p3, duration, nbServeurs) {
  ## Experiment clocks
  totalTime <- duration  # duration of simulation
  expTime <- 0           # current simulation time
  tempsArrivee <- 0      # time of the next arrival
  tempsService <- 0      # time of the next service completion
  N <- N * nbServeurs    # global buffer grows with the number of servers
  ## Queue occupancy per priority
  k1 <- 0  # priority-1 requests in the system
  k2 <- 0  # priority-2 requests in the system
  k3 <- 0  # priority-3 requests in the system
  k <- 0   # total requests in the system
  ### Counters
  compteurTraitesk1 <- 0  # priority-1 requests served
  compteurTraitesk2 <- 0  # priority-2 requests served
  compteurTraitesk3 <- 0  # priority-3 requests served
  compteurRecuesk1 <- 0   # priority-1 requests received
  compteurRecuesk2 <- 0   # priority-2 requests received
  compteurRecuesk3 <- 0   # priority-3 requests received
  compteurPerduesk1 <- 0  # priority-1 requests lost (buffer full)
  compteurPerduesk2 <- 0  # priority-2 requests lost
  compteurPerduesk3 <- 0  # priority-3 requests lost
  paquetsRecus <- 0
  paquetsTraites <- 0
  paquetsPerdus <- 0
  while (expTime < totalTime) {
    # k == 0 covers the case where there is no request left to serve
    if (tempsArrivee < tempsService || k == 0) {
      ## --- Arrival event ---
      paquetsRecus <- paquetsRecus + 1
      ## Assign a priority to the request with a uniform draw
      randomPriority <- sample(1:100, 1)
      if (randomPriority < p1 * 100) {
        prochaineRequete <- rexp(1, lambda)
        if (k == N) {
          # Buffer full: the request is dropped
          paquetsPerdus <- paquetsPerdus + 1
          compteurPerduesk1 <- compteurPerduesk1 + 1
        }
        else {
          k1 <- k1 + 1
        }
        compteurRecuesk1 <- compteurRecuesk1 + 1
      }
      else if (p1 * 100 <= randomPriority && randomPriority < (p1 * 100 + p2 * 100)) {
        prochaineRequete <- rexp(1, lambda)
        if (k == N) {
          paquetsPerdus <- paquetsPerdus + 1
          compteurPerduesk2 <- compteurPerduesk2 + 1
        }
        else {
          k2 <- k2 + 1
        }
        compteurRecuesk2 <- compteurRecuesk2 + 1
      }
      else {
        prochaineRequete <- rexp(1, lambda)
        if (k == N) {
          paquetsPerdus <- paquetsPerdus + 1
          compteurPerduesk3 <- compteurPerduesk3 + 1
        }
        else {
          k3 <- k3 + 1
        }
        compteurRecuesk3 <- compteurRecuesk3 + 1
      }
      tempsArrivee <- tempsArrivee + prochaineRequete
      k <- k1 + k2 + k3
      expTime <- tempsArrivee
    } else {
      ## --- Service event ---
      prochainService <- rexp(1, mu)
      tempsService <- tempsService + prochainService
      ## Multi-server handling: each of the nbServeurs servers completes
      ## at most one request, highest priority first.
      ## (Fixed off-by-one: the original looped over 0:nbServeurs and
      ## therefore served nbServeurs + 1 requests per service event.)
      for (i in seq_len(nbServeurs)) {
        if (k1 > 0) {
          k1 <- k1 - 1
          paquetsTraites <- paquetsTraites + 1
          compteurTraitesk1 <- compteurTraitesk1 + 1
        }
        else if (k2 > 0) {
          k2 <- k2 - 1
          paquetsTraites <- paquetsTraites + 1
          compteurTraitesk2 <- compteurTraitesk2 + 1
        }
        else if (k3 > 0) {
          k3 <- k3 - 1
          paquetsTraites <- paquetsTraites + 1
          compteurTraitesk3 <- compteurTraitesk3 + 1
        }
        k <- k1 + k2 + k3
      }
      expTime <- tempsService
    }
  }
  ## Final report
  print ("## DEBUT SIMULATION ")
  print ("Paquets reΓ§us :")
  print(paquetsRecus)
  print ("Paquets traitΓ©s :")
  print(paquetsTraites)
  print ("Paquets perdus :")
  print(paquetsPerdus)
  print ("Paquets totaux (calculΓ©s) :")
  print(paquetsTraites+paquetsPerdus+k)
  print ("#########")
  print ("Paquets reΓ§us P1 :")
  print(compteurRecuesk1)
  print ("Paquets reΓ§us P2 :")
  print(compteurRecuesk2)
  print ("Paquets reΓ§us P3 :")
  print(compteurRecuesk3)
  print ("Paquets reΓ§us (calculΓ©s):")
  print(compteurRecuesk1+compteurRecuesk2+compteurRecuesk3)
  print ("#########")
  print ("Paquets traitΓ©s P1 :")
  print(compteurTraitesk1)
  print ("Paquets traitΓ©s P2:")
  print(compteurTraitesk2)
  print ("Paquets traitΓ©s P3:")
  print(compteurTraitesk3)
  print ("#########")
  print ("Paquets perdus P1:")
  print(compteurPerduesk1)
  # Fixed copy-paste labels: the next two lines previously printed "P1".
  print ("Paquets perdus P2:")
  print(compteurPerduesk2)
  print ("Paquets perdus P3:")
  print(compteurPerduesk3)
  print("## FIN SIMULATION")
}
## Test the effect of increasing the number of servers
## (same load, buffer and priority mix; only nbServeurs varies)
simulationServeur2(6,1,20,0.5,0.3,0.2,10^4,1)
simulationServeur2(6,1,20,0.5,0.3,0.2,10^4,2)
simulationServeur2(6,1,20,0.5,0.3,0.2,10^4,4)
simulationServeur2(6,1,20,0.5,0.3,0.2,10^4,8)
|
9868dbdbcb544ec8c8e18cb5c215dffd32495507
|
73356fed9beb3870100c792820325631bb99062a
|
/TGR_calculator.R
|
5c74cb36ae1009b9ec4f6dcf4f50a14ed7c642e8
|
[] |
no_license
|
chferte/TumorGrowthRate
|
ce6ca9c21e9ff6484dcbd9372360d926553b4960
|
ea1cbbf947351cd190ca197d27f3ecee7079f3c4
|
refs/heads/master
| 2021-01-15T18:50:47.807308
| 2013-11-26T23:03:42
| 2013-11-26T23:03:42
| 10,672,376
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,403
|
r
|
TGR_calculator.R
|
## Charles Ferté
## Institut Gustave Roussy, Villejuif, France
## Assessment of Tumor Growth Rates (TGR) in the clinical setting
## Performed under the supervision of Serge Koscielny, Bernard Escudier and Jean-Charles Soria
## With the great help of Antoine Hollebecque (Gustave Roussy), Mariana Fernandez (Gustave Roussy), Christophe Massard (Gustave Roussy)
## and Brian Bot (Sage Bionetworks, Seattle)
########################################################################
# definition of TGR
########################################################################
# Tumor size (D) was defined as the sum of the longest diameters of the target lesions
# as per the Response Evaluation Criteria in Solid Tumors (RECIST) criteria [1].
# Let t be the time expressed in months at the tumor evaluation.
# Assuming the tumor growth follows an exponential law,
# Vt the tumor volume at time t is equal to Vt=V0 exp(TG.t),
# where V0 is volume at baseline, and TG is the growth rate.
# We approximated the tumor volume (V) by V = (4/3) π R³, where R, the radius of the sphere, is equal to D/2.
# Consequently, TG is equal to TG = 3 log(Dt/D0) / t, where D0 and Dt are the diameters (RECIST sums) at baseline and at time t, respectively.
# To report the tumor growth rate (TGR) results in a clinically meaningful way,
# we expressed TGR as a percent increase in tumor volume during one month using the following transformation:
# TGR = 100 (exp(TG) -1), where exp(TG) represents the exponential of TG.
# We calculated the TGR across clinically relevant treatment periods (Figure 1):
# (i) TGR REFERENCE assessed during the wash-out period (off-therapy) before the introduction of the experimental drug,
#(ii) TGR EXPERIMENTAL assessed during the first cycle of treatment (i.e.: between the drug introduction and the first evaluation, on-therapy),
########################################################################
# Notable references about TGR:
########################################################################
# 1: A simulation model of the natural history of human breast cancer.
# Koscielny S, Tubiana M, Valleron AJ.
# Br J Cancer. 1985 Oct;52(4):515-24. PubMed PMID: 4063132; PubMed Central PMCID: PMC1977243.
# 2: Tumour growth rates and RECIST criteria in early drug development. Gomez-Roca C, Koscielny S, Ribrag V, Dromain C, Marzouk I, Bidault F, Bahleda R, FertΓ© C, Massard C, Soria JC.
# Eur J Cancer. 2011 Nov;47(17):2512-6. doi:10.1016/j.ejca.2011.06.012. Epub 2011 Jul 15. PubMed PMID: 21763126.
# 3: Tumor Growth Rate (TGR) provides useful information to evaluate Sorafenib and Everolimus treatment in metastatic renal cell carcinoma (mRCC) patients. An integrated analysis of the TARGET and RECORD phase III trials data.
# FertΓ© C, Koscielny S, Albiges L, Rocher L, Soria JC, Iacovelli R, Loriot Y, Fizazi K, Escudier B.
# presented as Posted Discussion, GU Session, ASCO Annual meeting 2012
# submitted for publication
# 4: Tumor Growth Rate (TGR) provides useful information for patients enrolled in phase I trials
# and yields clear specific drug profiles.
# FertΓ© C, et al (manuscript in preparation)
# presented as Posted Discussion, Developmental Therapeutics Session, ASCO Annual meeting 2013
#########################################################################################################################
# load the data
#########################################################################################################################
# you are invited to input your own file (e.g. MyData.txt)
# requirement is a table excel spreadsheet converted into a tabe delimited .txt file
# must contain the following column names: "RECIST_BASELINE","RECIST_BEFORE","RECIST_EVAL1","SCANDATE_BASELINE","SCANDATE_BEFORE","SCANDATE_EVAL1"
# with numeric values for the following columns: "RECIST_BASELINE" "RECIST_BEFORE" "RECIST_EVAL1"
# with dates entered as mm/dd/yyyy for the following columns: "SCANDATE_BASELINE" "SCANDATE_BEFORE" "SCANDATE_EVAL1"
# # for clarity purposes, as example, we point to an example of such .txt file
# # this file is available through Synapse.
# # You'll love it: it's super useful and free ! here is some general information about it: http://sagebase.org/synapse-overview/
# # just create a synapse account online @ www.synapse.org
# # then, install the R client package with the two command lines:
# source("http://depot.sagebase.org/CRAN.R")
# pkgInstall("synapseClient")
# # then run the following lines to get the example.txt file
# library(synapseClient)
# synapseLogin(username="insert here your login",password="insert here your password")
# myFile <- synGet("syn1936427")
# myFile <- myFile@filePath
# Read the cohort data prepared above.  'myFile' must have been set by the
# Synapse download snippet in the comments above, or point it to your own
# tab-delimited .txt file.
# NOTE(review): read.table() is called without header=TRUE -- confirm the
# input file is read with its column names intact, otherwise the
# RECIST_*/SCANDATE_* columns referenced below will be missing.
myData <- read.table(file=myFile)
##########################################################################################################
# Compute the tumor Growth Rates TGR.ref and TGR.exp (TGR ref and TGR exp the first cycle, respectively)
##########################################################################################################
# first define the reference period and the experimental period (in months)
# NOTE(review): difftime() is used without units= -- the default depends on
# the class of the date columns; confirm the scan dates are parsed as Date so
# the difference is in days before the *12/365.25 month conversion.
myData$ref.period <- as.numeric(difftime(myData$SCANDATE_BASELINE,myData$SCANDATE_BEFORE))*12/365.25
myData$exp.period <- as.numeric(difftime(myData$SCANDATE_EVAL1,myData$SCANDATE_BASELINE))*12/365.25
# compute the TGR: TGR = 100*(exp(3*log(Dt/D0)/t) - 1), i.e. the percent
# tumor-volume increase per month under the exponential growth model
# described in the file header.
myData$TGR.ref <- 100*(exp(3*log(myData$RECIST_BASELINE/myData$RECIST_BEFORE)/(myData$ref.period))-1)
myData$TGR.exp <- 100*(exp(3*log(myData$RECIST_EVAL1/myData$RECIST_BASELINE)/(myData$exp.period))-1)
############################################################################################################
# compare the TGR ref and the TGR exp (Pairwise comparison by wilcoxon ranked signed test)
# (note that this is a pairwise comparison since each patient is used as his/her own control)
############################################################################################################
# basic descriptive statistics
summary(myData$TGR.ref)
summary(myData$TGR.exp)
# comparison of the two periods (paired: each patient is his/her own control)
wilcox.test(myData$TGR.ref,myData$TGR.exp,paired=TRUE)
##########################################################################
# plot the TGR ref and the TGR exp
##########################################################################
par(mfrow=c(1,1))
# scatter of per-patient TGR before (x) vs. on therapy (y), clipped to +/-200%
plot(myData$TGR.ref,myData$TGR.exp,xlim=c(-200,200),ylim=c(-200,200),pch=20,cex=.9,col="gray60",
axes=FALSE, xlab= "TGR Reference", ylab="TGR Experimental",cex.lab=.8)
axis(1,las=1,at=c(-200,-100,0,100,200),lab=c("-200 %","-100 %","0 %","100 %","200 %"),cex.axis=.7, font=2)
axis(2,las=1,at=c(-200,-100,0,100,200),lab=c("-200 %","-100 %","0 %","100 %","200 %"),cex.axis=.7, font=2)
abline(h=0,v=0)
# identity line: points below it show a decrease in TGR on therapy
abline(coef=as.vector(c(0,1)), col="orange", lty=2,lwd=2.5)
# annotate with the paired-test p-value computed above
text(x=-130, y=-180,labels=paste("Pairwise comparison:\np value =",
format(wilcox.test(myData$TGR.ref,myData$TGR.exp,paired=TRUE)$p.value,digits=3)),adj=0,cex=.8)
text(-200,-100,"orange line set for: \nTGR ref = TGR exp",col="orange", cex=.55, font=4,adj=0)
text(x=130,y=-105,'DECREASE in TGR\n "Antitumor activity"',cex=.9,font=4,col="darkgreen")
text(x=-90,y=100,'INCREASE in TGR\n "No antitumor activity"',cex=.9,font=4,col="red")
title("Variation of Tumor Growth Rate (TGR)\nacross the Reference and Experimental periods", font=2)
############################################################################################################
# see Pubmed for recent publications using TGR in oncology:
############################################################################################################
# 1: Ferte C, Fernandez M, Hollebecque A, Koscielny S, Levy A, Massard C, Balheda
# R, Bot BM, Gomez Roca C, Dromain C, Ammari S, Soria JC. Tumor Growth Rate (TGR)
# is an early indicator of anti-tumor drug activity in phase I clinical trials.
# Clin Cancer Res. 2013 Nov 22. [Epub ahead of print] PubMed PMID: 24240109.
# 2: FertΓ© C, Koscielny S, Albiges L, Rocher L, Soria JC, Iacovelli R, Loriot Y,
# Fizazi K, Escudier B. Tumor Growth Rate Provides Useful Information to Evaluate
# Sorafenib and Everolimus Treatment in Metastatic Renal Cell Carcinoma Patients:
# An Integrated Analysis of the TARGET and RECORD Phase 3 Trial Data. Eur Urol.
# 2013 Aug 15. doi:pii: S0302-2838(13)00831-2. 10.1016/j.eururo.2013.08.010. [Epub
# ahead of print] PubMed PMID: 23993162.
|
e7eeced734847fadb8a4e5f09bcf957f80d978b0
|
ebab9db9bb33c548b003d421380b9aa804117e57
|
/R/helpers.R
|
f9d992637aaad85cd1c2f916e8e7b652e1924ccb
|
[] |
no_license
|
xiaoxiaozhangx/featureImportance
|
9d6227ba16e8c6e40860bae500b1f5223d0d0118
|
cf064ddc757f73db1efbc7e9722869fbfff8e6ba
|
refs/heads/master
| 2023-05-09T08:50:15.609474
| 2021-05-30T08:17:52
| 2021-05-30T08:17:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,191
|
r
|
helpers.R
|
# Normalise a measure or a list of measures into a named list.
#
# Accepts either a single mlr Measure or a list of Measure objects and
# always returns a list whose elements are named by their measure ids.
assertMeasure = function(measures) {
  # wrap a single Measure so downstream code can rely on a list
  if (inherits(measures, "Measure")) {
    measures = list(measures)
  }
  assertList(measures, "Measure")
  # name every element after its measure id
  measure.ids = BBmisc::vcapply(measures, function(m) m$id)
  setNames(measures, measure.ids)
}
# Validate that an mlr ResampleResult and the accompanying data/target are
# consistent with each other.
#
# Checks that:
#  * the ResampleResult was created with models = TRUE (fitted models kept),
#  * 'target' (if given) is a subset of the task's target names,
#  * 'data' has the task's number of rows and contains the target columns
#    (otherwise a warning is issued and hard assertions on the data follow).
# Called for its side effects (errors/warnings); returns nothing useful.
assertResampleResultData = function(object, data, target) {
  td = mlr::getTaskDesc(object)
  ts = mlr::getTaskSize(td)
  tn = mlr::getTaskTargetNames(td)
  # check ResampleResult: the fitted models are needed downstream
  if (is.null(object$models))
    stop("Use 'models = TRUE' to create the ResampleResult.")
  features = object$models[[1]]$features
  # check that 'target' matches the task's target names (may be empty)
  assertSubset(target, choices = tn, empty.ok = TRUE)
  # sanity checks for 'data': both conditions are scalar, so use the
  # short-circuiting || (rather than the elementwise |) and base %in%
  if (ts != nrow(data) || !all(tn %in% colnames(data))) {
    warningf("Use the same data that created the ResampleResult.")
    assertDataFrame(data, nrows = ts)
    assertSubset(features, colnames(data))
  }
}
# guessPerformanceMeasureProperties = function(data, target, pred = NULL) {
# y = data[[target]]
# # guess task from target
# if (is.factor(y)) {
# properties = "classif"
# if (nlevels(y) > 2) {
# properties = c(properties, "classif.multi")
# }
# } else {
# properties = "regr"
# }
# # guess pred
# if (!is.null(pred)) {
# if (is.matrix(pred) | (!is.factor(pred) & is.factor(y))) {
# properties = c(properties, "req.prob")
# }
# }
# return(properties)
# }
# Generic: validate (and possibly normalise) a prediction 'p' against the
# true response 'y'.  Dispatches on the class of 'y' (factor, character,
# default); each method returns the checked prediction.
checkPrediction = function(y, p) {
  UseMethod("checkPrediction")
}
# Validate a prediction 'p' against a factor response 'y'.
#
# Two prediction formats are accepted:
#  * a factor of predicted classes whose levels are a subset of levels(y);
#    it is re-leveled to exactly levels(y) before being returned, or
#  * a matrix of class probabilities with one column per level of y, whose
#    column names must include all levels of y.
# A matrix of any other width is an error.
checkPrediction.factor = function(y, p) {
  y.levels = levels(y)
  if (is.factor(p)) {
    # class predictions: allowed classes are the levels of y
    assertFactor(p, levels = y.levels)
    p = factor(p, levels = y.levels)
  } else if (is.matrix(p)) {
    # probability predictions: require one (named) column per class
    if (length(y.levels) != ncol(p)) {
      stopf("'predict.fun' returns an object of class '%s' instead of a named matrix of probabilities!", class(p)[1L])
    }
    assertNames(colnames(p), must.include = y.levels)
  }
  p
}
# Character responses are handled by converting 'y' to a factor and reusing
# the factor method above.
checkPrediction.character = function(y, p) {
  checkPrediction(as.factor(y), p)
}
# Fallback for all other response types (e.g. numeric regression targets):
# only require that the prediction is a vector; returned unchanged.
checkPrediction.default = function(y, p) {
  assertVector(p)
  p
}
|
ed865092a42f9a454d61549dc8e8eae34d8e84a7
|
239726c28c1f28341559f9cf4dded61ddd4c7800
|
/Fase 2/Sesion 1/Reto3.R
|
2090f5fd65e3dc81edc441dff45055dfd79af090
|
[] |
no_license
|
Laborico/BEDU-SANTANDER
|
f8c928470f39b1b58b37c43c5c9b10b0d6553d80
|
34650a9efd7decc81980ebb272b3bb138ebc61ba
|
refs/heads/master
| 2023-02-18T16:18:06.706838
| 2021-01-19T17:18:07
| 2021-01-19T17:18:07
| 308,212,756
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 568
|
r
|
Reto3.R
|
# Load the Amazon best-sellers dataset into a data frame called amazon.best
amazon.best <-read.csv("https://raw.githubusercontent.com/beduExpert/Programacion-con-R-Santander/master/Sesion-01/Data/bestsellers%20with%20categories.csv")
# Compute the transposed data frame, call it tAmazon, and convert it back to
# a data frame.
# NOTE(review): t() on a mixed-type data frame coerces everything to
# character, so all rows of tAmazon (including "Price") hold strings.
tAmazon <- data.frame(t(amazon.best))
# Use the book names (first row of the transpose) as the column names
colnames(tAmazon) <- tAmazon[1,]
row.names(tAmazon)  # inspect the row names (printed, not stored)
# Which are the cheapest and most expensive books?
# NOTE(review): because the "Price" row is character after the transpose,
# which.max()/which.min() operate on strings here -- TODO confirm this gives
# the intended numeric ordering (as.numeric() first would be safer).
which.max(tAmazon["Price",])
which.min(tAmazon["Price",])
|
16934d5209298f9220e580535ade2cb0ff5396bb
|
6adf748adac8e279adeccfae1b49195893034125
|
/man/matchMeanSD.Rd
|
bb1c46e1925d83eb530369d74a313c18eb4536ab
|
[] |
no_license
|
cran/raincin
|
1c9b5fb73023a6a705c83556579f652a983ccecb
|
59ad7c5121e24eeec96363a59a1b1afbb4496e9f
|
refs/heads/master
| 2022-09-30T09:12:59.469857
| 2020-06-04T15:10:02
| 2020-06-04T15:10:02
| 269,669,514
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 655
|
rd
|
matchMeanSD.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Rfun_matchMeanSD.R
\name{matchMeanSD}
\alias{matchMeanSD}
\title{Transform Data to Desired Mean and Standard Deviation}
\usage{
matchMeanSD(data, mean = 0, sd = 1)
}
\arguments{
\item{data}{a vector including data to be transformed}
\item{mean}{a value of desired mean}
\item{sd}{a value of desired SD}
}
\value{
a vector of transformed data
}
\description{
Transform Data to Desired Mean and Standard Deviation
}
\examples{
orig_data <- c(1,3,5,10)
trans_data <- matchMeanSD(data=orig_data, mean=100, sd=15)
print(trans_data)
}
\author{
Jiangtao Gou
Fengqing Zhang
}
|
b3f2b1c82260e44d11390cb0b30df7ee02178123
|
a3c78700a65f10714471a0d307ab984e8a71644d
|
/modules/data.atmosphere/man/noaa_grid_download.Rd
|
8d48f18e485a74a90188bab8235fd46d52b8c64f
|
[
"NCSA",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
PecanProject/pecan
|
e42a8a6a0fc9c0bb624e0743ab891f6cf131ed3f
|
ce327b92bf14498fa32fcf4ef500a7a5db5c9c6c
|
refs/heads/develop
| 2023-08-31T23:30:32.388665
| 2023-08-28T13:53:32
| 2023-08-28T13:53:32
| 6,857,384
| 187
| 217
|
NOASSERTION
| 2023-09-14T01:40:24
| 2012-11-25T23:48:26
|
R
|
UTF-8
|
R
| false
| true
| 816
|
rd
|
noaa_grid_download.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GEFS_helper_functions.R
\name{noaa_grid_download}
\alias{noaa_grid_download}
\title{Download gridded forecast in the box bounded by the latitude and longitude list}
\usage{
noaa_grid_download(
lat_list,
lon_list,
forecast_time,
forecast_date,
model_name_raw,
output_directory,
end_hr
)
}
\arguments{
\item{lat_list}{lat for site}
\item{lon_list}{long for site}
\item{forecast_time}{start hour of forecast}
\item{forecast_date}{date for forecast}
\item{model_name_raw}{model name for directory creation}
\item{output_directory}{output directory}
\item{end_hr}{end hr to determine how many hours to download}
}
\value{
NA
}
\description{
Download gridded forecast in the box bounded by the latitude and longitude list
}
|
ade42f483b9014199cc32bb093acec12c5553797
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/intkrige/man/utsnow_dtl2.Rd
|
379c47b4e3762533ea96b73259eb9d96487cc73e
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,008
|
rd
|
utsnow_dtl2.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{utsnow_dtl2}
\alias{utsnow_dtl2}
\title{An interval-valued design ground snow load dataset for Utah that only
considers depth to load conversions.}
\format{A data frame with 415 rows and 8 variables:
\describe{
\item{STATION}{The global historical climatological network (GHCN)
station identifier}
\item{STATION_NAME}{The GHCN station name}
\item{LATITUDE}{Latitude coordinate position}
\item{LONGITUDE}{Longitude coordinate position}
\item{ELEVATION}{Elevation of the measurement location (meters)}
\item{minDL}{The lower endpoint of the interval-valued design snow load as measured in kilopascals (kpa)}
\item{maxDL}{The upper endpoint of the design snow load interval (kpa)}
\item{pointDL}{The original point-valued design snow load from the 2018 Utah Snow Load Study (kpa)}
}}
\usage{
utsnow_dtl2
}
\description{
A dataset containing the interval-valued data used in the analysis
of Bean et al. (2019). The 415 measurement locations
included in the dataset are taken from Bean et al. (2018).
}
\details{
The interval valued kriging analysis described in Bean et. al. (2019) analyzes this dataset on a
log scale after removing the effect of elevation. An example of predictions using this workflow are
found in the README associated with this package. Note that this dataset differs from utsnow in that
intervals only consider differences in depth to load conversions. This dataset differs from utsnow_dtl
in that intervals are only calculated at the final step of the analysis: after finding 50 year events
using all 8 depth to load conversion techniques. utsnow_dtl rather created annual intervals, only fitting
distributions to two sets of maximums (the annual lower and upper bounds), rather fitting 8 sets of
maximums on all the depth-to-load conversion types.
}
\references{
\insertRef{Bean2019-int}{intkrige}
\insertRef{Bean2018-report}{intkrige}
}
\keyword{datasets}
|
865207e6f89cc3d7e2588652d88f2dfb3ff55b6c
|
680c36c7ef9031bc389fd51a99f2e2505593f97f
|
/man/kpegasos-module.Rd
|
bd5cb7c1cd3b4db9f0622821e849054ca6bcfbf4
|
[] |
no_license
|
pkimes/glmargin
|
2b8ed887ca3c2ccd962511ca55c3beae6545d28e
|
00fc5d56d810442d9275b0d355c9f5d848861f09
|
refs/heads/master
| 2021-01-22T05:11:10.940778
| 2015-10-28T06:57:54
| 2015-10-28T06:57:54
| 45,015,017
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 566
|
rd
|
kpegasos-module.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/zzz.R
\name{KPEGASOS}
\alias{KPEGASOS}
\title{KPEGASOS Reference Class}
\description{
Exposed Rcpp module class
}
\section{Fields}{
\describe{
\item{\code{b}}{a numeric vector}
\item{\code{W}}{a numeric vector}
}}
\section{Methods}{
\describe{
\item{\code{new(x, y, py, lambda, max_iter, min_eps, verbose):}}{constructor call with training dataset}
\item{\code{Solve():}}{solve the problem}
\item{\code{boundaries():}}{return interval boundaries}
}
}
\author{
Patrick Kimes
}
|
f6506ba3898c4086327d1a91789858e0d4328ec7
|
b2f256c584aa071a58d8905c3628d4a4df25a506
|
/utils/R/GUD_exploration_tools/data_options.R
|
de2435fd4180d9b1e586a2ee2ff5979f9bb5ea54
|
[
"MIT"
] |
permissive
|
maximebenoitgagne/wintertime
|
6865fa4aff5fb51d50ea883b2e4aa883366227d8
|
129758e29cb3b85635c878dadf95e8d0b55ffce7
|
refs/heads/main
| 2023-04-16T22:08:24.256745
| 2023-01-10T20:15:33
| 2023-01-10T20:15:33
| 585,290,944
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,984
|
r
|
data_options.R
|
#'@title Read GUD_OPTIONS.h input file for MITgcm GUD module.
#'@description
#'Read GUD_OPTIONS.h input file and create a list of keys (code options).
#'
#'@param none for now
#'@return opt list
#'@author
#'F. Maps 2020
#'@export
require("dplyr")
# Read the GUD_OPTIONS.h header of the MITgcm GUD module and return its
# preprocessor option keys as a named list.
#
# #undef keys map to FALSE, plain #define keys map to TRUE, and
# "#define NAME value" keys map to the numeric value.
#
# Returns: a named list of logicals/numerics, one entry per option key.
read.options <- function() {
  # Locate the header: prefer the 1-D setup copy, fall back to the package one
  infile <- "../../../gud_1d_35+16/code_noradtrans/GUD_OPTIONS.h"
  if( !file.exists( infile ) ) {
    infile <- "../../../pkg/gud/GUD_OPTIONS.h"
  }
  df <- file( infile, "r" ) # read only
  # release the connection even if readLines() fails
  on.exit( close( df ), add = TRUE )
  opt_txt <- readLines( df, skipNul = TRUE )
  # Select only relevant lines (those carrying a #define or #undef)
  keep <- grepl( "define|undef", opt_txt )
  opt_txt <- opt_txt[ keep ]
  # Create the list of GUD options
  opt <- list()
  # FALSE keys
  undef <- grepl( "undef", opt_txt )
  opt [undef] <- FALSE
  names( opt )[undef] <- strsplit( opt_txt[undef], "#undef " ) %>% # remove keyword
    sapply( "[", 2 ) %>%                                          # get its name
    trimws( which = "both" )                  # get rid of useless white spaces
  # TRUE keys
  define <- grepl( "define", opt_txt )
  opt [define] <- TRUE
  names( opt )[define] <- strsplit( opt_txt[define], "#define " ) %>%
    sapply( "[", 2 ) %>%
    trimws( which = "both" )
  # Numerical keys: "#define NAME value" leaves "NAME value" in the name;
  # split the value off and store it as the element's numeric content.
  num_key <- grep( " ", names(opt) )
  # seq_along() correctly yields an empty loop when there are no numeric
  # keys (the previous 1:length(num_key) would wrongly iterate over c(1, 0))
  for ( i in seq_along(num_key) ) {
    opt [ num_key[i] ] <- strsplit( names( opt[ num_key[i] ] ), " " ) %>%
      sapply( "[", 2 ) %>%
      as.numeric()
    names( opt )[ num_key[i] ] <- strsplit( names( opt[ num_key[i] ] ), " " ) %>%
      sapply( "[", 1 ) %>%
      trimws( which = "both" )
  }
  # Return the list of GUD option keys
  return(opt)
}
|
446f7441fdd0f4c8959eb729832606fc736c6f36
|
f02ffb8350d83842355277288ea0dcbe74010f96
|
/r/main.R
|
2aaab1dc774b086a1ca22c4f713823c7ef38da8f
|
[] |
no_license
|
mayer79/plumber
|
ca96b7be2f115448da0ab44f6dcc11367ca5d713
|
e63f26adff828b767ed152c95ddd2f8bb6770ec7
|
refs/heads/master
| 2020-05-04T23:13:37.212262
| 2019-06-13T07:17:00
| 2019-06-13T07:17:00
| 179,536,818
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 278
|
r
|
main.R
|
# Launch the REST API defined in r/rest_controller.R on port 8000,
# listening on all network interfaces (blocks until the server stops).
library(plumber)

api <- plumb("r/rest_controller.R")
api$run(port = 8000, host = '0.0.0.0')

# Example requests:
# curl -X GET "http://localhost/predict_price?carat=0.4" -H "accept: application/json"
# curl -H "Content-Type: application/json" -X GET -d "{\"carat\":0.7}" "http://localhost/predict_price"
|
fcf764c483e71ede249f2c35157d9f09cd9e438c
|
682cb749025a52b9cbccd6d1e1063589b4edb127
|
/R/meanPseudoBoost.R
|
1d331a3ac15c3cb855176c7c34de465d01079997
|
[
"BSD-3-Clause"
] |
permissive
|
danielazoeller/PseudoBoost
|
849118e97809883a64872862c34642daeb128519
|
7c5e20a8a3a1dda73b21f0f91531d1f2bb394a95
|
refs/heads/master
| 2021-01-02T08:40:49.132502
| 2015-06-25T07:04:04
| 2015-06-25T07:04:04
| 35,098,711
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,273
|
r
|
meanPseudoBoost.R
|
#' Perform smoothed stagewise pseudo-value regression for the first risk in a competing risk setting
#'
#' Generic entry point; dispatches on the class of \code{object}.  See the
#' default method for the full description of the smoothing algorithm and
#' all of its arguments.
#' @param object The observation data the method dispatches on (for the
#'   default method: a data.frame containing observation times AND statuses).
#' @param ... Further arguments passed on to the dispatched method.
#' @return An object of type meanPseudoBoost containing the estimates, the performed boosting step number and the evaluation times.
#' @import prodlim
#' @export
meanPseudoBoost <- function(object,...){
  UseMethod("meanPseudoBoost",object)
}
#' Perform smoothed stagewise pseudo-value regression for the first risk in a competing risk setting
#'
#' Creates RepMean modified datasets with the help of pairwise switches of the observation times with one observation time smaller than switch.t. For each of these datasets the function PseudoBoost is called separately and the mean value is taken for each effect estimate at each considered time point.
#' @param data A data.frame containing observation times AND statuses
#' @param xmat A numeric matrix containing the covariate values for all patients.
#' @param times A numeric vector containing the evaluation times.
#' @param stepno A numeric value containing the number of boosting steps to be performed. If you use cross validation (cv=TRUE), this parameter will be ignored.
#' @param maxstepno A numeric value containing the maximal number of boosting steps considered during the cross validation (only used if cv=TRUE).
#' @param nu A numeric value between 0 and 1, the shrinkage parameter for the stagewise regression algorithm. Setting it to values such as nu=0.1 avoids overfitting in early steps.
#' @param cv A boolean value indicating if cross validation should be performed.
#' @param multicore A boolean value indicating if more than one core should be used for the cross validation (for this the parallel package is needed).
#' @param RepMean A numeric value indicating the number of modified datasets used for the estimate.
#' @param switch.to A numeric value indicating the number of the first risk up to which the observations can be switched. It sets the value switch.t. If switch.t is specified, switch.to will be ignored. If switch.to=0, switch.t will be set to zero.
#' @param switch.t A numeric value indicating the time point up to which the observations can be switched. If this value is not NULL, switch.to will be ignored. If switch.t=0, there will be no switching.
#' @param seed.start A numeric value indicating the first seed value.
#' @param trace A boolean value indicating if additional information should be printed out during the process.
#' @param switches A numeric value indicating the number of switches to be made. Setting switches=0 will suppress smoothing. If switches is NULL, 1000 switches will be performed.
#' @param ... Further arguments passed on to cv.PseudoBoost().
#' @return An object of type meanPseudoBoost containing the estimates, the performed boosting step number and the evaluation times.
#' @import prodlim
#' @export
# NOTE(review): the generic is declared as meanPseudoBoost(object, ...) while
# this method's first argument is named 'data'; S3 consistency would require
# matching argument names -- confirm before renaming, as it changes the
# documented API.  Also, 'multicore' is accepted but never used in this body.
meanPseudoBoost.default <- function(data,xmat,times,stepno=100,maxstepno=100,nu=0.1,cv=TRUE,multicore=FALSE,RepMean=50,switch.to=150,switch.t=NULL,seed.start=NULL,trace=TRUE,switches = NULL,...){
  # NOTE(review): require() inside a function only warns on failure;
  # requireNamespace()/library() would fail fast.
  require(prodlim)
  # Split the input and sort everything by observation time so that
  # neighbouring indices correspond to neighbouring event times.
  obs.time <- data[[1]]
  status <- data[[2]]
  status <- status[order(obs.time)]
  xmat <- xmat[order(obs.time),]
  obs.time <- obs.time[order(obs.time)]
  # Reconcile the two ways of specifying the switching horizon:
  # switch.t (a time point) takes precedence over switch.to (an event count).
  if(!is.null(switch.t)){
    if(trace) cat("The argument switch.t is used instead of the argument switch.to.\n")
    if(switch.t==0){
      switch.to <- 0
    } else{
      # index of the last risk-1 event time that is still <= switch.t
      switch.to <- rev(which(sort(obs.time[status==1])<=switch.t))[1]
    }
  } else{
    if(switch.to == 0){
      switch.t <- 0
    } else{
      switch.t <- sort(obs.time[status==1])[switch.to]
    }
  }
  if(is.null(switches)){
    switches <- 1000
  }
  if(trace) cat("For smoothing the first observation times with a observation time <= ",switch.t," will be switched.\n This corresponds to switching up to the ",switch.to," observation time of the risk 1.\n")
  if(is.null(seed.start)){
    seed.start <- round(runif(1,0,749853798))
  }
  # Pseudo-values for cause 1 at the evaluation times (jackknife on the
  # product-limit estimator).
  ymat <- jackknife(prodlim(Hist(obs.time,status) ~ 1), times=times, cause=1)
  if(RepMean==0){
    RepMean <- 1
  }
  # Recycle a scalar stepno into one entry per replication.
  if(length(stepno) != RepMean){
    stepno=rep(stepno[1],RepMean)
  }
  n <- nrow(xmat)
  sub <- seq(n)
  # NOTE(review): seq(1:n) equals 1:n here; seq_len(n) would be idiomatic.
  ids <- seq(1:n)
  # indices of the observations eligible for switching (times <= switch.t)
  ids.sub <- ids[1:length(which(obs.time<=switch.t))]
  ids.rest <- ids[-ids.sub]
  if(switch.to==0){
    switches <- 0
  }
  reslist <- list() #One element will contain all resulting values for one beta_i for every time point for every sample
  res.mean <- list() #One element will contain estimate and confidencebounds for one beta_i for every time point
  # NOTE(review): ymatsub.list is never filled or used below.
  ymatsub.list = vector("list",n)
  if(switch.t != 0){
    # Smoothed estimate: fit PseudoBoost on RepMean perturbed datasets and
    # average the coefficient paths afterwards.
    for (i in 1:RepMean) {
      cat("Replication:",i,"\n")
      # reproducible, replication-specific seed
      set.seed(seed.start+i*100)
      # draw the positions of the pairwise neighbour switches
      pairs <- sample(seq(length(ids.sub)-1),switches,replace=TRUE)
      obs.timesub <- obs.time
      if(length(pairs)>0){
        for(pairs.id in 1:length(pairs)){
          # swap the observation times of neighbours pairs[pairs.id] and +1
          zwischen <- obs.timesub[pairs[pairs.id]]
          obs.timesub[pairs[pairs.id]] <- obs.timesub[pairs[pairs.id]+1]
          obs.timesub[pairs[pairs.id]+1] <- zwischen
        }
      }
      statussub <- status
      xmatsub <- xmat
      # pseudo-values for the perturbed dataset
      ymatsub <- jackknife(prodlim(Hist(obs.timesub,statussub) ~ 1), times=times, cause=1)
      if (cv) {
        # choose the number of boosting steps by cross-validation
        cv.res <- cv.PseudoBoost(ymatsub,xmatsub,maxstepno=maxstepno,nu=nu,trace=FALSE,...)
        stepno[i] <- cv.res$optimal.step
      }
      ressub <- PseudoBoost(ymatsub,xmatsub,stepno=stepno[i],nu=nu,trace=FALSE)
      l = length(ressub$coefficients)
      # coefficient matrix of the final boosting step
      ressub <- ressub$coefficients[[l]]
      if(cv) cat("Number of Boosting steps: ",l,"\n")
      # collect the coefficient path of every covariate (plus intercept);
      # columns of each reslist element are replications
      if (i ==1) {
        for (xindex in 1:(ncol(xmat)+1)) {
          reslist[[xindex]] <- ressub[,xindex]
        }
      } else {
        for (xindex in 1:(ncol(xmat)+1)) {
          reslist[[xindex]] <- cbind(reslist[[xindex]],ressub[,xindex])
        }
      }
    }
    #reslist[[xindex+1]] <- stepno
    # if(switch.to==0){
    # for (xindex in 1:(ncol(xmat)+1)) {
    # res.mean[[xindex]] <- reslist[[xindex]]
    #
    #
    #
    # if (xindex == 1){
    # rescinames[xindex] <- "Intercept"
    # } else {
    # rescinames[xindex] <- paste("beta",(xindex-1),sep="_")
    # }
    # }
    # }else{
    # average across replications (per evaluation time)
    if(RepMean>1){
      for (xindex in 1:(ncol(xmat)+1)) {
        res.mean[[xindex]] <- apply(reslist[[xindex]],MARGIN=1,FUN=mean)
      }
    } else {
      for (xindex in 1:(ncol(xmat)+1)) {
        res.mean[[xindex]] <- reslist[[xindex]]
      }
    }
  } else {
    # No smoothing requested: single fit on the original data.
    set.seed(seed.start)
    if (cv) {
      cv.res <- cv.PseudoBoost(ymat,xmat,maxstepno=maxstepno,nu=nu,trace=FALSE,...)
      stepno[1] <- cv.res$optimal.step
    }
    ressub <- PseudoBoost(ymat,xmat,stepno=stepno[1],nu=nu,trace=FALSE)
    l = length(ressub$coefficients)
    ressub <- ressub$coefficients[[l]]
    if(cv) cat("Number of Boosting steps: ",l,"\n")
    for (xindex in 1:(ncol(xmat)+1)) {
      res.mean[[xindex]] <- ressub[,xindex]
    }
  }
  # Build the element names: "Intercept", "beta_1", ..., plus "stepno".
  rescinames <- rep("stepno",ncol(xmat) + 2)
  for (xindex in 1:(ncol(xmat)+1)){
    if (xindex == 1){
      rescinames[xindex] <- "Intercept"
    } else {
      rescinames[xindex] <- paste("beta",(xindex-1),sep="_")
    }
  }
  # }
  # Append the step numbers and the evaluation grid; 'xindex' still holds
  # its final loop value (ncol(xmat)+1) at this point.
  res.mean[[xindex+1]] <- stepno
  names(res.mean) <- rescinames
  res.mean[[xindex+2]] <- times
  names(res.mean)[xindex+2] <- "evaluationTimes"
  class(res.mean) <- "meanPseudoBoost"
  return(res.mean)
}
#' Plot an object of class meanPseudoBoost. The estimated effects will be plotted against the evaluation times saved in the object. Output as PDF.
#'
#' @param object An object of class meanPseudoBoost created by meanPseudoBoost().
#' @param est A numeric vector containing the indices of the estimates to be plotted. est=c(1) will e.g. plot the estimated intercept.
#' @param comb A boolean value indicating if the results should be combined in one plot (comb=TRUE) or if each estimate should be plotted separately (comb=FALSE).
#' @param trans A boolean value indicating if the results should be transformed (trans=TRUE => Plot exponential of the estimates).
#' @param name A string value indicating the name of the resulting PDF. E.g. name="results.pdf"
#' @return A PDF document with the name "name".
#' @export
# NOTE(review): the runtime titles spell "Estimater" (sic) and the separate
# plots advertise "confidencebounds" in the legend although only the point
# estimate line is drawn (the inner loop runs over j in c(1) only) --
# confirm whether confidence bounds were meant to be plotted.
plot.meanPseudoBoost <- function(object,est,comb=TRUE,trans=TRUE,name="results.pdf"){
  pdf(name)
  times <- object$evaluationTimes
  if (comb) {
    # one combined plot: all requested estimates with distinct line types
    if(trans==TRUE){plot(0,type="n",xlab="time",ylab="coefficient",xlim=c(0,1.3*max(times)),ylim=c(0,2))}
    else{plot(0,type="n",xlab="time",ylab="coefficient",xlim=c(0,1.3*max(times)),ylim=c(-2,2))}
    # reference line at the "no effect" level (1 on exp scale, 0 otherwise)
    if(trans==TRUE){abline(h=1, col = "lightgray")}
    else {abline(h=0, col = "lightgray")}
    linetyp <- 1
    for (i in 1:length(est)) {
      if(trans==TRUE){lines(times,exp(object[[est[i]]]),lty=linetyp)}
      else {lines(times,object[[est[i]]],lty=linetyp)}
      linetyp <- linetyp+1
    }
    legend("topright",legend=names(as.list(object[est])),lty=seq(linetyp),bty="n")
    title("Estimater for Parameters")
  }
  else {
    # one page per requested estimate
    for (i in 1:length(est)) {
      if(trans==TRUE){plot(0,type="n",xlab="time",ylab="coefficient",xlim=c(0,1.3*max(times)),ylim=c(0,2))}
      else{plot(0,type="n",xlab="time",ylab="coefficient",xlim=c(0,1.3*max(times)),ylim=c(-2,2))}
      if(trans==TRUE){abline(h=1, col = "lightgray")}
      else {abline(h=0, col = "lightgray")}
      linetyp=2
      for (j in c(1)){
        if(trans==TRUE){lines(times,exp(object[[est[i]]]),lty=min(linetyp,j))}
        else{lines(times,object[[est[i]]],lty=min(linetyp,j))}
      }
      if (est[i] == 1) {
        legend("topright",legend=c("Estimator of Intercept","confidencebounds"),lty=seq(linetyp),bty="n")
      } else {
        legend("topright",legend=c(paste("Estimator of beta",est[i]-1,sep="_"),"confidencebounds"),lty=seq(linetyp),bty="n")
      }
      title(paste("Estimater for ",names(as.list(object[est[i]])),sep=""))
    }
  }
  dev.off()
}
|
ae4974f8eb224cea514e8ffb505852023f6073fc
|
90c2365749baa1d1d8d0839fbc8b71c8fe628154
|
/r.R
|
be9af4cb110efadb592d4a5eb747a2dab183aff7
|
[] |
no_license
|
annachen368/r_language
|
4ad2cc79041455305e7d7cf93a15da2fd5eff0e8
|
fb63889f957c926ea26288d55733b8583eb7bc72
|
refs/heads/master
| 2021-01-01T05:59:34.536047
| 2014-09-28T22:45:04
| 2014-09-28T22:45:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,831
|
r
|
r.R
|
# Exploratory plotting script for reinforcement-learning experiment results.
# Expects data frames rate1..rate5 (epsilon in V1, average reward in V2) to
# already exist in the workspace; 'aimp' joins them into one wide table.
# Creating a Graph
windows()  # opens a new graphics device (Windows device; use x11()/quartz() elsewhere)
plot(aimp$V1, aimp$r1,type = "l", xlab="epsilon",ylab="avg reward",cex=.4,xaxt="n",col="red")
points(aimp$V1, aimp$r2,type = "l", xlab="epsilon",ylab="avg reward",cex=.5,xaxt="n",col="blue")
points(aimp$V1, aimp$r3,type = "l", xlab="epsilon",ylab="avg reward",cex=.5,xaxt="n",col="green")
points(aimp$V1, aimp$r4,type = "l", xlab="epsilon",ylab="avg reward",cex=.5,xaxt="n",col="yellow")
points(aimp$V1, aimp$r5,type = "l", xlab="epsilon",ylab="avg reward",cex=.5,xaxt="n",col="purple")
# NOTE(review): the col vector below contains ' yellow' with a stray leading
# space -- confirm the legend colours render as intended.
legend('topright', legend=(c("rate=0.1","rate=0.2","rate=0.3","rate=0.4","rate=0.5")) ,
lty=1, col=c('red', 'blue', 'green',' yellow','purple'), bty='n', cex=.75)
title("WorldWithThief QLearningAgent y 10000 1000")
axis(1, at = seq(0, 0.2, by = 0.005), las=2)
?legend  # interactive help lookup (no effect in non-interactive runs)
summary(aimp)
#abline(lm(rate.0$V2~rate.0$V1))
?plot
?axis
rbind(rate.0, rate1)  # NOTE(review): result is printed, not stored
# One-off package installation; normally done once, outside the script.
install.packages('sqldf')
install.packages('gsubfn')
install.packages('proto')
install.packages('RSQLite')
install.packages('DBI')
install.packages('RSQLite.extfuns')
library(gsubfn)
library(proto)
library(DBI)
library(RSQLite)
library(RSQLite.extfuns)
library(sqldf)
# Left-join the five rate tables on their epsilon column (v1) into one frame.
aimp<-sqldf("select rate1.v1,
rate1.v2 as r1,
rate2.v2 as r2,
rate3.v2 as r3,
rate4.v2 as r4,
rate5.v2 as r5
from rate1
LEFT JOIN rate2 ON rate2.v1=rate1.v1
LEFT JOIN rate3 ON rate3.v1=rate1.v1
LEFT JOIN rate4 ON rate4.v1=rate1.v1
LEFT JOIN rate5 ON rate5.v1=rate1.v1
")
x <- aimp$V1
y1 <- aimp$r1
y2 <- aimp$r2
y3 <- aimp$r3
# Scratch example with random data (overwrites the values assigned above).
x <- 1:10
y1 <- rnorm(10)
y2 <- rnorm(10)
y3 <- rnorm(10)
# One way
plot(x, y1, ylim = range(c(y1, y2, y3)))
points(x, y2, col = "red")
points(x, y3, col = "blue")
|
4bc2b328f519aa208ea3adde5eacc7c55b61c6d2
|
78d7ee805619bcb552206c336430a6e34e5911d4
|
/overview/src/hmp1-ii_comparisons.r
|
d970c0c9ab43e7446aaec7e92e568fb46b21da91
|
[] |
no_license
|
biobakery/hmp2_analysis
|
2178965fa84c8218610846f181d6b6072e8585b9
|
663bf7285e808aa09f27d4ddc09b3ebf89ec6a97
|
refs/heads/master
| 2022-07-28T16:47:53.875629
| 2019-05-28T01:20:27
| 2019-05-28T01:20:27
| 265,696,875
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,466
|
r
|
hmp1-ii_comparisons.r
|
library(plyr)
library(dplyr)
source("./common/disease_colors.r")
source("./common/load_bugs.r")
# Load HMP1-II MetaPhlAn2 profiles and keep stool samples only
hmp12_bugs.pcl <- pcl.read(file.path(HMP2_data, "hmp1-ii/hmp1-II_metaphlan2-mtd-qcd.tsv"), metadata.rows=8) %>%
pcl.filter.s(STSite == "Stool")
# Merge the datasets
hmp12_bugs.pcl$meta$diagnosis <- "HMP1-II"
# Normalize visit codes ("02S"/"03S") to plain numbers before coercion
hmp12_bugs.pcl$meta$VISNO[hmp12_bugs.pcl$meta$VISNO=="02S"] <- "2"
hmp12_bugs.pcl$meta$VISNO[hmp12_bugs.pcl$meta$VISNO=="03S"] <- "3"
hmp12_bugs.pcl$meta$VISNO <- as.numeric(as.character(hmp12_bugs.pcl$meta$VISNO))
merged.pcl <- pcl.merge(bugs.pcl, hmp12_bugs.pcl)
# Unified subject ID: HMP2 "subject" where present, else HMP1-II "RANDSID"
merged.pcl$meta$merged_subj <- as.character(merged.pcl$meta$subject)
merged.pcl$meta$merged_subj[is.na(merged.pcl$meta$merged_subj)] <-
as.character(merged.pcl$meta$RANDSID)[is.na(merged.pcl$meta$merged_subj)]
merged.pcl$meta$merged_subj <- factor(merged.pcl$meta$merged_subj)
# One diagnosis per subject (first matching sample); rownames = subject IDs
diag_subj <- merged.pcl$meta[match(levels(merged.pcl$meta$merged_subj), merged.pcl$meta$merged_subj),"diagnosis",drop=F]
rownames(diag_subj) <- levels(merged.pcl$meta$merged_subj)
# PERMANOVA to see if we can distinguish HMP1-II from non-IBD?
# fixed: was `diagnosis == "nonIBD" || diagnosis == "HMP1-II"` -- `||` is
# scalar/short-circuit (and errors on length > 1 operands in R >= 4.3);
# elementwise sample selection needs `%in%`, as used elsewhere in this
# script (see the joint.nonibd.ord section).
merged.pcl.healthy <- merged.pcl %>%
pcl.filter.s(diagnosis %in% c("nonIBD", "HMP1-II")) %>%
pcl.only(rank="s") %>% pcl.nicenames %>%
pcl.normalize
# Naive PERMANOVA (ignores repeated measures within subjects): does
# species-level community composition differ between HMP1-II and nonIBD?
ad.naive <- adonis(merged.pcl.healthy$x ~ merged.pcl.healthy$meta$diagnosis, method="bray", permutations=999)
ad.naive
# Permutation: free
# Number of permutations: 999
#
# Terms added sequentially (first to last)
#
# Df SumsOfSqs MeanSqs F.Model R2 Pr(>F)
# merged.pcl.healthy$meta$diagnosis 1 9.08 9.0804 33.105 0.03277 0.001 ***
# Residuals 977 267.99 0.2743 0.96723
# Total 978 277.06 1.00000
# ---
# Signif. codes: 0 β***β 0.001 β**β 0.01 β*β 0.05 β.β 0.1 β β 1
# Repeated-measures PERMANOVA: project helper that permutes the
# subject->diagnosis map (diag_subj) rather than individual samples --
# presumably to respect within-subject correlation; see its definition.
library(vegan)
D <- vegdist(merged.pcl.healthy$x, method="bray");
ad <- PERMANOVA_repeat_measures(
D, merged.pcl.healthy$meta[,c(),drop=F],
factor(merged.pcl.healthy$meta$merged_subj, levels=rownames(diag_subj)),
diag_subj, permutations=999)
ad
# Call:
# adonis(formula = D ~ ., data = mtdat[, metadata_order, drop = F], permutations = 0)
#
# Permutation: free
# Number of permutations: 0
#
# Terms added sequentially (first to last)
#
# Df SumsOfSqs MeanSqs F.Model R2 Pr(>F)
# diagnosis 1 9.08 9.0804 33.105 0.03277 0.001 ***
# Residuals 977 267.99 0.2743 0.96723 0.001 ***
# Total 978 277.06 1.00000
# ---
# Signif. codes: 0 β***β 0.001 β**β 0.01 β*β 0.05 β.β 0.1 β β 1
# Same repeated-measures test restricted to adults (plus all HMP1-II,
# which has no "adult" flag here).
library(vegan)
merged.pcl.healthy.adult <- pcl.filter.s(merged.pcl.healthy, adult | (diagnosis == "HMP1-II"))
Dadult <- vegdist(merged.pcl.healthy.adult$x, method="bray");
subjfact <- factor(merged.pcl.healthy.adult$meta$merged_subj)
ad <- PERMANOVA_repeat_measures(
Dadult, merged.pcl.healthy.adult$meta[,c(),drop=F], subjfact,
diag_subj[match(levels(subjfact), rownames(diag_subj)),,drop=F], permutations=999)
ad
# Call:
# adonis(formula = D ~ ., data = mtdat[, metadata_order, drop = F], permutations = 0)
#
# Permutation: free
# Number of permutations: 0
#
# Terms added sequentially (first to last)
#
# Df SumsOfSqs MeanSqs F.Model R2 Pr(>F)
# diagnosis 1 6.796 6.7965 25.437 0.03198 0.001 ***
# Residuals 770 205.736 0.2672 0.96802 0.001 ***
# Total 771 212.533 1.00000
# ---
# Signif. codes: 0 β***β 0.001 β**β 0.01 β*β 0.05 β.β 0.1 β β 1
# Species-level joint ordination
# PCoA of all samples (both cohorts, all diagnoses), coloured by diagnosis
joint.ord <- pcl.pcoa(merged.pcl %>% pcl.only(rank="s") %>% pcl.normalize)
pdf("./overview/joint_ordination_hmp1ii_alldiagnosis.pdf", 6.5, 5.5)
pcl.ordplot(merged.pcl, joint.ord, colour="diagnosis",
colour_override=c(hmp2_disease_colors, c("HMP1-II" = "forestgreen")),
colour_title = "Diagnosis", size_abs = 2)
dev.off()
# Species-level joint ordination (only non-IBD samples)
joint.nonibd.ord <- pcl.pcoa(merged.pcl %>% pcl.filter.s(diagnosis %in% c("HMP1-II", "nonIBD")) %>%
pcl.only(rank="s") %>% pcl.normalize)
pdf("./overview/joint_ordination_hmp1ii_nonibd.pdf", 6.5, 5.5)
pcl.ordplot(merged.pcl, joint.nonibd.ord, colour="diagnosis",
colour_override=c(hmp2_disease_colors, c("HMP1-II" = "forestgreen")),
colour_title = "Diagnosis", size_abs = 2)
dev.off()
# Same ordination coloured by Bacteroides (genus) abundance
pdf("./overview/joint_ordination_hmp1ii_bacteroides.pdf", 6.5, 5.5)
pcl.ordplot(merged.pcl %>% pcl.nicenames,
joint.nonibd.ord,
colour="Bacteroides",
size_abs = 3)
dev.off()
# ...and by Bacteroides ovatus (species) abundance
pdf("./overview/joint_ordination_hmp1ii_bacteroides_ovatus.pdf", 6.5, 5.5)
pcl.ordplot(merged.pcl %>% pcl.nicenames,
joint.nonibd.ord,
colour="Bacteroides ovatus",
size_abs = 3)
dev.off()
# Heatmaps of the 10 most abundant species: all samples, then only the
# non-IBD subset, both annotated by diagnosis.
pdf("./overview/joint_heatmap_hmp1ii.pdf", 12, 5)
merged.pcl %>%
pcl.only(rank="s") %>%
pcl.nicenames %>%
pcl.top.f(mean(x), n=10) %>%
pcl.heatmap(meta=c("diagnosis"), annotation_colors=list(diagnosis=c(hmp2_disease_colors, c("HMP1-II" = "forestgreen"))))
merged.pcl %>%
pcl.only(rank="s") %>%
pcl.nicenames %>%
pcl.filter.s(diagnosis %in% c("HMP1-II", "nonIBD")) %>%
pcl.top.f(mean(x), n=10) %>%
pcl.heatmap(meta=c("diagnosis"), annotation_colors=list(diagnosis=c(hmp2_disease_colors, c("HMP1-II" = "forestgreen"))))
dev.off()
# Small-format versions for the extended-data figure; each plot is printed
# twice, once without and once with the colour legend.
pdf("./overview/joint_ordination_hmp1ii_edfig.pdf", 1.15, 1.15)
ggp <- pcl.ordplot(merged.pcl, joint.ord, colour="diagnosis",
colour_override=c(hmp2_disease_colors, c("HMP1-II" = "forestgreen")),
colour_title = "Diagnosis", size_abs=1.4, outline_size=0.4) +
theme_nature() +
theme(axis.text.x=element_blank(), axis.text.y = element_blank())
print(ggp + guides(fill="none"))
print(ggp)
ggp <- pcl.ordplot(merged.pcl %>% pcl.nicenames, joint.ord, colour="Bacteroides ovatus",
sortby="Bacteroides ovatus", colour_title = "B. ovatus", size_abs=1.4, outline_size=0.4) +
theme_nature() +
theme(axis.text.x=element_blank(), axis.text.y = element_blank())
print(ggp + guides(fill="none"))
print(ggp)
dev.off()
## Fraction with P-copri
# A subject is a "carrier" if ANY of their samples exceeds 10% relative
# abundance of P. copri; split() groups the per-sample indicator by subject.
pcopriperson <- sapply(split(pcl.nicenames(merged.pcl)$x[,"Prevotella copri"] > 0.1, merged.pcl$meta$merged_subj), any)
ct <- table(pcopriperson, diag_subj$diagnosis)
# Carrier fraction per diagnosis group (row 2 of the table = TRUE counts)
ct[2,] / colSums(ct)
# Likelihood ratio test based on binomial likelihoods for the two means being different
# Binomial log-likelihood of a logical vector evaluated at its MLE
# proportion p = mean(x). Used below in a likelihood-ratio test of equal
# carrier fractions between cohorts.
# NOTE(review): assumes x is logical (TRUE/FALSE) with no NAs -- confirm.
binoll <- function(x) {
  p <- mean(x)
  # Degenerate case (all TRUE or all FALSE): the likelihood is exactly 1,
  # so the log-likelihood is 0. The naive formula below would produce NaN
  # here from log(0) * 0.
  if (p == 0 || p == 1) {
    return(0)
  }
  return (log(p) * sum(x) + log(1-p) * sum(!x))
}
# LRT: null = one shared carrier probability across HMP1-II + nonIBD;
# alt = separate probability per cohort. D ~ chisq(1) under the null.
# NOTE(review): assumes names(pcopriperson) align with rownames(diag_subj)
# (both derive from the merged_subj factor levels) -- TODO confirm.
ll_null <- binoll(pcopriperson[diag_subj$diagnosis %in% c("HMP1-II", "nonIBD")])
ll_alt <- binoll(pcopriperson[diag_subj$diagnosis == "HMP1-II"]) + binoll(pcopriperson[diag_subj$diagnosis == "nonIBD"])
D <- -2 * (ll_null - ll_alt)
p.value <- pchisq(D, 1, lower.tail=F)
p.value
# [1] 0.5497016
# Per-subject P. copri trajectories across HMP1-II visit numbers
df <- merged.pcl.healthy$meta
df$pcopri <- merged.pcl.healthy$x[,"Prevotella copri"]
library(ggplot2)
library(cowplot)
ggplot(data=df, aes(x = VISNO, y = pcopri, color=factor(RANDSID))) +
geom_line(aes(group=RANDSID)) +
geom_point() +
guides(color="none") +
ylab("Relative abundance (Prevotella copri)") +
xlab("Visit number") +
#scale_y_log10() +
theme_cowplot()
|
b399c2803ba54b6135c227681b25a639c12b07c9
|
94f308855bf8e8101bacfff76e7b5cc3f60cc174
|
/WCHC_Analyses/2.3.0.processVCF.platypus.R
|
7203fe44e91b798b056a81b5dc37a6b3200fd81b
|
[] |
no_license
|
BCI-EvoCa/Evo_history_CACRC
|
a823ecf6f276b2a01a8bc02167a97c13b0a7fe28
|
e89a05f2d7355641e7e41cbd0dd5ff993e1f7000
|
refs/heads/master
| 2020-03-18T21:26:18.611399
| 2018-05-29T10:22:41
| 2018-05-29T10:22:41
| 135,280,267
| 1
| 1
| null | 2018-05-29T10:32:21
| 2018-05-29T10:32:20
| null |
UTF-8
|
R
| false
| false
| 8,269
|
r
|
2.3.0.processVCF.platypus.R
|
#get sample list information
# Master sample sheet: one row per sample; "setID" groups samples into
# per-patient sets that are processed together in the loop below.
sampleList <- read.csv(file="~/Projects/IBDproject/masterSampleList.csv", header=TRUE, stringsAsFactors=FALSE)
# NOTE(review): `setNames` shadows base::setNames for the rest of the script.
setNames <- unique(sampleList[["setID"]])
platDir <- "~/Projects/IBDproject/annoPlatypusCalls/exomes/"
vcfName <- ".merged_indels.vcf"
# Mutation type being processed; swap the comment to run in indel mode.
mutType <- "snv"
#mutType <- "indel"
outAnno <- "~/Projects/IBDproject/6.indelPlatypusCalls/annotatedIndels/"
#concatenate names in table and delete unwanted columns
# Per-set path to the merged Platypus VCF (assumes column 1 of the sheet
# is the setID -- TODO confirm against masterSampleList.csv).
sampleList["sampleInfo"] <- paste(platDir, sampleList[[1]],"/", sampleList[[1]], vcfName, sep="")
#perform pipeline for each sample
# seq_along() instead of 1:length(): avoids iterating over c(1, 0) if
# setNames is ever empty
for(j in seq_along(setNames)){
currSetId <- setNames[j]
print(paste("#### filtering sample ", currSetId, " ####",sep=""))
#setup input/output names
subSample <- sampleList[sampleList[[1]]==currSetId, ]
#### These files are temporary and deleted later ####
FILTName <- paste(platDir, currSetId,"/", currSetId, ".FILT.vcf", sep="")
VARName <- paste(platDir, currSetId,"/", currSetId, ".VAR.vcf", sep="")
SOMAName <- paste(platDir, currSetId,"/", currSetId, ".SOMA.vcf", sep="")
#somatic files
confTotalName <- paste(platDir, currSetId,"/", currSetId, ".", mutType,".somatic.vcf", sep="")
confTotalOutput <- paste(platDir, currSetId,"/", currSetId, ".", mutType, ".somatic.txt", sep="")
#SNP (germline variant) files
germTotalName <- paste(platDir, currSetId,"/", currSetId, ".germline.vcf", sep="")
germTotalOutput <- paste(platDir, currSetId,"/", currSetId, ".germline.txt", sep="")
#biopsy order lists and therefore indexes to remove
# Sample IDs come from the VCF #CHROM header line; the first 9 fields are
# the fixed VCF columns, so sample names start at field 10.
biopList <- system(command = paste("grep \"#CHROM\" ", platDir, currSetId, "/", currSetId, vcfName, sep=""), intern = TRUE, wait = TRUE)
biopList <- strsplit(biopList, split = "\t")
biopList <- biopList[[1]]
biopList <- biopList[10:length(biopList)]
write.table(biopList, file=paste(platDir, currSetId, "/", currSetId, ".bioIDorder.txt", sep=""), sep = "\t", row.names = FALSE, quote = FALSE)
# 0-based index of the matched normal sample (SnpSift GEN[] indexes from 0)
normalIndex <- (which(biopList==subSample[1, "normalID"]))-1
system(command = paste("mkdir ", outAnno, currSetId, sep=""))
#subset exclusion list
# 0-based indexes of all sample columns; drop any flagged retain==2
colIndexes <- c(0:(nrow(subSample)-1))
remList <- subSample[subSample[["retain"]]==2, "sampleID"]
if(length(remList)==0){
keepList <- colIndexes
write.table(biopList, file=paste(outAnno, currSetId, "/", currSetId, ".vcfbioID.txt", sep=""), sep = "\t", row.names = FALSE, quote = FALSE)
}else{
keepList <- colIndexes[-(which(biopList %in% remList))]
biopListTemp <- biopList[-(which(biopList %in% remList))]
write.table(biopListTemp, file=paste(outAnno, currSetId, "/", currSetId, ".vcfbioID.txt", sep=""), sep = "\t", row.names = FALSE, quote = FALSE)
}
#remove normal column from keep list
keepListNoNorm <- keepList[-which(keepList == normalIndex)]
#prepare indexes for total variant output
# Build "((GEN[k].NR > 9) & (GEN[k].NV > 0)) | " clauses for each tumour
# sample, then trim the trailing "| " from the last element.
counterTemp <- 1
totalIndexStrings <- as.list(NA)
for(k in keepListNoNorm){
totalIndexStrings[[counterTemp]] <- paste(" ((GEN[", k,"].NR > 9) & (GEN[", k,"].NV > 0)) | ", sep="")
counterTemp <- counterTemp +1
}
totalIndexStrings[[length(totalIndexStrings)]] <- substr(totalIndexStrings[[length(totalIndexStrings)]], 1, (nchar(totalIndexStrings[[length(totalIndexStrings)]]) - 2 ))
#prepare indexes for extractFields command
# Per-sample "GEN[k].NR" / "GEN[k].NV" field names (presumably Platypus
# NR = reads at site, NV = reads supporting the variant -- confirm).
counterTemp <- 1
extractStringsNR <- as.list(NA)
for(k in keepList){
extractStringsNR[[counterTemp]] <- paste(" \"GEN[", k,"].NR\" ", sep="")
counterTemp <- counterTemp +1
}
counterTemp <- 1
extractStringsNV <- as.list(NA)
for(k in keepList){
extractStringsNV[[counterTemp]] <- paste(" \"GEN[", k,"].NV\" ", sep="")
counterTemp <- counterTemp +1
}
# 1. filter by FILTER field (PASS plus a whitelist of soft filters)
filtVarCommand <- paste("cat ", subSample[1, "sampleInfo"], " | java -jar ~/bin/SnpSift.jar filter \"( ( (FILTER ='PASS') | (FILTER ='alleleBias') | (FILTER ='HapScore') | (FILTER ='SC') | (FILTER ='badReads') | (FILTER ='SC;alleleBias') | (FILTER ='HapScore;alleleBias') | (FILTER ='HapScore;SC') ) )\" > ", FILTName, sep="")
system(command=filtVarCommand)
# 2. annotate variant types (SNP/INS/DEL) with SnpSift varType
# fixed: jar path was "bin/SnpSift.jar" (relative to the working
# directory), inconsistent with the "~/bin/SnpSift.jar" used by every
# other SnpSift invocation in this script
annoCommand <- paste("java -jar ~/bin/SnpSift.jar varType ", FILTName," > ", VARName, sep="")
system(command=annoCommand)
#### somatic files ####
# 3. filter for somatic mutations (not in normal)
# SNVs: normal must have adequate depth and <2 supporting reads (<3 when
# depth >= 100); indels: zero supporting reads in the normal.
if(mutType == "snv"){
somaticCommand <- paste("cat ", VARName, " | java -jar ~/bin/SnpSift.jar filter \"( ( (exists SNP) & (GEN[", normalIndex,"].NV < 2) & (GEN[", normalIndex,"].NR < 100) & (GEN[", normalIndex,"].NR > 9) ) | ( (exists SNP) & (GEN[", normalIndex,"].NV < 3) & (GEN[", normalIndex,"].NR > 99) ) )\" > ", SOMAName, sep="")
system(command=somaticCommand)
}else if(mutType == "indel"){
somaticCommand <- paste("cat ", VARName, " | java -jar ~/bin/SnpSift.jar filter \"( ( (exists DEL) & (GEN[", normalIndex,"].NV = 0)) | ( (exists INS) & (GEN[", normalIndex,"].NV = 0)) )\" > ", SOMAName, sep="")
system(command=somaticCommand)
}
# 4. filter by read depth (>9X for ANY samples), this is the final somatic variants vcf
depthCommand5 <- paste("cat ", SOMAName, " | java -jar ~/bin/SnpSift.jar filter \"( ( ", paste(totalIndexStrings, collapse=" ") ,"))\" > ", confTotalName, sep="")
system(command=depthCommand5)
#### germline files ####
# 4b. produce non-filtered germline file, this is the final germline vcf
if(mutType == "snv"){
depthCommand3 <- paste("cat ", VARName, " | java -jar ~/bin/SnpSift.jar filter \"( (exists SNP) & (GEN[", normalIndex,"].NV > 0) & (GEN[", normalIndex,"].NR > 9) )\" > ", germTotalName, sep="")
system(command=depthCommand3)
}else if(mutType == "indel"){
depthCommand3 <- paste("cat ", VARName, " | java -jar ~/bin/SnpSift.jar filter \"( ((exists DEL) & (GEN[", normalIndex,"].NV > 20) & (GEN[", normalIndex,"].NR > 49)) | ((exists INS) & (GEN[", normalIndex,"].NV > 20) & (GEN[", normalIndex,"].NR > 49)))\" > ", germTotalName, sep="")
system(command=depthCommand3)
}
#### vcf to text file conversion ####
# Extract CHROM/POS/REF/ALT plus per-sample NR/NV columns to tab-delimited
# text for downstream analysis.
extractCommand2 <- paste("java -jar ~/bin/SnpSift.jar extractFields ", confTotalName, " \"CHROM\" \"POS\" \"REF\" \"ALT\" ", paste(extractStringsNR, collapse=" "), " ", paste(extractStringsNV, collapse=" "), " > ", confTotalOutput, sep="")
system(command=extractCommand2)
extractCommand5 <- paste("java -jar ~/bin/SnpSift.jar extractFields ", germTotalName, " \"CHROM\" \"POS\" \"REF\" \"ALT\" ", paste(extractStringsNR, collapse=" "), " ", paste(extractStringsNV, collapse=" "), " > ", germTotalOutput, sep="")
system(command=extractCommand5)
#tidy up: remove intermediate per-sample VCFs
system(command=paste("rm ", FILTName, sep=""))
system(command=paste("rm ", VARName, sep=""))
system(command=paste("rm ", SOMAName, sep=""))
# fixed: a stray "}" here closed the per-sample loop early, which (a) left
# the file with an unmatched "}" at the end and (b) put the annotation
# section below at top level even though it references loop-local
# variables (subSample, currSetId, confTotalOutput, ...). The loop now
# closes at the final brace, so annotation runs once per sample set.
#annotate somatic and germline files with annoVar
makeAnnovar(subSample, paste(".", mutType, ".somatic.txt", sep=""), paste(".", mutType, ".somatic", sep=""), platDir)
makeAnnovar(subSample, ".germline.txt", ".germline", platDir)
system(command=paste("rm ", confTotalOutput, sep=""))
system(command=paste("rm ", germTotalOutput, sep=""))
#move annotated files to new directory
annoVcf <- paste(platDir, currSetId,"/", currSetId, ".", mutType, ".somatic.annoVar.variant_function.txt", sep="")
annoVcfNew <- paste(outAnno, currSetId,"/", currSetId, ".", mutType, ".somatic", ".annoVar.variant_function.txt", sep="")
annoVcfEx <- paste(platDir, currSetId,"/", currSetId, ".", mutType, ".somatic.annoVar.exonic_variant_function.txt", sep="")
annoVcfExNew <- paste(outAnno, currSetId,"/", currSetId, ".", mutType, ".somatic.annoVar.exonic_variant_function.txt", sep="")
annoSNPTotal <- paste(platDir, currSetId,"/", currSetId, ".germline.annoVar.variant_function.txt", sep="")
annoSNPTotalNew <- paste(outAnno, currSetId,"/", currSetId, ".germline.annoVar.variant_function.txt", sep="")
annoSNPTotalEx <- paste(platDir, currSetId,"/", currSetId, ".germline.annoVar.exonic_variant_function.txt", sep="")
annoSNPTotalExNew <- paste(outAnno, currSetId,"/", currSetId, ".germline.annoVar.exonic_variant_function.txt", sep="")
#rename files with .txt prepend
system(command=paste("mv", annoVcf, annoVcfNew))
system(command=paste("mv", annoVcfEx, annoVcfExNew))
system(command=paste("mv", annoSNPTotal, annoSNPTotalNew))
system(command=paste("mv", annoSNPTotalEx, annoSNPTotalExNew))
}
|
8565377b67b3719d9ebf7a8290d65175c109a356
|
9a2384f2062f1db180d7dfd2651c998b39e0dce9
|
/Rcode matrix to raster.R
|
0ef9e5c67686ddc7f440da6d5e0fa76d9a2001d2
|
[] |
no_license
|
wadehobbs/Matrix-to-Raster-for-spatial-stats-
|
8d0a44f798d76ba6f48340234e277140bbf293c9
|
13cd348b8f3a67d7c098c274d85086791b99670b
|
refs/heads/master
| 2020-03-07T08:37:06.346658
| 2018-08-21T08:17:49
| 2018-08-21T08:17:49
| 127,383,694
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,625
|
r
|
Rcode matrix to raster.R
|
#Creating a SpatialGridDataFrame in package sp
#Create the base grid; GridTopology arguments are:
# - coordinates of the lower-left cell centre (cellcentre.offset), here: 1,1
# - cell size in each dimension (cellsize), here: 1,1
# - number of cells in each dimension (cells.dim), here: 4,8
bbgrid <- GridTopology(c(1,1), c(1,1), c(4,8))
#Import data - Data must be a data.frame (worked as a 1 column data.frame, did not work as a 4x8 data.frame)
test_data <- read_csv("~/Dropbox/PhD/Study 3/Spatial modelling/test_data.csv", col_names = FALSE)
#Create the grid data frame
spdf <- SpatialGridDataFrame(bbgrid, test_data)
#Plot
spplot(spdf, sp.layout = c("sp.points", SpatialPoints(coordinates(spdf))))
#Create raster grid from raster package
# specify the RasterLayer with the following parameters:
# - minimum x coordinate (left border)
# - minimum y coordinate (bottom border)
# - maximum x coordinate (right border)
# - maximum y coordinate (top border)
# - resolution (cell size) in each dimension
rastergrid <- raster(xmn=0.5, ymn=0.5, xmx=4.5, ymx=8.5, resolution=c(1,1))
# NOTE(review): test_data_vec is never defined in this script -- presumably
# a flattened version of test_data (e.g. unlist(test_data)); confirm before
# running.
rastergrid <- setValues(rastergrid, test_data_vec)
#Plot raster
plot(rastergrid); points(coordinates(rastergrid), pch=3)
#Can also convert matrix to raster but this does not work as well - dims are off
rastergrid2 <- raster(test_data)
#Calculating morans i for autocorrelation
Moran(rastergrid) #Single value that is the mean of each cell's autocor value
moran_test <- MoranLocal(rastergrid) #Returns a rasterlayer of morans i values (values of the rasterlayer are the results of the autocor test)
matrix(values(moran_test), nrow = 8, ncol = 4, byrow = TRUE) #Matrix of morans i values
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.