blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
42d4723b73977caef4289b91536e4ac12eb9a33f | 4ba3e2c619ab8519449af96eb745e3ac2d4a3284 | /Erika's original script.R | 1a0dbcbc3b6ce8762669e9ecfe51bceaf6e78d79 | [] | no_license | Mesolith/Isotope-normalisation | 12f36cc9ef61a11aaac58945611034bcbb44ac40 | 21dd032b8353f66a63f31e49b7fd1b08fb80dabe | refs/heads/master | 2021-07-15T23:57:53.141337 | 2020-07-16T19:31:31 | 2020-07-16T19:31:31 | 189,245,495 | 0 | 0 | null | 2019-05-29T18:04:12 | 2019-05-29T14:49:06 | R | UTF-8 | R | false | false | 18,574 | r | Erika's original script.R |
############################################################################
############################################################################
## ##
## Erika's normalization calculation and data-quality checking ##
## ##
############################################################################
############################################################################
################################################
# MODIFY THIS SECTION #
################################################
# User-editable configuration: working directory, runfile name, and the
# certified ("true") values of the two reference materials (RMs) used for
# the two-point normalization of d15N and d13C further down.
# 1. Put your runfile's location in the quotation marks. Change nothing else in line 15.
#<- remove this comment if using lab basement computer -> setwd("C:/Users/Sercon_1/Desktop") # Place csv file on desktop
setwd("~/Dropbox Oxford/Dropbox/dropbox AGRICURB/Runfiles/160812")
# Read the raw runfile; keep only columns 1-3, 5, 6 and 9-11 (sample info
# plus the raw and drift-corrected isotope readings).
name <- read.csv("160812.csv", header=F)[, c(1:3, 5, 6,9:11)]
runfile.id <- "160812"
# 2. Assign RM1 and RM2 the same character string as in your spreadsheet.
RM1.name <- "SEAL2" # <- the name of your RM1 exactly as it is in the spreadsheet
RM2.name <- "USGS40" # <- the name of your RM2 exactly as it is in the spreadsheet
# 3. For RM1 and RM2, give the true d15N values relative to AIR and stdev obtained from literature
RM1T.N <- 17.3 #RM1 d15N mean
RM1Tsd.N <- 0.29 #RM1 d15N standard deviation
RM2T.N <- -4.52 # RM2 d15N mean
RM2Tsd.N <- 0.06 #RM2 d15N standard deviation
# 4. For RM1 and RM2, give the true d13C values relative to VPDB and stdev obtained from literature
RM1T.C <- -13.3 # RM1 d13C mean
RM1Tsd.C <- 0.11 # RM1 d13C standard deviation
RM2T.C <- -26.39 # RM2 d13C mean
RM2Tsd.C <- 0.04 # RM2 d13C standard deviation
################################################
# Don't forget to change C RMs below #
################################################
#######################################################################################
################################################
# Formatting columns from raw data file #
################################################
# The exported csv stacks two tables: raw values above the row whose first
# cell reads "Drift Corrected", and drift-corrected values below it. Locate
# that marker row and extract both tables.
rownumber <- (1:nrow(name))
name2 <- name[9:((rownumber[name[,1]=="Drift Corrected"])-2),]
name3 <- name[((rownumber[name[,1]=="Drift Corrected"])+4):nrow(name),]
# NOTE(review): strReverse is defined here but never used anywhere below.
strReverse <- function(x)
sapply(lapply(strsplit(x, NULL), rev), paste, collapse="")
runname <- runfile.id
# Combine raw (name2) and drift-corrected (name3, cols 4-8) measurements,
# plus placeholder columns for the run id, %C, %N and C/N filled in below.
data <- data.frame(name2, name3[,4:8], rep(runname, nrow(name2)), rep(0, nrow(name2)),rep(0, nrow(name2)),rep(0, nrow(name2)))
names(data) <- c("Ps", "ID", "Wt", "NugR", "d15NR", "CugR", "d13CR", "d18OR", "Nugdc", "d15Ndc", "Cugdc", "d13Cdc", "d18Odc", "Runfile", "pcC", "pcN", "CN")
#make numeric things numeric (everything except ID and Runfile)
for (i in c(1, 3:13, 15:17)){data[,i] <- as.numeric(as.character(data[,i]))}
#Add pcC, pcN and CN ratio #NB these are based on drift corrected values
# %C and %N: ug element / (mg sample * 10).  C/N is the molar ratio:
# (ugC/12)/(ugN/14) = ugC/ugN * 14/12.
data$pcC <- data$Cugdc/data$Wt/10
data$pcN <- data$Nugdc/data$Wt/10
data$CN <- data$Cugdc/data$Nugdc*14/12
# Drop run positions 7, 38 and 55 -- presumably blanks or failed drops in
# this particular run; confirm against the run sheet before reusing.
data <- data[data$Ps!=7 & data$Ps!=38 & data$Ps!=55, ]
########################################################################################
# Create the archive copy
########################################################################################
# Build the lab-archive table (one row per analysed position) from the
# drift-corrected results; lab number is runfile.id-Ps.
archive <- data.frame(samplename=data$ID, sampleweight = data$Wt, RS="S", owner="AS", PreparedBy ="AS", Funding="Bogaard ERC", sampletype=0, Taxa="", site="", age.context=0, ugN=data$Nugdc, d15N=data$d15Ndc, ugC=data$Cugdc, d13C=data$d13Cdc, CN=data$CN, labno = paste(runfile.id, data$Ps, sep="-"))
# Reference ("R") alanines: position 2, the drop 8 positions before the
# first SALANINE, and every SALANINE; everything else stays "S".
list.of.salanines <- data$Ps[archive$samplename=="SALANINE"]
first.s <- list.of.salanines[1]
first.r <- first.s-8
list.of.ralanines <- c(2, first.r, list.of.salanines)
archive$RS <- "S"
# NOTE(review): list.of.ralanines holds run positions (Ps) but is used here
# as row indices into archive; rows for Ps 7/38/55 were removed above, so
# positions and row numbers may no longer line up -- confirm.
archive$RS[list.of.ralanines] <- "R"
# Standards are the alanines and the two RMs; everything else is a plant
# sample from the (hard-coded) Bronze Age context.
archive$sampletype[archive$samplename=="ALANINE" |archive$samplename=="SALANINE" | archive$samplename==paste(RM1.name) | archive$samplename==paste(RM2.name)] <- "Standard"
archive$sampletype[archive$samplename!="ALANINE" & archive$samplename!="SALANINE" & archive$samplename!=paste(RM1.name) & archive$samplename!=paste(RM2.name)] <- "Plant"
archive$age.context[archive$samplename=="ALANINE" |archive$samplename=="SALANINE" | archive$samplename==paste(RM1.name) | archive$samplename==paste(RM2.name)] <- "Modern"
archive$age.context[archive$samplename!="ALANINE" & archive$samplename!="SALANINE" & archive$samplename!=paste(RM1.name) & archive$samplename!=paste(RM2.name)] <- "Bronze Age"
names(archive) <- c("sample name", "sample weight", "R/S", "Owner", "Prepared by", "Funding", "Sample type", "Taxa", "Site", "age/context", "ug N", "d15N AIR", "ug C", "d13C VPDB", "C/N molar", "Lab number")
# Append two run-identification rows, then round-trip through csv so the
# header row can be moved below them in the final archive layout.
run.id <- matrix(nrow=2, ncol=16)
run.id[1, 1:2] <- c("Run number", paste(runfile.id))
run.id[2, 1:2] <- c("User", "Amy Styring")
run.id[1:2, 3:16] <- ""
run.id <- data.frame(run.id)
names(run.id) <- names(archive)
archive <- rbind(archive, run.id)
write.csv(archive, paste("archive", runfile.id, "csv", sep="."))
data2 <- read.csv(paste("archive", runfile.id, "csv", sep="."), header=F)
bottom <- nrow(data2)
run.id <- data2[(bottom-1):bottom, ]
just.data <- data2[1:(bottom-2), ]
final.archive <- rbind(run.id[2:17], just.data[2:17])
write.csv(final.archive, paste("final.archive", runfile.id, "csv", sep="."), row.names=FALSE)
################################################
# Calculate normalized N data #
################################################
## Step 1 : Pre-calculate all of the means and stdevs of the standards
# Add output columns for the normalized d15N and its propagated uncertainty.
normd15N <- rep(0, length(data$d15Ndc))
d15Nsd <- rep(0, length(data$d15Ndc))
data <- data.frame(data, normd15N, d15Nsd)
# Measured (drift-corrected) means and sds of the two reference materials.
RM1M <- mean(data$d15Ndc[data$ID==paste(RM1.name)])
RM2M <- mean(data$d15Ndc[data$ID==paste(RM2.name)])
RM1Msd <- sd(data$d15Ndc[data$ID==paste(RM1.name)])
RM2Msd <- sd(data$d15Ndc[data$ID==paste(RM2.name)])
# Sample-alanine repeats provide the per-measurement uncertainty estimate.
# Ps > 10 presumably skips the conditioning drops at the start of the run
# -- confirm against the run layout.
alaninesd <- sd(data$d15Ndc[data$ID=="SALANINE" & data$Ps > 10])
mean.alanine <- mean(data$d15Ndc[data$ID=="SALANINE" & data$Ps > 10])
## Step 2: Normalize data and calculate uncertainties
## This is based on Kragten's spreadsheet
# Two-point normalization of each drift-corrected d15N value onto the AIR
# scale, with the uncertainty propagated by Kragten's numerical method:
# each of the five inputs (RM1 true, RM1 measured, RM2 true, RM2 measured,
# sample) is perturbed by one standard deviation in turn and the squared
# changes in the result are summed.
#
# raw2true(): linear mapping defined by the two reference materials.
# cv = c(RM1 true, RM1 measured, RM2 true, RM2 measured, sample measured).
raw2true <- function(cv) {cv[1] + (cv[5]-cv[2])*((cv[1]-cv[3])/(cv[2]-cv[4]))}
# Squared change in the result when input 'colno' is shifted by its sd
# (matrix3 and measuredcolumn are assigned in the loop below).
dsqr <- function(colno){(raw2true(matrix3[,colno])-raw2true(measuredcolumn))^2}
for (i in seq_len(nrow(data))){
  x1 <- data$d15Ndc[i]
  measuredcolumn <- c(RM1T.N, RM1M, RM2T.N, RM2M, x1)
  # Column j of matrix3 equals measuredcolumn with element j shifted by its
  # standard deviation (the sample's sd is estimated from the alanines).
  matrix1 <- diag(c(RM1Tsd.N, RM1Msd, RM2Tsd.N, RM2Msd, alaninesd))
  matrix2 <- matrix(measuredcolumn, nrow = 5, ncol = 5)
  matrix3 <- matrix1 + matrix2
  data$normd15N[i] <- raw2true(measuredcolumn)
  # Combined standard uncertainty: root sum of squares over all five inputs.
  data$d15Nsd[i] <- sqrt(dsqr(1)+dsqr(2)+dsqr(3)+dsqr(4)+dsqr(5))
}
################################################
# Calculate normalized C data #
################################################
## Step 1 : Pre-calculate all of the means and stdevs of the standards
# Add output columns for the normalized d13C and its propagated uncertainty.
normd13C <- rep(0, length(data$d13Cdc))
d13Csd <- rep(0, length(data$d13Cdc))
data <- data.frame(data, normd13C, d13Csd)
# NOTE: these assignments overwrite the nitrogen values of RM1M/RM2M etc.
# with the carbon ones (the carbon copies are stashed under *.C names
# further down).
RM1M <- mean(data$d13Cdc[data$ID==paste(RM1.name)]) # Make sure to select the correct RM!
RM2M <- mean(data$d13Cdc[data$ID==paste(RM2.name)]) # Make sure to select the correct RM!
RM1Msd <- sd(data$d13Cdc[data$ID==paste(RM1.name)])
RM2Msd <- sd(data$d13Cdc[data$ID==paste(RM2.name)])
alaninesd <- sd(data$d13Cdc[data$ID=="SALANINE" & data$Ps > 10])
mean.alanine <- mean(data$d13Cdc[data$ID=="SALANINE" & data$Ps > 10])
## Step 2: Normalize data and calculate uncertainties
## This is based on Kragten's spreadsheet
# Same two-point normalization and Kragten uncertainty propagation as for
# d15N above, applied to the drift-corrected d13C values (VPDB scale).
raw2true <- function(cv) {cv[1] + (cv[5]-cv[2])*((cv[1]-cv[3])/(cv[2]-cv[4]))}
dsqr <- function(colno){(raw2true(matrix3[,colno])-raw2true(measuredcolumn))^2}
for (i in seq_len(nrow(data))){
  x1 <- data$d13Cdc[i]
  measuredcolumn <- c(RM1T.C, RM1M, RM2T.C, RM2M, x1)
  # Column j of matrix3 equals measuredcolumn with element j shifted by its
  # standard deviation.
  matrix1 <- diag(c(RM1Tsd.C, RM1Msd, RM2Tsd.C, RM2Msd, alaninesd))
  matrix2 <- matrix(measuredcolumn, nrow = 5, ncol = 5)
  matrix3 <- matrix1 + matrix2
  data$normd13C[i] <- raw2true(measuredcolumn)
  # Combined standard uncertainty: root sum of squares over all five inputs.
  data$d13Csd[i] <- sqrt(dsqr(1)+dsqr(2)+dsqr(3)+dsqr(4)+dsqr(5))
}
# Stash the carbon RM statistics under distinct names (RM1M/RM2M etc. now
# hold the carbon values, having overwritten nitrogen's in the step above).
RM1M.C <- RM1M
RM2M.C <- RM2M
RM1Msd.C <- RM1Msd
RM2Msd.C <- RM2Msd
####################################################################
# Now you have normalized data! BUT CAN YOU ACCEPT IT? #
####################################################################
# Full table (standards + samples, raw + normalized) for record keeping.
write.csv(data, "Alldata.csv")
#######################################################################################
#########################################################################
#########################################################################
## Diagnostic plots <- CHECK THESE BEFORE ACCEPTING DATA ##
#########################################################################
#########################################################################
####################################################################
# How good were the standards today? #
####################################################################
# Split the run into the two RMs, the check-standard alanines and the
# actual samples (Ps > 10 presumably skips the conditioning drops at the
# start of the run -- confirm).
RM1 <- data[data$ID==paste(RM1.name),]
RM2 <- data[data$ID==paste(RM2.name),]
alanine <- data[data$ID=="SALANINE" & data$Ps > 10,]
samples <- data[data$ID != paste(RM1.name) & data$ID!= paste(RM2.name) & data$ID!= "SALANINE" & data$ID!= "ALANINE",]
d13C <- expression(paste(delta^{13},"C (\u2030) drift corrected"))
d15N <- expression(paste(delta^{15},"N (\u2030) drift corrected"))
###### For Carbon
# Each panel: a boxplot with the individual measurements overlaid on top
# (par(new=T) plots onto the same panel without clearing it).
par(mfrow=c(1,3))
par(mar=c(4,5,5,1))
boxplot(RM1$d13Cdc)
par(new=T)
plot(rep(1, length(RM1$d13Cdc)), RM1$d13Cdc, axes=F, xlab=paste(RM1.name, "RM1"), ylab=d13C)
boxplot(RM2$d13Cdc)
par(new=T)
plot(rep(1, length(RM2$d13Cdc)), RM2$d13Cdc, axes=F, xlab=paste(RM2.name, "RM2"), ylab=d13C)
mtext("Drift corrected d13C for Two Reference Materials and S-Alanine", line=3)
mtext("Look at the spread of the measured values of References Materials and Alanines. What is the spread in permil?", cex=0.7, line=2)
mtext("Are the measured deltas of the alanines centred (randomly distributed) around their expected mean, in red?", cex=0.7, line=1)
mtext("Or is there a bias in the measurement? Are any of the alanines wrong?", cex=0.7, line=0)
boxplot(alanine$d13Cdc)
# Red line: accepted d13C of the alanine check standard -- confirm value.
abline(h=-27.11000, col="red")
par(new=T)
plot(rep(1, length(alanine$d13Cdc)), alanine$d13Cdc, axes=F, xlab="S-Alanines", ylab=d13C)
dev.copy2pdf(file="plot1.pdf", encoding="WinAnsi")
###### For Nitrogen
###Assign NITROGEN RMs here, carbon RMs below
par(mfrow=c(1,3))
par(mar=c(4,5,5,1))
boxplot(RM1$d15Ndc)
par(new=T)
plot(rep(1, length(RM1$d15Ndc)), RM1$d15Ndc, axes=F, xlab=paste(RM1.name, "RM1"), ylab=d15N)
boxplot(RM2$d15Ndc)
mtext("Drift corrected d15N for Two Reference Materials and S-Alanine", line=3)
mtext("Look at the spread of the measured values of References Materials and Alanines. What is the spread in permil?", cex=0.7, line=2)
mtext("Are the measured deltas of the alanines centred (randomly distributed) around their expected mean, in red?", cex=0.7, line=1)
mtext("Or is there a bias in the measurement? Are any of the alanines wrong?", cex=0.7, line=0)
par(new=T)
plot(rep(1, length(RM2$d15Ndc)), RM2$d15Ndc, axes=F, xlab=paste(RM2.name, "RM2"), ylab=d15N)
boxplot(alanine$d15Ndc)
# NOTE(review): the reference line is drawn at -1.56 but the legend below
# states "true alanine = -1.63" -- one of the two values must be wrong;
# confirm against the alanine certificate and make them consistent.
abline(h=-1.56, col="red")
par(new=T)
plot(rep(1, length(alanine$d15Ndc)), alanine$d15Ndc, axes=F, xlab="S-Alanines", ylab=d15N)
legend("topright", "true alanine = -1.63", lty=1, col="red")
dev.copy2pdf(file="plot2.pdf", encoding="WinAnsi")
#ID position of all dots
####################################################################
# What effect did drift correction have? #
####################################################################
# Top row: normalized deltas versus run position (any trend left after
# drift correction shows up here). Bottom row: size of the drift
# correction itself at each position.
par(mfrow=c(2,2))
xtext <- "Position in Run"
d15N <- expression(paste("Normalized ", delta^{15},"N (\u2030)"))
d13C <- expression(paste("Normalized ", delta^{13},"C (\u2030)"))
plot(data$Ps, data$normd13C, xlab=xtext, ylab=d13C, main="Change in d13C through run?")
plot(data$Ps, data$normd15N, xlab=xtext, ylab=d15N, main="Change in d15N through run?" )
# The quantities plotted below are drift-corrected minus raw values, so
# label them accordingly (the original labels had the subtraction the
# wrong way round).
d15N <- expression(paste("Drift-corrected - raw ", Delta^{15},"N (\u2030)"))
d13C <- expression(paste("Drift-corrected - raw ", Delta^{13},"C (\u2030)"))
plot(data$Ps, data$d13Cdc-data$d13CR, ylab=d13C, xlab=xtext, main="Effect of drift correction vs position")
plot(data$Ps, data$d15Ndc-data$d15NR, ylab=d15N, xlab=xtext, main="Effect of drift correction vs position")
dev.copy2pdf(file="plot3.pdf", encoding="WinAnsi")
####################################################################
# What effect did normalization have? #
####################################################################
### For carbon
# Calibration points: measured (drift-corrected) RM means against their
# certified values; the red line is the implied normalization mapping.
measuredC <- c(mean(RM1$d13Cdc), mean(RM2$d13Cdc))
trueC <- c(RM1T.C, RM2T.C)
trueCsd <- c(RM1Tsd.C, RM2Tsd.C)
par(mfrow=c(1,1))
plot(measuredC, trueC, type="n", ylab=expression(paste("True ", delta^{13},"C (\u2030)")),
xlab=expression(paste("Measured ", delta^{13},"C (\u2030)")),
ylim=c(min(trueC-1), max(trueC+1)),
xlim=c(min(measuredC-1), max(measuredC+1)))
# Error bars: horizontal = measured sd, vertical = literature sd.
measuredCsd <- c(sd(RM1$d13Cdc), sd(RM2$d13Cdc))
arrows(measuredC-measuredCsd, trueC, measuredC+measuredCsd, trueC, angle=90, code=3, length=0.01)
arrows(measuredC, trueC-trueCsd, measuredC, trueC+trueCsd, angle=90, code=3, length=0.01)
text(measuredC, trueC, c(paste(RM1.name), paste(RM2.name)), cex=0.6, adj=c(0,1))
# Slope and intercept of the two-point normalization line.
slope <- abs(trueC[1]-trueC[2])/abs(measuredC[1]-measuredC[2])
intercept <- RM1T.C-(mean(RM1$d13Cdc)*slope)
abline(a=intercept, b=slope, col="red")
legend("topleft", paste("slope =", round(slope,4), ", intercept =", round(intercept,4)), col="red", lty=1)
par(new=T)
# Overlay the samples (measured vs normalized) on the same axes.
plot(samples$d13Cdc, samples$normd13C, axes=F, ylab="", xlab="", ylim=c(min(trueC-1), max(trueC+1)),
xlim=c(min(measuredC-1), max(measuredC+1)))
mtext("Measured versus True (normalized) d13C", line=3)
mtext("How much effect does the normalization regression have? How different is the slope from 1,", line=2, cex=0.7)
mtext("how different is the intercept from zero? Are the error bars for the two reference materials as small", line=1, cex=0.7)
mtext("as they should be? Do your samples lie on the regression line between the two reference materials?", line=0, cex=0.7)
dev.copy2pdf(file="plot4.pdf", encoding="WinAnsi")
### For nitrogen
# Same diagnostic, repeated for d15N.
measuredN <- c(mean(RM1$d15Ndc), mean(RM2$d15Ndc))
trueN <- c(RM1T.N, RM2T.N)
trueNsd <- c(RM1Tsd.N, RM2Tsd.N)
par(mfrow=c(1,1))
plot(measuredN, trueN, type="n", ylab=expression(paste("True ", delta^{15},"N (\u2030)")),
xlab=expression(paste("Measured ", delta^{15},"N (\u2030)")),
ylim=c(min(trueN-1), max(trueN+1)),
xlim=c(min(measuredN-1), max(measuredN+1)))
measuredNsd <- c(sd(RM1$d15Ndc), sd(RM2$d15Ndc))
arrows(measuredN-measuredNsd, trueN, measuredN+measuredNsd, trueN, angle=90, code=3, length=0.01)
arrows(measuredN, trueN-trueNsd, measuredN, trueN+trueNsd, angle=90, code=3, length=0.01)
text(measuredN, trueN, c(paste(RM1.name), paste(RM2.name)), cex=0.6, adj=c(0,1))
slope <- abs(trueN[1]-trueN[2])/abs(measuredN[1]-measuredN[2])
intercept <- RM1T.N-(mean(RM1$d15Ndc)*slope)
abline(a=intercept, b=slope, col="red")
legend("topleft", paste("slope =", round(slope,4), ", intercept =", round(intercept,4)), col="red", lty=1)
par(new=T)
plot(samples$d15Ndc, samples$normd15N, axes=F, ylab="", xlab="", ylim=c(min(trueN-1), max(trueN+1)),
xlim=c(min(measuredN-1), max(measuredN+1)))
mtext("Measured versus True (normalized) d15N", line=3)
mtext("How much effect does the normalization regression have? How different is the slope from 1,", line=2, cex=0.7)
mtext("how different is the intercept from zero? Are the error bars for the two reference materials as small", line=1, cex=0.7)
mtext("as they should be? Do your samples lie on the regression line between the two reference materials?", line=0, cex=0.7)
dev.copy2pdf(file="plot5.pdf", encoding="WinAnsi")
####################################################################
# How are the C/N ratios and mass of C and N? #
####################################################################
par(mfrow=c(2,2))
par(mar=c(4,5,3,1))
# Axis limits: always show the acceptable C/N window (2.9-3.6), widened
# only if a sample falls outside it.
CNmin <- if (min(samples$CN) < 2.9) {min(samples$CN)} else { 2.8}
CNmax <- if (max(samples$CN) > 3.6) {max(samples$CN)} else { 3.7}
d15N <- expression(paste("Normalized ", delta^{15},"N (\u2030)"))
d13C <- expression(paste("Normalized ", delta^{13},"C (\u2030)"))
plot(samples$CN, samples$normd13C, xlim=c(CNmin, CNmax), xlab="C/N ratio", ylab=d13C)
abline(v=2.9, col="red")
abline(v=3.6, col="red")
plot(samples$CN, samples$normd15N, xlim=c(CNmin, CNmax), xlab="C/N ratio", ylab=d15N)
abline(v=2.9, col="red")
abline(v=3.6, col="red")
# The last two panels plot the measured mass of C (CugR) and of N (NugR),
# not the total sample weight (Wt), so label each axis with the element
# actually shown (the original label said "Sample weight").
xtextC <- expression(paste("Weight of C (", mu,"g)"))
xtextN <- expression(paste("Weight of N (", mu,"g)"))
plot(samples$CugR, samples$normd13C, ylab=d13C, xlab=xtextC)
plot(samples$NugR, samples$normd15N, ylab=d15N, xlab=xtextN)
# NOTE(review): the mtext() calls below run after par(mfrow) has been reset
# without opening a new plot, so they annotate the margins of the last
# panel drawn -- confirm the captions land where intended.
par(mfrow=c(1,1))
mtext("Do the C/N ratios fall within a good range?", cex=0.7, line=2)
mtext("Within the good range, is there any possibility of a trend between C/N and d13C or d15N?", cex=0.7, line=1)
par(mfrow=c(2,1))
mtext("Similarly, was the weight of C and N measured high enough?", cex=0.7, line=2)
mtext("Is there any biasing effect of weight on isotopic ratio, even for the OK samples?", cex=0.7, line=1)
dev.copy2pdf(file="plot6.pdf", onefile=T, encoding="WinAnsi")
#########################################################################
#########################################################################
## Remove BAD data ##
#########################################################################
#########################################################################
# For example, but you can use your own numbers based on what the plots have shown
# Uncomment and adjust these filters after inspecting plots 1-6.
#samples <- samples[samples$CugR < 300,]
#samples <- samples[samples$NugR > 60,]
#samples <- samples[samples$CN > 2.9 & samples$CN < 3.6, ]
#########################################################################
#########################################################################
## Export the accepted samples ##
#########################################################################
#########################################################################
# Write out only the non-standard samples that survived screening.
write.csv(samples, "Samples.160812.csv")
|
04e09a02f85d0d72b809f304ec3ecd069190ee09 | 42b8649de1e49bb83cd641341f57f3a8ba89247a | /man/kml_polygons.Rd | 5406f346fe64f594093a5d0e47e6d9fb94ed817c | [] | no_license | briatte/tidykml | 8f8bbae5f10480b5435e8994568edae3287173fb | 30848d45491e416ad2bcfa0b88ac204091bece8c | refs/heads/master | 2021-01-12T04:39:33.736788 | 2017-01-01T16:33:27 | 2017-01-01T16:33:27 | 77,697,414 | 10 | 2 | null | null | null | null | UTF-8 | R | false | true | 1,923 | rd | kml_polygons.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kml_polygons.R
\name{kml_polygons}
\alias{kml_polygons}
\title{Read Polygons out of a KML file.}
\usage{
kml_polygons(x, ns = "d1", verbose = TRUE, fuse = FALSE, ...)
}
\arguments{
\item{x}{A KML source. See \link{kml_read}.}
\item{ns}{The name of the namespace to extract from: defaults to \code{"d1"}.}
\item{verbose}{Whether to report invalid coordinates and/or altitudes below
sea level; defaults to \code{TRUE}. See \link{kml_coords}.}
\item{fuse}{Whether to fuse multi-polygons into a single element; defaults
to \code{FALSE}. Experimental. Might not return nice things.}
\item{...}{Arguments passed to \link[xml2:read_xml]{read_xml}.
See \link{kml_read}.}
}
\value{
A \link[tibble:tibble]{tibble} containing the \code{folder} (layer),
\code{name}, \code{description}, \code{styleUrl} and geographic coordinates
(\code{longitude}, \code{latitude} and \code{altitude}) of the \emph{first}
Polygon contained within each Placemark element of the KML source.
Other Placemark elements will be ignored.
If there are no Polygons in the KML source, the function returns \code{NULL}.
If there are no Folders in the KML source, the \code{folder} variable will be
filled with \code{NA}.
}
\description{
Read Polygons out of a KML file.
}
\note{
The function only extracts the outer bounds of Polygon elements, and it
only extracts the \strong{first} Polygon out of each Placemark element. As a
result, multi-polygons built into <MultiGeometry> elements are \emph{not}
fully supported: only the first Polygon will be present in the results.
}
\examples{
# demo data: U.S. Civil War map
# see ?states for details
f <- system.file("extdata", "states.kml.zip", package = "tidykml")
kml_polygons(f)
}
\references{
Google Developers. KML Reference: <Polygon> Element.
\url{https://developers.google.com/kml/documentation/kmlreference#polygon}
}
|
b28f8772ba6b87590580040724650cb0e024af13 | 9aafde089eb3d8bba05aec912e61fbd9fb84bd49 | /codeml_files/newick_trees_processed/3676_1/rinput.R | bbe4fbea596b75712eccf198a0d89c03ef8616f8 | [] | no_license | DaniBoo/cyanobacteria_project | 6a816bb0ccf285842b61bfd3612c176f5877a1fb | be08ff723284b0c38f9c758d3e250c664bbfbf3b | refs/heads/master | 2021-01-25T05:28:00.686474 | 2013-03-23T15:09:39 | 2013-03-23T15:09:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 135 | r | rinput.R | library(ape)
# Read the Newick tree produced upstream, strip its root, and write the
# unrooted topology back out for downstream codeml processing.
phylo_tree <- read.tree("3676_1.txt")
unrooted_tree <- unroot(phylo_tree)
write.tree(unrooted_tree, file = "3676_1_unrooted.txt")
7829df77f71bf76091c0dc83b4a0c9a172e8e27b | ff9eb712be2af2fa24b28ecc75341b741d5e0b01 | /R/epoisSinglyCensored.half.cen.level.R | c4f0f33b26c934fb982f90535299405bea6f25f0 | [] | no_license | alexkowa/EnvStats | 715c35c196832480ee304af1034ce286e40e46c2 | 166e5445d252aa77e50b2b0316f79dee6d070d14 | refs/heads/master | 2023-06-26T19:27:24.446592 | 2023-06-14T05:48:07 | 2023-06-14T05:48:07 | 140,378,542 | 21 | 6 | null | 2023-05-10T10:27:08 | 2018-07-10T04:49:22 | R | UTF-8 | R | false | false | 829 | r | epoisSinglyCensored.half.cen.level.R | epoisSinglyCensored.half.cen.level <-
function (x, censored, censoring.side, ci, ci.method = "normal.approx",
    ci.type, conf.level, ci.sample.size = sum(!censored), pivot.statistic = c("z",
        "t"))
{
    # Half-detection-limit substitution estimator of a Poisson mean for
    # singly censored data: each censored observation is replaced by half
    # its reported value, and lambda is estimated by the sample mean.
    x[censored] <- x[censored]/2
    lambda.hat <- c(lambda = mean(x))
    # No confidence interval requested: return the point estimate alone.
    if (!ci) {
        return(list(parameters = lambda.hat))
    }
    ci.method <- match.arg(ci.method)
    pivot.statistic <- match.arg(pivot.statistic)
    # Normal-approximation CI with sd(lambda.hat) = sqrt(lambda/n), bounded
    # below at zero; the interval itself is built by ci.normal.approx().
    ci.obj <- ci.normal.approx(theta.hat = lambda.hat, sd.theta.hat = sqrt(lambda.hat/ci.sample.size),
        n = ci.sample.size, df = ci.sample.size - 1, ci.type = ci.type,
        alpha = 1 - conf.level, lb = 0, test.statistic = pivot.statistic)
    ci.obj$parameter <- "lambda"
    list(parameters = lambda.hat, ci.obj = ci.obj)
}
|
1717e1f63e9dcfa3a01c246eafb07579408ee654 | b2f61fde194bfcb362b2266da124138efd27d867 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/A1/Database/Kronegger-Pfandler-Pichler/dungeon/dungeon_i30-m15-u4-v0.pddl_planlen=21/dungeon_i30-m15-u4-v0.pddl_planlen=21.R | 2f02f8ba9434f954b48af3973cc6f893d9b14541 | [] | no_license | arey0pushpa/dcnf-autarky | e95fddba85c035e8b229f5fe9ac540b692a4d5c0 | a6c9a52236af11d7f7e165a4b25b32c538da1c98 | refs/heads/master | 2021-06-09T00:56:32.937250 | 2021-02-19T15:15:23 | 2021-02-19T15:15:23 | 136,440,042 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 91 | r | dungeon_i30-m15-u4-v0.pddl_planlen=21.R | f2d38ff3be3ebcc099ba42456d3fc43c dungeon_i30-m15-u4-v0.pddl_planlen=21.qdimacs 23070 321794 |
522545c381d7e17f8b2ae935bd36f41bf80bbe11 | b5bf502bbfe8291f3b46dcff4501e6d22c64cb6e | /R/R6_logit.R | 69ee147f5b7afdf7f9ce54bfef956f521c639f27 | [
"MIT"
] | permissive | SwReliab/msrat | f4ac1dd25a990a594d427c4138bd4fc0432f511a | e32046ff37b8595d50890ef86c36d869921df13b | refs/heads/master | 2023-06-23T11:44:26.640234 | 2021-07-26T19:27:15 | 2021-07-26T19:27:15 | 173,534,951 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 8,014 | r | R6_logit.R | #' Class for NHPP-based software reliability model with d-metrics
#'
#' @docType class
#' @name dGLM
#' @return Object of \code{\link{R6Class}} with methods for NHPP-based software reliability model with d-metrics.
#' @format \code{\link{R6Class}} object.
#' @field name A character string for the name of model.
#' @field params A numeric vector for the model parameters.
#' @field df An integer for the degrees of freedom of the model.
#' @field data Data to esimate parameters.
#'
#' @section Methods:
#' \describe{
#' \item{\code{print()}}{This method prints model parameters.}
#' \item{\code{omega()}}{This method returns the number of total faults.}
#' \item{\code{coefficients()}}{This method returns a vector for the coefficients.}
#' \item{\code{mvf(t, data = NULL)}}{This method returns the mean value function at time t.
#' The d-metrics is given from \code{data}. If \code{data} is NULL, the d-metrics for the estimation is used.}
#' \item{\code{dmvf(t, data = NULL)}}{This method returns the intensity function at time t.
#' The d-metrics is given from \code{data}. If \code{data} is NULL, the d-metrics for the estimation is used.}
#' \item{\code{residual(t, data = NULL)}}{This method returns the expected residual number of faults at time t.
#' The d-metrics is given from \code{data}. If \code{data} is NULL, the d-metrics for the estimation is used.}
#' \item{\code{ffp(t, data = NULL)}}{This method returns the fault-free probability at time t.
#' The d-metrics is given from \code{data}. If \code{data} is NULL, the d-metrics for the estimation is used.}
#' \item{\code{init_params(data)}}{This method changes the model parameters based on a given data.
#' This is used to set the initial value for the fitting algorithm.}
#' \item{\code{set_params(params)}}{This method sets the model parameters.}
#' \item{\code{set_data(data)}}{This method sets data.}
#' \item{\code{em(params, data)}}{This method returns a list with an updated parameter vector (param),
#' absolute difference of parameter vector (pdiff),
#' log-likelihood function for a given parameter vector (llf),
#' the number of total faults (total) via EM algorithm for a given data.}
#' \item{\code{llf(data)}}{This method returns the log-likelihood function for a given data.}
#' }
#' @seealso \code{\link{fit.srm.logit}}
NULL
#' @rdname dGLM
# Base R6 class for NHPP-based software reliability models with d-metrics.
# params[1] is omega (expected total number of faults); the remaining
# entries are regression coefficients for the d-metrics. Concrete
# subclasses fix the binomial link name in private$linkfun.
dGLM <- R6::R6Class("dGLM",
private = list(
linkfun = NA
),
public = list(
name = NA,
params = NA,
df = NA,
data = NA,
# Print the fitting data, the link function and the current parameters.
print = function(digits = max(3, getOption("digits") - 3), ...) {
print(self$data)
cat(gettextf("\nLink function: %s\n\n", private$linkfun))
print.default(format(self$params, digits = digits), print.gap = 2, quote = FALSE)
},
# Accessors: omega is params[1], the coefficients are the rest.
omega = function() { self$params[1L] },
coefficients = function() { self$params[2L:length(self$params)] },
# Mean value function at time(s) t: omega * (1 - prod(1 - mu[1..t])),
# where mu[j] is the per-period detection probability obtained by
# applying the inverse link to metrics %*% coefficients + offset.
mvf = function(t, data = NULL) {
if (is.null(data)) {
metrics <- self$data$metrics
offset <- self$data$offset
} else {
metrics <- data$metrics
offset <- data$offset
}
mname <- names(self$coefficients())
if (!all(mname %in% colnames(metrics))) {
warning("colnames(metrics) do not match to names(coefficients).")
# NOTE(review): with a single coefficient this selection drops the
# matrix to a vector and colnames<- would then error -- confirm.
metrics <- metrics[,1:length(mname)]
colnames(metrics) <- mname
}
else {
metrics <- metrics[,mname]
}
family <- binomial(link = private$linkfun)
# With one metric, metrics is a vector, so scalar multiplication is
# used instead of the matrix product in the else branch.
result <- if (length(mname) == 1L) {
sapply(t, function(t0) {
eta <- metrics[1L:t0] * self$coefficients() + offset[1L:t0]
mu <- family$linkinv(eta)
self$omega() * (1 - prod(1-mu))
}
)
}
else {
sapply(t, function(t0) {
eta <- metrics[1L:t0,] %*% self$coefficients() + offset[1L:t0]
mu <- family$linkinv(eta)
self$omega() * (1 - prod(1-mu))
}
)
}
names(result) <- NULL
result
},
# Intensity function: the per-period increments of the mvf.
dmvf = function(t, data = NULL) {
result <- self$mvf(t, data)
c(result[1], diff(result))
},
# Expected number of faults remaining at time t: omega * prod(1 - mu).
residual = function(t, data = NULL) {
if (is.null(data)) {
metrics <- self$data$metrics
offset <- self$data$offset
} else {
metrics <- data$metrics
offset <- data$offset
}
mname <- names(self$coefficients())
if (!all(mname %in% colnames(metrics))) {
warning("colnames(metrics) do not match to names(coefficients).")
metrics <- metrics[,1:length(mname)]
colnames(metrics) <- mname
}
else {
metrics <- metrics[,mname]
}
family <- binomial(link = private$linkfun)
result <- if (length(mname) == 1L) {
sapply(t, function(t0) {
eta <- metrics[1:t0] * self$coefficients() + offset[1L:t0]
mu <- family$linkinv(eta)
self$omega() * prod(1-mu)
}
)
}
else {
sapply(t, function(t0) {
eta <- metrics[1:t0,] %*% self$coefficients() + offset[1L:t0]
mu <- family$linkinv(eta)
self$omega() * prod(1-mu)
}
)
}
names(result) <- NULL
result
},
# Fault-free probability at time t: exp(-expected residual faults).
ffp = function(t, data = NULL) { exp(-self$residual(t, data)) },
initialize = function(omega = 1, coefficients = c(1)) {
self$params <- c(omega, coefficients)
self$df <- length(self$params)
},
# Initial guess for fitting: omega = observed total faults + 1, and all
# coefficients zero (numeric() initializes to 0).
init_params = function(data) {
self$params <- numeric(1L + data$nmetrics)
self$params[1] <- data$total + 1.0
self$df <- length(self$params)
},
set_params = function(params) { self$params <- params },
set_data = function(data) { self$data <- data },
# One EM step: the E-step adds the expected residual faults to the
# observed total; the M-step refits the binomial GLM with glm.fit.
# Returns the updated parameters, the absolute parameter change, the
# log-likelihood at the *input* parameters, and the estimated total.
em = function(params, data, ...) {
omega <- params[1]
coefficients <- params[2L:length(params)]
family <- binomial(link = private$linkfun)
eta <- data$metrics %*% coefficients + data$offset
mu <- family$linkinv(eta)
residual <- omega * prod(1-mu)
total <- sum(data$fault) + residual
rfault <- total - cumsum(data$fault)
# glm.fit warns on the non-integer pseudo-counts built above;
# silence warnings for the fit and restore the user's setting after.
wopt <- getOption("warn")
options(warn = -1)
result <- glm.fit(data$metrics, cbind(data$fault, rfault),
family=binomial(link=private$linkfun), offset=data$offset, ...)
options(warn = wopt)
newparams <- c(total, result$coefficients)
names(newparams) <- c("omega", names(result$coefficients))
pdiff <- abs(params - newparams)
llf <- self$llf(data, omega=omega, mu=mu)
list(param=newparams, pdiff=pdiff, llf=llf, total=total)
},
# Log-likelihood. fault/omega/mu default to the supplied data and the
# current model parameters. Zero-count terms are skipped so that
# log(0) never enters the sums.
llf = function(data, fault, omega, mu) {
if (missing(omega)) {
omega <- self$omega()
}
if (missing(mu)) {
family <- binomial(link = private$linkfun)
eta <- data$metrics %*% self$coefficients() + data$offset
mu <- family$linkinv(eta)
}
if (missing(fault)) {
fault <- data$fault
}
nonzeron <- fault != 0
rfault <- sum(fault) - cumsum(fault)
nonzeror <- rfault != 0
sum((fault * log(mu))[nonzeron]) + sum((rfault * log(1-mu))[nonzeror]) -
sum(lgamma(fault+1)) + sum(fault) * log(omega) - omega * (1 - prod(1-mu))
},
# Absolute error, relative error and signed difference of the
# log-likelihood between two successive fitting results.
comp_error = function(res0, res1) {
sdiff <- res1$llf - res0$llf
aerror <- abs(res1$llf - res0$llf)
rerror <- aerror / abs(res0$llf)
c(aerror, rerror, sdiff)
}
)
)
# Concrete dGLM subclass using the logit link (logistic detection model).
#' @rdname dGLM
#' @export
dGLM.logit <- R6::R6Class(
  "dGLM.logit",
  inherit = dGLM,
  public = list(name = "dGLM.logit"),
  private = list(linkfun = "logit")
)
# Concrete dGLM subclass using the probit link.
#' @rdname dGLM
#' @export
dGLM.probit <- R6::R6Class(
  "dGLM.probit",
  inherit = dGLM,
  public = list(name = "dGLM.probit"),
  private = list(linkfun = "probit")
)
# dGLM subclass that fixes the binomial link function to "cloglog"
# (complementary log-log); all model fitting behavior is inherited from the
# dGLM base class.
#' @rdname dGLM
#' @export
dGLM.cloglog <- R6::R6Class("dGLM.cloglog",
  inherit = dGLM,
  private = list(
    linkfun = "cloglog"
  ),
  public = list(
    name = "dGLM.cloglog"
  )
)
|
a505b0e7363575d7eb8ca3abec48075b5f2f651c | fe6ccdbfc41f2a820d0f911289e1ff5d15472e7d | /man/BatchGetValuesByDataFilterRequest.Rd | adefaa31863e118fa2bb7d79481a60e5ee2e0205 | [] | no_license | bradgwest/googleSheetsR | b5b7cd4c3a34fceeecb25894398fe87148f87cea | 48de991ee571f11b02f2254d3456551bfdbcd76f | refs/heads/master | 2020-03-27T11:21:13.423181 | 2018-10-12T19:52:59 | 2018-10-12T19:52:59 | 146,481,101 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,048 | rd | BatchGetValuesByDataFilterRequest.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sheets_objects.R
\name{BatchGetValuesByDataFilterRequest}
\alias{BatchGetValuesByDataFilterRequest}
\title{BatchGetValuesByDataFilterRequest Object}
\usage{
BatchGetValuesByDataFilterRequest(valueRenderOption = NULL,
dateTimeRenderOption = NULL, majorDimension = NULL,
dataFilters = NULL)
}
\arguments{
\item{valueRenderOption}{How values should be represented in the output}
\item{dateTimeRenderOption}{How dates, times, and durations should be represented in the output}
\item{majorDimension}{The major dimension that results should use}
\item{dataFilters}{The data filters used to match the ranges of values to retrieve}
}
\value{
BatchGetValuesByDataFilterRequest object
}
\description{
BatchGetValuesByDataFilterRequest Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
The request for retrieving a range of values in a spreadsheet selected by aset of DataFilters.
}
\concept{BatchGetValuesByDataFilterRequest functions}
|
3902496d19a5029bede0d36dad69cd979ad3a5a8 | 3593c9cb4b361d70714eadc90315f8f075cfa812 | /man/TF_michalewicz.Rd | 80e7069018eaf4e1ceef16fc6a3891edb308841a | [] | no_license | cran/TestFunctions | a1cc4208f1001aa8dba3d3fb90c72020239a4de4 | d229e3d5e6e8eda400c11bc1c253151d91797679 | refs/heads/master | 2021-01-18T19:51:20.295226 | 2017-05-09T20:25:06 | 2017-05-09T20:25:06 | 69,298,855 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 569 | rd | TF_michalewicz.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions1.R
\name{TF_michalewicz}
\alias{TF_michalewicz}
\title{TF_michalewicz: Michalewicz function for evaluating a single point.}
\usage{
TF_michalewicz(x, m = 10)
}
\arguments{
\item{x}{Input vector at which to evaluate.}
\item{m}{Parameter for the michalewicz function}
}
\value{
Function output evaluated at x.
}
\description{
TF_michalewicz: Michalewicz function for evaluating a single point.
}
\examples{
TF_michalewicz(rep(0,2))
TF_michalewicz(rep(1,2))
}
|
750f32328299935742d485288368f71a62d6a294 | f24c2dc660f68e12fab7c95dce10d8ee70c5dc48 | /lib/libraries.r | 2f14a4a603dba82d2dde54c0e31aedbdd17dd0f6 | [
"MIT"
] | permissive | jaanos/APPR-2015-16 | 519923ffdf9e952f075040520533025703d93dad | ce05236134240946b89bcec641fed7675d41be2e | refs/heads/master | 2020-06-05T13:04:24.490912 | 2015-12-02T18:16:37 | 2015-12-02T18:16:37 | 39,377,827 | 1 | 43 | null | 2015-12-22T22:48:03 | 2015-07-20T10:31:47 | R | UTF-8 | R | false | false | 205 | r | libraries.r | library(knitr)
# Import helper functions for working with XML files.
source("lib/xml.r", encoding = "UTF-8")
# Import helper functions for downloading and importing the map.
source("lib/uvozi.zemljevid.r", encoding = "UTF-8")
03371d557c5a7df66f985acacbf43be43cf849fd | cd19072e9ddc5cd42ea6647a6aea156db8705406 | /afl_premier_gdp_plots.R | c38aa541fdce1c7a9eb8508669b7901078dd9c20 | [] | no_license | zerogetsamgow/afl_economic_stimulus | 0b6c40b6f27f8c782532ddf01e3b26791b0f3df0 | 9b58d383cb2e01b4a01e5b9d28d5303926413a4c | refs/heads/master | 2021-09-22T22:43:14.269814 | 2018-09-18T11:25:49 | 2018-09-18T11:25:49 | 103,822,985 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,346 | r | afl_premier_gdp_plots.R | library(magrittr)
library(dplyr)
library(ggplot2)
## Get grand final data and gdp growth data
## source('~/afl_economic_stimulus/get_grand_finals.R')
## source('~/afl_economic_stimulus/get_gdp_data.R')
## Set treasury colours
source('~/afl_economic_stimulus/set_treasury_colours.R')
## Join selected variables from AFL data frame with GDP data frame and create summary stats
premier_gdp_df <- grand_finals_df %>%
select(premier, season, preliminary_finalist) %>%
filter(!season==2017) %>%
inner_join(., abs_gdp_df, by=c("season"="season")) %>% ##use inner join as we add in teams without premierships since 1960 below
group_by(premier) %>%
## mutate rather than summarise so we can use original variable in plots
dplyr::mutate(mean_gdp_cvm_sa_growth=mean(gdp_cvm_sa_growth),
max_gdp_cvm_sa_growth=max(gdp_cvm_sa_growth),
min_gdp_cvm_sa_growth=min(gdp_cvm_sa_growth),
premierships=n(),
premiership_years=list(season)) %>%
ungroup()
## Coerce means for teams without premierships to minus five to put at bottom of order and out of chart limits
non_premiers <- data.frame(premier=c("Greater Western Sydney", "Fremantle", "Gold Coast Suns"),
season=c(rep(2018,3)),
gdp_cvm_sa_growth=c(rep(-5,3)), ## these numbers ensure points are outside chart range
mean_gdp_cvm_sa_growth=c(rep(-5,3)),
preliminary_finalist=c(FALSE,FALSE,FALSE)) %>%
mutate(premier=factor(premier,levels=all_teams))
## Add non-premiers and Reorder data frame by mean gdp then preliminary_finalist
premier_gdp_df <- premier_gdp_df %>%
full_join(., non_premiers, by=c("premier"="premier","season"="season","gdp_cvm_sa_growth"="gdp_cvm_sa_growth","mean_gdp_cvm_sa_growth"="mean_gdp_cvm_sa_growth","preliminary_finalist"="preliminary_finalist")) %>%
dplyr::mutate(premier=reorder(reorder(premier, mean_gdp_cvm_sa_growth), preliminary_finalist))
## Define a theme for the chart
theme_premier_gdp <- theme(text = element_text(size=12),
plot.title = element_text(size=12),
plot.subtitle = element_text(size=10),
axis.title.y = element_blank(),
axis.title.x = element_text(size=10, vjust = -1),
axis.line.y = element_blank(),
axis.ticks.y = element_blank(),
legend.position = "bottom",
panel.background = element_blank(),
panel.grid.major.x = element_line())
## Plot a chart of teams versus gdp growth
gdp_by_team_point <- premier_gdp_df %>%
ggplot(aes(y=premier, x=gdp_cvm_sa_growth)) +
geom_point(aes(colour="A", fill="A"), size=4) +
geom_point(aes(x=mean_gdp_cvm_sa_growth, colour="B", fill="B"), size=4) +
## include next row for crude labels.
## geom_text(size=2.5, position="dodge",check_overlap=TRUE, angle=90, colour="black", hjust=1.8, aes(label=substr(season,3,4)))+
scale_colour_manual(values=c("A"=corp_blue_lt, "B"=corp_green_lt), labels=c("Individual premiership years","Mean growth")) +
scale_fill_manual(values=c("A"=corp_blue_lt, "B"=corp_green_lt), guide=FALSE) +
scale_y_discrete(drop=FALSE)+
scale_x_continuous(expand=c(0,0), limits=c(-3,10), breaks=c(-2.5,0,2.5,5,7.5)) +
labs(title="AFL Premiers and economic growth, 1960 to 2016",
subtitle="The Australian economy, on average, has grown faster after Richmond premierships\nthan following premierships of any other 2018 preliminary finalist.",
x="Growth in GDP (chain volume measure), per cent",
colour="") +
theme_premier_gdp
## Save the chart
ggsave("gdp_by_team_point.png",plot=gdp_by_team_point, path="./afl_economic_stimulus/", units = "mm", width = 180, height = 170)
## Create point range chart with ggplot
gdp_by_team_pointrange <- premier_gdp_df %>%
ggplot(aes(x=premier, group=premier, y=gdp_cvm_sa_growth, ymin=min_gdp_cvm_sa_growth, ymax=max_gdp_cvm_sa_growth, colour=preliminary_finalist)) +
geom_pointrange(size=4, fatten=.7) +
coord_flip() +
scale_colour_manual(values=tsy_corp_pal_lt, labels=c("Individual premiership years","Mean growth"), guide=FALSE) +
scale_fill_manual(values=tsy_corp_pal_lt, guide=FALSE) +
geom_point(aes(y=mean_gdp_cvm_sa_growth) , colour=corp_orange_lt, size=3) +
scale_y_continuous(expand=c(0,0), limits=c(-3,10), breaks=c(-2.5,0,2.5,5,7.5)) +
labs(title="AFL Premiers and economic growth, 1960 to 2016",
subtitle="The Australian economy, on average, has grown faster after Adelaide premierships\nthan following premierships of any other 2018 preliminary finalist.",
y="Growth in GDP (chain volume measure), per cent",
colour="") +
theme_premier_gdp
## Save the chart
ggsave("gdp_by_team_pointrange.png",plot=gdp_by_team_pointrange, path="./afl_economic_stimulus/", units = "mm", width = 180, height = 170)
|
f6d813e41c3bd297583503ee5932fcbef96b3f89 | 760c13c8f8b0726f18cc3a0c6966a207773d6dea | /Developing Data Products/census-app/server.R | 79cfc1589394605c8cf44bd879dcd37377b80999 | [] | no_license | hijiangtao/R-Program-Series | 21293f3868e3dbd2e67d37b74f2183d1c7577e10 | c24d5c72a7e8c99f7f0167877c5fe3caa321ca83 | refs/heads/master | 2016-09-16T10:53:31.431485 | 2014-11-23T02:54:03 | 2014-11-23T02:54:03 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 576 | r | server.R | source("helpers.R")
# Preprocessed county-level data with one demographic share column per group
# (white/black/hispanic/asian), consumed by the switch() below.
counties <- readRDS("data/counties.rds")
# Map-drawing dependencies used when rendering the choropleth.
library(maps)
library(mapproj)
# Shiny server: renders a county map for the demographic variable chosen in
# the UI (input$var), shaded between the slider bounds (input$range).
shinyServer(
  function(input,output) {
    output$map <- renderPlot({
      # Pick the data column matching the selected variable name.
      data<-switch(input$var,
                   "Percent White"=counties$white,
                   "Percent Black"=counties$black,
                   "Percent Hispanic"=counties$hispanic,
                   "Percent Asian"=counties$asian)
      # percent_map() is defined in helpers.R (sourced at the top of this
      # file, outside this span).
      percent_map(var = data, color = "darkgreen", legend.title = paste("% ", input$var), max = input$range[2], min = input$range[1])
    })
  }
)
8c7f8dd220dd5d96e21b08e5eb19229df951a5d4 | af1e02963f52a039fe6d4fa7d89936c40f84059f | /man/RevNetJacobian.Rd | 98e07fbb5f830c43f2fa4688c88a6c8994d1c558 | [] | no_license | cran/MetStaT | 0007fdeb38c91385ba59ec199c1a4bfc68f7bfcf | a2a631cfab82927ba4dad74ae1391bf3d17b9872 | refs/heads/master | 2020-04-02T15:00:32.094244 | 2012-12-10T00:00:00 | 2012-12-10T00:00:00 | 17,680,903 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 2,011 | rd | RevNetJacobian.Rd | \name{RevNetJacobian}
\alias{RevNetJacobian}
\docType{data}
\title{
Example data for RevNetJacobianMethod
}
\description{
The data provide an example for the Jacobian method to reverse engineer a metabolomic network. The matrix dataset contains measured metabolite concentrations at different times, the list labels the names of the metabolites, \code{delta.t} is the time between measurements.
}
\usage{data(RevNetJacobian)}
\format{
The format is:\cr
RevNetJacobian, numerical matrix with dim(dataset) = 51 4 4.\cr
first dimension: time \cr
second dimension: metabolites\cr
third dimension: experiments\cr
SteadyState, numerical matrix with steady state concentrations\cr
}
\details{
The dataset contains 4 experiments with the following initial conditions:\cr
experiment 1, metabolite 1 is 2\% increased from steady state\cr
experiment 2, metabolite 2 is 2\% increased from steady state\cr
experiment 3, metabolite 3 is 2\% increased from steady state\cr
experiment 4, metabolite 4 is 2\% increased from steady state\cr
The time between measurements, \code{delta.t}, is used to calculate the Jacobian matrix. It is assumed that these times between measurements are all the same, with value \code{delta.t}. If not, interpolation can be used to obtain metabolite concentrations at regular time intervals \code{delta.t}.\cr
Running the example produces the vertex-edge matrix:
1 1 0 0 \cr
1 1 1 0 \cr
0 1 1 0 \cr
0 0 1 1 \cr
}
\source{
Chassagnole, C., D. A. Fell, et al. (2001). "Control of the threonine-synthesis pathway in Escherichia coli: a theoretical and experimental approach." Biochemical Journal 356: 433-444.
}
\references{
Reverse engineering of metabolic networks, a critical assessment. Diana M. Hendrickx, Margriet M. W. B. Hendriks, Paul H. C. Eilers, Age K. Smilde and Huub C. J. Hoefsloot. Mol. BioSyst, Volume 7:2 (2011) pages 511-520
}
\keyword{datasets}
\keyword{RevNetJacobianMethod}
\keyword{SteadyState}
|
53c267b44a5451c4370324ca7d02cc6c227c719d | 87fc6ee5e3e748738162c5ff76ed5ebf34ec9ddc | /R/color2D.matplot.R | d8a59d505ffd5f9c948d60c3d5dcdf4619a6058b | [] | no_license | plotrix/plotrix | c7c4733a74f0fb1f9e0777c2cd6e665a90d3cb81 | b9a7e3ac79c77c0cab536429168ab347b924b047 | refs/heads/master | 2020-07-23T21:18:11.125733 | 2020-01-21T06:57:45 | 2020-01-21T06:57:45 | 207,708,799 | 6 | 1 | null | null | null | null | UTF-8 | R | false | false | 4,363 | r | color2D.matplot.R | hexagon<-function(x,y,unitcell=1,col=NA,border="black") {
polygon(c(x,x,x+unitcell/2,x+unitcell,x+unitcell,x+unitcell/2),
c(y+unitcell*0.125,y+unitcell*0.875,y+unitcell*1.125,y+unitcell*0.875,
y+unitcell*0.125,y-unitcell*0.125),col=col,border=border)
}
fill.corner<-function(x,nrow,ncol,na.value=NA) {
xlen<-length(x)
ncells<-ifelse(nrow*ncol < xlen,nrow*ncol,xlen)
newmat<-matrix(na.value,nrow=nrow,ncol=ncol)
xside<-1
while(xside*xside < ncells) xside<-xside+1
row=1
col=1
for(xindex in 1:ncells) {
newmat[row,col]<-x[xindex]
if(row == xside) {
col<-col+1
row<-1
}
else row<-row+1
}
return(newmat)
}
color2D.matplot<-function(x,cs1=c(0,1),cs2=c(0,1),cs3=c(0,1),
extremes=NA,cellcolors=NA,show.legend=FALSE,nslices=10,xlab="Column",
ylab="Row",do.hex=FALSE,axes=TRUE,show.values=FALSE,vcol=NA,vcex=1,
border="black",na.color=NA,xrange=NULL,color.spec="rgb",yrev=TRUE,
xat=NULL,yat=NULL,Hinton=FALSE,...) {
if(diff(range(x,na.rm=TRUE)) == 0) {
if(Hinton) stop("No differences to display in Hinton plot.")
x<-x/max(x,na.rm=TRUE)
}
if(is.matrix(x) || is.data.frame(x)) {
xdim<-dim(x)
if(is.data.frame(x)) x<-unlist(x)
else x<-as.vector(x)
oldpar<-par("xaxs","yaxs","xpd","mar")
par(xaxs="i",yaxs="i")
if(do.hex) par(mar=c(5,4,4,4))
plot(c(0,xdim[2]),c(0,xdim[1]),xlab=xlab,ylab=ylab,type="n",axes=FALSE,...)
oldpar$usr<-par("usr")
if(!do.hex) {
box()
pos<-0
}
else pos<- -0.3
if(axes) {
if(is.null(xat)) xat<-pretty(0:xdim[2])[-1]
axis(1,at=xat-0.5,labels=xat,pos=pos)
if(is.null(yat)) yat<-pretty(0:xdim[1])[-1]
axis(2,at=xdim[1]-yat+0.5,labels=yat)
}
if(all(is.na(cellcolors))) {
if(Hinton) {
if(is.na(extremes[1])) extremes<-c("black","white")
cellcolors<-extremes[(x > 0) + 1]
}
else cellcolors<-color.scale(x,cs1,cs2,cs3,extremes=extremes,
na.color=na.color,color.spec=color.spec)
}
# this sets the color for overprinted text to black or white
# depending upon what color will be the background for the text
if(is.na(vcol))
vcol<-ifelse(colSums(col2rgb(cellcolors)*c(1,1.4,0.6))<350,"white","black")
# if it's a Hinton diagram,cellsize = x, rescaling to 0.1,1 if necessary
if(Hinton) {
if(any(x < 0 | x > 1))
cellsize<-matrix(rescale(abs(x),c(0.03,1)),nrow=xdim[1])
}
else cellsize<-matrix(1,nrow=xdim[1],ncol=xdim[2])
# start from the top left - isomorphic with the matrix layout
if(do.hex) {
par(xpd=TRUE)
offset<-0
if(length(border) < xdim[1]*xdim[2])
border<-rep(border,length.out=xdim[1]*xdim[2])
for(row in 1:xdim[1]) {
for(column in 0:(xdim[2]-1)) {
hexagon(column+offset,xdim[1]-row,unitcell=cellsize[row,column+1],
col=cellcolors[row+xdim[1]*column],
border=border[row+xdim[1]*column])
if(show.values)
text(column+offset+0.5,xdim[1]-row+0.5,x[row+column*xdim[1]],
col=vcol[row+xdim[1]*column],cex=vcex)
}
offset<-ifelse(offset,0,0.5)
}
par(xpd=FALSE)
}
else {
if(Hinton) inset<-(1-cellsize)/2
else inset<-0
if(yrev) {
y0<-rep(seq(xdim[1]-1,0,by=-1),xdim[2])+inset
y1<-rep(seq(xdim[1],1,by=-1),xdim[2])-inset
}
else {
y0<-rep(0:(xdim[1]-1),xdim[2])+inset
y1<-rep(1:xdim[1],xdim[2])-inset
}
rect(sort(rep((1:xdim[2])-1,xdim[1]))+inset,y0,
sort(rep(1:xdim[2],xdim[1]))-inset,y1,
col=cellcolors,border=border)
if(show.values) {
if(yrev) texty<-rep(seq(xdim[1]-0.5,0,by=-1),xdim[2])
else texty<-rep(seq(0.5,xdim[1]-0.5,by=1),xdim[2])
text(sort(rep((1:xdim[2])-0.5,xdim[1])), texty,
formatC(round(x,show.values),format="f",digits=show.values),
col=vcol,cex=vcex)
}
}
naxs<-which(is.na(x))
xy<-par("usr")
plot.din<-par("din")
plot.pin<-par("pin")
bottom.gap<-(xy[3]-xy[4])*(plot.din[2]-plot.pin[2])/(2*plot.pin[2])
grx1<-xy[1]
gry1<-bottom.gap*0.95
grx2<-xy[1]+(xy[2]-xy[1])/4
gry2<-bottom.gap*0.8
if(length(cellcolors) > 1) {
colmat<-col2rgb(c(cellcolors[which.min(x)],cellcolors[which.max(x)]))
cs1<-colmat[1,]/255
cs2<-colmat[2,]/255
cs3<-colmat[3,]/255
color.spec<-"rgb"
}
rect.col<-color.scale(1:nslices,cs1,cs2,cs3,color.spec=color.spec)
if(show.legend)
color.legend(grx1,gry1,grx2,gry2,round(range(x,na.rm=TRUE),show.legend),
rect.col=rect.col)
par(oldpar)
}
else cat("x must be a data frame or matrix\n")
}
|
b20192990fbf596fe53460d5d4dd0e556548c9c8 | 20e5ebca7f72ca2bde9f8e8e74459a2733612d9d | /day6/centralLimit2.sh | 82a03faa23f040ebc71c42a936d91320b1135ea0 | [] | no_license | jevharr/hacker_stats | 4781c6d56823d005e013c496275c340efadf76f8 | c9fd676d5c8a593aa5281bcf4398922c563eb7a1 | refs/heads/master | 2020-03-09T00:20:51.815686 | 2018-04-09T22:37:33 | 2018-04-09T22:37:33 | 128,485,797 | 0 | 0 | null | 2018-04-09T22:37:34 | 2018-04-07T01:19:49 | R | UTF-8 | R | false | false | 478 | sh | centralLimit2.sh | #!/usr/bin/Rscript
# Read four lines from stdin: a threshold total, the sample size n, the
# per-draw mean mu and the per-draw standard deviation sigma.
input <- file('stdin', 'r')
row1 <- readLines(input, n=1)
row2 <- readLines(input, n=1)
row3 <- readLines(input, n=1)
row4 <- readLines(input, n=1)
# Hard-coded sample inputs kept (commented out) for offline testing:
# row1 <- "250"
# row2 <- "100"
# row3 <- "2.4"
# row4 <- "2.0"
row1 <- as.numeric(row1)
row2 <- as.numeric(row2)
row3 <- as.numeric(row3)
row4 <- as.numeric(row4)
tix <- row1[1]
n <- row2
mu <- row3
sigma <- row4
# Central limit theorem: the sum of n iid draws is approximately
# Normal(n*mu, sqrt(n)*sigma); report P(sum <= tix) to 4 decimal places.
p <- pnorm(tix, mean = n*mu, sd = sqrt(n)*sigma, lower.tail = T)
write(sprintf("%.4f", p), "")
|
bb680fbf13b92135cd55279e62c0f77ba3e66661 | bbbf7c48b42ffb5ad4ce8cb1c4c10990177055ec | /数据科学/apriori.R | 3be0ee0af54d6df133558ab4081536a4c6c3925b | [] | no_license | andygoo/note | 04ebe8f6e3f70259cf45c14584ddbbc4053d5b1f | a655a728e3e794e79e5cb8d1479d31ae0e348e42 | refs/heads/master | 2020-04-05T14:08:05.312054 | 2016-09-01T07:18:32 | 2016-09-01T07:18:32 | 46,042,291 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 550 | r | apriori.R | library(arules)
# Build the example binary data set: 5 items (rows) x 3 attributes (columns).
dataSet <- matrix(0, 5, 3)
rownames(dataSet) <- paste("item", 1:5, sep='')
colnames(dataSet) <- c("A", "B", "C")
dataSet[1,] <- c(1, 1, 0)
dataSet[2,] <- c(1, 0, 1)
dataSet[3,] <- c(1, 0, 1)
dataSet[4,] <- c(1, 1, 1)
dataSet[5,] <- c(0, 1, 1)
dataSet
# Convert to the arules "transactions" class (see ?apriori for the expected
# input format). BUG FIX: this line was commented out in the original, so
# `dataSet_class` was undefined and the apriori() call below failed when the
# script was run top to bottom.
dataSet_class <- as(dataSet,"transactions")
# Mine association rules meeting the minimum support/confidence thresholds.
rules<-apriori(dataSet_class,parameter=list(supp=0.5,conf=0.6,target="rules"))
# Summarize the mined rule set.
summary(rules)
# Display the individual association rules.
inspect(rules)
9216519a139067e1d2d3908c07b8ede56ab129a7 | 2e3f446e2603f6e09d57e744cb40c7aa513bbe02 | /Aircraft Delay Prediction/2_HourlyPrecipitation/Test/hpd03Test.R | f9a6e43b2d61a84f250bc92b4fb9cd02e1292373 | [] | no_license | kunalrayscorpio/DSProjects | 653f6429a556467cd043a2ff59e306cfeda5404f | 389c678afe16e066228545f5f07a2491db1bdceb | refs/heads/master | 2020-05-09T22:08:46.667037 | 2019-06-10T10:11:35 | 2019-06-10T10:11:35 | 181,461,609 | 0 | 0 | null | 2019-04-23T13:03:04 | 2019-04-15T10:09:08 | null | UTF-8 | R | false | false | 4,696 | r | hpd03Test.R | rm(list=ls())
library(lubridate)
library(sqldf)
library(DMwR)
hpd200503<- read.table("200503hpd.txt", sep=",",header = T, dec = ".") # Read 03 File
# Changing Format as applicable
hpd200503$YearMonthDay<-ymd(hpd200503$YearMonthDay)
hpd200503$YearMonthDay<-as.factor(hpd200503$YearMonthDay)
hpd200503$WeatherStationID<-as.factor(hpd200503$WeatherStationID)
# Deriving time slots in weather data
hpd200503$TimeSlot<-ifelse(hpd200503$Time<200,'Midnight to 2AM',ifelse(hpd200503$Time<400,'2AM to 4AM',
ifelse(hpd200503$Time<600,'4AM to 6AM',
ifelse(hpd200503$Time<800,'6AM to 8AM',
ifelse(hpd200503$Time<1000,'8AM to 10AM',
ifelse(hpd200503$Time<1200,'10AM to Noon',
ifelse(hpd200503$Time<1400,'Noon to 2PM',
ifelse(hpd200503$Time<1600,'2PM to 4PM',
ifelse(hpd200503$Time<1800,'4PM to 6PM',
ifelse(hpd200503$Time<2000,'6PM to 8PM',
ifelse(hpd200503$Time<2200,'8PM to 10PM','10PM to Midnight')))))))))))
hpd200503$Time<-NULL # Dropping time column
# Aggregating Hourly Precipitation by Station, Date & Slot
hpd200503<-sqldf('select distinct a.WeatherStationID, a.YearMonthDay, a.TimeSlot, avg(a.HourlyPrecip) as AvgPrecip
from hpd200503 a group by a.WeatherStationID, a.YearMonthDay, a.TimeSlot')
# Merging with close station data
closestation <- readRDS("closestation.rds")
hpd0503<-merge(hpd200503,closestation,by.x="WeatherStationID",by.y="WeatherStationID")
# Creating Keys for future merging
hpd0503$Key0<-paste(hpd0503$WeatherStationID,hpd0503$YearMonthDay,hpd0503$TimeSlot)
hpd0503$Key1<-paste(hpd0503$ClosestWS,hpd0503$YearMonthDay,hpd0503$TimeSlot)
hpd0503$Key2<-paste(hpd0503$Closest2ndWS,hpd0503$YearMonthDay,hpd0503$TimeSlot)
hpd0503$Key3<-paste(hpd0503$Closest3rdWS,hpd0503$YearMonthDay,hpd0503$TimeSlot)
hpd0503$Key4<-paste(hpd0503$Closest4thWS,hpd0503$YearMonthDay,hpd0503$TimeSlot)
hpd0503$Key5<-paste(hpd0503$Closest5thWS,hpd0503$YearMonthDay,hpd0503$TimeSlot)
# Merging with Closest Weather Stations
rm(hpd200503) # Free up memory
temp<-hpd0503
names(hpd0503)[names(hpd0503) == "AvgPrecip"] = "OrigPrecip"
hpd0503<-sqldf('select a.*, b.AvgPrecip from hpd0503 a left join (select Key0, AvgPrecip from temp) b
on a.Key1=b.Key0')
names(hpd0503)[names(hpd0503) == "AvgPrecip"] = "ClosestPrecip"
gc()
hpd0503<-sqldf('select a.*, b.AvgPrecip from hpd0503 a left join (select Key0, AvgPrecip from temp) b
on a.Key2=b.Key0')
names(hpd0503)[names(hpd0503) == "AvgPrecip"] = "Closest2ndPrecip"
gc()
hpd0503<-sqldf('select a.*, b.AvgPrecip from hpd0503 a left join (select Key0, AvgPrecip from temp) b
on a.Key3=b.Key0')
names(hpd0503)[names(hpd0503) == "AvgPrecip"] = "Closest3rdPrecip"
gc()
hpd0503<-sqldf('select a.*, b.AvgPrecip from hpd0503 a left join (select Key0, AvgPrecip from temp) b
on a.Key4=b.Key0')
names(hpd0503)[names(hpd0503) == "AvgPrecip"] = "Closest4thPrecip"
gc()
hpd0503<-sqldf('select a.*, b.AvgPrecip from hpd0503 a left join (select Key0, AvgPrecip from temp) b
on a.Key5=b.Key0')
names(hpd0503)[names(hpd0503) == "AvgPrecip"] = "Closest5thPrecip"
gc()
# Check for null values & impute using closest neighbours
rm(temp) # Remove temp file
rm(closestation) # Remove unrequired file
colSums(is.na(hpd0503)) # NA values in OrigPrecip column
hpd0503$OrigPrecip<-ifelse(is.na(hpd0503$OrigPrecip),rowMeans(hpd0503[,c("ClosestPrecip", "Closest2ndPrecip",
"Closest3rdPrecip","Closest4thPrecip",
"Closest5thPrecip")], na.rm=TRUE),
hpd0503$OrigPrecip)
saveRDS(hpd0503, file = "hpd0503Test.rds") # Saving externally |
188593c0b412d8a2e824b132f7ed01512aa14c7b | ed0192fb3ded1a45fa394829287b2553ee4382a1 | /scripts/evalsys_functions.R | fe2a609b4a3bc3c9f067ed8addd825063dc648d2 | [] | no_license | thedmv/uwrf_evaluation_system | d339dcac344dfd5f4fd89458d3c48f34fd9faa05 | 96b2fbb3cb9b209d21a98c28f38d554c09eda7ab | refs/heads/master | 2021-02-04T05:13:54.397509 | 2020-09-16T20:19:25 | 2020-09-16T20:19:25 | 243,622,879 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 16,353 | r | evalsys_functions.R | # This script contains the functions used for the Evaluation System forecast.
#### Functions for TS Lists ####
read.wrf_tslistTS = function(tsfilename, fcst_hr0) {
    # Read a WRF surface time-series file created by the tslist runtime
    # configuration. Only works for files with the *.TS extension.
    # First seen in `Load01-WRF_tslist.Rmd`.
    # INPUT
    # tsfilename -- string; name of the *.TS file to read
    # fcst_hr0   -- string; first forecast hour, interpreted as UTC;
    #               for example: "2018-06-30 18:00:00"
    # OUTPUT
    # tsdata -- data frame with date/time columns plus only the t, q, u, v
    #           variables (see read.wrf_tslistTSfull for all variables)
    # Read the WRF tslist file (skip the one-line file header)
    tsdata = read.table(tsfilename, skip = 1, stringsAsFactors = FALSE)
    # Add the column names for the Time series (TS) output of surface variables
    names(tsdata) = c("id", "ts_hour", "id_tsloc", "ix", "iy", "t", "q", "u", "v", "psfc", "glw", "gsw", "hfx", "lh", "tsk", "tslb_1", "rainc", "rainnc", "clw")
    # Convert the forecast hour offset to an absolute timestamp
    # (ts_hour is in hours, hence the * 3600 to get seconds)
    tsdata$Date.Time = as.POSIXct(fcst_hr0, tz = "UTC") + tsdata$ts_hour * 3600
    # dateTimeCol() is a project helper -- presumably it splits Date.Time
    # into year/mon/day/hour/min/sec columns (TODO confirm); those columns
    # are kept so the wspd/wdir function can work downstream
    tsdata = tsdata %>% dateTimeCol() %>% select(Date.Time, year:sec, t, q, u, v)
    return(tsdata)
}
read.wrf_tslistTSfull = function(tsfilename, fcst_hr0) {
    # Read a WRF surface time-series file created by the tslist runtime
    # configuration. Only works for files with the *.TS extension.
    # First seen in `Load01-WRF_tslist.Rmd`.
    #
    # Update 2020-05-07:
    # This is similar to read.wrf_tslistTS, but it outputs all the variable
    # columns (t through clw), instead of just t, q, u, and v.
    # INPUT
    # tsfilename -- string; name of the *.TS file to read
    # fcst_hr0   -- string; first forecast hour, interpreted as UTC;
    #               for example: "2018-06-30 18:00:00"
    # OUTPUT
    # tsdata -- data frame with date/time columns plus every tslist variable
    #           column from t to clw
    # Read the WRF tslist file (skip the one-line file header)
    tsdata = read.table(tsfilename, skip = 1, stringsAsFactors = FALSE)
    # Add the column names for the Time series (TS) output of surface variables
    names(tsdata) = c("id", "ts_hour", "id_tsloc", "ix", "iy", "t", "q", "u", "v", "psfc", "glw", "gsw", "hfx", "lh", "tsk", "tslb_1", "rainc", "rainnc", "clw")
    # Convert the forecast hour offset to an absolute timestamp
    # (ts_hour is in hours, hence the * 3600 to get seconds)
    tsdata$Date.Time = as.POSIXct(fcst_hr0, tz = "UTC") + tsdata$ts_hour * 3600
    # dateTimeCol() is a project helper -- presumably it splits Date.Time
    # into year/mon/day/hour/min/sec columns (TODO confirm)
    tsdata = tsdata %>% dateTimeCol() %>% select(Date.Time, year:sec, t:clw)
    return(tsdata)
}
ts_wswd = function(tsdata) {
    # Derive horizontal wind speed and meteorological wind direction from the
    # u (eastward) and v (northward) components already present in `tsdata`.
    # INPUT
    # tsdata -- data.frame with numeric columns `u` and `v`
    #           (e.g. as read by read.wrf_tslistTS)
    # OUTPUT
    # tsdata -- the same data.frame with `wspd` and `wdir` columns appended
    # Wind speed is the magnitude of the (u, v) vector.
    tsdata$wspd <- sqrt(tsdata$u^2 + tsdata$v^2)
    # Meteorological convention: the direction the wind blows FROM, in
    # degrees clockwise from north, mapped onto [0, 360).
    tsdata$wdir <- (atan2(-tsdata$u, -tsdata$v) * 180 / pi + 360) %% 360
    tsdata
}
timeavg.ts = function(tsdata, agg.time = "1 hour", statistic = "mean") {
    # Aggregate the "tsdata" object read into memory by `read.wrf_tslistTS`
    # to a coarser time resolution, applying `statistic` over each interval.
    # INPUT
    # tsdata    -- data.frame; read *.TS files using read.wrf_tslistTS
    # agg.time  -- interval used to form the aggregates (e.g. "1 hour")
    # statistic -- statistic applied within each aggregate (e.g. "mean")
    #
    # NOTE: The `agg.time` and `statistic` inputs are passed straight to the
    # timeAverage function from the openair library.
    #
    # OUTPUT
    # tsdata -- data.frame; averaged (or other statistic) at the agg.time step
    # 1. For timeAverage to work we need to rename Date.Time to "date"
    names(tsdata)[names(tsdata) == "Date.Time"] = "date"
    # 2. Remember the non-time variable columns so we can restore them later
    #    (%w/o% is a project-defined operator, presumably "without"/set
    #    exclusion -- TODO confirm its definition)
    timenames = c("date", "year", "mon", "day", "hour", "min", "sec")
    varnames = names(tsdata)[names(tsdata) %w/o% timenames]
    # 3. Calculate the time average, then restore the original column layout
    #    (Date.Time name and the year:sec columns via dateTimeCol())
    tsdata = timeAverage(tsdata %>% select(date, all_of(varnames)), avg.time = agg.time, statistic = statistic)
    names(tsdata)[names(tsdata) == "date"] = "Date.Time"
    tsdata = tsdata %>% dateTimeCol() %>% select(Date.Time, year:sec, all_of(varnames))
    return(tsdata)
}
#### Functions for ASOS Stations ####
read.asos = function(fASOS) {
    # Read ASOS station data downloaded with the dl_ny_asos.py script,
    # keeping only the station id, timestamp, temperature, relative humidity,
    # wind direction, and wind speed columns.
    #
    # INPUT
    # fASOS -- string; file name of the ASOS data
    # OUTPUT
    # asos.df -- data.frame of the ASOS data
    #
    # Read ASOS data (the download carries 5 preamble lines before the header)
    asos.df = read.table(fASOS, header = T, stringsAsFactors = FALSE, skip = 5, sep = ",")
    # Pick columns of interest
    select_columns = c("station", "valid", "tmpf", "relh", "drct", "sknt")
    asos.df = asos.df %>% select(all_of(select_columns))
    # Replace the "M" missing-data marker with NaN (columns are still
    # character here, so as.numeric() below yields NaN for those entries)
    asos.df[asos.df == "M"] = NaN
    # Rename columns to match WRF tslist columns
    rename_columns = c("station", "Date.Time", "t", "rh", "wdir", "wspd")
    names(asos.df) = rename_columns
    # Change variable types for every column
    asos.df$Date.Time = as.POSIXct(asos.df$Date.Time, tz = "UTC") # Change the string date.time to date-time objects
    asos.df$t = as.numeric(asos.df$t) # Temperature variables (Fahrenheit)
    asos.df$rh = as.numeric(asos.df$rh) # Relative Humidity (%)
    asos.df$wdir = as.numeric(asos.df$wdir) # Wind Direction (degrees N)
    asos.df$wspd = as.numeric(asos.df$wspd) # Wind Speed (in knots)
    # Return the new data frame
    return(asos.df)
}
#### Functions for Time-Matching ####
next.day = function(select.date) {
    # Return the calendar day after `select.date` as a "YYYY-mm-dd" string.
    #
    # BUG FIX: the original implementation added 1 to the day-of-month field
    # of the split string, which produced invalid dates at month and year
    # boundaries (e.g. "2020-01-31" -> "2020-01-32"). Date arithmetic
    # handles month lengths and leap years correctly.
    # INPUT
    # select.date - string of the desired date in "YYYY-mm-dd" format
    # OUTPUT
    # string representing the day after select.date
    format(as.Date(select.date) + 1, "%Y-%m-%d")
}
prev.day = function(select.date) {
    # Return the calendar day before `select.date` as a "YYYY-mm-dd" string.
    #
    # BUG FIX: the original computed `prev.date` but then executed
    # `return(next.date)`, returning an undefined variable. It also
    # subtracted 1 from the day-of-month field directly, which produced
    # invalid dates at month and year boundaries (e.g. "2020-03-01" ->
    # "2020-03-00"). Date arithmetic fixes both problems.
    # INPUT
    # select.date - string of the desired date in "YYYY-mm-dd" format
    # OUTPUT
    # string representing the day before select.date
    format(as.Date(select.date) - 1, "%Y-%m-%d")
}
nearest <- function(probe, target, ends=c(-Inf,Inf)) {
  # Return an array `i` of indexes into `target`, parallel to `probe`:
  # for each index `j` into `probe`, target[i[j]] is the element of `target`
  # nearest to probe[j]. (The original header comment had probe/target
  # transposed; the usage in nearest.dataframe -- `mytarget[ii, ]` --
  # confirms this orientation.)
  # Both `probe` and `target` must be vectors of numbers in ascending order.
  # `ends` pads `target` with sentinels so probes outside its range map to NA.
  #
  # glb(u, v): for each element of u, the index into v of its greatest lower
  # bound (the largest element of v that is <= it), computed with a single
  # sort over the pooled vector c(v, u).
  glb <- function(u, v) {
    n <- length(v)
    z <- c(v, u)
    # sort order of the pooled values; entries > n came from u
    j <- i <- order(z)
    j[j > n] <- -1
    # running max carries the most recent v-index forward past each u value
    k <- cummax(j)
    return (k[i > n])
  }
  # pad target with the sentinels so every probe has both neighbors
  y <- c(ends[1], target, ends[2])
  # index (into padded y) of the nearest value at or below each probe
  i.lower <- glb(probe, y)
  # least upper bound via the mirror trick: glb on reversed, negated vectors
  i.upper <- length(y) + 1 - rev(glb(rev(-probe), rev(-y)))
  y.lower <- y[i.lower]
  y.upper <- y[i.upper]
  # pick whichever neighbor is closer (exact ties go to the upper neighbor)
  lower.nearest <- probe - y.lower < y.upper - probe
  i <- ifelse(lower.nearest, i.lower, i.upper) - 1  # undo the padding offset
  i[i < 1 | i > length(target)] <- NA  # probes matched only to a sentinel
  return (i)
}
nearest.dataframe = function(probe, target, select.date) {
    # This function uses the function 'nearest' to return a subset of `target`
    # whose time series is the closest row-for-row match to the `probe` time
    # series on the selected day (target rows may repeat). The date-time
    # columns must be named "Date.Time" or else this function will not work.
    # In the first implementation of this code we use ASOS date times to probe
    # the WRF TS list target.
    ############
    # INPUT
    # probe       - data frame of the observation data
    # target      - data frame of the target data
    # select.date - string in YYYY-mm-dd format of the date of interest
    # OUTPUT
    # new_target  - subset of target with timestamps matched to the probe
    #
    # Select a single day to filter the data (UTC midnight to the following
    # midnight, inclusive on both ends; days() is assumed to come from
    # lubridate -- TODO confirm it is loaded wherever this file is sourced)
    daybegin = select.date %>% as.POSIXct(., tz = "UTC")
    dayend = daybegin + days(1)
    # Restrict both series to the selected day
    myprobe = probe %>% filter(Date.Time >= daybegin) %>% filter(Date.Time <= dayend)
    mytarget = target %>% filter(Date.Time >= daybegin) %>% filter(Date.Time <= dayend)
    # Convert to numeric (seconds since epoch) so nearest() can compare them
    myprobets = as.numeric(myprobe$Date.Time)
    mytargetts = as.numeric(mytarget$Date.Time)
    # Find the WRF (target) rows whose times are nearest each observation
    ii = nearest(myprobets, mytargetts)
    new_target = mytarget[ii, ]
    # Return the time-matched target data frame
    return(new_target)
}
evalsys_temp = function(data.for.eval) {
    # Compute forecast-performance statistics (Bias, RMSE, MAE) for
    # temperature, comparing each WRF initialization against the ASOS
    # observations.
    # INPUT
    # data.for.eval - data frame with columns Date.Time (UTC), Temperature,
    #                 and Source ("ASOS", "WRF D-0", "WRF D-1", "WRF D-2")
    # OUTPUT
    # list with two elements:
    #   eeval_df    - wide data frame of observed/forecast temperatures plus
    #                 forecast-minus-observation difference columns
    #   eeval_table - one row of BIAS/RMSE/MAE per forecast initialization
    # Helper: temperatures for one Source, in their original row order.
    grab = function(src) data.for.eval$Temperature[data.for.eval$Source == src]
    eeval = data.frame(Date.Time = data.for.eval$Date.Time[data.for.eval$Source == "ASOS"],
                       OBS = grab("ASOS"),
                       WRF_D0 = grab("WRF D-0"),
                       WRF_D1 = grab("WRF D-1"),
                       WRF_D2 = grab("WRF D-2"))
    # Forecast minus observation for each initialization day.
    for (k in 0:2) {
        eeval[[paste0("DIFF_D", k)]] = eeval[[paste0("WRF_D", k)]] - eeval$OBS
    }
    diffs = list(eeval$DIFF_D0, eeval$DIFF_D1, eeval$DIFF_D2)
    # BIAS = mean error, RMSE = root mean squared error, MAE = mean absolute
    # error (all equivalent to the sum/length forms).
    eeval_table = data.frame(Forecast.Init = c("WRF D-0", "WRF D-1", "WRF D-2"),
                             BIAS = vapply(diffs, mean, numeric(1)),
                             RMSE = vapply(diffs, function(d) sqrt(mean(d^2)), numeric(1)),
                             MAE = vapply(diffs, function(d) mean(abs(d)), numeric(1)))
    list(eeval_df = eeval, eeval_table = eeval_table)
}
evalsys_wspd = function(data.for.eval) {
  # Compute forecast-evaluation statistics (Bias, RMSE, MAE) for wind speed.
  #
  # INPUT
  #   data.for.eval - data frame with columns Date.Time (UTC), Wind.Speed and
  #                   Source, where Source is one of "ASOS", "WRF D-0",
  #                   "WRF D-1", "WRF D-2". Each source is assumed to
  #                   contribute the same number of time-aligned rows (as
  #                   produced by nearest.dataframe) - TODO confirm upstream.
  # OUTPUT
  #   list with two elements:
  #     eeval_df    - wide data frame of observed and simulated values plus
  #                   the simulated-minus-observed difference columns
  #     eeval_table - one row of BIAS/RMSE/MAE per WRF initialisation
  #
  # Extract one column for one source; which() drops any NA Source rows,
  # matching dplyr::filter() semantics of the previous implementation.
  pick <- function(src, col) data.for.eval[which(data.for.eval$Source == src), col]
  eeval = data.frame(Date.Time = pick("ASOS", "Date.Time"),
                     OBS = pick("ASOS", "Wind.Speed"),
                     WRF_D0 = pick("WRF D-0", "Wind.Speed"),
                     WRF_D1 = pick("WRF D-1", "Wind.Speed"),
                     WRF_D2 = pick("WRF D-2", "Wind.Speed"))
  # Difference columns: positive values mean the model is too fast.
  eeval$DIFF_D0 = eeval$WRF_D0 - eeval$OBS
  eeval$DIFF_D1 = eeval$WRF_D1 - eeval$OBS
  eeval$DIFF_D2 = eeval$WRF_D2 - eeval$OBS
  d = data.frame(D0 = eeval$DIFF_D0, D1 = eeval$DIFF_D1, D2 = eeval$DIFF_D2)
  # mean(x), sqrt(mean(x^2)) and mean(abs(x)) replace the hand-rolled
  # sum(x)/length(x) forms; the computed values are identical.
  eeval_table = data.frame(Forecast.Init = c("WRF D-0", "WRF D-1", "WRF D-2"),
                           BIAS = c(mean(d$D0), mean(d$D1), mean(d$D2)),
                           RMSE = c(sqrt(mean(d$D0^2)),
                                    sqrt(mean(d$D1^2)),
                                    sqrt(mean(d$D2^2))),
                           MAE = c(mean(abs(d$D0)),
                                   mean(abs(d$D1)),
                                   mean(abs(d$D2))))
  eeval = list(eeval_df = eeval, eeval_table = eeval_table)
  return(eeval)
}
evalsys_wdir = function(data.for.eval) {
  # Compute forecast-evaluation statistics (RMSE, MAE) for wind direction.
  #
  # INPUT
  #   data.for.eval - data frame with columns Date.Time (UTC), Wind.Direction
  #                   and Source, where Source is one of "ASOS", "WRF D-0",
  #                   "WRF D-1", "WRF D-2". Each source is assumed to
  #                   contribute the same number of time-aligned rows (as
  #                   produced by nearest.dataframe) - TODO confirm upstream.
  # OUTPUT
  #   list with two elements:
  #     eeval_df    - wide data frame of observed and simulated directions
  #                   plus the wrapped difference columns
  #     eeval_table - one row of RMSE/MAE per WRF initialisation
  #
  # Extract one column for one source; which() drops any NA Source rows,
  # matching dplyr::filter() semantics of the previous implementation.
  pick <- function(src, col) data.for.eval[which(data.for.eval$Source == src), col]
  # Wrap raw direction differences (which lie in (-360, 360) for inputs in
  # [0, 360)) onto the short arc: e.g. 350 - 10 = 340 becomes -20. Values of
  # exactly +/-180 are kept as-is, matching the original implementation.
  wrap180 <- function(x) {
    x[x > 180] = x[x > 180] - 360
    x[x < -180] = x[x < -180] + 360
    x
  }
  eeval = data.frame(Date.Time = pick("ASOS", "Date.Time"),
                     OBS = pick("ASOS", "Wind.Direction"),
                     WRF_D0 = pick("WRF D-0", "Wind.Direction"),
                     WRF_D1 = pick("WRF D-1", "Wind.Direction"),
                     WRF_D2 = pick("WRF D-2", "Wind.Direction"))
  # Calculate the wrapped difference between simulated and observed.
  eeval$DIFF_D0 = wrap180(eeval$WRF_D0 - eeval$OBS)
  eeval$DIFF_D1 = wrap180(eeval$WRF_D1 - eeval$OBS)
  eeval$DIFF_D2 = wrap180(eeval$WRF_D2 - eeval$OBS)
  d = data.frame(D0 = eeval$DIFF_D0, D1 = eeval$DIFF_D1, D2 = eeval$DIFF_D2)
  # BIAS is intentionally absent here (unlike the temperature/wind-speed
  # tables): an arithmetic mean is not a meaningful bias measure for a
  # circular quantity. This matches the original implementation.
  eeval_table = data.frame(Forecast.Init = c("WRF D-0", "WRF D-1", "WRF D-2"),
                           RMSE = c(sqrt(mean(d$D0^2)),
                                    sqrt(mean(d$D1^2)),
                                    sqrt(mean(d$D2^2))),
                           MAE = c(mean(abs(d$D0)),
                                   mean(abs(d$D1)),
                                   mean(abs(d$D2))))
  eeval = list(eeval_df = eeval, eeval_table = eeval_table)
  return(eeval)
}
|
9662363b9ace72cbb653fc0cea5996e348f7d408 | 7f107bd4be987667feb046548868ed88d06cdb88 | /lang/R/report/test/src/adsl.R | 9e111da42c0b8a36511c47b5069740757f214e3b | [
"MIT"
] | permissive | phuse-org/phuse-scripts | d24d748de6cbdfd98af8be8d912749bb04811570 | 11b416b7b379f6094caccbc1f9069713033500c4 | refs/heads/master | 2023-08-08T17:41:11.238199 | 2023-08-01T15:21:20 | 2023-08-01T15:21:20 | 32,612,654 | 107 | 98 | MIT | 2023-08-01T15:02:06 | 2015-03-20T23:42:41 | SAS | UTF-8 | R | false | false | 162 | r | adsl.R | # test summary of subject level dataset
# Read the ADSL (subject-level analysis) SAS transport file directly from a URL.
# NOTE(review): read.xport() comes from the 'foreign' package, which is not
# loaded in this snippet - presumably a library(foreign) call precedes it;
# confirm.
# NOTE(review): googlecode.com has been shut down, so this URL most likely no
# longer resolves - verify and update the data location.
adsl <- read.xport(
  file="http://phuse-scripts.googlecode.com/svn/trunk/lang/R/report/test/data/adsl.xpt")
# Print a per-column summary of the imported dataset.
summary(adsl)
2c846516d907704e2295305a66fe88082d16bad9 | 2fbd23b496681bfa89b7a61f05f05d3b5e65772d | /R/package.R | 5ef4b30742eaba5fc8db9318443968d056f0b635 | [
"MIT"
] | permissive | numeract/Nmisc | 95b7b8f89f2eb278572117c04825d2add2f508a3 | 17347253922b657336e6c7ba044f7361daf67506 | refs/heads/master | 2021-07-16T22:09:34.461956 | 2021-04-28T13:28:39 | 2021-04-28T13:28:39 | 118,459,419 | 0 | 0 | NOASSERTION | 2021-04-28T04:29:10 | 2018-01-22T13:16:23 | R | UTF-8 | R | false | false | 344 | r | package.R | #' @importFrom magrittr %>%
#' @importFrom purrr %||%
#' @importFrom rlang .data
NULL
# quiets concerns of R CMD check re: Namespaces in Imports field not imported
#' @importFrom rappdirs user_data_dir
NULL
# quiets concerns of R CMD check re: the .'s that appear in pipelines
# globalVariables() was introduced in R 2.15.1, hence the version guard below.
if (getRversion() >= "2.15.1") utils::globalVariables(c("."))
|
c54a14701e5e0458e706624dc78d21eeaec81ca3 | 74cc317eb46cc842d79309f652bf4b1e3105a066 | /cachematrix.R | 2aa038e8b2c1fc6b69b99883f46d148e2b3bd91d | [] | no_license | laacdm/ProgrammingAssignment2 | 965d0f2b411627816f8bbe7eda9895012e017e1f | bda43cec26bfcddd12bd0a8455ed588bd5764b86 | refs/heads/master | 2021-01-21T22:19:52.022502 | 2017-09-01T23:24:14 | 2017-09-01T23:24:14 | 102,151,487 | 0 | 0 | null | 2017-09-01T20:41:22 | 2017-09-01T20:41:22 | null | UTF-8 | R | false | false | 971 | r | cachematrix.R | ## The first function cache the inverse of a matrix that has been already calculated
## The second function cache the inverse in case it has already been calculated otherwise computes the inverse
## Chache the inverse of a matrix
makeCacheMatrix <- function(x = matrix()) {
  # Build a "cache-aware matrix": a list of four closures that share the
  # matrix 'x' and a cached inverse in this function's environment.
  #   set(y)        - replace the stored matrix and drop any cached inverse
  #   get()         - return the stored matrix
  #   setinverse(v) - store a computed inverse
  #   getinverse()  - return the cached inverse, or NULL if not yet computed
  cached <- NULL
  set <- function(y) {
    x <<- y
    cached <<- NULL  # a new matrix invalidates the cached inverse
  }
  list(
    set = set,
    get = function() x,
    setinverse = function(inverse) cached <<- inverse,
    getinverse = function() cached
  )
}
## Cache the inverse of a matrix in case it has been calculated otherwise compute the inverse
cacheSolve <- function(x, ...) {
  # Return the inverse of the cache-aware matrix 'x' (as built by
  # makeCacheMatrix). A previously cached inverse is reused; otherwise the
  # inverse is computed with solve(), stored in the cache, and returned.
  # Additional arguments in ... are forwarded to solve().
  cached <- x$getinverse()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
c149da0469812f7232cfec6b873e737fb5a327f2 | bb4148bd61296b1929e0f635a0a1354ceabc3f54 | /01_null_model.R | ad40d009126e1430330f486e72d2e57a011fe62c | [] | no_license | inbalalo/The-dynamics-of-credit-assignment-to-motor-outcome-irrelevant-task-representations | 8495b72e64fc36d149d1e1945c1ef18128831b54 | a61cc82a2ce210479b0f9b524776fd47f6fa8478 | refs/heads/master | 2022-12-12T07:16:22.837593 | 2020-09-05T18:29:14 | 2020-09-05T18:29:14 | 280,286,561 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,036 | r | 01_null_model.R | rm(list = ls())
# Simulation setup. 01_functions.R presumably defines sim.task() and
# fit.task() used below - confirm before running.
source('01_functions.R')
library(knitr)
library(kableExtra)
#generate parameters and data for N agents
N <- 1000 #number of agents
Nalt <- 2 #number of alternatives in a trial
Ncards <- 4 #size of a card deck
Ntrl <- c(25, 50, 100, 200, 1000) #number of trls
conds <- c('positive', 'negative', 'positive', 'negative')
# Pre-generated random-walk reward probabilities; file name suggests
# 4 fractals x 1000 trials - confirm against how sim.task() consumes it.
rndwlk <- read.csv('rndwlk_4frc_1000trials.csv', header = F)
####Model 1####
# Ground-truth parameters for the null model: only alpha_frc and beta vary
# across agents; the key learning rates (alpha_key) and weights (w) are fixed
# at 0. Note that passing two-column cbind() matrices into data.frame()
# produces columns named alpha_key.1/alpha_key.2 and w.1/w.2, which is how
# they are referenced below.
true.parms1 <-
  data.frame(
    alpha_frc = runif(N, min = 0, max = 1),
    alpha_key = cbind(rep(0, N), rep(0, N)),
    beta = runif(N, min = 1, max = 8),
    w = cbind(rep(0, N), rep(0, N))
  )
# Simulate the task for every trial-count condition (outer lapply over Ntrl)
# and every agent (inner mclapply over agents; mclapply comes from the
# 'parallel' package - presumably loaded by 01_functions.R, confirm).
# NOTE(review): 1:length(Ntrl) and 1:N would misbehave for empty inputs;
# seq_along(Ntrl)/seq_len(N) are the safer idioms.
model1 <-
  lapply(1:length(Ntrl), function(trl) {
    mclapply(1:N, function(s) {
      sim.task(
        Ntrl[trl],
        Nalt,
        Ncards,
        true.parms1$alpha_frc[s],
        c(true.parms1$alpha_key.1[s], true.parms1$alpha_key.2[s]),
        true.parms1$beta[s],
        c(true.parms1$w.1[s], true.parms1$w.2[s]),
        rndwlk,
        conds
      )
    })
  })
#recover param from data
# For every trial-count condition and every simulated agent, re-estimate the
# model parameters from the simulated behaviour via optim(L-BFGS-B). In this
# null model the key learning rates (alpha_key_pos/neg) and the weights
# (w_pos/neg) are boxed to [0, 1e-110], so only alpha_frc and beta are
# effectively free.
# NOTE(review): beta is bounded above by 2 although agents were simulated
# with beta in [1, 8] - confirm this restriction is intended.
rec.parms1 <-
  lapply(seq_along(Ntrl), function(trl) {
    mclapply(seq_len(N), function(s) {
      # Progress message for every 10th agent (s %% 10 == 1; the original
      # compared against T, which coerces to 1).
      if (s %% 10 == 1) {
        print(paste('Ntrl=', Ntrl[trl], ' subj=', s, sep = ''))
      }
      optim(
        par = c(
          runif(1, min = 0, max = 1), #alpha_frc
          runif(1, min = 0, max = 1), #alpha_key_pos
          runif(1, min = 0, max = 1), #alpha_key_neg
          runif(1, min = 0, max = 2), #beta
          0, #w_pos
          0 #w_neg
        ),
        fn = fit.task,
        df = model1[[trl]][[s]][['df']],
        alternatives = model1[[trl]][[s]][['alts']],
        Nalt = Nalt,
        Ncards = Ncards,
        cond = conds,
        lower = c(0, 0, 0, 0, 0, 0),
        upper = c(1, 1, 1, 2, 1e-110, 1e-110),
        method = "L-BFGS-B"
      )$par
    })
  })
# FIX: a stray no-argument as.data.frame() call that followed this block was
# removed - it would raise an error at runtime and had no effect on
# rec.parms1.
#save data
# NOTE(review): only model1 gets the .Rda extension; true.parms1 and
# rec.parms1 are written without one. Downstream load() calls must use these
# exact names - confirm before renaming for consistency.
save(model1, file = "model1.Rda")
save(true.parms1, file = "true.parms1")
save(rec.parms1, file = "rec.parms1")
#calculate cor between true and recovered params
# One data frame per trial-count condition: correlation between the true and
# the recovered alpha_frc (column 1 of the optim output) and beta (column 4).
# The key/weight correlations are commented out because those parameters are
# pinned to ~0 in this null model, making their correlations undefined.
df.tbl1 <-lapply(1:length(Ntrl), function(trl){
  data.frame(Ntrl=Ntrl[trl],
             cor.alpha_frc=cor(true.parms1$alpha_frc,(do.call(rbind,rec.parms1[[trl]])[,1])),
             #cor.alpha_key_pos=cor(true.parms1$alpha_key[1],(do.call(rbind,rec.parms1[[trl]])[,2])),
             #cor.alpha_key_neg=cor(true.parms1$alpha_key[2],(do.call(rbind,rec.parms1[[trl]])[,3])),
             cor.beta=cor(true.parms1$beta,(do.call(rbind,rec.parms1[[trl]])[,4]))
             #cor.W_pos=cor(true.parms1$w[1], (do.call(rbind,rec.parms1[[trl]])[,5])),
             #cor.W_neg=cor(true.parms1$w[2], (do.call(rbind,rec.parms1[[trl]])[,6]))
  )
})
#print table to file
# NOTE(review): df.tbl1 is a *list* of one-row data frames; kable() on a list
# may not render one combined table - do.call(rbind, df.tbl1) first would
# give a single table. Confirm the intended output.
df.tbl1 %>%
  kable() %>%
  kable_styling(bootstrap_options = c("striped", "hover", "condensed"))
#save table
save(df.tbl1, file = "tbl1.corr")
|
e61c9d00e8b7ba9dbea44a6f92847e3f2f3c5c6d | 990defc7dfa8b37192d20c628f6dfd7471b5cddb | /R/fourth.R | 08d536c525a8611c8fc9d58bdd06c482fac1fd6c | [] | no_license | hoanguc3m/ccgarch | 247cbda115769c0f4bcdd87a5088e000b6bd73a0 | dbd84bacf56d09538d90fd687c2131d52e5dc7dd | refs/heads/master | 2020-03-13T17:29:12.532948 | 2018-04-29T01:22:42 | 2018-04-29T01:22:42 | 131,218,138 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 277 | r | fourth.R | # fourth order moment condition
# for details, see He and Ter\"{a}svirta (2004) and Nakatani and Ter\"{a}svirta (2007).
fourth <- function(A, B, R) {
  # Fourth-order moment condition for a CC-GARCH process: returns the
  # spectral radius (largest eigenvalue modulus) of the moment matrix built
  # from the ARCH matrix A, the GARCH matrix B and the constant correlation
  # matrix R. For details, see He and Terasvirta (2004) and Nakatani and
  # Terasvirta (2007).
  sum_ab <- A + B
  kron_aa <- kronecker(A, A)  # equivalent to A %x% A
  moment_mat <- kronecker(sum_ab, sum_ab) + 2 * kron_aa * (diag(as.vector(R))^2)
  max(Mod(eigen(moment_mat)$values))
}
|
4bc01ac8042c14bdad1014cc58d9d0985281923b | c117da360dd891019adc21d0579c5dd8bcbe0ecd | /man/globalsOf.Rd | ed1a54d4556d03846675f73cc7259058557f7217 | [] | no_license | HenrikBengtsson/globals | d6433d2dc07367a659648901416a511f8712ddc3 | 0cec7a7d98a56b7256957d6d8d944aaff1257e6c | refs/heads/develop | 2023-08-18T18:46:57.570987 | 2023-05-19T17:21:10 | 2023-05-19T18:37:14 | 35,904,821 | 29 | 5 | null | 2020-10-09T23:21:55 | 2015-05-19T19:49:33 | R | UTF-8 | R | false | true | 3,713 | rd | globalsOf.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/findGlobals.R, R/globalsOf.R
\name{findGlobals}
\alias{findGlobals}
\alias{globalsOf}
\title{Get all global objects of an expression}
\usage{
findGlobals(
expr,
envir = parent.frame(),
...,
attributes = TRUE,
tweak = NULL,
dotdotdot = c("warning", "error", "return", "ignore"),
method = c("ordered", "conservative", "liberal"),
substitute = FALSE,
unlist = TRUE,
trace = FALSE
)
globalsOf(
expr,
envir = parent.frame(),
...,
method = c("ordered", "conservative", "liberal"),
tweak = NULL,
locals = NA,
substitute = FALSE,
mustExist = TRUE,
unlist = TRUE,
recursive = TRUE,
skip = NULL
)
}
\arguments{
\item{expr}{An R expression.}
\item{envir}{The environment from where to search for globals.}
\item{\dots}{Not used.}
\item{attributes}{If TRUE (default), attributes of `expr` are also searched.
If FALSE, they are not.
If a character vector, then attributes with matching names are searched.
Note, the attributes of the attributes elements are not searched, that is,
attributes are not searched recursively. Also, attributes are searched
with `dotdotdot = "ignore"`.}
\item{tweak}{An optional function that takes an expression
and returns a tweaked expression.}
\item{dotdotdot}{TBD.}
\item{method}{A character string specifying what type of search algorithm
to use.}
\item{substitute}{If TRUE, the expression is \code{substitute()}:ed,
otherwise not.}
\item{unlist}{If TRUE, a list of unique objects is returned.
If FALSE, a list of \code{length(expr)} sublists.}
\item{trace}{TBD.}
\item{locals}{Should globals part of any "local" environment of
a function be included or not?}
\item{mustExist}{If TRUE, an error is thrown if the object of the
identified global cannot be located. Otherwise, the global
is not returned.}
\item{recursive}{If TRUE, globals that are closures (functions) and that
exist outside of namespaces ("packages"), will be recursively
scanned for globals.}
\item{skip}{(internal) A list of globals not to be searched for
additional globals. Ignored unless \code{recursive} is TRUE.}
}
\value{
\code{findGlobals()} returns a character vector.
\code{globalsOf()} returns a \link{Globals} object.
}
\description{
Get all global objects of an expression
}
\details{
There currently three strategies for identifying global objects.
The \code{method = "ordered"} search method identifies globals such that
a global variable preceding a local variable with the same name
is not dropped (which the \code{"conservative"} method would).
The \code{method = "conservative"} search method tries to keep the number
of false positive to a minimum, i.e. the identified objects are
most likely true global objects. At the same time, there is
a risk that some true globals are not identified (see example).
This search method returns the exact same result as the
\code{\link[codetools]{findGlobals}()} function of the
\pkg{codetools} package.
The \code{method = "liberal"} search method tries to keep the
true-positive ratio as high as possible, i.e. the true globals
are most likely among the identified ones. At the same time,
there is a risk that some false positives are also identified.
With \code{recursive = TRUE}, globals part of locally defined
functions will also be found, otherwise not.
}
\examples{
b <- 2
expr <- substitute({ a <- b; b <- 1 })
## Will _not_ identify 'b' (because it's also a local)
globalsC <- globalsOf(expr, method = "conservative")
print(globalsC)
## Will identify 'b'
globalsL <- globalsOf(expr, method = "liberal")
print(globalsL)
}
\seealso{
Internally, the \pkg{\link{codetools}} package is utilized for
code inspections.
}
|
72d59861419c3e2588ad1a8044013ad063f05b89 | 8b26fe0ea1ceee5eb16454ac591754b3742ad1a4 | /plot2.R | 1eaa1bd26d9b7771e8825b14ffbf2c97f9ee174f | [] | no_license | pitono/ExData_Plotting1 | 84ab25edd0aa4c00a1c7a8872c388d313794f317 | 5f783b8f7ba26d3204a70889538b50f2ece94705 | refs/heads/master | 2021-01-24T14:18:34.093555 | 2014-05-11T15:59:51 | 2014-05-11T15:59:51 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 890 | r | plot2.R | ## Getting column names
# Read only the header row to capture the column names of the dataset.
column <- colnames(read.table("household_power_consumption.txt", nrow=1, header=TRUE, sep=";"))
## Getting required data, only 2880 rows and skipping 66637 rows.
## Column names is using the above vector
# NOTE(review): skip=66637/nrow=2880 hard-codes a window of minute-resolution
# rows - presumably the two days 2007-02-01 and 2007-02-02 of this dataset;
# confirm against the file before reuse.
data <- read.table(file="household_power_consumption.txt",
                   skip=66637, nrow=2880, sep=";", col.names=column)
## Converting Time column to time format
# Combine Date and Time into a single POSIXlt timestamp for the x-axis.
data$Time <- strptime(paste(data$Date,data$Time,sep=" "), format="%d/%m/%Y %H:%M:%S")
## Converting Date column to date format
data$Date <- as.Date(data$Date, "%d/%m/%Y")
## Open PNG file
png("plot2.png", width=480, height=480, bg="transparent")
## png("plot2.png", width=480, height=480, bg="white")
## Plot the data
plot(data$Time, data$Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)")
## Turn off device and turn off all warning message
# Assigning dev.off() to a variable suppresses the printed device number.
g <- dev.off()
|
c6d9f84b662eb3a09c68353a0f9a69010b19bec1 | 5c1a533945187081ebda39902468140d98f3e0a2 | /R/regTable.R | 8ba469aa8be130158b5fb9cf05df066a16da40f0 | [] | no_license | cran/arealDB | 40fb8b955aab850010ad74662cfad71a57828656 | 3c724a46811ba71288f311e11b20d1c8d065200d | refs/heads/master | 2023-07-05T11:01:30.807752 | 2023-07-03T09:00:02 | 2023-07-03T09:00:02 | 276,648,603 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 20,003 | r | regTable.R | #' Register a new areal data table
#'
#' This function registers a new areal data table into the geospatial database.
#' @param ... [\code{character(1)}]\cr name and value of the topmost unit under
#' which the table shall be registered. The name of this must be a class of
#' the gazetteer and the value must be one of the territory names of that
#' class, e.g. \emph{nation = "Estonia"}.
#' @param subset [\code{character(1)}]\cr optional argument to specify which
#' subset the file contains. This could be a subset of territorial units (e.g.
#' only one municipality) or of a target variable.
#' @param dSeries [\code{character(1)}]\cr the dataseries of the areal data (see
#' \code{\link{regDataseries}}).
#' @param gSeries [\code{character(1)}]\cr optionally, the dataseries of the
#' geometries, if the geometry dataseries deviates from the dataseries of the
#' areal data (see \code{\link{regDataseries}}).
#' @param label [\code{integerish(1)}]\cr the label in the onology this geometry
#' should correspond to.
#' @param begin [\code{integerish(1)}]\cr the date from which on the data are
#' valid.
#' @param end [\code{integerish(1)}]\cr the date until which the data are valid.
#' @param schema [\code{list(1)}]\cr the schema description of the table to read
#' in (must have been placed in the global environment before calling it
#' here).
#' @param archive [\code{character(1)}]\cr the original file from which the
#' boundaries emerge.
#' @param archiveLink [\code{character(1)}]\cr download-link of the archive.
#' @param nextUpdate [\code{character(1)}]\cr when does the geometry dataset get
#' updated the next time (format restricted to: YYYY-MM-DD).
#' @param updateFrequency [\code{character(1)}]\cr value describing the
#' frequency with which the dataset is updated, according to the ISO 19115
#' Codelist, MD_MaintenanceFrequencyCode. Possible values are: 'continual',
#' 'daily', 'weekly', 'fortnightly', 'quarterly', 'biannually', 'annually',
#' 'asNeeded', 'irregular', 'notPlanned', 'unknown', 'periodic',
#' 'semimonthly', 'biennially'.
#' @param metadataLink [\code{character(1)}]\cr if there is already metadata
#' existing: link to the meta dataset.
#' @param metadataPath [\code{character(1)}]\cr if an existing meta dataset was
#' downloaded along the data: the path where it is stored locally.
#' @param notes [\code{character(1)}]\cr optional notes.
#' @param update [\code{logical(1)}]\cr whether or not the file 'inv_tables.csv'
#' should be updated.
#' @param overwrite [\code{logical(1)}]\cr whether or not the geometry to
#' register shall overwrite a potentially already existing older version.
#' @details When processing areal data tables, carry out the following steps:
#' \enumerate{ \item Determine the main territory (such as a nation, or any
#' other polygon), a \code{subset} (if applicable), the ontology
#' \code{label} and the dataseries of the areal data and of the geometry, and
#' provide them as arguments to this function. \item Provide a \code{begin}
#' and \code{end} date for the areal data. \item Run the function. \item
#' (Re)Save the table with the following properties: \itemize{\item Format:
#' csv \item Encoding: UTF-8 \item File name: What is provided as message by
#' this function \item make sure that the file is not modified or reshaped.
#' This will happen during data normalisation via the schema description,
#' which expects the original table.} \item Confirm that you have saved the
#' file.}
#'
#' Every areal data dataseries (\code{dSeries}) may come as a slight
#' permutation of a particular table arrangement. The function
#' \code{\link{normTable}} expects internally a schema description (a list
#' that describes the position of the data components) for each data table,
#' which is saved as \code{paste0("meta_", dSeries, TAB_NUMBER)}. See package
#' \code{tabshiftr}.
#' @return Returns a tibble of the entry that is appended to 'inv_tables.csv' in
#' case \code{update = TRUE}.
#' @family register functions
#' @examples
#' if(dev.interactive()){
#' # build the example database
#' makeExampleDB(until = "regGeometry", path = tempdir())
#'
#' # the schema description for this table
#' library(tabshiftr)
#'
#' schema_madeUp <-
#' setIDVar(name = "al1", columns = 1) %>%
#' setIDVar(name = "year", columns = 2) %>%
#' setIDVar(name = "commodities", columns = 3) %>%
#' setObsVar(name = "harvested",
#' factor = 1, columns = 4) %>%
#' setObsVar(name = "production",
#' factor = 1, columns = 5)
#'
#' regTable(nation = "Estonia",
#' subset = "barleyMaize",
#' dSeries = "madeUp",
#' gSeries = "gadm",
#' level = 1,
#' begin = 1990,
#' end = 2017,
#' schema = schema_madeUp,
#' archive = "example_table.7z|example_table1.csv",
#' archiveLink = "...",
#' nextUpdate = "2019-10-01",
#' updateFrequency = "quarterly",
#' metadataLink = "...",
#' metadataPath = "my/local/path",
#' update = TRUE)
#' }
#' @importFrom readr read_csv write_rds guess_encoding
#' @importFrom rlang ensym exprs eval_tidy
#' @importFrom purrr map_chr
#' @importFrom checkmate assertDataFrame assertNames assertCharacter
#' assertIntegerish assertSubset assertLogical testChoice assertChoice
#' assertFileExists assertClass assertTRUE testDataFrame testNames
#' @importFrom dplyr filter distinct
#' @importFrom stringr str_split
#' @importFrom tibble tibble
#' @export
regTable <- function(..., subset = NULL, dSeries = NULL, gSeries = NULL,
                     label = NULL, begin = NULL, end = NULL, schema = NULL,
                     archive = NULL, archiveLink = NULL, nextUpdate = NULL,
                     updateFrequency = NULL, metadataLink = NULL, metadataPath = NULL,
                     notes = NULL, update = FALSE, overwrite = FALSE){
  # set internal paths
  intPaths <- paste0(getOption(x = "adb_path"))
  # get tables (inventory of tables, dataseries and geometries)
  inv_tables <- read_csv(paste0(intPaths, "/inv_tables.csv"), col_types = "iiiccccDccccc")
  inv_dataseries <- read_csv(paste0(intPaths, "/inv_dataseries.csv"), col_types = "icccccc")
  inv_geometries <- read_csv(paste0(intPaths, "/inv_geometries.csv"), col_types = "iicccccDDcc")
  if(dim(inv_dataseries)[1] == 0){
    stop("'inv_dataseries.csv' does not contain any entries!")
  } else if(dim(inv_geometries)[1] == 0){
    stop("'inv_geometries.csv' does not contain any entries!")
  }
  # make new tabID
  newTID <- ifelse(length(inv_tables$tabID)==0, 1, as.integer(max(inv_tables$tabID)+1))
  # in testing mode? (suppresses the interactive readline() prompts)
  testing <- getOption(x = "adb_testing")
  # check validity of arguments
  assertNames(x = colnames(inv_tables),
              permutation.of = c("tabID", "datID", "geoID", "source_file", "schema",
                                 "orig_file", "orig_link", "download_date", "next_update",
                                 "update_frequency", "metadata_link", "metadata_path", "notes"))
  assertNames(x = colnames(inv_dataseries),
              permutation.of = c("datID", "name", "description", "homepage",
                                 "licence_link", "licence_path", "notes"))
  assertNames(x = colnames(inv_geometries),
              permutation.of = c("geoID", "datID", "source_file", "layer",
                                 "label", "orig_file", "orig_link", "download_date",
                                 "next_update", "update_frequency", "notes"))
  assertCharacter(x = subset, any.missing = FALSE, null.ok = TRUE)
  assertCharacter(x = dSeries, ignore.case = TRUE, any.missing = FALSE, len = 1, null.ok = TRUE)
  assertCharacter(x = gSeries, ignore.case = TRUE, any.missing = FALSE, len = 1, null.ok = TRUE)
  assertCharacter(x = label, any.missing = FALSE, len = 1, null.ok = TRUE)
  assertIntegerish(x = begin, any.missing = FALSE, len = 1, lower = 1900, null.ok = TRUE)
  assertIntegerish(x = end, any.missing = FALSE, len = 1, upper = as.integer(format(Sys.Date(), "%Y")), null.ok = TRUE)
  assertClass(x = schema, classes = "schema", null.ok = TRUE)
  assertCharacter(x = archive, any.missing = FALSE, null.ok = TRUE)
  assertCharacter(x = archiveLink, any.missing = FALSE, null.ok = TRUE)
  assertCharacter(x = nextUpdate, any.missing = FALSE, null.ok = TRUE)
  assertCharacter(x = updateFrequency, any.missing = FALSE, null.ok = TRUE)
  assertCharacter(x = metadataLink, any.missing = FALSE, null.ok = TRUE)
  assertCharacter(x = metadataPath, any.missing = FALSE, null.ok = TRUE)
  assertCharacter(x = notes, ignore.case = TRUE, any.missing = FALSE, len = 1, null.ok = TRUE)
  assertLogical(x = update, len = 1)
  assertLogical(x = overwrite, len = 1)
  # the first named argument in ... is the topmost territorial unit
  broadest <- exprs(..., .named = TRUE)
  if(length(broadest) > 0){
    mainPoly <- eval_tidy(broadest[[1]])
  } else {
    mainPoly <- ""
  }
  schemaName <- as.character(substitute(schema))
  # ask for missing and required arguments; '_' is forbidden in name parts
  # because it is the separator of the standardised file name built below
  if(!is.null(subset)){
    if(grepl(pattern = "_", x = subset)){
      stop("please give a subset that does not contain any '_' characters.")
    }
  } else {
    subset <- ""
  }
  if(is.null(dSeries)){
    message("please type in to which data series this table belongs: ")
    if(!testing){
      dSeries <- readline()
    } else {
      dSeries <- "madeUp"
    }
    if(grepl(pattern = "_", x = dSeries)){
      stop("please give a data series name that does not contain any '_' characters.")
    }
    if(!testing){
      if(!any(inv_dataseries$name %in% dSeries)){
        stop(paste0("please first create the new dataseries '", dSeries,"' via 'regDataseries()'"))
      }
    } else {
      dataSeries <- NA_integer_
    }
  } else{
    if(!any(inv_dataseries$name %in% dSeries)){
      stop(paste0("please first create the new data table dataseries '", dSeries, "' via 'regDataseries()'"))
    }
    dataSeries <- inv_dataseries$datID[inv_dataseries$name %in% dSeries]
  }
  if(is.null(gSeries)){
    message("please type in to which geometry series this table belongs: ")
    if(!testing){
      gSeries <- readline()
    } else {
      gSeries <- "gadm"
    }
    if(grepl(pattern = "_", x = gSeries)){
      stop("please give a geometry series name that does not contain any '_' characters.")
    }
    if(!testing){
      if(!any(inv_dataseries$name %in% gSeries)){
        stop(paste0("! please first create the new geometry series '", gSeries,"' via 'regDataseries()' !"))
      }
    } else {
      geomSeries <- NA_integer_
    }
  } else{
    # match the geometry by dataseries AND by the last label component of the
    # registered geometries (label entries look like "...|name=value")
    tempDatID <- inv_dataseries$datID[inv_dataseries$name %in% gSeries]
    tempLabels <- map_chr(.x = inv_geometries$label,
                          .f = function(x){
                            str_split(tail(str_split(x, "\\|")[[1]], 1), "=")[[1]][1]
                          })
    geomSeries <- inv_geometries$geoID[inv_geometries$datID %in% tempDatID & tempLabels == label]
    if(length(geomSeries) < 1){
      stop(paste0("! please first register geometries of the series '", gSeries,"' via 'regGeometries()' !"))
    }
  }
  if(is.null(label)){
    message("please type in the ontology label of the units: ")
    if(!testing){
      label <- readline()
    } else {
      label <- 1
    }
    if(is.na(label)){
      label = NA_character_
    }
  }
  if(is.null(begin)){
    message("please type in the first year in the table: ")
    if(!testing){
      begin <- readline()
    } else {
      begin <- 1990
    }
    if(is.na(begin)){
      begin = NA_integer_
    }
  }
  if(is.null(end)){
    message("please type in the last year in the table: ")
    if(!testing){
      end <- readline()
    } else {
      end <- 2017
    }
    if(is.na(end)){
      end = NA_integer_
    }
  }
  if(is.null(schema)){
    message("please provide the schema description for this table: ")
    if(!testing){
      schema <- readline()
    } else {
      schema <- readRDS(file = paste0(intPaths, "/meta/schemas/example_schema.rds"))
    }
    if(length(schema) < 1){
      schema = NA_character_
    }
  }
  if(is.null(archive)){
    message("please type in the archives' file name: ")
    if(!testing){
      archive <- readline()
    } else {
      archive <- "example_table.7z"
    }
    if(is.na(archive)){
      archive = NA_character_
    }
  }
  # put together file name and get confirmation that file should exist now
  fileName <- paste0(mainPoly, "_", label, "_", subset, "_", begin, "_", end, "_", dSeries, ".csv")
  filePath <- paste0(intPaths, "/adb_tables/stage2/", fileName)
  fileArchive <- str_split(archive, "\\|")[[1]]
  if(any(inv_tables$source_file %in% fileName)){
    if(overwrite){
      # re-use the schema name and tabID of the already registered entry
      theSchemaName <- inv_tables$schema[inv_tables$source_file == fileName]
      newTID <- inv_tables$tabID[which(inv_tables$source_file %in% fileName)]
    } else {
      return(paste0("'", fileName, "' has already been registered."))
    }
  } else {
    theSchemaName <- paste0("schema_", newTID)
  }
  # make a schema description
  write_rds(x = schema, file = paste0(intPaths, "/meta/schemas/", theSchemaName, ".rds"))
  if(is.null(archiveLink)){
    message("please type in the weblink from which the archive was downloaded: ")
    if(!testing){
      archiveLink <- readline()
    } else {
      archiveLink <- "https://gadm.org/downloads/example_geom.7z.html"
    }
    if(is.na(archiveLink)){
      archiveLink = NA_character_
    }
  }
  if(is.null(updateFrequency)){
    message(paste("please type in the frequency in which the table gets updated \n -> select one of: continual, daily, weekly, fortnightly, quarterly, biannually, annually, asNeeded, irregular, notPlanned, unknown, periodic, semimonthly, biennially: "))
    if(!testing){
      updateFrequency <- readline()
      while(!is.element(updateFrequency,
                        c("continual", "daily","weekly", "fortnightly",
                          "quarterly", "biannually", "annually", "asNeeded",
                          "irregular", "notPlanned", "unknown", "periodic",
                          "semimonthly", "biennially"))){
        # test missing
        message(paste(" -> input one of: continual, daily, weekly, fortnightly, quarterly, biannually, annually, asNeeded, irregular, notPlanned, unknown, periodic, semimonthly, biennially \n
                  please repeat: "))
        updateFrequency <- readline()
      }
    } else {
      updateFrequency <- "quarterly"
    }
    if(is.na(updateFrequency)){
      # this might fail, there is no NA_Date_
      # also, it should be impossible to land here
      updateFrequency = as.Date(NA)
    }
  }
  if(is.null(nextUpdate)){
    if(updateFrequency %in% c("asNeeded", "notPlanned", "unknown")){
      nextUpdate <- as.Date(NA)
    } else {
      message("please type in when the table gets its next update (YYYY-MM-DD): ")
      if(!testing){
        nextUpdate <- as.Date(readline(), "%Y-%m-%d")
      } else {
        nextUpdate <- as.Date("2019-10-01", "%Y-%m-%d")
      }
      if(is.na(nextUpdate)){
        # this might fail, there is no NA_Date_
        nextUpdate = as.Date(NA)
      }
    }
  }
  if(is.null(metadataLink)){
    message(paste("if there is already metadata available:\n -> type in the weblink to the metadataset: "))
    if(!testing){
      metadataLink <- readline()
    } else {
      metadataLink <- "https://ec.europa.eu/eurostat/de/table1/metadata"
    }
    if(is.na(metadataLink)){
      metadataLink = NA_character_
    }
  }
  if(is.null(metadataPath)){
    message(paste("if there was an existing metadataset downloaded:\n -> type in the local path to the metadataset: "))
    if(!testing){
      metadataPath <- readline()
    } else {
      metadataPath <- "C:/Users/arue/Projects/GeoKur/Luckinet/census/table1_meta.txt"
    }
    # FIX: this guard previously tested is.na(metadataLink) (copy-paste from
    # the block above), so a missing metadataPath was never reset to NA.
    if(is.na(metadataPath)){
      metadataPath = NA_character_
    }
  }
  if(is.null(notes)){
    notes = NA_character_
  }
  if(update){
    # registration branch: ensure stage1 archive and stage2 csv exist, then
    # append/overwrite the entry in 'inv_tables.csv'
    # test whether the archive file is available
    if(!testFileExists(x = paste0(intPaths, "/adb_tables/stage1/", fileArchive[1]))){
      message(paste0("... please store the archive '", fileArchive[[1]], "' in './adb_tables/stage1'"))
      if(!testing){
        done <- readline(" -> press any key when done: ")
      }
      # make sure that the file is really there
      assertFileExists(x = paste0(intPaths, "/adb_tables/stage1/", fileArchive[1]))
      # ... and if it is compressed, whether also the file therein is given that contains the data
      if(testCompressed(x = fileArchive[1]) & length(fileArchive) < 2){
        message(paste0("please give the name of the file in ", fileArchive[1]," that contains the table: "))
        if(!testing){
          theArchiveFile <- readline()
        } else {
          theArchiveFile <- "example_table.csv"
        }
        archive <- paste0(archive, "|", theArchiveFile)
      }
    }
    # test that the file is available
    if(!testFileExists(x = filePath, extension = "csv")){
      processedPath <- paste0(intPaths, "/adb_tables/stage2/processed/", fileName)
      if(testFileExists(x = processedPath, extension = "csv")){
        temp <- inv_tables[which(inv_tables$source_file %in% fileName), ]
        message(paste0("! the table '", fileName, "' has already been normalised !"))
        return(temp)
      }
      message(paste0("... please store the table as '", fileName, "' with utf-8 encoding in './adb_tables/stage2'"))
      if(!testing){
        done <- readline(" -> press any key when done: ")
      }
      # make sure that the file is really there
      assertFileExists(x = filePath, extension = "csv")
    }
    # put together new census database entry
    doc <- tibble(tabID = newTID,
                  geoID = geomSeries,
                  datID = dataSeries,
                  source_file = fileName,
                  schema = theSchemaName,
                  orig_file = archive,
                  orig_link = archiveLink,
                  download_date = Sys.Date(),
                  next_update = nextUpdate,
                  update_frequency = updateFrequency,
                  metadata_link = metadataLink,
                  metadata_path = metadataPath,
                  notes = notes)
    if(!any(inv_tables$source_file %in% fileName) | overwrite){
      # in case the user wants to update, attach the new information to the table inv_sourceData.csv
      updateTable(index = doc, name = "inv_tables", matchCols = c("source_file"))
    }
    return(doc)
  } else {
    # diagnostics branch (update = FALSE): check file availability and
    # whether the schema reorganises the stage2 table without errors
    stage1Exists <- testFileExists(x = paste0(intPaths, "/adb_tables/stage1/", fileArchive[1]), "r")
    stage2Exists <- testFileExists(x = filePath, "r", extension = "csv")
    if(stage2Exists){
      # thisTable <- as_tibble(read.csv(file = filePath, header = FALSE, as.is = TRUE, na.strings = schema@format$na, encoding = "UTF-8"))
      thisTable <- read_csv(file = filePath, col_names = FALSE, col_types = cols(.default = "c"))
      temp <- tryCatch(expr = reorganise(input = thisTable, schema = schema),
                       error = function(e){
                         return("There was an error message")
                       },
                       warning = function(w){
                         return("There was a warning message")
                       })
      isTable <- testDataFrame(x = temp)
      correctNames <- testNames(x = names(temp), must.include = names(schema@variables))
      if(isTable & correctNames){
        schema_ok <- "schema ok"
      } else {
        schema_ok <- temp
      }
    } else {
      schema_ok <- "not checked"
    }
    diag <- tibble(stage1_name = fileArchive[1],
                   stage2_name = fileName,
                   schema_name = schemaName,
                   stage1_ok = stage1Exists,
                   stage2_ok = stage2Exists,
                   schema = schema_ok)
    updateTable(index = diag, name = "diag_tables", matchCols = c("stage2_name"), backup = FALSE)
    return(diag)
  }
}
9a1b946f475a0cf560ea3b947481621562ca955b | fdca828567c20a6e09f766b6805f74f72f38937e | /multiple_models_pima.r | 8e95e9472ddb24c68fac2cfddebe212cab4cf5e4 | [] | no_license | rakato/NonInsulting-MachineLearning | bcd7d9ba6b6dbd5c4a1ac394c226e9bfcdc5d64c | defa660e0509a19104e01547b86f37c5beb93266 | refs/heads/master | 2021-09-27T16:21:56.427296 | 2018-11-09T15:40:54 | 2018-11-09T15:40:54 | 90,258,028 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,977 | r | multiple_models_pima.r |
set.seed(123)
library(mlbench)
library(caret)
library(corrplot)
library(fastAdaboost)#for running Adaboost from Caret package
data(PimaIndiansDiabetes)
str(PimaIndiansDiabetes)
summary(PimaIndiansDiabetes)
# correlation matrix
corrmatrix <- cor(PimaIndiansDiabetes[,1:8])
# summarize correlation matrix
corrplot(corrmatrix)
#Control the computational nuances of the train function
control <- trainControl(method="repeatedcv", number=10, repeats=3)
#method=The resampling method
#number=Either the number of folds or number of resampling iterations
#repeats= For repeated k-fold cross-validation only: the number of complete sets of folds to compute
# train the model
#The default method for optimizing tuning parameters in train is to use a grid search
model <- train(diabetes~., data=PimaIndiansDiabetes, method="lvq", preProcess="scale", trControl=control)
#ROC Curve for variable importance
importance <- varImp(model, scale=FALSE)
importance
control <- rfeControl(functions=rfFuncs, method="cv", number=10)
# run the RFE algorithm
results <- rfe(PimaIndiansDiabetes[,1:8], PimaIndiansDiabetes[,9], sizes=c(1:8), rfeControl=control)
results
# list the chosen features
predictors(results)
# plot the results
plot(results, type=c("g", "o"))
#run a number of algos on Pima Indians dataset
#CART, LDA, SVM, kNN, Random Forest
#Control the computational nuances of the train function
control <- trainControl(method="repeatedcv", number=10, repeats=3)
set.seed(123)
# CART
#The default method for optimizing tuning parameters in train is to use a grid search
fit.cart <- train(diabetes~., data=PimaIndiansDiabetes, method="rpart", trControl=control)
# LDA
fit.lda <- train(diabetes~., data=PimaIndiansDiabetes, method="lda", trControl=control)
# SVM
fit.svm <- train(diabetes~., data=PimaIndiansDiabetes, method="svmRadial", trControl=control)
# kNN
fit.knn <- train(diabetes~., data=PimaIndiansDiabetes, method="knn", trControl=control)
# Random Forest
fit.rf <- train(diabetes~., data=PimaIndiansDiabetes, method="rf", trControl=control)
#Adaboost
fit.ada <- train(diabetes~., data=PimaIndiansDiabetes, method="adaboost", trControl=control)
#Stochastic Gradient Boosting
fit.gbm<- train(diabetes~., PimaIndiansDiabetes, method="gbm", trControl=control)
# collect resamples
results <- resamples(list(CART=fit.cart, LDA=fit.lda, SVM=fit.svm, KNN=fit.knn, RF=fit.rf, ADA=fit.ada))
#compare accuracy and kappa
summary(results)
#plot results
scales <- list(x=list(relation="free"), y=list(relation="free"))
#boxplots
bwplot(results, scales=scales)
#gbm with Sonar datset
library(gbm)
data("Sonar")
dataset<- Sonar
x<- dataset[,1:60] #using these variables
y<- dataset[,61] #solving for this variable
set.seed(123)
control<- trainControl(method="repeatedcv", number=10, repeats=3)
fit.gbmsonar<- train(Class~., Sonar, method="gbm", metric="Accuracy")
#plot accuracy vs boosting iterations and tree depth
plot(fit.gbmsonar, type=c("g","o"))
|
832b8ecf12ce0437ebb3d62a1433c8e143d3c2dc | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/hddtools/examples/catalogueGRDC.Rd.R | 02b45f943e329c99001c3f4c1a45858d35576695 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 848 | r | catalogueGRDC.Rd.R | library(hddtools)
### Name: catalogueGRDC
### Title: Data source: Global Runoff Data Centre catalogue
### Aliases: catalogueGRDC
### ** Examples
## Not run:
##D # Retrieve the whole catalogue
##D GRDC_catalogue_all <- catalogueGRDC()
##D
##D # Define a bounding box
##D areaBox <- raster::extent(-3.82, -3.63, 52.41, 52.52)
##D # Filter the catalogue based on bounding box
##D GRDC_catalogue_bbox <- catalogueGRDC(areaBox = areaBox)
##D
##D # Get only catchments with area above 5000 Km2
##D GRDC_catalogue_area <- catalogueGRDC(columnName = "area",
##D columnValue = ">= 5000")
##D
##D # Get only catchments within river Thames
##D GRDC_catalogue_river <- catalogueGRDC(columnName = "river",
##D columnValue = "Thames")
## End(Not run)
|
5ea83c0db7c2e702fd786e20dee74d2ab827057a | 15edb71c431bd3be69173495e875e1c34d9184b4 | /src/hclus.R | 2dc76eab8775b99caeeb032aa6b2dd8294a7cc4e | [] | no_license | johnmyleswhite/nyc_dca_licenses | 861fceb9e4a70ea44f27a1fb042666bcd75dcb21 | eb6708015e59579ef1ad8d642e3c274f1c296ad4 | refs/heads/master | 2016-09-05T23:32:22.130507 | 2012-10-23T20:05:10 | 2012-10-23T20:05:10 | 6,280,815 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,433 | r | hclus.R | # File-Name: hclus.R
# Date: 2012-10-23
# Author: Drew Conway
# Email: drew.conway@nyu.edu
# Purpose: Perform a hierarchical clustering of NYC license data
# Data Used: ../data/licenses.csv
# Packages Used: base
# Machine: Drew Conway's MacBook Pro
# Copyright (c) 2012, under the Simplified BSD License.
# For more information on FreeBSD see: http://www.opensource.org/licenses/bsd-license.php
# All rights reserved.
# NOTE(review): setwd() in a shared script is fragile -- it assumes the
# script is run from its own directory.
setwd("../") # Only required if you are running code directly from this file
# ProjectTemplate's load.project() loads ../data (incl. log.licenses) and config.
library("ProjectTemplate")
load.project()
# Dissimilarity measures
# dissimilarityMatrix <- function(m1,m2) {
#     dissimilarity.list <- list()
#     for(i in 1:nrow(log.licenses)) {
#         dissimilarity.list[[i]] <- sapply(1:nrow(log.licenses),
#             function(j) {
#                 return(sqrt((log.licenses[i,m1] - log.licenses[j,m2])**2))
#                 })
#     }
#     return(do.call(rbind, dissimilarity.list))
# }
#
# ## Attempt 1: Root-squared difference in residential area and commercial area
# Need to take a random sample of rows because the full pairwise distance
# matrix is too large to hold in memory.
num.rows <- 5000
row.sample <- sample(1:nrow(log.licenses), num.rows)
# Take the sample
license.sample <- log.licenses[row.sample,]
# Euclidean distance on (commercial area, residential area) pairs.
resarea.comarea <- dist(cbind(license.sample$comarea, license.sample$resarea), method="euclidian")
rc.hc <- hclust(resarea.comarea)
# Store cluster assignments for tree cuts of 3 through 12 clusters.
for(i in 3:12) {
    license.sample[,paste("CUT", i, sep=".")] <- cutree(rc.hc, i)
}
# Map the 5-cluster partition, one facet per cluster.
rc.plot <- ggplot(license.sample, aes(x=x, y=y))+geom_point(aes(color=as.factor(CUT.5), size=0.01, alpha=0.25)) +
    facet_wrap(~factor(CUT.5)) +
    scale_color_brewer(type="qual", palette="Dark2", name="Cluster Partition") +
    scale_size(limits=c(0,1), guide="none") +
    scale_alpha(guide="none")
## Attempt 2: full feature set dissimilarities
full.feature <- log.licenses[1:2500,]
# NOTE(review): 'dissimilarity' is not defined anywhere in this script (the
# dissimilarityMatrix() helper above is commented out), so this line errors
# as written -- confirm where the matrix was meant to come from.
full.hc <- hclust(as.dist(dissimilarity))
for(i in 3:12) {
    full.feature[,paste("CUT", i, sep=".")] <- cutree(full.hc, i)
}
full.plot <- ggplot(full.feature, aes(x=x, y=y))+geom_point(aes(color=as.factor(CUT.5))) +
    scale_color_brewer(type="qual", palette="Dark2")
## Attempt 3: using business names
# business.sample <- business.names[row.sample,]
#
# business.hc <- hclust(dist(business.sample))
#
# for(i in 3:12) {
#     business.sample[,paste("CUT", i, sep=".")] <- cutree(business.hc, i)
# }
|
1a790e64304d04c84684052414fc35a7f0ab0afa | f67642256737632b0e4a794af02f2df1aee726b8 | /man/plot.select_arima.Rd | f7d2cfe4ed770c2c3efb438246602924e7f92da9 | [] | no_license | SMAC-Group/exts | 0a430cc0df20e85903e55eb1ed5c8be76c3c6d8a | 0aa78daff83dd4dca9fc3e166afbd2a3d726966d | refs/heads/master | 2020-04-17T05:48:48.245078 | 2016-11-14T02:14:00 | 2016-11-14T02:14:00 | 67,654,379 | 0 | 2 | null | null | null | null | UTF-8 | R | false | true | 527 | rd | plot.select_arima.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/select_arima.R
\name{plot.select_arima}
\alias{autoplot.select_arima}
\alias{plot.select_arima}
\title{Plot ARIMA Selection Matrix}
\usage{
\method{plot}{select_arima}(x, ...)
\method{autoplot}{select_arima}(object, ...)
}
\arguments{
\item{x, object}{An object that is either of type \code{\link{select_arima}}
or \code{\link{select_arma}}.}
}
\description{
Constructs a facet-wrapped graph showing model selection
criteria across multiple model terms.
}
|
864678fd19284ecb279aca5c639015108b8c01a2 | 9a82d308a3a016dad81d583f2c1730b88a23b022 | /tests/testthat/test-print_sbo_predtable.R | c02516ced053e88bbcb38933835b10f9683dfab3 | [] | no_license | vgherard/sbo | 7dd13d3b566c21842178305ee3cd7c34229a9e49 | 9d8374b42d9700298a27b6a10dc88ce6fba1feb6 | refs/heads/master | 2023-06-12T13:40:51.369831 | 2021-07-07T13:49:55 | 2021-07-07T13:49:55 | 284,348,234 | 9 | 2 | null | 2020-11-25T10:55:47 | 2020-08-01T22:20:34 | R | UTF-8 | R | false | false | 596 | r | test-print_sbo_predtable.R | context("print.sbo_predtable")
test_that("returns invisibly the correct value", {
  # Suppress the printed table; only the return value matters here.
  capture_output({
    expect_invisible(print(twitter_predtable))
    expect_identical(print(twitter_predtable), twitter_predtable)
  },
  print = FALSE)  # use FALSE, not the reassignable alias F
})
test_that("Prints simple title and an empty line followed by some text", {
  # use FALSE, not the reassignable alias F
  output <- capture_output_lines(print(twitter_predtable), print = FALSE)
  expect_length(output, 3)
  expect_identical(output[1], "A Stupid Back-Off prediction table.")
  expect_identical(output[2], "")
})
b32ca16db5e0a742ac838a66e15f0677f45a895c | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/sandwich/examples/weightsAndrews.Rd.R | e292faed5ab0c3ae9d9bdde6dee5deb4d714a83e | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,213 | r | weightsAndrews.Rd.R | library(sandwich)
### Name: weightsAndrews
### Title: Kernel-based HAC Covariance Matrix Estimation
### Aliases: weightsAndrews bwAndrews kernHAC
### Keywords: regression ts
### ** Examples
# Plot the five normalized kernel weight functions on a common axis.
curve(kweights(x, kernel = "Quadratic", normalize = TRUE),
      from = 0, to = 3.2, xlab = "x", ylab = "k(x)")
curve(kweights(x, kernel = "Bartlett", normalize = TRUE),
      from = 0, to = 3.2, col = 2, add = TRUE)
curve(kweights(x, kernel = "Parzen", normalize = TRUE),
      from = 0, to = 3.2, col = 3, add = TRUE)
curve(kweights(x, kernel = "Tukey", normalize = TRUE),
      from = 0, to = 3.2, col = 4, add = TRUE)
curve(kweights(x, kernel = "Truncated", normalize = TRUE),
      from = 0, to = 3.2, col = 5, add = TRUE)
## fit investment equation
data(Investment)
fm <- lm(RealInv ~ RealGNP + RealInt, data = Investment)
## compute quadratic spectral kernel HAC estimator
kernHAC(fm)
kernHAC(fm, verbose = TRUE)
## use Parzen kernel instead, VAR(2) prewhitening, no finite sample
## adjustment and Newey & West (1994) bandwidth selection
kernHAC(fm, kernel = "Parzen", prewhite = 2, adjust = FALSE,
        bw = bwNeweyWest, verbose = TRUE)
## compare with estimate under assumption of spheric errors
vcov(fm)
|
fdf5f144f544b0b03883ea7b3fd203aea33692ae | f1e997b1db858011c8f6221d4466fe0b03aca77f | /man/ocean.Rd | dcd00a5f4fc07ede32924d7344b4787d66a078af | [] | no_license | cran/wavelets | 39b031e357a4875af0f70bd2d43972609fc9cb3c | 8365ac9d4a96b9286133ad5f255418163c1746c3 | refs/heads/master | 2020-03-25T17:42:43.374090 | 2020-02-17T18:40:02 | 2020-02-17T18:40:02 | 17,700,847 | 4 | 5 | null | null | null | null | UTF-8 | R | false | false | 753 | rd | ocean.Rd | \name{ocean}
\docType{data}
\alias{ocean}
\title{Vertical Ocean Shear Measurements}
\description{
  Measurements of vertical ocean shear, obtained by dropping an
  instrument vertically into the ocean and recording observations at 0.1
meter intervals. Starting depth for the series is 350.0 meters and
ending depth is 1037.4 meters. For more detailed information regarding
the collection of this data, see Percival and Walden (2000), p. 193.
}
\usage{data(ocean)}
\format{A time series object containing 6875 observations.}
\source{http://www.ms.washington.edu/~s530/data.html}
\references{
Percival, D. B. and A. T. Walden (2000) \emph{Wavelet Methods for Time
Series Analysis}, Cambridge University Press, sec. 5.10.
}
\keyword{datasets}
|
5e1fb990fb5c1287989fe94e98a68955f209899a | 4f96a90b8fb44e20ff914785d547570bc736792b | /R/getOpenMap.R | f66738e450492ab60a6e0652c6b957377badf841 | [] | no_license | heibl/rornitho | d79e01a2570284290b682ae8ee10fd5b15495721 | 5997e0beaa096814f1ac55881689dd3f688ab2c9 | refs/heads/master | 2021-01-11T00:32:27.371266 | 2016-10-10T18:46:18 | 2016-10-10T18:46:18 | 48,445,074 | 1 | 2 | null | 2016-01-27T19:31:48 | 2015-12-22T17:27:17 | R | UTF-8 | R | false | false | 1,232 | r | getOpenMap.R | ## This code is part of the rornitho package
## © S. Thorn 2016 (last update 2016-01-26)
getOpenMap <- function(border, type){
  # Fetch an OpenStreetMap background for the bounding box of 'border'
  # and reproject it to the CRS used by the rornitho survey data.
  # Tile retrieval requires WGS84 longitude/latitude corners.
  wgs84_border <- spTransform(border,
                              CRS("+proj=longlat +ellps=WGS84"))
  corners <- getCorners(sp = wgs84_border)
  # Download the raw tile map covering the bounding box.
  raw_map <- openmap(upperLeft = corners$upperLeft,
                     lowerRight = corners$lowerRight,
                     type = type)
  # Reproject onto the project's transverse Mercator CRS and return it.
  openproj(raw_map,
           projection = CRS("+proj=tmerc +lat_0=0 +lon_0=9 +k=1 +x_0=3500000 +y_0=0 +ellps=bessel +units=m +no_defs"))
}
|
abaf35914f79027a80f8c57d5afdb3a201cd84ca | d8ddd7ef0bfa872dea2023b7d7abbf80b435f092 | /mixed_distribution_FFA/Code/MLE_mixedGEV.R | 31f7524c320ce022605a528cb91234c89eee51de | [] | no_license | pches/FFA-mixed-distributions | 63fb5807e21bf6fb8181599bc450b5f324a7294c | 443fab24eb9ab7425eaf2e77ab815a01d63c3849 | refs/heads/master | 2020-04-10T00:55:52.286987 | 2018-12-21T00:13:56 | 2018-12-21T00:13:56 | 160,700,276 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,834 | r | MLE_mixedGEV.R | #################################################################################
#
# -file = "MLE_mixedGEV.R" Code written June 2018
# - Author: Kenneth Joel Roop-Eckart (kjr30@psu.edu)
#
# - This code fits the Mixed GEV to the gage, historical, and paleo data
#   by using four independently initiated runs of DEoptim to search for
#   the maximum likelihood parameter values. The parameter estimate with
#   the best likelihood value is accepted as the MLE.
#
# THIS CODE IS PROVIDED AS-IS WITH NO WARRANTY (NEITHER EXPLICIT
# NOT IMPLICIT). I SHARE THIS CODE IN HOPES THAT IT IS USEFUL,
# BUT I AM NOT LIABLE FOR THE BEHAVIOR OF THIS CODE IN YOUR OWN
# APPLICATION. YOU ARE FREE TO SHARE THIS CODE SO LONG AS THE
# AUTHOR(S) AND VERSION HISTORY REMAIN INTACT.
#
# To use this function, simply source this file:
#   source("MLE_mixedGEV.R")
#
###################################################################################
################################### Run four DEoptim runs from different seeds ###########################
# load relevant libraries
library(DEoptim)   # for DEoptim function
library(fExtremes) # for GEV functions
# number of DEoptim generations per run
iter <- 2500
# Four independent DEoptim searches minimizing the negative log-likelihood
# of the mixed GEV. Seeds 1, 2, 4 and 5 are kept as in the original
# analysis for reproducibility (seed 3 was never used).
set.seed(1)
MGEVrun1<-DEoptim(GEV_gage_hist_paleo_mixed_MLE_test,upper = c(3, 250, 500,3, 250, 500,0.9), lower = c(-4, 0, 0,-4, 0, 0, 0.6),
                  data = data,gage_uncertainty=gage_uncertainty, hist = hist,
                  hist_record = hist_record, hist_uncertainty=hist_uncertainty,
                  paleo_flow_upper = paleo_flow_upper, paleo_flow_lower = paleo_flow_lower,
                  paleo_age = paleo_age,paleo_age_uncertainty=paleo_age_uncertainty,
                  paleo_flow_uncertainty=paleo_flow_uncertainty, control = DEoptim.control(itermax = iter, NP = 500, F = 0.7, CR = 0.5))
set.seed(2)
MGEVrun2<-DEoptim(GEV_gage_hist_paleo_mixed_MLE_test,upper = c(3, 250, 500,3, 250, 500,0.9), lower = c(-4, 0, 0,-4, 0, 0, 0.6),
                  data = data,gage_uncertainty=gage_uncertainty, hist = hist,
                  hist_record = hist_record, hist_uncertainty=hist_uncertainty,
                  paleo_flow_upper = paleo_flow_upper, paleo_flow_lower = paleo_flow_lower,
                  paleo_age = paleo_age,paleo_age_uncertainty=paleo_age_uncertainty,
                  paleo_flow_uncertainty=paleo_flow_uncertainty, control = DEoptim.control(itermax = iter, NP = 500, F = 0.7, CR = 0.5))
set.seed(4)
MGEVrun4<-DEoptim(GEV_gage_hist_paleo_mixed_MLE_test,upper = c(3, 250, 500,3, 250, 500,0.9), lower = c(-4, 0, 0,-4, 0, 0, 0.6),
                  data = data,gage_uncertainty=gage_uncertainty, hist = hist,
                  hist_record = hist_record, hist_uncertainty=hist_uncertainty,
                  paleo_flow_upper = paleo_flow_upper, paleo_flow_lower = paleo_flow_lower,
                  paleo_age = paleo_age,paleo_age_uncertainty=paleo_age_uncertainty,
                  paleo_flow_uncertainty=paleo_flow_uncertainty, control = DEoptim.control(itermax = iter, NP = 500, F = 0.7, CR = 0.5))
set.seed(5)
MGEVrun5<-DEoptim(GEV_gage_hist_paleo_mixed_MLE_test,upper = c(3, 250, 500,3, 250, 500,0.9), lower = c(-4, 0, 0,-4, 0, 0, 0.6),
                  data = data,gage_uncertainty=gage_uncertainty, hist = hist,
                  hist_record = hist_record, hist_uncertainty=hist_uncertainty,
                  paleo_flow_upper = paleo_flow_upper, paleo_flow_lower = paleo_flow_lower,
                  paleo_age = paleo_age,paleo_age_uncertainty=paleo_age_uncertainty,
                  paleo_flow_uncertainty=paleo_flow_uncertainty, control = DEoptim.control(itermax = iter, NP = 500, F = 0.7, CR = 0.5))
############################## AIC ##################################
# AIC = 2*NLL + 2*k, using each run's own parameter count.
# (Previously every line reused MGEVrun1's parameter vector; all runs share
# the same 7-parameter bounds so the values are unchanged, but this removes
# the copy-paste dependency on run 1.)
MGEVrun1_AIC <- 2*MGEVrun1$optim$bestval+2*length(MGEVrun1$optim$bestmem) # MGEV AIC
MGEVrun2_AIC <- 2*MGEVrun2$optim$bestval+2*length(MGEVrun2$optim$bestmem) # MGEV AIC
MGEVrun4_AIC <- 2*MGEVrun4$optim$bestval+2*length(MGEVrun4$optim$bestmem) # MGEV AIC
MGEVrun5_AIC <- 2*MGEVrun5$optim$bestval+2*length(MGEVrun5$optim$bestmem) # MGEV AIC
############################## BIC ###################################
# BIC = 2*NLL + k*log(n), where n is the total number of observations.
n_obs <- length(data)+length(hist)+length(paleo)
MGEVrun1_BIC <- 2*MGEVrun1$optim$bestval+length(MGEVrun1$optim$bestmem)*log(n_obs) # MGEV BIC
MGEVrun2_BIC <- 2*MGEVrun2$optim$bestval+length(MGEVrun2$optim$bestmem)*log(n_obs) # MGEV BIC
MGEVrun4_BIC <- 2*MGEVrun4$optim$bestval+length(MGEVrun4$optim$bestmem)*log(n_obs) # MGEV BIC
MGEVrun5_BIC <- 2*MGEVrun5$optim$bestval+length(MGEVrun5$optim$bestmem)*log(n_obs) # MGEV BIC
############################# select the run with the best likelihood as the MLE
# DEoptim minimizes the negative log-likelihood, so the smallest bestval
# is the best fit. (The original hard-coded MGEVrun5 while its comment
# claimed run 1; this makes the header's stated selection rule explicit.)
MGEV_runs <- list(MGEVrun1, MGEVrun2, MGEVrun4, MGEVrun5)
MLE_MGEV_optim <- MGEV_runs[[which.min(vapply(MGEV_runs, function(run) run$optim$bestval, numeric(1)))]]
|
dee67a92fe9d85f56079cf567bf74673c767f8a9 | 99b38a7bc8dbca88c039a334829a2bce00194348 | /R/functions/calculate.roc.in.all.settings.R | df00d92878f3a765ed16f7d6cd10b650e027034a | [] | no_license | HOPE-UIB-BIO/RateOfChange | 4c2a288766425201f3f258050855b4c4eba322f4 | b28806c3e2682f95c481f40a42d6a84ede05175b | refs/heads/master | 2023-06-05T11:32:42.914896 | 2021-06-24T10:34:09 | 2021-06-24T10:34:09 | 236,754,706 | 1 | 1 | null | 2021-06-24T10:34:11 | 2020-01-28T14:30:49 | R | UTF-8 | R | false | false | 1,271 | r | calculate.roc.in.all.settings.R | .calculate.roc.in.all.settings <- function(data){
smooth_method <- c("none", "shep", "m.avg", "age.w" ,"grim")
DC_method <- c("chord", "chisq")
res_tibble <-
expand.grid(smooth_method, DC_method) %>%
as_tibble() %>%
rename(
"smooth_type" = Var1,
"DC" = Var2) %>%
mutate_if(is.factor,as.character)
res_tibble <-
res_tibble %>%
mutate(
ROC = purrr::map2(smooth_type, DC, function(x, y){
roc_score <-
RRatepol::fc_estimate_RoC(
data_source_community = data$community_data,
data_source_age = data$age_data,
age_uncertainty = data$uncertainity_data,
smooth_method = x,
smooth_N_points = 5,
smooth_N_max = 9,
smooth_age_range = 500,
Working_Units = "MW",
rand = roc_n_rand,
standardise = T,
N_individuals = pollen_grains,
DC = y,
treads = T,
interest_threshold = age_lim,
time_standardisation = 500,
Debug = F)
roc_peak <-
RRatepol::fc_detect_peak_points(roc_score,method = "trend_non_linear")
return(roc_peak)
})
)
return(res_tibble)
}
|
35e0238109dab2d02bf37d112bc84885bc6e4d71 | 8c00cd629578d93230602be5be49d035240c6e4c | /man/MapAPAc.Rd | c77118031e9f2701496a37aea7a566393b38cc0f | [] | no_license | HHengUG/hatpal2 | d24bed94177b0d2e0e64ea1e348970dbd7fc0087 | f3cd54887feb52a408212d0673b01bd113ad94af | refs/heads/main | 2023-04-18T17:43:42.350748 | 2021-05-08T08:44:14 | 2021-05-08T08:44:14 | 363,807,829 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 973 | rd | MapAPAc.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mapAPAc.R
\name{MapAPAc}
\alias{MapAPAc}
\title{Map APAc with prebed to count region}
\usage{
MapAPAc(
RE_prebed,
SA_prebed,
APAc_file,
out_dir,
blocksize = 250,
APAdist_cut = 25,
SA_weight = 3
)
}
\arguments{
\item{RE_prebed}{RE file of one strand generated from ExtractAPA()}
\item{SA_prebed}{SA file of one strand generated from ExtractAPA()}
\item{APAc_file}{APA clusters file from FilterAPAc()}
\item{out_dir}{The directory where the output files are stored}
\item{blocksize}{Threshold for isolated nodes and clusters (default 250)}
\item{APAdist_cut}{Threshold for clustering the APA (default 25)}
\item{SA_weight}{The weight increase of the SA file (default 3)}
}
\value{
NULL, writes the output "APA_map.out" in the out_dir.
}
\description{
Map APAc with prebed to count region
}
\examples{
MapAPAc("rc_end.prebed", "sa.prebed", "APA_clusters.filtered.out", "output/")
}
|
ee7acd3245aafa423580fa7dfa237474d2ea44a3 | eb16d1b115216884fb4b95b0b1293fb6fdd6e9d9 | /utils.R | 8f895bfab6aaf43ba77cef2175e817dfd349c6bd | [] | no_license | BacotRaphael/odk-audit | b3c2c555534193faed9073fb37fe47414d99e08c | 8b84f309c3e6b42f2f791da88dbcc421b02c4607 | refs/heads/main | 2023-05-27T03:40:15.845730 | 2021-03-03T07:37:52 | 2021-03-03T07:37:52 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,611 | r | utils.R | load.audit.files <- function(directory.audits){
audit.filenames <- list.files(directory.audits, pattern="audit.csv", recursive=TRUE, full.names=TRUE)
print(paste("Loading",length(audit.filenames),"audit logs"))
for (filename in audit.filenames){
# get uuid from filename
sp <- strsplit(filename, "\\/")[[1]]
uuid <- sp[length(sp)-1]
# load file
audit <- read.csv(filename, stringsAsFactors = FALSE) %>% mutate(uuid=uuid, .before=1)
if (filename==audit.filenames[1]) res <- audit
else res <- rbind(res, audit)
}
res <- res %>%
mutate(duration=(end-start)/1000,
question=sapply(str_split(node, '\\/'), function(x){
return(x[length(x)])})) %>%
mutate(event=str_replace_all(event, " ", "."))
return(res)
}
load.audit.files.msna <- function(directory.audits){
  # Load every audit.csv found under directory.audits, tag each log with
  # the uuid of the submission (the name of its parent directory), and
  # derive per-event duration, question and section columns.
  audit.filenames <- list.files(directory.audits, pattern="audit.csv", recursive=TRUE, full.names=TRUE)
  print(paste("Loading",length(audit.filenames),"audit logs"))
  # Read all logs into a list and bind once -- avoids the O(n^2) cost of
  # growing a data frame with rbind() inside the loop.
  audits <- lapply(audit.filenames, function(filename){
    # uuid = name of the folder that contains audit.csv
    sp <- strsplit(filename, "\\/")[[1]]
    uuid <- sp[length(sp)-1]
    read.csv(filename, stringsAsFactors = FALSE) %>% mutate(uuid=uuid, .before=1)
  })
  res <- do.call(rbind, audits)
  res <- res %>% 
    # duration in seconds (audit timestamps are in milliseconds);
    # question = last path component of the node; section = third component,
    # with any repeat-group index "[k]" stripped off.
    mutate(duration=(end-start)/1000,
           question=sapply(str_split(node, '\\/'), function(x){
             return(x[length(x)])}),
           section=sapply(str_split(node, '\\/'), function(x){
             s <- x[3]
             return(ifelse(grepl("[", s, fixed = TRUE),
                           str_split(s, '\\[')[[1]][1],
                           s))})) %>% 
    # section-level events have no question of their own
    mutate(question=ifelse(!is.na(section)&is.na(question), section, question)) %>% 
    mutate(event=str_replace_all(event, " ", "."))
  return(res)
}
load.survey.data <- function(directory.survey.data, col.uuid, col.enum){
  # Load the single survey-data file (.xlsx or .csv) under
  # directory.survey.data and return a distinct uuid/enumerator lookup.
  # Enumerator names are lowercased with spaces stripped.
  filename <- list.files(directory.survey.data, pattern=".xlsx|csv", all.files=TRUE, recursive=TRUE, full.names=TRUE)
  # Guard both directions: previously an empty directory produced a cryptic
  # "argument is of length zero" error from grepl()/if below.
  if (length(filename) == 0) stop("The directory survey_data must contain 1 file.")
  if (length(filename) > 1) stop("The directory survey_data must contain only 1 file.")
  if (grepl(".xlsx", filename)) survey.data <- read_xlsx(filename)
  else if (grepl(".csv", filename)) survey.data <- read_csv(filename)
  else stop("Survey data should either be a .xlsx or .csv file.")
  survey.data <- survey.data %>% 
    mutate(uuid=!!sym(col.uuid),
           enum=str_replace_all(tolower(!!sym(col.enum)), " ","")) %>% 
    select(uuid, enum) %>% distinct()
  return(survey.data)
}
get.summary.general <- function(audit){
  # Build one long (uuid, event, n) table combining per-uuid event counts
  # with derived survey-level metrics (duration, response time, question
  # counts, average edits per question).
  durations <- audit %>%
    group_by(uuid) %>%
    summarise(event = "survey.duration", n = (max(start) - min(start)) / 1000)
  question.stats <- audit %>%
    filter(event %in% c("question", "group.questions")) %>%
    group_by(uuid) %>%
    summarise(survey.response.time = sum(duration, na.rm = TRUE),
              num.unique.questions = length(unique(question)),
              num.unique.nodes = length(unique(node)),
              avg.edits.per.question = round(n() / length(unique(question)), 2))
  # Pivot the metrics into the same (uuid, event, n) shape as the counts.
  question.stats.long <- pivot_longer(question.stats,
                                      cols = c("survey.response.time", "num.unique.questions", "num.unique.nodes", "avg.edits.per.question"),
                                      names_to = "event", values_to = "n")
  audit %>%
    group_by(uuid, event) %>%
    summarise(n = n()) %>%
    rbind(durations, question.stats.long)
}
get.summary.section <- function(audit){
  # Per-survey total time spent in each form section, then per-section
  # summary statistics (mean/sd on the log10 scale, median in seconds).
  per.survey <- audit %>%
    filter(node != "") %>%
    group_by(uuid, section) %>%
    summarise(tot.duration = sum(duration, na.rm = TRUE)) %>%
    mutate(tot.duration.log10 = log10(tot.duration))
  per.survey %>%
    group_by(section) %>%
    summarise(num.surveys = n(),
              mean = mean(tot.duration.log10),
              sd = sd(tot.duration.log10),
              median.response.time = round(median(tot.duration), 2)) %>%
    # Freeze the current ordering so plots keep the section sequence.
    mutate(section = factor(section, levels = as.character(section)))
}
get.summary.question <- function(audit){
  # Per-survey total time spent on each question, then per-question
  # summary statistics (mean/sd on the log10 scale, median in seconds).
  per.survey <- audit %>%
    filter(node != "") %>%
    group_by(uuid, question) %>%
    summarise(tot.duration = sum(duration, na.rm = TRUE)) %>%
    mutate(tot.duration.log10 = log10(tot.duration))
  per.survey %>%
    group_by(question) %>%
    summarise(num.surveys = n(),
              mean = mean(tot.duration.log10),
              sd = sd(tot.duration.log10),
              median.response.time = round(median(tot.duration), 2))
}
e5a5d1b4a1d574273262bb10ec796cd422decd8b | 1d043bca6e34da5e1e54d5d8e113d01b245048c4 | /R/lgi.helpers.R | 9be532014466ad39f14fe5763b98b4370dbbd1f2 | [] | no_license | wvengen/Rlgi | 406526b13b397fe25a8a67087e6ff90503f70055 | 404d48434fce5fb11f32989515ebdc33b065d752 | refs/heads/master | 2021-01-25T06:05:40.261187 | 2012-03-01T11:04:42 | 2012-03-01T11:04:42 | 1,541,778 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,931 | r | lgi.helpers.R | # $Id$
#determines how to split input data
lgi.split <- function (x, ncl) {
  # Partition the input into ncl roughly equal chunks for distribution
  # across cluster nodes: matrices/data frames are split by rows, lists
  # and vectors by elements. Unsupported types warn and yield NULL.
  if (is.matrix(x) || is.data.frame(x)) {
    chunks <- splitIndices(nrow(x), ncl)
    lapply(chunks, function(idx) x[idx, , drop = FALSE])
  } else if (is.list(x) || is.vector(x)) {
    chunks <- splitIndices(length(x), ncl)
    lapply(chunks, function(idx) as.array(x[idx]))
  } else {
    warning("Type not allowed to be split.")
    NULL
  }
}
# create a new Curl handle or return an existing one
lgi.curl.getHandle <- function() {
  # Lazily create a single shared RCurl handle configured with the
  # project's TLS credentials (read from the lgi.* options) and cache it
  # in the global environment so the HTTPS connection can be reused.
  # NOTE(review): exists() searches the whole search path, so an
  # unrelated object named 'lgi.curl.handle' would be returned as-is --
  # confirm this cannot happen in practice.
  if (!exists('lgi.curl.handle')) {
    assign('lgi.curl.handle', getCurlHandle(
      cainfo=getOption("lgi.cacert"),
      sslcert=getOption("lgi.certificate"),
      sslkey=getOption("lgi.privatekey")
    ), envir=.GlobalEnv)
  }
  return(lgi.curl.handle)
}
# POSTs an HTTPS request to the LGI project server
lgi.request <- function(apipath, variables=c(), files=c(), path=NA, trace=getOption("lgi.trace")) {
  # POST an HTTPS multipart request to the LGI project server.
  #   apipath   - server-relative API path, appended to the lgi.server option
  #   variables - named form fields
  #   files     - local file paths attached as uploaded_file_<i>
  #   path      - full URL override; when NA it is built from lgi.server
  # Returns the server's raw text response.
  data <- as.list(variables)
  # seq_along() is safe for an empty 'files' vector, so no length guard is
  # needed (the old 1:length(files) form would iterate over c(1, 0)).
  for (i in seq_along(files)) {
    data[[paste('uploaded_file_', i, sep='')]] <- fileUpload(files[i])
  }
  # (An unused 'headers' local that was never passed to postForm() has
  # been removed.)
  if (is.na(path)) path <- paste(getOption("lgi.server"), apipath, sep='')
  return(postForm(path, .params=data, style='httppost', curl=lgi.curl.getHandle(), .opts=list(
    verbose=as.logical(trace)
  )))
}
# return XML document containing job information
lgi.qstat <- function(jobid=NULL, trace=getOption("lgi.trace")) {
  # Query the LGI server for job state and return the parsed <response>
  # XML node. With jobid NULL, c() silently drops the NULL element, so no
  # job_id field is sent and the server reports all of the user's jobs.
  args <- c(
    'project' = getOption('lgi.project'),
    'user' = getOption('lgi.user'),
    'groups' = getOption('lgi.groups'),
    'job_id' = jobid
  )
  result <- lgi.request('/interfaces/interface_job_state.php', args, trace=trace)
  # Parse the XML answer and unwrap down to the <response> node.
  result <- xmlRoot(xmlTreeParse(result, asText=TRUE))
  result <- result[["response"]]
  # An <error><message> child signals a server-side failure.
  if (!is.null(result[["error"]])) stop(xmlValue(result[["error"]][["message"]]))
  return(result)
}
# return job status string from XML result (qsub/qstat)
lgi.job.status <- function(xml) {
  # Extract the job state string; accepts either a full <response>
  # document or a <job> node.
  job <- if (xmlName(xml) == "response") xml[["job"]] else xml
  xmlValue(job[["state"]])
}
# return job id from XML result (qsub/qstat)
lgi.job.id <- function(xml) {
  # Extract the numeric job id; accepts either a full <response>
  # document or a <job> node.
  job <- if (xmlName(xml) == "response") xml[["job"]] else xml
  as.integer(xmlValue(job[["job_id"]]))
}
# return job title from XML result (qsub/qstat), if any
lgi.job.title <- function(xml) {
  # Extract the job title (if any) from <job_specifics>; accepts either a
  # full <response> document or a <job> node.
  job <- if (xmlName(xml) == "response") xml[["job"]] else xml
  xmlValue(job[["job_specifics"]][["title"]])
}
# return repository url from XML result (qsub/qstat)
lgi.job.repourl <- function(xml) {
  # Extract the job's repository URL from <job_specifics>; accepts either
  # a full <response> document or a <job> node.
  job <- if (xmlName(xml) == "response") xml[["job"]] else xml
  xmlValue(job[["job_specifics"]][["repository_url"]])
}
# return job input from XML result (qsub/qstat)
lgi.job.input <- function(xml) {
  # Extract and hex-decode the job input; accepts either a full
  # <response> document or a <job> node.
  job <- if (xmlName(xml) == "response") xml[["job"]] else xml
  lgi.hexbin(xmlValue(job[["input"]]))
}
# return output from XML result (qstat)
lgi.job.output <- function(xml) {
  # Extract and hex-decode the job output; accepts either a full
  # <response> document or a <job> node.
  job <- if (xmlName(xml) == "response") xml[["job"]] else xml
  lgi.hexbin(xmlValue(job[["output"]]))
}
# submit LGI job directly, return XML node containing job information
# jobSpecifics can be either a string, or a list which will be converted to xml
lgi.qsub <- function(rcode, application=NA, files=c(), targetResources='any', jobSpecifics=NA, writeAccess=NA, readAccess=NA, trace=getOption("lgi.trace")) {
  # Submit a job to the LGI project server; returns the parsed <response>
  # XML node describing the accepted job.
  #   rcode        - job input; hex-encoded via lgi.binhex() before sending
  #   application  - defaults to the 'lgi.application' option when NA
  #   jobSpecifics - a string, or a named list converted to XML below
  if (is.na(application))
    application=getOption('lgi.application')
  # A named list carries a 'names' attribute while a plain string / NA
  # does not, so attributes() distinguishes the two accepted forms.
  # NOTE(review): this is a heuristic -- confirm callers only pass a
  # character scalar or a named list here.
  if (!is.null(attributes(jobSpecifics)))
    jobSpecifics <- paste(mapply(function(k,v){toString(xmlNode(k,v))}, k=names(jobSpecifics), v=jobSpecifics), collapse='')
  # na.omit() drops the optional fields that were left at their NA
  # defaults, so they are not transmitted at all.
  args <- na.omit(c(
    'project' = getOption('lgi.project'),
    'user' = getOption('lgi.user'),
    'groups' = getOption('lgi.groups'),
    'application' = application,
    'target_resources' = targetResources,
    'job_specifics' = jobSpecifics,
    'write_access' = writeAccess,
    'read_access' = readAccess,
    'input' = lgi.binhex(rcode),
    'number_of_uploaded_files' = length(files)
  ))
  result <- lgi.request('/interfaces/interface_submit_job.php', args, files, trace=trace)
  # parse output: unwrap to <response>, raising any server-reported error
  result <- xmlRoot(xmlTreeParse(result, asText=TRUE))
  result <- result[["response"]]
  if (!is.null(result[["error"]])) stop(xmlValue(result[["error"]][["message"]]))
  return(result)
}
# retrieve filenames from repository to current directory (reads full file into memory)
lgi.file.get <- function(repo, files, trace=getOption('lgi.trace')) {
  # Download each named file from the repository URL into the current
  # directory. The whole file is read into memory before writing.
  for (fn in files) {
    # I've tried using curlPerform with file=CFILE(fn, mode='wb')@ref
    # but the last chunk would not be written until R exit and I've
    # found no way to close the CFILE.
    content <- getBinaryURL(paste(repo, fn, sep='/'),
      curl=lgi.curl.getHandle(),
      verbose=as.logical(trace)
    )
    f <- file(fn, 'wb')
    # Ensure the connection is closed even if writeBin() fails, so a
    # failed transfer no longer leaks an open file handle.
    tryCatch(writeBin(content, f), finally = close(f))
  }
}
# upload files to repository
lgi.file.put <- function(repo, files, trace=getOption('lgi.trace')) {
  # Upload each local file to the repository URL via HTTP PUT, streaming
  # directly from disk through an RCurl CFILE handle.
  # NOTE(review): the CFILE handle is never explicitly closed (the
  # sibling lgi.file.get() notes CFILE cannot be closed from R) -- it is
  # presumably released when garbage-collected; confirm.
  for (fn in files) {
    result <- curlPerform(url=paste(repo, basename(fn), sep='/'),
      upload=TRUE,
      readdata=CFILE(fn, mode='rb')@ref,
      infilesize=file.info(fn)$size,
      curl=lgi.curl.getHandle(),
      verbose=as.logical(trace)
    )
  }
}
# list files in repository
lgi.file.list <- function(repo, trace=getOption('lgi.trace')) {
  # List the files in a repository: query the server's
  # repository_content.php endpoint and parse its XML answer into a
  # named list (file name -> list of that entry's child values).
  # Split the repository URL into the server base and the directory name.
  server <- sub('[^/]+$', '', repo)
  dir <- sub('^.*/', '', repo)
  # Fetch the XML content listing over the shared TLS handle.
  listing <- getURL(paste(server, '../repository_content.php?repository=', dir, sep=''),
    curl=lgi.curl.getHandle(),
    verbose=as.logical(trace)
  )
  root <- xmlRoot(xmlTreeParse(listing, asText=TRUE))
  files <- c()
  for (entry in xmlChildren(root)) {
    files[[xmlAttrs(entry)[['name']]]] <- xmlApply(entry, xmlValue)
  }
  files
}
lgi.binhex <- function(b) {
  # Encode a string as lowercase hex, byte by byte. charToRaw() already
  # yields one raw per byte and raws print as two lowercase hex digits,
  # so no per-character loop is needed -- this also handles multi-byte
  # (UTF-8) characters correctly, unlike the old strsplit-per-character
  # approach.
  paste(charToRaw(b), collapse = '')
}
lgi.hexbin <- function(h) {
  # Decode a hex string (two digits per byte) back into text.
  # Guard the empty string: seq(1, 0, by = 2) raised a "wrong sign in
  # 'by' argument" error before.
  if (nchar(h) == 0) return("")
  starts <- seq(1, nchar(h), by = 2)
  bytes <- strtoi(substring(h, starts, starts + 1), base = 16L)
  rawToChar(as.raw(bytes))
}
# this function is mainly for testing, it doesnt logically serve a purpose
# because it is rarely a good job to block for a single submission
# it will always be slower, the only use case is if the submission machine is loaded.
lgi.run <- function(..., debug=getOption("lgi.debug"), trace=getOption("lgi.trace")) {
  # Submit a job and block until it leaves the LGI queue, polling every
  # 4 seconds; returns the job result. Mainly for testing -- blocking on
  # a single submission is rarely useful, since it is always slower; the
  # only use case is if the submission machine is loaded.
  jobid <- lgi.submit(..., debug=debug, trace=trace)
  # lgi.qstat() takes only (jobid, trace); passing debug= previously
  # raised an "unused argument" error.
  status <- lgi.job.status(lgi.qstat(jobid, trace=trace))
  while (status %in% c("queued", "scheduled", "running")) {
    Sys.sleep(4)
    status <- lgi.job.status(lgi.qstat(jobid, trace=trace))
    cat("Job status:", status, "\r")
  }
  cat("\n")
  if (status != "finished") stop("Job did not finish successfully")
  return(lgi.result(jobid, debug=debug, trace=trace))
}
|
a1795654133c9a64520ce355deba32984e42830f | b783e9d1cd71114b962d8498c9fea549db9e57e1 | /_site/articles-pdf-bak/rmr2/rmr2_lib.R | 3e01bb30aded82863a0934b750e976e7a2f6a689 | [
"MIT",
"WTFPL",
"OFL-1.1",
"GPL-1.0-or-later",
"BSD-3-Clause",
"CC-BY-3.0-US"
] | permissive | micheleusuelli/micheleusuelli.github.io | eccce53dac2c197cdd23ef2d65df4336a999654c | c6199af2a56ceee235d25e4c1cfd5ddbfd39fc55 | refs/heads/master | 2023-01-22T21:32:05.702223 | 2023-01-14T19:17:12 | 2023-01-14T19:17:12 | 93,247,084 | 2 | 1 | MIT | 2023-01-13T18:29:13 | 2017-06-03T12:35:06 | HTML | UTF-8 | R | false | false | 888 | r | rmr2_lib.R |
library(data.table)
# Build a key/value pair in the structure expected by mapreduce():
# a two-element list with components named 'key' and 'val'.
keyval <- function(key, val){
  pair <- list(key, val)
  names(pair) <- c("key", "val")
  pair
}
# In-memory toy MapReduce (mimicking the rmr2 interface).
# 'input' is either a data.frame -- split into row chunks of about
# 'chunk_size' rows, each chunk passed to map(NULL, chunk) -- or a list of
# keyval() pairs, each passed to map(key, val).  The map outputs are bound
# into one data.table (the "shuffle"), reduce(key, val) is applied per key,
# and the result is returned as a keyval() pair.
# NOTE(review): assumes map() returns something coercible to a data frame
# with columns named 'key' and 'val'; the reduce step and the final keyval()
# depend on those column names -- TODO confirm against callers.
mapreduce <- function(
  input,
  map,
  reduce,
  chunk_size = 10
) {
  # prepare the input
  if(is(input, "data.frame")){
    table_input <- data.table(input)
    # chunk id = floor(row index / chunk_size); the first chunk is one row short
    table_input[, chunk := floor(seq(1, .N) / chunk_size)]
    table_map <- table_input[
      ,
      map(NULL, data.frame(.SD, stringsAsFactors = FALSE)),
      by = chunk]
  } else if(is(input, "list")){
    map_outputs <- lapply(input, function(input_el){
      map(input_el$key, input_el$val)
    })
    # shuffle
    list_to_shuffle <- lapply(map_outputs, function(map_el){
      data.frame(map_el, stringsAsFactors = FALSE)
    })
    table_map <- data.table(do.call(rbind, list_to_shuffle))
  }
  # reduce
  table_reduce <- table_map[, reduce(key, val), by = key]
  keyval(table_reduce[, key], table_reduce[, val])
} |
26fd0c82f39d9366487ec3e47a3e88108de2fb1a | 9aafde089eb3d8bba05aec912e61fbd9fb84bd49 | /codeml_files/newick_trees_processed/13469_0/rinput.R | 285fff258df203f6c0c3880c1657ae2f64da27ee | [] | no_license | DaniBoo/cyanobacteria_project | 6a816bb0ccf285842b61bfd3612c176f5877a1fb | be08ff723284b0c38f9c758d3e250c664bbfbf3b | refs/heads/master | 2021-01-25T05:28:00.686474 | 2013-03-23T15:09:39 | 2013-03-23T15:09:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 137 | r | rinput.R | library(ape)
# Read the gene tree and strip its root; the following (unchanged) line
# writes 'unrooted_tr' back out for codeml.
unrooted_tr <- unroot(read.tree("13469_0.txt"))
write.tree(unrooted_tr, file="13469_0_unrooted.txt") |
b7792017888e2028d8609774d9b0ce6b70a1e435 | 9aafde089eb3d8bba05aec912e61fbd9fb84bd49 | /codeml_files/newick_trees_processed/9300_0/rinput.R | c10f91f61681b3d9b587c380863f92341e785dcb | [] | no_license | DaniBoo/cyanobacteria_project | 6a816bb0ccf285842b61bfd3612c176f5877a1fb | be08ff723284b0c38f9c758d3e250c664bbfbf3b | refs/heads/master | 2021-01-25T05:28:00.686474 | 2013-03-23T15:09:39 | 2013-03-23T15:09:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 135 | r | rinput.R | library(ape)
# Read the gene tree and strip its root; the following (unchanged) line
# writes 'unrooted_tr' back out for codeml.
unrooted_tr <- unroot(read.tree("9300_0.txt"))
write.tree(unrooted_tr, file="9300_0_unrooted.txt") |
3c143c001325350875f617afb76b9a44ce0073f2 | aabebe5c761be602ee434a0348bef446b4933683 | /Module 3/M3T2/M03_T02_E19_Vectors.R | 9352494377bd8e45c06bbc3c2f4169d89e3af48f | [] | no_license | Stephyj2/R-Programming | d2d927e9dcd2846d42247411580d063b99477a9f | f332649dc8f6f86fcdaefb80cb994defc515f6c0 | refs/heads/master | 2022-12-13T08:58:03.897528 | 2020-07-31T08:05:30 | 2020-07-31T08:05:30 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,579 | r | M03_T02_E19_Vectors.R | # Vectors
# Teaching script: atomic vector types, creation, subsetting, modification,
# arithmetic (with recycling) and set operations.  Intended to be run line
# by line in an interactive session; bare expressions print their value.
# Vector hierarchy in R
v_logical <- c(T,F,T) # logical vector
v_logical
is.vector(v_logical)
is.atomic(v_logical)
typeof(v_logical)
v_integer <- c(1L,2L,5L) # integer vector
v_integer
is.vector(v_integer)
is.atomic(v_integer)
typeof(v_integer)
v_double <- c(1.3,2.1,5.0) # double vector
v_double
is.vector(v_double)
is.atomic(v_double)
typeof(v_double)
v_character <- c("a", "b", "c") # character vector
v_character
is.vector(v_character)
is.atomic(v_character)
typeof(v_character)
v_NULL <- NULL # NULL
v_NULL
typeof(v_NULL)
# Mix type vector (type coercion or conversion)
v_mix <- c(T, 1L, 1.25, "a")
v_mix # all elements converted to characters (based on hierarchy)
is.vector(v_mix)
typeof(v_mix)
# Vector properties
v <- c(1,2,3,4,5)
# vector length
length(v)
# type
typeof(v)
class(v)
# naming elements
names(v) # without names
vnames <- c("first", "second", "third", "fourth", "fifth") # element names
names(v) <- vnames # naming elements
v
names(v) # new names
# Create vector, access elements, modify vector
# create using c()
v <- c(1,3,5,8,0)
# create using operator :
1:100
10:-10
# using sequence seq()
v <- seq(from = 1, to = 100, by = 1)
v
v <- seq(from = 0, to = 1, by = 0.01)
v
v <- seq(from = 0, to = 10, length.out = 5)
v
# let's create a vector for accessing vector elements
v <- 1:10
names(v) <- c("a", "b", "c", "d", "e", "f", "g", "h", "i", "j")
v
# access vector elements using integer vector index
v[c(1,5,10)]
v[1:5] # range index selection (slicing)
v[seq(from = 1, to = 9, by = 2)]
v[10:1] # reverse order selection
v[c(10,1,5,3)] # mixed order selection
# access vector elements using logical vector index
v[c(T,F,F,F,F,F,F,F,F,F)] # access first element
v[c(F,F,F,F,F,F,F,T,T,T)] # access last three elements
# access elements using names
v[c("a","c","e")]
v[c("a", "b", "c", "d", "e", "f", "g", "h", "i", "j")]
# modify vector elements
v
v[2] <- 20 # alter second element
v
v[c(1,5,10)] <- c(0,0,0) # alter multiple elements
v
# modify elements with value 0
v
v[v==0] # filter with condition
v[v==0] <- 1000
v
# truncate vector to first 3 elements
v <- v[1:3]
v
# transpose vector change row to column vector or vice versa
v
t(v)
# delete or remove a vector
v <- NULL
v
rm(v)
# combine 2 different vectors
v1 <- 1:3
v2 <- 100:105
v1
v2
v3 <- c(v1,v2) # combine vectors
v3
# repeat elements of a vector
rep(x = v1, times = 2)
rep(x = v1, times = 100)
rep(10,10)
# Vector arithmetics
# vector - scalar (scalar with each vector element)
v <- 1:5
a <- 10
v
a
# Addition +
v + a
# Subtraction -
v - a
# Multiplication *
v * a
# Division /
v / a
# Exponent ^ **
v^a
# Modulus (Remainder from division) %%
v %% 2
# Integer Division %/%
v %/% 2
# Other functions on vector elements
sqrt(v)
log(v)
sum(v)
# vector - vector (vector element to element | member-by-member)
v1 <- seq(10,30,10)
v2 <- rep(3,3)
# Addition +
v1 + v2
# Subtraction -
v1 - v2
# Multiplication *
v1 * v2
# Division /
v1 / v2
# Exponent ^ **
v1^v2
# Modulus (Remainder from division) %%
v1 %% v2
# Integer Division %/%
v1 %/% v2
# Vector-matrix style multiplication
v1
v2
10*3 + 20*3 + 30*3
t(v1) %*% v2
v1 %*% v2
v1 %*% t(v2)
# Recycling rule
v1 <- c(1,1,1)
v2 <- 1:6
v1
v2
v1 + v2
# Set operations
v1 <- c("a", "b", "c")
v2 <- c("c", "d", "e")
v1
v2
union(v1,v2) # union of both sets (all unique elements)
intersect(v1,v2) # intersection of both sets (elements in both sets)
setdiff(v1,v2) # difference of elements (elements in v1 and not in v2)
identical(v1, v2) # check if vectors are identical
identical(c(1,2,3), c(1,2,3))
|
7845711cba081b512359d7a15036fad1ef9f0d4e | d3f9da41531daed9649cd22460d8a12054193599 | /pkg/R/utility_fix_p.R | cc9de69557b9c5e15dd576d50763c036eae7f58b | [] | no_license | johnros/ParalSimulate | f60a34c0809f465101435e4a57c89a4baa1f3d78 | 2788562ad4512b4ec02acfd42d7568941c8ea7ba | refs/heads/master | 2021-01-15T14:35:58.785414 | 2016-01-28T09:17:39 | 2016-01-28T09:17:39 | 28,912,845 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 15,474 | r | utility_fix_p.R |
# First order approximation of MSE for OLS in fix_p:
# trace(Cov) ~ p * sigma^2 / N, with N = n*m total observations split
# over m machines of n samples each.
ApproxMSE_OLS_fixp <- function(n,p,m,sigma.sq){
  sigma.sq * p / (m * n)
}
## Testing:
# ApproxMSE_OLS_fixp(100,10,2, 1)
# Second order approximation of MSE for OLS in fix_p fix_N:
# Returns the p x p error matrix; its trace is the scalar MSE
# approximation (see ApproxMSE_OLS_fixp2 below).
ApproxMSE_OLS_fixp2.matrix <- function(n, p, m, sigma.sq, Sigma=diag(p)){
  precision <- solve(Sigma)                 # Sigma^{-1}
  # First-order coefficient: zeta1 = sigma^2 * Sigma^{-1}.
  zeta.first <- sigma.sq * precision
  # Second-order coefficients: zeta2 = -(1+p)*sigma^2*Sigma^{-1} and
  # zeta3 = zeta4 = +(1+p)*sigma^2*Sigma^{-1}, so the combination
  # 2*zeta2 + zeta3 + 2*zeta4 collapses to +(1+p)*sigma^2*Sigma^{-1}.
  zeta.second <- sigma.sq * precision * (1 + p)
  # The zeta0 (machine-bias) term vanishes for OLS, so it is omitted.
  zeta.first / (m * n) + zeta.second / (m * n^2)
}
## Testing:
# ApproxMSE_OLS_fixp2.matrix(n=100, p=10, m=3, sigma.sq=1 )
# Scalar second-order MSE approximation: the trace of the error matrix above.
# NOTE(review): Trace() is not base R -- presumably supplied by a package
# loaded elsewhere (e.g. psych or matrixcalc); TODO confirm.
ApproxMSE_OLS_fixp2 <- function(...){
	Trace(ApproxMSE_OLS_fixp2.matrix(...))
}
## Testing:
# ApproxMSE_OLS_fixp2(n=100, p=10, m=3, sigma.sq=1)
# First order approximation of MSE for Ridge in fix_p:
# squared-bias term (shrinkage of the true beta) plus variance term, both
# decaying like 1/(n*m).  SSQ() (sum of squares, defined elsewhere in the
# package) supplies ||beta||^2.
ApproxMSE_Ridge_fixp <- function(n, p, m, lambda, sigma.sq, beta){
  # Configuration rows may carry beta wrapped in a list; flatten it.
  if(is.list(beta)) beta <- unlist(beta)
  shrink <- lambda^2 / (1 + lambda)^4
  bias.part <- (p + 1) * shrink * SSQ(beta)
  noise.part <- p * sigma.sq
  (bias.part + noise.part) / (n * m)
}
## Testing:
# ..p <- 1e1
# ApproxMSE_Ridge_fixp(n = 1e3, p = ..p, lambda = 2, beta = runif(..p))
# Second order approximation of MSE for Ridge in fix_p:
# Helper: the recurring ridge shrinkage ratio lambda^x / (1 + lambda)^y.
ll <- function(lambda,x,y){
  numer <- lambda^x
  denom <- (1 + lambda)^y
  numer / denom
}
# Second-order approximation of the MSE *matrix* of the distributed ridge
# estimator.  B = beta beta' and A = ||beta||^2 * I capture the bias
# direction; l(x,y) = lambda^x/(1+lambda)^y are shrinkage factors (ll()
# above); SSQ() (sum of squares) is defined elsewhere in the package.
ApproxMSE_Ridge_matrix <- function(n, p, m, lambda, sigma.sq, beta){
	B <- outer(beta, beta)
	I <- diag(p)
	A <- SSQ(beta) * I
	l <- function(x,y) ll(lambda,x,y)
	# Expansion coefficients; the zeta0 term carries weight (m-1)/(m*n^2)
	# below, so it contributes only when the data is split (m > 1).
	zeta0 <- l(2,6) * (p+1)^2 * B
	zeta1 <- l(2,4)*(B+A) + l(0,2)*sigma.sq*I
	zeta2 <- - l(2,5)*(B*(p+4)+A*(p+2))- l(0,3)*(p+1)*sigma.sq*I
	zeta3 <- l(2,6)*B*(p^2+p+5) + l(2,6)*A*(p+2)+ l(0,4)*sigma.sq*(p+1)*I
	zeta4 <- l(2,5)*B*(2*p+5) + l(2,5)*A*(2*p+3) + l(0,3)*sigma.sq*(p+1)*I
	N1 <- m*n
	N2 <- m*n^2
	M <- (m-1)/(m*n^2) * zeta0 + 1/N1 * zeta1 + 1/N2 * (2* zeta2 + zeta3+ 2*zeta4)
	return(M)
}
## Testing:
# sum(diag(ApproxMSE_Ridge_matrix(100,10,5,2,1,rnorm(10))))
# Scalar second-order ridge MSE: the trace of ApproxMSE_Ridge_matrix().
ApproxMSE_Ridge_fixp2 <- function(n, p, m, lambda, sigma.sq, beta){
  if(is.list(beta)) beta <- unlist(beta)
  err.matrix <- ApproxMSE_Ridge_matrix(n, p, m, lambda, sigma.sq, beta)
  sum(diag(err.matrix))
}
## Testing:
# ApproxMSE_Ridge_fixp2(100,10,5,2,1,rnorm(10))
# Generate true parameters
# Draw a random Gaussian coefficient vector of length p, rescaled to have
# Euclidean norm beta.norm.
makeBetasRandom <- function(p, beta.norm){
	beta <- rnorm(p)
	# Use sqrt(sum(beta^2)) rather than sqrt(beta %*% beta): %*% returns a
	# 1x1 matrix, and dividing a length-p vector by a 1x1 matrix is an error
	# in R ("dims [product 1] do not match the length of object").
	beta <- beta / sqrt(sum(beta^2)) * beta.norm
	return(beta)
}
## Testing:
# makeBetasRandom(100)
# Deterministic coefficient vector (1, 2, ..., p), rescaled to Euclidean
# norm beta.norm; handy for reproducible configurations.
makeBetasDeterministic <- function(p, beta.norm){
	beta <- 1:p
	# Use sqrt(sum(beta^2)) rather than sqrt(beta %*% beta): %*% returns a
	# 1x1 matrix, and dividing a length-p vector by a 1x1 matrix is an error
	# in R ("dims [product 1] do not match the length of object").
	beta <- beta / sqrt(sum(beta^2)) * beta.norm
	return(beta)
}
## Testing:
# makeBetasDeterministic(100)
# Populate the global ad-hoc test fixtures (.reps, .m, .p, .n, .lambda,
# .kappa, .model) used by the interactive "## Testing:" snippets throughout
# this file.  Writes to the global environment via <<- on purpose; it is a
# convenience for interactive work and should not be called by package code.
# 'my.ols' (default model) is defined elsewhere in the package.
makeTest <- function(reps=1e1, 
		m=c(1e1,1e2), 
		p=5e1, 
		n=c(1e2,1e3), 
		kappa=0.1,
		lambda=2,
		model=my.ols){
	.reps <<- reps
	.m <<- m
	.p <<- p
	.n <<- n
	.lambda<<- lambda
	.kappa <<- kappa
	.model <<- model
}
## Testing
# makeTest()
# .lambda
# Return a frame with all simulation configurations
# Builds the full factorial grid (expand.grid) over the supplied parameter
# values, then attaches derived columns: p computed from kappa when only
# kappa is given, total sample size N = n*m, the true coefficient vector
# 'beta' (via beta.maker) and its target 'beta.star' (via beta.star.maker).
# Configurations with p >= n are dropped.  Requires dplyr and magrittr
# (%<>%) to be attached.
# NOTE(review): the is.na(p)/is.na(kappa) guards assume scalar p and kappa;
# vector inputs would warn or misbehave -- TODO confirm intended usage.
makeConfiguration <- function(reps, 
		m, 
		p=NA, 
		kappa=NA, 
		n, 
		lambda=NA,
		model, 
		link=identity, 
		sigma=1,
		beta.maker, 
		beta.norm,
		beta.star.maker,
		data.maker,
		name){
  if(is.na(p) && is.na(kappa)) stop('Either p or kappa are required!')
  if(!is.na(p) && !is.na(kappa)) stop('Only p or kappa are required!')
  # One row per combination of the supplied parameter values:
  configurations.frame <- expand.grid(replications=reps, 
		m=m, 
		p=p, 
		kappa=kappa,
		n=n, 
		model=list(model), 
		link=c(link), 
		sigma=sigma,
		data.maker=c(data.maker),
		lambda=lambda,
		name=name,
		beta.norm=beta.norm )
  if(is.na(p)) {
    # High-dimensional regime: p grows with n at rate kappa.
    configurations.frame %<>% mutate(p=n*kappa) 
  }
  configurations.frame %<>% filter(p<n) 
  configurations.frame %<>% mutate(N=n*m)
  # True coefficients and the (lambda-dependent) estimand, per configuration:
  configurations.frame$beta <- lapply(configurations.frame$p, 
		beta.maker, beta.norm=beta.norm)
  configurations.frame$beta.star <- lapply(configurations.frame$beta, 
		beta.star.maker, lambda=lambda)
  # Add theoretical performances 
  return(configurations.frame)
}
## Testing:
# OLS:
# makeTest()
# .configurations <- makeConfiguration(reps = .reps, m = .m, p = .p, n = .n, kappa = .kappa, lambda = 2, model = .model, link = identity, sigma = 1, beta.maker = makeBetasRandom, beta.star.maker = identityBeta, data.maker = makeRegressionData)
# .configurations %>% dim
# .configurations %>% names
# str(.configurations)
# .configurations$beta
# Ridge:
# makeTest()
# .configurations <- makeConfiguration(reps = .reps, m = .m, p = .p, n = .n, kappa = .kappa, lambda = 2, model = .model, link = identity, sigma = 1, beta.maker = makeBetasRandom, beta.star.maker = identityBeta, data.maker = makeRegressionData)
# .configurations %>% dim
# .configurations %>% names
# str(.configurations)
# .configurations$beta
# Take betas and make sample:
## betas are assumed to have unit variance.
# Simulate a regression data set: N rows of p standard-normal predictors,
# responses y = link(X beta) + Gaussian noise with sd = sigma.
makeRegressionData <- function(p, N, beta, link, sigma,...){
  # When invoked via do.call() on a configuration row, link and beta
  # arrive wrapped in one-element lists; unwrap them first.
  if(is.list(link)) link <- link[[1]]
  if(is.list(beta)) beta <- beta[[1]]
  # Draw the design first, then the noise, to keep the RNG stream stable.
  X <- matrix(rnorm(p*N), nrow=N, ncol=p)
  signal <- link(X %*% beta)
  noise <- rnorm(N, 0, sd=sigma)
  list(y = signal + noise, X = X)
}
## Testing:
# .p <- 1e2
# .N <- 1e4
# .betas <- makeBetas(p = .p)
# makeRegressionData(.p, .N, .betas, identity, 1)
# do.call(makeRegressionData, configurations[1,])
# Fit 'model' once on the pooled data (centralized) and once split over m
# machines, averaging the machine-wise coefficient vectors (the
# embarrassingly-parallel estimator).  'model' is a list with accessors
# $fitter(y, x, ...) and $coefs(fit).  Returns list(averaged, centralized).
# NOTE(review): rep(1:m, times = N/m) assumes m divides N; also requires
# magrittr (%>%) to be attached.
analyzeParallel <- function(data, m, model, N, p, ...){
  y <- data$y
  X <- data$X
  FITTER <- model$fitter
  COEFS <- model$coefs
  ## Centralized Solution:
  the.fit <- FITTER(y=y, x=X, ...)
  center.coefs <- COEFS(the.fit)
  ## Parallelized solution:
  # Balanced machine labels, randomly permuted over the observations:
  machine.ind <- rep(1:m, times = N/m)[sample(1:N)]
  # Check if there are enough observations per machine 
  .min <- machine.ind %>% table %>% min
  if(.min < p) stop('Not enough observations per machine')
  # One column of fitted coefficients per machine:
  machine.wise <- matrix(NA, ncol = m, nrow = ncol(X))
  for(i in seq_len(m)){
    .the.fit <- FITTER(y=y[machine.ind==i], x=X[machine.ind==i,],...)
    .coefs<- COEFS(.the.fit)
    machine.wise[,i] <- .coefs
  }
  averaged.coefs <- rowMeans(machine.wise, na.rm=TRUE) # aggregate by averaging
  result <- list(averaged=averaged.coefs, 
                 centralized=center.coefs)
  return(result)
}
## Testing:
# Get single configuration, make data and return errors for parallelized and distributed:
# One Monte-Carlo pass: simulate a data set for this configuration row, run
# both estimators via analyzeParallel, and return each estimator's error
# relative to the target beta.star.
# NOTE(review): relies on 'configuration' arriving as a list (e.g. a row
# produced by apply), so that configuration$beta.star is a plain numeric
# vector -- TODO confirm.
getErrors <- function(configuration){
  data.maker <- configuration[['data.maker']] # how to create data?
  data <- do.call(data.maker, configuration) # make data
  coefs <- do.call(analyzeParallel, c(list(data=data), configuration)) # compute error
  errors <- list( # subtract estimates from truth
      averaged= coefs$averaged - configuration$beta.star,
      centralized= coefs$centralized - configuration$beta.star)
  return(errors)
}
## Testing:
# .beta <- makeBetas(.p)
# .beta.star <- .beta
# makeTest(n=c(1e4))
# .configurations <- makeConfiguration(.reps, .m, .p, .n, .kappa, .model, .params, beta=.beta, beta.star=.beta.star)
# .errors <- getErrors( .configurations[1,])
# plot(averaged~centralized, data=.errors)
# Compute the bias from the output of replicateMSE (complicated structure!)
# Euclidean norm of the empirical bias of the averaged estimator; see
# getBias() for the expected structure of 'x'.  SSQ() (sum of squares) is
# defined elsewhere in the package.
getBiasNorm <- function(x) {
  sqrt(SSQ(getBias(x)))
}
## Testing
# Get raw errors of averaged estimator
# 'x' is the matrix returned by apply(configurations, 1, replicateMSE):
# its 'errors' row holds, per replication, a list with an 'averaged'
# error vector.  Binds those vectors column-wise and averages over
# replications, yielding the empirical bias vector.
getBias <- function(x){
  per.replication <- lapply(x['errors',], function(e) e[['averaged']])
  rowMeans(do.call(cbind, per.replication))
}
## Get errors (MSE and bias) for each configuration and return data.frame.
# 'MSEs' is the per-configuration output of replicateMSE; 'coordinate'
# selects the single coefficient whose bias goes into 'bias.single'.
# cleanMSEs() and getMSEParallel() are defined elsewhere in the package.
# Requires dplyr/magrittr to be attached.
frameMSEs <- function(MSEs, configurations, coordinate){
  # Compute norm of bias
  parallel.bias <- lapply(MSEs, getBias) # list of raw error in each configuration and replication
  # Average error over replication in each configuration (should return matrix of replicationXp)
  bias.single <- parallel.bias %>% sapply( function(x) {x[coordinate]})
  bias.mean <- parallel.bias %>% sapply( function(x) {x %>% mean})
  # Frame MSE of each configuration
  MSEs.list <- lapply(MSEs, cleanMSEs)
  # Location/spread of the parallel-vs-centralized MSE ratio per configuration:
  ratios.frame <- MSEs.list %>%
    lapply(function(x) x['ratio',]) %>% {
      average <- sapply(.,mean, na.rm=TRUE)
      std.dev <- sapply(.,sd, na.rm=TRUE)
      median <- sapply(.,median, na.rm=TRUE)
      mad <- sapply(.,mad, na.rm=TRUE)
      cbind(average=average, std.dev=std.dev, median=median, mad=mad)
    } %>%
    as.data.frame
  MSEs.frame.parallel <- MSEs.list %>%
    sapply(getMSEParallel) %>%
    as.data.frame %>%
    setNames('parallel.MSE')
  #   configurations %<>%
  #     mutate(
  #       mse.highdim=mse.fun.highdim(lambda=lambda, p=p, N=N, m=m, beta=beta.star),
  #       mse.fixp=mse.fun.fixp(lambda=lambda, p=p, N=N, m=m, beta=beta.star),
  #       bias.highdim=bias.fun.highdim(lambda=lambda, p=p, N=N, m=m, beta=beta.star),
  #       bias.fixp=bias.fun.fixp(lambda=lambda, p=p, N=N, m=m, beta=beta.star),
  #       )
  # error.asympt is a placeholder consumed by plotMSEs2; NA unless a caller
  # fills in a theoretical curve.
  MSEs.framed <- data.frame(configurations, 
                            ratios.frame, 
                            MSEs.frame.parallel, 
                            bias.mean=bias.mean, 
                            bias.single=bias.single,
                            error.asympt=NA)
  return(MSEs.framed)
}
## Testing:
# .configurations <- makeConfiguration(
# reps = 1e1, m = 1e1, p = 5e1,
# n = seq(2e2,5e2,length.out=3) ,
# kappa = 5e-1, model = my.ols, link = identity, sigma = 1e1 )
# .MSEs <- apply(.configurations, 1, replicateMSE)
# frameMSEs(.MSEs, .configurations)
# Plot results of fixed p regime:
# ggplot of the chosen summary ('center': one of 'MSE', 'bias.mean',
# 'bias.norm', 'median', 'average') against n, one colour/shape per m.
# 'arm' is the half-length of the error bars (nonzero only for 'median'
# and 'average'); 'jitter' perturbs n horizontally to separate overlapping
# points.  Returns the ggplot object.  Requires ggplot2 and dplyr/magrittr.
plotMSEs <- function(MSEs.framed, 
		     the.title, 
		     y.lab= '',
		     y.lim,
		     center, 
		     legend.position="none", 
		     jitter=0,
		     line=TRUE,
		     lty=3,
		     size=1,
		     font.size=50){
  # Map the requested summary into a common 'center'/'arm' pair:
  if(center=='MSE'){
    MSEs.framed %<>% mutate(center=parallel.MSE, arm=0) 
  }
  if(center=='bias.mean'){
    MSEs.framed %<>% mutate(center=bias.mean, arm=0) 
  }
  if(center=='bias.norm'){
    MSEs.framed %<>% mutate(center=bias.norm, arm=0) 
  }
  if(center=='median'){
    MSEs.framed %<>% mutate(center=median, arm=mad) 
  }
  if(center=='average'){
    MSEs.framed %<>% mutate(center=average, arm=std.dev) 
  }
  MSEs.framed %<>% mutate(n=n+runif(nrow(MSEs.framed),-jitter,jitter),
                          m=as.factor(m)) 
  # Actual Plotting:
  plot.1 <- ggplot(data = MSEs.framed, aes(x=n, y=center, colour=m))+
    geom_point(aes(shape=m), size=size)+
    geom_segment(aes(xend=n, y=center+arm, yend=center-arm))
  plot.1 <- plot.1 + 
    labs(title = the.title)+
    ylab(y.lab)+
    xlab(expression(n))+
    #scale_x_continuous(trans=log_trans(base = 10), breaks=c(5e2, 1e3, 5e3))+
    theme_bw()+
    theme(text = element_text(size=font.size), legend.position = legend.position) 
  if(line){
    plot.1 <- plot.1 + geom_line(linetype=lty)  
  }
  if(!missing(y.lim)){
    plot.1 <- plot.1 + ylim(y.lim)
  }
  return(plot.1)
}
## Testing
# .configurations <- makeConfiguration(
# reps = 1e1, m = 1e1, p = 5e1,
# n = seq(1.1e2,3e2,length.out=3) ,
# kappa = 5e-1, model = my.ols, link = identity, sigma = 1e1 )
# .MSEs <- apply(.configurations, 1, replicateMSE)
# .MSEs.framed <- frameMSEs(.MSEs, .configurations)
# plotMSEs(.MSEs.framed, 'test')
# Make data for classification problems:
# N x p standard-normal design; binary labels drawn as Bernoulli with
# success probability link(X %*% betas); y is returned as a factor.
makeClassificationData <- function(p, N, betas, link,...){
  X <- matrix(rnorm(p*N), nrow=N, ncol=p)
  probs <- link(X %*% betas)
  y <- factor(rbinom(N, 1, probs))
  list(y = y, X = X)
}
## Testing:
# .p <- 1e2
# .N <- 1e4
# .betas <- makeBetasRandom(.p)
# .classification.data <- makeClassificationData(.p, .N, .betas, sigmoid)
# .classification.data$y
## Deprecated!
# For creating lty='b' type lines using package 'ggExtra'
# geom_barbed <- GeomBarbed$build_accessor()
# Plot results for choosing m:
# ggplot of the chosen summary ('center': 'MSE', 'bias.mean', 'bias.norm'
# or 'bias.single') against the number of machines m, grouped/coloured by
# the quantity held fixed ('fix': 'N', 'n', 'p' or 'Np').  If the frame
# carries a non-NA error.asympt column, the theoretical curve is overlaid.
# 'rounding' controls how N is rounded before being used as a factor
# legend.  Returns the ggplot object.  Requires ggplot2 and dplyr/magrittr.
# NOTE(review): the 'robust' argument is currently unused.
plotMSEs2 <- function(MSEs.framed, 
                      the.title, 
                      y.lab= '',
                      y.lim,
                      robust=FALSE,
                      legend.position="none", 
                      jitter=0,
                      line=TRUE,
                      fix,
                      center,
                      rounding=-2,
                      lty=3,
                      lwd,
                      lwd.error,
                      lty.error,
                      point.size=1,
                      point.size.error=0,
                      scale.y=scale_y_continuous(),
                      font.size=20){
  # Discretize the axes variables for grouping:
  MSEs.framed %<>% mutate(arm=0,
                          n=as.factor(n),
                          N=as.factor(round(N,rounding)),
                          p=as.factor(p)) 
  if(center=='MSE'){
    MSEs.framed %<>% mutate(center=parallel.MSE) 
  }
  if(center=='bias.mean'){
    MSEs.framed %<>% mutate(center=bias.mean) 
  }
  if(center=='bias.norm'){
    MSEs.framed %<>% mutate(center=bias.norm) 
  }
  if(center=='bias.single'){
    MSEs.framed %<>% mutate(center=bias.single) 
  }
  # Choose the grouping aesthetic according to what is held fixed:
  if(fix=='N'){
    plot.1 <- ggplot(data = MSEs.framed, 
                     aes(x=m, y=center, colour=N, group=N))
  }
  if(fix=='n'){
    plot.1 <- ggplot(data = MSEs.framed, 
                     aes(x=m, y=center, colour=n, group=n))
  }
  if(fix=='p'){
    plot.1 <- ggplot(data = MSEs.framed, 
                     aes(x=m, y=center, colour=p, group=p))
  }
  if(fix=='Np'){
    plot.1 <- ggplot(data = MSEs.framed, 
                     aes(x=m, y=center, 
                         shape=p,
                         colour=p, 
                         group=p))
  }
  if(!missing(y.lim)) {
    plot.1 <- plot.1 + ylim(y.lim)
  }
  # Actual plotting
  plot.1 <- plot.1 + 
    #    geom_line(lwd=lwd) +
    geom_point(size=point.size) +
    geom_line(linetype=lty, size=lwd)+
    labs(title = the.title)+
    ylab(y.lab)+
    xlab(expression(m))+
    #scale_x_continuous(trans=log_trans(base = 10), breaks=c(5e2, 1e3, 5e3))+
    theme_bw()+
    theme(text = element_text(size=font.size), legend.position = legend.position)+
    scale.y
  # Overlay the theoretical error curve when one has been supplied:
  if(!is.na(MSEs.framed$error.asympt[1])) 
    plot.1 <- plot.1 + 
      geom_line(aes(x=m, y=error.asympt), linetype=lty.error, lwd=lwd.error)+
      geom_point(aes(x=m, y=error.asympt, shape=p), size=point.size.error)
  return(plot.1)
}
## Testing:
|
3a1c023e5b714b86bf1fbdafb1b3296334297c04 | dd8132404e8c7b028cb13cba904c50aace01c6a7 | /swt/src/lib/swt/src/date.r | 16ce05e7588a99bffefb274f110b36b05ac69ab6 | [] | no_license | arnoldrobbins/gt-swt | d0784d058fab9b8b587f850aeccede0305d5b2f8 | 2922b9d14b396ccd8947d0a9a535a368bec1d6ae | refs/heads/master | 2020-07-29T09:41:19.362530 | 2019-10-04T11:36:01 | 2019-10-04T11:36:01 | 209,741,739 | 15 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,649 | r | date.r | # date --- pick up useful information about the time of day
# Argument 1 is a switch, to select the data returned.
# SYS_DATE => date, in format mm/dd/yy
# SYS_TIME => time, in format hh:mm:ss
# SYS_USERID => login name
# SYS_PIDSTR => user number
# SYS_DAY => day of the week
# SYS_PID => numeric user number in str (1)
# SYS_LDATE => name of day, name of month, day, and year
# SYS_MINUTES=> number of minutes past midnight in str (1..2)
# SYS_SECONDS=> number of seconds past midnight in str (1..2)
# SYS_MSEC => number of msec past midnight in str (1..2)
# Argument 2 is a string to receive data specified by
# argument 1.
# Length of string is returned as function value.
# Fills 'str' with the information selected by 'item' (see header comments
# above) and returns the string length; returns 0 and sets str(1)=EOS for
# out-of-range selectors.  Based on the packed time/date buffer filled by
# timdat: td(1..3) hold the packed date digits, td(4) minutes past midnight,
# td(5) seconds within the minute, td(12) the user number, td(13..) the
# packed user name.  td(6)/td(11) are presumably ticks-within-second and
# ticks-per-second -- TODO confirm against the timdat primitive.
	integer function date (item, str)
	integer item
	character str (ARB)
	integer td (28), day, month, year
	integer encode, ptoc, wkday, mapup
	integer snum (2)
	longint lnum
	equivalence (snum, lnum)
	string_table ix, days _
		/ "sun" / "mon" / "tues" / "wednes" _
		/ "thurs" / "fri" / "satur"
	string_table iy, months _
		/ "January" / "February" / "March" _
		/ "April" / "May" / "June" _
		/ "July" / "August" / "September" _
		/ "October" / "November" / "December"

	# reject selectors outside the supported range
	if (item < SYS_DATE || item > SYS_MSEC) {
		str (1) = EOS
		return (0)
	}

	# fetch the packed time/date/user buffer from the OS
	call timdat (td, 12 + MAXPACKEDUSERNAME)

	select (item)
		when (SYS_DATE)	# date, in format mm/dd/yy
			return (encode (str, 9, "*,2p/*,2p/*,2p"s,
				td (1), td (2), td (3)))
		when (SYS_TIME)	# time, in format hh:mm:ss
			return (encode (str, 9, "*2,,0i:*2,,0i:*2,,0i"s,
				td (4) / 60, mod (td (4), 60), td (5)))
		when (SYS_USERID)	# login name
			return (ptoc (td (13), ' 'c, str, MAXUSERNAME))
		when (SYS_PIDSTR)	# user number
			return (encode (str, 4, "*3,,0i"s, td (12)))
		when (SYS_DAY) {	# day of the week
			# unpack the two packed decimal digits of month/day/year
			td (1) = td (1) - '00'
			td (2) = td (2) - '00'
			td (3) = td (3) - '00'
			day = rs (td (2), 8) * 10 + rt (td (2), 8)
			month = rs (td (1), 8) * 10 + rt (td (1), 8)
			year = rs (td (3), 8) * 10 + rt (td (3), 8)
			return (encode (str, 20, "*sday"s,
				days (ix (wkday (month, day, year) + 1))))
		}
		when (SYS_PID) {	# numeric user number in str (1)
			str (1) = td (12)
			return (0)
		}
		when (SYS_LDATE) {	# name of day, name of month, day, and year
			td (1) = td (1) - '00'
			td (2) = td (2) - '00'
			td (3) = td (3) - '00'
			day = rs (td (2), 8) * 10 + rt (td (2), 8)
			month = rs (td (1), 8) * 10 + rt (td (1), 8)
			year = rs (td (3), 8) * 10 + rt (td (3), 8)
			# NOTE(review): the "19*i" format hard-wires the century.
			date = encode (str, 50, "*sday, *s *i, 19*i"s,
				days (ix (wkday (month, day, year) + 1)),
				months (iy (month + 1)), day, year)
			str (1) = mapup (str (1))
			return
		}
		when (SYS_MINUTES) {	# minutes past midnight
			lnum = td (4)
			str (1) = snum (1)
			str (2) = snum (2)
			return (0)
		}
		when (SYS_SECONDS) {	# seconds past midnight
			lnum = intl (td (4)) * 60 + td (5)
			str (1) = snum (1)
			str (2) = snum (2)
			return (0)
		}
		when (SYS_MSEC) {	# milliseconds past midnight
			lnum = (intl (td (4)) * 60 + td (5)) * 1000 _
				+ (td (6) * 1000) / td (11)
			str (1) = snum (1)
			str (2) = snum (2)
			return (0)
		}
	return (0)
	end
|
3bdc3064b1979540964b6cdc26a536cc81914914 | 4951e7c534f334c22d498bbc7035c5e93c5b928d | /regression/fse13-dynodroid.R | 82be8714f61fcabecb9af79273e919b08d056a3b | [] | no_license | Derek-Jones/ESEUR-code-data | 140f9cf41b2bcc512bbb2e04bcd81b5f82eef3e1 | 2f42f3fb6e46d273a3803db21e7e70eed2c8c09c | refs/heads/master | 2023-04-04T21:32:13.160607 | 2023-03-20T19:19:51 | 2023-03-20T19:19:51 | 49,327,508 | 420 | 50 | null | null | null | null | UTF-8 | R | false | false | 1,241 | r | fse13-dynodroid.R | #
# fse13-dynodroid.R, 23 Sep 16
#
# Data from:
# Dynodroid: {An} Input Generation System for {Android} Apps
# Aravind Machiry and Rohan Tahiliani and Mayur Naik
#
# Example from:
# Evidence-based Software Engineering: based on the publicly available data
# Derek M. Jones
#
# TAG experiment_testing input_generation testing_input
source("ESEUR_config.r")
pal_col=rainbow(3)
# NOTE(review): hcl_col is defined but never used below.
hcl_col=rainbow_hcl(4)
dh=read.csv(paste0(ESEUR_dir, "benchmark/fse13-dynohuman.csv.xz"), as.is=TRUE)
# dm=read.csv(paste0(ESEUR_dir, "benchmark/fse13-dynomonkey.csv.xz"), as.is=TRUE)
# Dynodroid vs human
# Total covered lines = covered by both + Dynodroid-only + human-only.
dh$total_covered=dh$LOC.covered.by.both.Dyno.and.Human..C.+
	dh$LOC.covered.exclusively.by.Dyno..D.+
	dh$LOC.covered.exclusively.by.Human..H.
# NOTE(review): the plotted values are fractions in [0,1], although the
# y-axis label says "Percentage".
plot(dh$Total.App.LOC..T.,
	dh$LOC.covered.by.both.Dyno.and.Human..C./dh$total_covered,
	log="x", col=pal_col[1],
	xlab="Lines in Application", ylab="Percentage of covered lines\n")
points(dh$Total.App.LOC..T.,
	dh$LOC.covered.exclusively.by.Dyno..D./dh$total_covered,
	col=pal_col[2])
points(dh$Total.App.LOC..T.,
	dh$LOC.covered.exclusively.by.Human..H./dh$total_covered,
	col=pal_col[3])
legend(x="left", legend=c("Human & Dynodroid", "Dynodroid", "Human"),
	bty="n", fill=pal_col, cex=1.2)
|
2341ecf70889ac24023306aae5a9e404b57226f7 | 513c96a4a1c5ab97f7f516517cac578771e6c9ad | /src/from_windows/testing_ret_from_abatz_climdata.R | 5e0d61b139920dd263a2173cf68a35661848ff99 | [] | no_license | htn5098/heidi_master_repo | 9c27c1d90081a39e45ba16ceb7c1247aeb27163c | c80adf7ae608d0b8a8fbc3acab24a4d899cb25ea | refs/heads/master | 2021-07-18T23:19:47.583208 | 2021-02-02T21:08:29 | 2021-02-02T21:08:29 | 238,055,282 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,617 | r | testing_ret_from_abatz_climdata.R | setwd('C:\\01.PSU\\02.DataAnalysis\\data\\raw\\abatz_met')
# Exploratory script: extract one grid cell's daily climate variables from
# the Abatzoglou netCDF files and compare the Evapotranspiration package's
# Penman-Monteith ETo (and VPD) against the reference values.
# NOTE(review): year()/month()/day() below look like lubridate functions,
# but lubridate is never attached here -- TODO confirm.
library(ncdf4)
library(dplyr)
library(Evapotranspiration)
files <- list.files()
clim.ls <- list()
gridret <- read.table("C:\\01.PSU\\02.DataAnalysis\\data\\external\\ret_indx.txt", sep = ',',
                      header = T)[,c('Grid','COUNTYNS')]
# Pull the spatial subset from every netCDF file and keep one grid cell.
for (i in 1:length(files)) {
  nc_file <- nc_open(files[i])
  var <- names(nc_file$var)
  ret_se <- ncvar_get(nc_file,var,start = c(795,210,1), count = c(391,266,-1))
  dim <- dim(ret_se)
  # Reorder to (time, lat, lon) and flatten the spatial dimensions.
  ret_se_matrix <- aperm(ret_se, c(3,2,1))
  dim(ret_se_matrix) <- c(dim[3],dim[2]*dim[1])
  ret_se_matrix[1:20,1:2]
  dim(ret_se_matrix)
  ret_se_sel <- ret_se_matrix[,gridret$Grid]
  # Single test cell (id 46963) used throughout this script:
  grid.choose <- ret_se_sel[,46963]
  clim.ls[[i]] <- grid.choose
  names(clim.ls)[i] <- var
}
clim.df <- data.frame(do.call(cbind,clim.ls))
# Reorder/rename to the variable set ReadInputs() expects.
eto.abatz.test <- clim.df[,c(6,5,2,3,8,4)]
colnames(eto.abatz.test) <- c('Tmax','Tmin','RHmax','RHmin','Rs','uz')
# Kelvin -> Celsius; wind m/s -> km/day (x 0.0864 would be MJ conversion
# for radiation; presumably the intended unit change for uz -- TODO confirm).
eto.abatz.test[,1:2] <- eto.abatz.test[,1:2] - 273
eto.abatz.test[,6] <- eto.abatz.test[,6]*0.0864
time.abatz <- seq.Date(as.Date('1979-01-01','%Y-%m-%d'),
                       as.Date('1979-12-31','%Y-%m-%d'),'days')
data("climatedata")
data("constants")
data.abatz <- data.frame(
  Station = 46963,
  Year = year(time.abatz),
  Month = month(time.abatz),
  Day = day(time.abatz),
  eto.abatz.test
)
# NOTE(review): const.abatz is used here but only defined a few lines
# below -- this only works when the script is run piecemeal/interactively.
inputs.abatz <- ReadInputs(varnames = c('Tmax','Tmin','RHmax','RHmin','Rs','uz'),
                           climatedata = data.abatz,
                           constants = const.abatz, stopmissing = c(10,10,5))
const.abatz <- list(lat_rad = (34.608337)*pi/180,
                    Elev = 656.5381,
                    lambda = constants$lambda,
                    Gsc = constants$Gsc,
                    z = 10,
                    sigma = constants$sigma,
                    G = 0
)
ETo.package.abatz <- ET.PenmanMonteith(data = inputs.abatz,
                                       constants = const.abatz,
                                       solar = 'data')
ETo.p.abatz.output <- ETo.package.abatz$ET.Daily
head(ETo.p.abatz.output)
# NOTE(review): ETo.abatz.grid and sat.vp.fucn (sic) are not defined in
# this script; they must come from the surrounding workspace.
head(ETo.abatz.grid[,'46963'])
# Check vpd calculations:
es.abatz <- (sat.vp.fucn(eto.abatz.test$Tmax) + sat.vp.fucn(eto.abatz.test$Tmin))/2
ea.abatz <- (sat.vp.fucn(eto.abatz.test$Tmax)*eto.abatz.test$RHmin/100+
               sat.vp.fucn(eto.abatz.test$Tmin)*eto.abatz.test$RHmax/100)/2
vpd.test <- es.abatz - ea.abatz
head(vpd.test)
head(clim.df$mean_vapor_pressure_deficit)
hist(vpd.test - clim.df$mean_vapor_pressure_deficit)
|
215c78461306c0cf31d2a1b4917fbd06ee117cfc | f07a532ea49dce74f41688d0bbd396323e774a05 | /poster2017/8-helper.R | c67e8d5f2732d9a91be04657d62a580a51637fcf | [] | no_license | paul-fink/ISIPTA | 0881663ec16a320888cfd995786a4cdea20012f6 | ce446ee7e7545dd04ef11bb378ee644f2a502322 | refs/heads/master | 2020-12-14T09:50:07.441210 | 2017-08-09T09:19:30 | 2017-08-09T09:19:30 | 95,446,933 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,006 | r | 8-helper.R | # graph can be 'grpah' for pure ISIPTA
# Look up vertex ids in the global 'graph' whose name matches the regular
# expression 'name'.  Returns the matching indices named by the full
# vertex names; prints a message and invisibly returns integer(0) when
# nothing matches.
getIdISIPTA <- function(name) {
  all.names <- names(V(graph))
  hits <- grep(name, all.names)
  if(length(hits)) {
    names(hits) <- all.names[hits]
    return(hits)
  }
  cat(sprintf("Person with name like '%s' not found\n", name))
  invisible(integer(0))
}
# Inverse lookup in the global 'graph': given a vertex id, return it
# named with the person's name, or report that no such id exists.
getNameISIPTA <- function(id) {
  person <- names(V(graph))[id]
  if(!is.na(person)) {
    names(id) <- person
    return(id)
  }
  cat(sprintf("No person with id '%i'\n", id))
  invisible(integer(0))
}
# Look up people in the global 'graph2017' by name (regular expression);
# returns the matching vertices' 'label' attribute (their id in that
# graph), named by the full vertex names.
getIdECSQARU <- function(name) {
  all.names <- names(V(graph2017))
  hits <- grep(name, all.names)
  if(length(hits)) {
    id <- V(graph2017)$label[hits]
    names(id) <- all.names[hits]
    return(id)
  }
  cat(sprintf("Person with name like '%s' not found\n", name))
  invisible(integer(0))
}
# Inverse lookup in the global 'graph2017': find the vertex whose 'label'
# equals 'id' and return the id named with that person's name.
# NOTE(review): when no label matches, 'person' has length zero and the
# if() errors ("argument is of length zero") -- unchanged behaviour.
getNameECSQARU <- function(id) {
  person <- names(V(graph2017))[V(graph2017)$label == id]
  if(!is.na(person)) {
    names(id) <- person
    return(id)
  }
  cat(sprintf("No person with id '%i'\n", id))
  invisible(integer(0))
}
|
3117ccdf955a2b9f3f20f6a268fb703c2d196ab7 | e7db233043b7b70da7020a5560c770d4db41a823 | /Cloud/Codigos/excluir/testes.R | a6a5224a0bcaaf1e137eb0d9ef4d549ad1093b94 | [
"MIT"
] | permissive | jhunufernandes/iot | 5647acb9bd7ea1a05ebfc078cc397dda00e6b752 | db088fbfd0e5535d709842ab7f4beb945bf8d352 | refs/heads/master | 2020-04-09T10:22:52.471266 | 2018-12-04T20:14:09 | 2018-12-04T20:14:09 | 160,269,420 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,861 | r | testes.R |
# showModal(modalDialog(
# title = "Atenção!",
# textInput("txt_sensorID", "Digite o código do seu produto:"),
# textInput("txt_sensorSenha", "Digite senha:"),
#
# easyClose = TRUE,
# footer = tagList(
# actionButton("ok", "OK")
# )
# ))
# tabEvento = readRDS("Dados/dadosEvento.rds")
# tail(tabEvento)
# ## NOTIFICACAO --------------------------------------------------------------------------------------------------
# mensgItem = list()
# #i = "pia4"
# for (i in unique(tabEvento$LOCAL)) {
# #ultMed = tail(subset(tabEvento$MOMENTO, tabEvento$LOCAL==i), na.rm = T)
#
# ultMed = max(subset(tabEvento$MOMENTO, tabEvento$LOCAL==i), na.rm = T)
# dif = difftime(now(tzone = "America/Sao_Paulo"), ultMed, units = "days")
# if(dif >= 3/24 & dif < 1){
# mensgItem[[i]] = messageItem(from = paste0("Sensor de ", pontos$NOME[which(pontos$ID==i)]),
# message = paste0("Não está funcionando há ", round(dif*24), " horas"),
# icon = icon("exclamation-triangle"))
# }else if(dif >= 1){
# mensgItem[[i]] = messageItem(from = paste0("Sensor de ", pontos$NOME[which(pontos$ID==i)]),
# message = paste0("Não está funcionando há ", round(dif), " dias"),
# icon = icon("exclamation-triangle"))
# }
# }
#
# output$messageMenu <- renderMenu({
# dropdownMenu(type = "messages", .list = mensgItem)
# })
#
#
# observe({
#
# #inicio = as.Date("2018-10-1")
# # fim = as.Date(now())
# inicio = as.Date(input$dateIntervalo[1])
# fim = as.Date(input$dateIntervalo[2])
# baseTempo = as.character(input$selBaseTemporal)
#
# if(fim < inicio){
# showModal(modalDialog(
# title = "Atenção!",
# "A data final não pode ser anterior a data data de início!",
# easyClose = TRUE,
# footer = NULL
# ))
# }else{
#
# #imputar = input$ckbPreencherFalt
#
# if(input$ckbPreencherFalt)
# tabHora = readRDS("Dados/dadosHoraImp.rds")
# else
# tabHora = readRDS("Dados/dadosHora.rds")
#
# tab = subset(tabHora, as.Date(tabHora$HORA_REF)<fim & as.Date(tabHora$HORA_REF)>inicio)
#
# try(
# if(baseTempo == "Por período do dia"){
# tab = transfConsumoMedidoPeriodo(tab)
# }
# else if(baseTempo == "Por dia") {
# tab = transfConsumoMedidoDia(tab)
# }
# )
# semZero = input$ckbSemZero
# try(global_carregaConsumo(input, output, tab, inicio, fim, baseTempo, semZero))
# try(torneiras_carregaConsumo(input, output, tab, inicio, fim, baseTempo, semZero))
# try(mictorios_carregaConsumo(input, output, tab, inicio, fim, baseTempo, semZero))
# try(vasos_carregaConsumo(input, output, tab, inicio, fim, baseTempo, semZero))
# try(pessoas_carregaContador(input, output, tab, inicio, fim, baseTempo, semZero))
# try(analise_carrega(input, output, tab, inicio, fim, baseTempo, semZero))
# }
# })
# shinyApp(
# ui = basicPage(
# actionButton("show", "Show modal dialog"),
# verbatimTextOutput("dataInfo")
# ),
#
# server = function(input, output) {
# # reactiveValues object for storing current data set.
# vals <- reactiveValues(data = NULL)
#
# # Return the UI for a modal dialog with data selection input. If 'failed' is
# # TRUE, then display a message that the previous value was invalid.
# dataModal <- function(failed = FALSE) {
# modalDialog(
# textInput("dataset", "Choose data set",
# placeholder = 'Try "mtcars" or "abc"'
# ),
# span('(Try the name of a valid data object like "mtcars", ',
# 'then a name of a non-existent object like "abc")'),
# if (failed)
# div(tags$b("Invalid name of data object", style = "color: red;")),
#
# footer = tagList(
# modalButton("Cancel"),
# actionButton("ok", "OK")
# )
# )
# }
#
# # Show modal when button is clicked.
# observeEvent(input$show, {
# showModal(dataModal())
# })
#
# # When OK button is pressed, attempt to load the data set. If successful,
# # remove the modal. If not show another modal, but this time with a failure
# # message.
# observeEvent(input$ok, {
# # Check that data object exists and is data frame.
# if (!is.null(input$dataset) && nzchar(input$dataset) &&
# exists(input$dataset) && is.data.frame(get(input$dataset))) {
# vals$data <- get(input$dataset)
# removeModal()
# } else {
# showModal(dataModal(failed = TRUE))
# }
# })
#
# # Display information about selected data
# output$dataInfo <- renderPrint({
# if (is.null(vals$data))
# "No data selected"
# else
# summary(vals$data)
# })
# }
# )
# server2 <- function(port = 1901, timeout = 86400, host = "ec2-18-225-10-77.us-east-2.compute.amazonaws.com"){
# library(lubridate)
# library(stringr)
# setwd("/home/laftos/Projetos/MedidorPoluicao/Dados/Desagregado/Sensores/")
# #setwd("C:/Users/Leonardo/OneDrive/USP/Semestre10/Iot/projeto/poluicao/dados/")
# hoje = now(tzone = "America/Sao_Paulo")
# newLine = data.frame(hora = hour(hoje),
# min = minute(hoje),
# seg = second(hoje),
# latit = -23.5591836,
# longi = -46.748765,
# polui = 50,
# tempe = 20)
# while(TRUE){
# writeLines("Listening...")
# con = socketConnection(host=host, port = port, blocking=TRUE,
# server=TRUE, open="r+", timeout= timeout)
#
# data <- readLines(con, 1)
#
# hoje = now(tzone = "America/Sao_Paulo")
# sensor = word(data,1,sep = "\\;")
# nameFile = paste0(sensor, "/", str_pad(day(hoje), 2 , pad="0"),
# str_pad(month(hoje), 2 , pad="0"),
# str_pad(year(hoje), 2 , pad="0"),
# ".csv")
# latit = newLine$latit[1] + as.numeric(word(data,c(2),sep = ";"))
# latit = ifelse(latit<(-47), -24,
# ifelse(latit>(-46),-23, latit))
# longi = newLine$longi + as.numeric(word(data,c(3),sep = ";"))
# longi = ifelse(longi<(-24), -24,
# ifelse(longi>(-23),-23, longi))
# polui = newLine$polui + as.numeric(word(data,c(4),sep = ";"))
# polui = ifelse(polui<(0), 0,
# ifelse(polui>(300),300, polui))
# tempe = newLine$tempe + as.numeric(word(data,c(5),sep = ";"))
# tempe = ifelse(tempe<(-10), -10,
# ifelse(tempe>(45), 45, tempe))
#
# newLine = data.frame(hora = hour(hoje),
# min = minute(hoje),
# seg = second(hoje),
# latit = latit,
# longi = longi,
# polui = polui,
# tempe = tempe)
#
# try(dir.create(sensor))
# try(write.table(newLine, file = nameFile, sep = ";", dec = ",", append = TRUE, quote = FALSE,
# col.names = FALSE, row.names = FALSE))
# close(con)
# }
# }
# Minimal blocking TCP echo-to-console server: accept one connection at a
# time on 'port', read a single line, print it, close, and loop forever.
# Runs until interrupted.
server3 <- function(port = 1901, timeout = 86400, host = "ec2-18-225-10-77.us-east-2.compute.amazonaws.com"){
  library(lubridate)
  library(stringr)
  setwd("/home/laftos/Projetos/MedidorPoluicao/Dados/Desagregado/Sensores/")
  #setwd("C:/Users/Leonardo/OneDrive/USP/Semestre10/Iot/projeto/poluicao/dados/")
  while(TRUE){
    writeLines("Listening...")
    con <- socketConnection(host=host, port = port, blocking=TRUE,
                            server=TRUE, open="r+", timeout= timeout)
    # Guarantee the socket is released even if readLines() fails; the
    # original leaked the connection (and blocked the port) on error.
    tryCatch({
      msg <- readLines(con, 1)
      print(msg)
    }, finally = close(con))
  }
}
server3() |
d847fe20887d70e75ef478f68b473f3664daf2cc | 8357f04a0a50e10697a650ad319b37738dcc2cf8 | /man/get.user.Rd | 4d8d26763c5340753898fd1de370a37d8105c65e | [
"MIT"
] | permissive | prateek05/rgithub | 2a7114a8262cd6abefc8d31f03a907b75f127ba2 | 153bde1466252952e21c1fdb2ff0b452b9c8de99 | refs/heads/master | 2021-01-17T04:24:00.180261 | 2014-07-14T17:24:43 | 2014-07-14T17:24:43 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 291 | rd | get.user.Rd | \name{get.user}
\alias{get.user}
\title{Get information on some user}
\usage{
get.user(user, ctx = get.github.context())
}
\arguments{
\item{user}{the user}
\item{ctx}{the github context object}
}
\value{
Information about the user
}
\description{
Get information on some user
}
|
6d1689a80fd88058cf74b6c33d08b81803026aa2 | a52b28ece646573da8fd98b4e3933e364f37ca9d | /run_analysis.R | 1cab33046cc62a7c9341f35f0a3524318e862c15 | [] | no_license | settyblue/DataAssignment | a503ca73331ae489e85f03e8064292dae9349454 | 0bf22cb2a48e6a496239af2a8f72e89cf7af4889 | refs/heads/master | 2016-08-07T07:28:28.142990 | 2014-12-22T09:10:08 | 2014-12-22T09:10:08 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,493 | r | run_analysis.R | run_analysis <- function(){
## Body of run_analysis() (the function header sits on the preceding line).
## Tidies the UCI HAR smartphone data set:
##   1. merge the train/test subject, activity and feature files,
##   2. replace activity codes with descriptive labels,
##   3. keep only mean()/std() features (meanFreq excluded),
##   4. average every kept feature per subject and activity,
##   5. write the summary to "Summarised_Dataset.txt" (fixed width).
## Assumes the "UCI HAR Dataset" folder is in the working directory and that
## dplyr (and gdata, for write.fwf) are attached by the caller.
## Loading the data from text files to local data frames.
subject1 <- read.table("UCI HAR Dataset/train/subject_train.txt",sep = "",header=FALSE)
subject2 <- read.table("UCI HAR Dataset/test/subject_test.txt",sep = "",header=FALSE)
y1 <- read.table("UCI HAR Dataset/train/y_train.txt",sep = "",header=FALSE)
y2 <- read.table("UCI HAR Dataset/test/y_test.txt",sep = "",header=FALSE)
x1 <- read.table("UCI HAR Dataset/train/X_train.txt",sep = "",header=FALSE)
x2 <- read.table("UCI HAR Dataset/test/X_test.txt",sep = "",header=FALSE)
features <- read.table("UCI HAR Dataset/features.txt",sep = "",header=FALSE,row.names=1)
## Combining test and train data sets.
x <- rbind(x1,x2)
y <- rbind(y1,y2)
subject <- rbind(subject1,subject2)
## Formatting column labels (strip "-" and "()" from feature names).
features <- gsub("-","_",features)
features <- sub("()_","_",features,fixed=TRUE)
features <- sub("()","",fixed=TRUE,features)
## Substitute activity codes for their descriptive labels.
activity_labels <- descriptive_activity(y)
## Column-bind all variables to create the complete data set.
complete_dataset <- tbl_df(cbind(subject,activity_labels,x))
colnames(complete_dataset) <- c("Subject","Activity",features)
## Subsetting required features: mean/std, excluding meanFreq.
features_index <- grep("mean|std",features)
features_index_removed <- grep("meanFreq",features)
features_index <- features_index[!(features_index %in% features_index_removed)]
## Fix: the original wrote features_index(1,2,features_index+2), which tries
## to *call* the integer vector as a function; c() builds the column index
## (+2 skips past the Subject and Activity columns).
required_dataset <- complete_dataset[,c(1,2,features_index+2)]
## Group by Subject then Activity and average every remaining column.
summarised_dataset <- summarise_each(group_by(required_dataset,Subject,Activity),funs(mean))
## Format the data set in scientific notation.
summarised_dataset <- format(summarised_dataset,scientific=TRUE)
## Write the data set to disk in fixed-width format, header row included.
write.fwf(rbind(c(colnames(summarised_dataset)),summarised_dataset),"Summarised_Dataset.txt",
          width=25,quote=FALSE,colnames=FALSE)
}
descriptive_activity <- function(x){
  # Map UCI HAR activity codes (1..6) onto descriptive labels.
  #
  # Args:
  #   x: a one-column data frame (as produced by read.table on y_train/y_test)
  #      or a plain numeric vector of activity codes.
  #
  # Returns a character vector of labels. Any code outside 1..5 (including 6)
  # maps to "LAYING", matching the original if/else fall-through.
  #
  # Fix: the original indexed the whole data frame with y[i] (selecting
  # columns, not rows) and compared entire columns to scalars, so it never
  # produced correct labels. Rewritten as a vectorized base-R lookup.
  labels <- c("WALKING", "WALKING_UPSTAIRS", "WALKING_DOWNSTAIRS",
              "SITTING", "STANDING", "LAYING")
  codes <- if (is.data.frame(x)) x[[1]] else x
  codes <- as.integer(codes)
  # Original else-branch: anything that is not 1..5 becomes "LAYING".
  codes[is.na(codes) | codes < 1L | codes > 5L] <- 6L
  labels[codes]
}
a0bef52a54e57246338970e97858dd08dfd8104f | cff61fbfa0e5729df09ee899d73b5474f360e30e | /tests/testthat/test-get_profs.R | 9e2ebdb3dff944cd587ad52f2ba6d9b19b287a6e | [
"MIT"
] | permissive | stephaniereinders/CyChecks | d6c43887c610b79dd2b505da6b9e6af2622f91ed | f7f5dfeed5e320e1ebe5512b960e18d822154c8a | refs/heads/master | 2022-01-07T23:45:22.799473 | 2019-06-23T18:20:01 | 2019-06-23T18:20:01 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 734 | r | test-get_profs.R | context("test-get_profs")
# Integration test for get_profs(): checks input validation and that the
# bundled sals18 snapshot yields the same professors as a fresh API pull.
test_that(
  "get_profs is working", {
    library(checkmate)
    library(dplyr)
    # invalid inputs must raise errors
    expect_error(get_profs(data = select(sals_dept, -position))) # no position column
    expect_error(get_profs(data = 1:10)) # input not a dataframe
    # NOTE(review): sal_df() presumably hits a remote salary API, so this
    # test needs network access and a valid app token -- confirm/skip offline.
    sals_test <- sal_df(fiscal_year = 2018, limit = 10000,
                        token = "$$app_token=GJISvIEj4Jg2KmwRkV3oCGJrj") %>% dplyr::distinct(name, .keep_all = TRUE) %>%
      dplyr::select(-c(name, base_salary_date))
    # sals18 appears to be a data set shipped with the package -- verify.
    sals18_test <- sals18 %>% dplyr::distinct(id, .keep_all = TRUE) %>%
      dplyr::select(-c(base_salary,department, organization, id, base_salary_date))
    # both routes should identify the same professors
    expect_equal(get_profs(sals18_test),get_profs(sals_test))
  }
)
|
dbc859b2f2239263f84ed7bd04c6e5d4928e0981 | 0ebf0950d351f32a25dadb64b4a256a8a9022039 | /man/testSeparation.SegmentedGenomicSignalsInterface.Rd | 5d3c8e6a19e3e2aedffd09d3c7e09e7d2e0c910a | [] | no_license | HenrikBengtsson/aroma.cn.eval | de02b8ef0ae30da40e32f9473d810e44b59213ec | 0462706483101b74ac47057db4e36e2f7275763c | refs/heads/master | 2020-04-26T16:09:27.712170 | 2019-01-06T20:41:30 | 2019-01-06T20:41:30 | 20,847,824 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,290 | rd | testSeparation.SegmentedGenomicSignalsInterface.Rd | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% SegmentedGenomicSignalsInterface.testSeparation.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{testSeparation.SegmentedGenomicSignalsInterface}
\alias{testSeparation.SegmentedGenomicSignalsInterface}
\alias{SegmentedGenomicSignalsInterface.testSeparation}
\alias{testSeparation,SegmentedGenomicSignalsInterface-method}
\title{Tests statistically the separation between two states}
\description{
Tests statistically the separation between two states.
}
\usage{
\method{testSeparation}{SegmentedGenomicSignalsInterface}(this, test=c("t.test", "ks.test"), stateIdxs=1:2, ...)
}
\arguments{
\item{test}{A \code{\link[base]{character}} string specifying the statistical test to use.}
  \item{stateIdxs}{An \code{\link[base]{integer}} \code{\link[base]{vector}} specifying the indices of the
    two states to be used.}
\item{...}{Not used.}
}
\value{
Returns what test function returns.
}
\author{Henrik Bengtsson, Pierre Neuvial}
\seealso{
\code{\link[aroma.core]{SegmentedGenomicSignalsInterface}}
}
\keyword{internal}
\keyword{methods}
|
f3fe03b75c21b6c03933de998f2eaadf62439487 | 51703d55be207df29decc17441c323be93b8adaf | /HW8/Solutions/7.R | ce9c0564ba290645236a35cb8dc88a84cf4f1d5a | [] | no_license | Mahbodmajid/DataAnalysis | b4ee16f39e91dff0bbeea058e92162f91f28d84c | 127e59a101d4847171fcb7728db38f4405a10e38 | refs/heads/master | 2021-06-10T02:04:52.153255 | 2019-11-25T00:58:18 | 2019-11-25T00:58:18 | 141,756,223 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 591 | r | 7.R | bigrams_separated %>% filter(word1 == "he" | word1 == "she") -> bigrams_he_she
# Count "he"/"she" bigrams and keep the 20 most frequent following words per
# pronoun. NOTE(review): the chain is assigned twice -- "-> top_verbs" at the
# end AND "bigram_he_she <-" at the front -- so both names hold the same table.
bigram_he_she <- bigrams_he_she %>%
  count(word1, word2, sort = TRUE) %>%
  group_by(word1) %>%
  top_n(wt = n, n = 20)-> top_verbs
# Horizontal bar chart of the top following words, one facet per pronoun.
ggplot(top_verbs, aes(x = reorder(word2, n), y = n, fill = word1))+
  geom_bar(stat = "identity")+
  theme(axis.text.y = element_text(angle = 45, vjust = 1,
                                   size = 8, hjust = 1))+
  xlab("Verb") +
  guides(fill = "none")+
  ylab("Frequency") +
  ggtitle("Charles Dickens Novels Verbs")+
  coord_flip()+ facet_wrap(~word1, scales = "free")
|
c585133222bbee01814d81f866bdbe13efc8f87d | e20e28ae2820b90251878c97b4975b313d5658a1 | /An Introduction to Statistical Learning with Applications in R/8. Support Vector Machine/SVC/Support Vector Classifier.R | 217cf75dd8971d555e2d2620d1915b02a3735a02 | [] | no_license | Manjunath7717/Machine-Learning-Algorithms | 535fa8a4d39872169e3981657a500e33277e6348 | b8fbaf901530454e28a0588cccaba61bf0983225 | refs/heads/master | 2022-12-06T11:32:19.428628 | 2020-08-27T15:45:59 | 2020-08-27T15:45:59 | 263,555,888 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,345 | r | Support Vector Classifier.R | #Support Vector Classifier
# ISLR support-vector-classifier lab on simulated two-class data.
# Generate 20 points in 2D; shift class +1 so the classes overlap slightly.
set.seed (1)
x=matrix(rnorm(20*2), ncol=2)
y=c(rep(-1,10), rep(1,10))
x[y==1,]=x[y==1,] + 1
plot(x,col=(3-y))
# Encode the response as a factor so svm() performs classification.
dat=data.frame(x=x, y=as.factor(y))
library(e1071)
# Linear-kernel SVC with cost=10 (scale=FALSE keeps the raw coordinates).
svmfit=svm(y~., data=dat, kernel="linear", cost=10,scale=FALSE)
plot(svmfit,dat)
svmfit$index
summary(svmfit)
# Smaller cost -> wider margin -> more support vectors.
svmfit=svm(y~., data=dat, kernel="linear", cost=0.1, scale=FALSE)
plot(svmfit , dat)
svmfit$index
#By default, tune() performs ten-fold cross-validation on a set of models of interest.
set.seed (1)
tune.out=tune(svm,y~.,data=dat,kernel="linear", ranges=list(cost=c(0.001, 0.01, 0.1, 1,5,10,100)))
summary(tune.out)
# Best model selected by cross-validation.
bestmod=tune.out$best.model
summary(bestmod)
# Simulate a test set from the same mechanism and evaluate the best model.
xtest=matrix(rnorm(20*2), ncol=2)
ytest=sample(c(-1,1), 20, rep=TRUE)
xtest[ytest==1,]=xtest[ytest==1,] + 1
testdat=data.frame(x=xtest, y=as.factor(ytest))
ypred=predict(bestmod ,testdat)
table(predict=ypred, truth=testdat$y)
#What if we had instead used cost=0.01?
svmfit=svm(y~., data=dat, kernel="linear", cost=.01, scale=FALSE)
ypred=predict(svmfit ,testdat)
table(predict=ypred, truth=testdat$y)
# Separate the classes further so they are nearly linearly separable.
x[y==1,]=x[y==1,]+0.5
plot(x, col=(y+5)/2, pch=19)
dat=data.frame(x=x,y=as.factor(y))
# Huge cost: almost no margin violations allowed (risk of overfitting).
svmfit=svm(y~., data=dat, kernel="linear", cost=1e5)
summary(svmfit)
plot(svmfit , dat)
# Moderate cost: wider margin, likely better generalisation.
svmfit=svm(y~., data=dat, kernel="linear", cost=1)
summary(svmfit)
plot(svmfit ,dat)
#Support Vector Machine
|
9dee1ff481cc6485dfa45e48c63a9e3f16810210 | 0a906cf8b1b7da2aea87de958e3662870df49727 | /decido/inst/testfiles/earcut_cpp/libFuzzer_earcut_cpp/earcut_cpp_valgrind_files/1609874675-test.R | c0b0925ddc0be3f2a4d3bf8c218da9e7ab5e439c | [] | no_license | akhikolla/updated-only-Issues | a85c887f0e1aae8a8dc358717d55b21678d04660 | 7d74489dfc7ddfec3955ae7891f15e920cad2e0c | refs/heads/master | 2023-04-13T08:22:15.699449 | 2021-04-21T16:25:35 | 2021-04-21T16:25:35 | 360,232,775 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 469 | r | 1609874675-test.R | testlist <- list(holes = integer(0), numholes = integer(0), x = c(0, 0, 0, 0, 1.7272337367567e-77, 2.05226840076865e-289, 8.85466403653989e-310, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), y = numeric(0))
result <- do.call(decido:::earcut_cpp,testlist)
str(result) |
793f094acf193236bb2cb1a7d5b283e7a5935c51 | 77fa704a3d3cbaddf8c94ba841ed3da988fea64c | /R/texas_income.R | 5e8f2f54f5791fa4fd6f749806d7a8abdbdeaf35 | [
"MIT"
] | permissive | wilkelab/practicalgg | a80a257b3c57b746110923604875e9217ea293e2 | 5718aad4322ba86967efd6c5d2b31eace0319248 | refs/heads/master | 2021-06-26T22:34:30.561706 | 2021-02-01T23:06:05 | 2021-02-01T23:06:05 | 209,400,266 | 78 | 8 | NOASSERTION | 2019-10-14T19:24:47 | 2019-09-18T20:41:08 | R | UTF-8 | R | false | false | 832 | r | texas_income.R | #' Median income in Texas counties
#'
#' Median income in Texas counties, from the 2015 five-year American Community Survey.
#'
#' @examples
#' library(tidyverse)
#' library(sf)
#' library(cowplot)
#' library(colorspace)
#'
#' # B19013_001: Median household income in the past 12 months (in 2015 Inflation-adjusted dollars)
#'
#' texas_income %>%
#' ggplot(aes(fill = estimate)) +
#' geom_sf(color = "white") +
#' coord_sf(datum = NA) +
#' theme_map() +
#' scale_fill_continuous_sequential(
#' palette = "Teal", rev = TRUE,
#' na.value = "grey50", end = 0.9,
#' name = "income",
#' limits = c(18000, 90000),
#' breaks = 20000*c(1:4),
#' labels = c("$20,000", "$40,000", "$60,000", "$80,000")
#' ) +
#' theme(
#' legend.title.align = 0.5,
#' legend.text.align = 0
#' )
"texas_income"
|
1af5d435e42656a468bcde8f484f6c94a33f34fc | 4780d7b94b2ec4d55334c24c478cf65a84be857f | /install.r | c76b5242fd0e19a3dc06f34f95ae0a000fdf77a6 | [
"MIT"
] | permissive | truongsinh/jdf | 7fb8be77868acca8678fb4a14fcb6e0403d1c2d5 | 8fa255292fc14aa8c1c343ddc02f097b01384deb | refs/heads/master | 2022-12-07T09:16:35.920508 | 2020-08-24T14:39:55 | 2020-08-24T14:39:55 | 289,759,678 | 1 | 0 | MIT | 2020-08-23T20:14:52 | 2020-08-23T20:14:50 | null | UTF-8 | R | false | false | 461 | r | install.r | list.of.packages <- c("rmarkdown", "tinytex")
# keep only those required packages that are not installed yet
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
# install any missing packages from CRAN
if(length(new.packages)) install.packages(new.packages, repos = "http://cran.us.r-project.org")
# set up TinyTeX together with the extra LaTeX packages the documents need
tinytex::install_tinytex(extra_packages=c('changepage', 'eulervm', 'sourcecodepro', 'ly1', 'collection-fontsrecommended', 'csquotes', 'titlesec', 'enumitem', 'footmisc', 'footnotebackref', 'biblatex', 'microtype', 'mathpazo'))
|
6d2fcbb0d461ddb4b46620efe3c59e65a62cf8bb | f72b2f12f5c1cdcaf0e90084ed1eeb4d31ff3c4a | /cooccur_test.R | 09e068051b68dc447ec8d73188e8dfa23a4dd0b8 | [] | no_license | zlswiecki/dissertation | 81e64fa5112e0f52282b8d5ca08948ee2a4d5a33 | fa077437f5475d4b2a801e7934f6b293262211a5 | refs/heads/master | 2022-11-21T23:43:47.666528 | 2020-07-07T14:48:55 | 2020-07-07T14:48:55 | 277,840,629 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,270 | r | cooccur_test.R | # function to get code co-occurence *probability* matrices. Produces 1 code by code matric per person. For codei and code j, cellij is the number
# of lines with code i & code j divided by the number of lines with code i
# NOTE(review): with the per-code normalisation commented out below, each cell
# is actually (lines with code i & code j) / (total lines in the unit), i.e. a
# joint proportion rather than the conditional described above -- confirm
# which behaviour is intended before reusing.
#
# Args:
#   data:     wide data frame, one row per line, with 0/1 code columns
#   splitCol: column used to split rows into teams
#   codes:    character vector of code column names
#   units:    column identifying the person/unit within a team
# Returns a list: team_list (per-team matrices of upper-triangle pair values),
# normed (stacked matrix normalised by fun_sphere_norm, defined elsewhere),
# and team_matrices (per-team lists of full code-by-code matrices).
get_cooccurence_prob_3 = function(data, splitCol, codes, units) {
  # labels for each unordered code pair, e.g. "A & B"
  co_names_mat = combn(codes,2)
  co_names = apply(co_names_mat,2,paste,collapse = " & ")
  split_result = split(x = data, f = data[, splitCol])
  team_adj_list = list()
  team_mat_list = list()
  for (i in 1:length(split_result)) {
    co_adj_list = list()
    co_mat_list = list()
    mat = split_result[[i]]
    # split this team's rows by person/unit
    mat_units = split(mat,mat[,units])
    for(j in 1:length(mat_units)){
      co_mat = matrix(rep(0,length(codes)^2),length(codes),length(codes))
      # accumulate the outer product of each line's 0/1 code vector:
      # cell (a, b) ends up counting lines where both codes occur
      for(p in 1:nrow(mat_units[[j]])){
        co_mat = (as.numeric(mat_units[[j]][p,codes]) %o% as.numeric(mat_units[[j]][p,codes])) + co_mat
      }
      line_count = nrow(mat_units[[j]])
      # normalise by the unit's total line count
      co_mat = co_mat/line_count
      # code_sums = colSums(mat_units[[j]][,codes])
      # zeroes = which(code_sums == 0)
      # code_sums[zeroes] = 1
      #
      # for(k in 1:length(codes)){
      #
      #   co_mat[k,] = co_mat[k,]/code_sums[k]
      # }
      #
      # keep only the upper triangle (one value per unordered pair)
      ut = upper.tri(co_mat)
      co_mat_prob_adj = co_mat[ut]
      rownames(co_mat) = codes
      colnames(co_mat) = codes
      co_adj_list[[j]] = co_mat_prob_adj
      co_mat_list[[j]] = co_mat
    }
    names(co_adj_list) = names(mat_units)
    names(co_mat_list) = names(mat_units)
    # one column per unit, then transpose: rows = units, cols = code pairs
    co_adj_list = bind_cols(co_adj_list)
    co_adj_list = t(co_adj_list)
    colnames(co_adj_list) = co_names
    team_adj_list[[i]] = co_adj_list
    team_mat_list[[i]] = co_mat_list
  }
  names(team_adj_list) = names(split_result)
  names(team_mat_list) = names(split_result)
  # stack all teams into one matrix (rows = units across all teams)
  team_mat_adj = do.call("rbind",team_adj_list)
  # team_mat_adj = bind_rows(team_adj_list)
  # fun_sphere_norm() is defined elsewhere in the project
  normed_co = fun_sphere_norm(team_mat_adj)
  rownames(normed_co) = rownames(team_mat_adj)
  colnames(normed_co) = colnames(team_mat_adj)
  # return(list(raw = co_mat_adj, norm = normed_co))
  return(list(team_list = team_adj_list, normed = normed_co, team_matrices = team_mat_list))
}
|
f0c19a332b4b667d6ae6991b52a6ac53c9d14dd8 | d5364db161f73c70ee8dec6f9c2229488dbb8649 | /R3주 수업자료(1)/test12-01.R | eb9dfbb9a29ed499eaad63f36a8e32034a689bd1 | [] | no_license | dksktjdrhks2/R_programming | 26ac74f3044d2267f9e5f0b1a90b694b83b16322 | e2a73997fdccecdd4246bd9ed2e0990d913b0ad6 | refs/heads/master | 2020-12-13T19:43:37.929777 | 2020-02-13T07:52:26 | 2020-02-13T07:52:26 | 234,513,120 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,275 | r | test12-01.R | ###### 지하철 4호선 역주변의 115평형 아파트의 평균 가격을 구글맵에 표시하시오
library(dplyr) # load dplyr
library(reshape)
library(ggmap) # load ggmap (geocoding + Google map tiles)
library(ggplot2)
# --- Load the raw subway Line 4 station data -------------------------------
# read the station csv into a variable
station_data <- read.csv("./Rsung/4호선.csv")
station_data
####### register the Google Maps API key ####################################
register_google(key = 'AIzaSyAGkAU6aogi28q5BI2EHNjlWTRFIWS-yl0')
# coerce the old-style address column ("구주소") to character before geocoding
station_code <- as.character(station_data$"구주소")
is.character(station_data$"구주소")
# convert the addresses to longitude/latitude
station_code <- geocode(station_code)
head(station_code)
# combine the station data with its coordinates
station_code_final <- cbind(station_data, station_code)
head(station_code_final)
# --- Load and clean the apartment transaction data -------------------------
# read the Nowon-gu transaction csv into a variable
apart_data <- read.csv("./Rsung/노원구.csv")
head(apart_data) # inspect the first rows
# round the floor area ("전용면적") to whole numbers
apart_data$전용면적 = round(apart_data$전용면적)
head(apart_data) # inspect the first rows
# frequency table by floor area, sorted descending
count(apart_data, 전용면적) %>% arrange(desc(n))
# keep only the 115 m^2 units
apart_data_115 <- subset(apart_data, 전용면적 == "115")
head(apart_data_115)
# --- Average transaction price per apartment complex -----------------------
# strip the thousands separators from the price column ("거래금액")
apart_data_115$거래금액 <- gsub(",", "", apart_data_115$거래금액)
head(apart_data_115) # check the result
# mean price per complex ("단지명")
apart_data_115_cost <- aggregate(as.integer(거래금액) ~ 단지명, apart_data_115, mean)
head(apart_data_115_cost) # check the result
# rename "as.integer(거래금액)" back to "거래금액"
apart_data_115_cost <- rename(apart_data_115_cost, "거래금액" = "as.integer(거래금액)")
head(apart_data_115_cost) # check the result
# drop duplicated complexes, keeping the first row of each
# Fix: the original filtered by duplicated(apart_data_85$단지명) -- a different
# (85 m^2) data frame -- instead of the 115 m^2 data being subset here.
apart_data_115 <- apart_data_115[!duplicated(apart_data_115$단지명),]
head(apart_data_115) # check the result
# join the per-complex mean price back on by complex name
apart_data_115 <- left_join(apart_data_115, apart_data_115_cost, by = "단지명")
head(apart_data_115) # check the result
# keep only the needed columns ("거래금액.y" is the per-complex mean price)
apart_data_115 <- apart_data_115 %>% select("단지명", "시군구", "번지", "전용면적", "거래금액.y")
# rename "거래금액.y" to "거래금액"
apart_data_115 <- rename(apart_data_115, "거래금액" = "거래금액.y")
head(apart_data_115) # check the result
# --- Build a full address from district ("시군구") + lot number ("번지") ---
apart_address <- paste(apart_data_115$"시군구", apart_data_115$"번지")
head(apart_address) # check the result
# same, but stored as a data frame
apart_address <- paste(apart_data_115$"시군구", apart_data_115$"번지") %>% data.frame()
head(apart_address) # check the result
# rename the auto-generated "." column to "주소" (address)
apart_address <- rename(apart_address, "주소"= ".")
head(apart_address)
# geocode the apartment addresses (UTF-8 encode first)
apart_address_code <- as.character(apart_address$"주소") %>% enc2utf8() %>% geocode()
head(apart_address)
# combine everything and keep the columns needed for plotting
apart_code_final <- cbind(apart_data_115, apart_address, apart_address_code) %>%
  select("단지명", "전용면적","거래금액", "주소", lon, lat)
head(apart_code_final)
# --- Plot on Google maps ---------------------------------------------------
# base map of Nowon-gu
mapo_map <- get_googlemap('노원구', maptype= 'roadmap', zoom = 12)
ggmap(mapo_map)
# scatter the subway stations and label them with the station name ("역명")
ggmap(mapo_map) +
  geom_point(data = station_code_final, aes(x = lon, y = lat),
             colour = "red", size = 3) +
  geom_text(data = station_code_final, aes(label = 역명, vjust = -1))
# map centred on Nowon station
no_map <- get_googlemap("노원역", maptype = "roadmap", zoom = 12)
# plot stations and apartment complexes together
ggmap(no_map) +
  geom_point(data = station_code_final, aes(x = lon, y = lat), colour = "red", size = 3) +
  geom_text(data = station_code_final, aes(label = 역명, vjust = -1)) +
  geom_point(data = apart_code_final, aes(x = lon, y = lat))+
  geom_text(data = apart_code_final, aes(label = 단지명, vjust = -1)) +
  geom_text(data = apart_code_final, aes(label = 단지명, vjust = 1))
9a568d99b3c52494f351a9dee6af77443332a7ad | d31c09f122f35fa2607b25c0fdfd08966b4724f6 | /man/LIMCaliforniaSediment.Rd | 00ccef3e5eaa205c4c75959bd7a8c765b643dd25 | [] | no_license | cran/LIM | a8468e77cab46c8ca4098e5d42eb5ba62d7c774a | 8083b705559ffddf23267d53917e8e5de64a0026 | refs/heads/master | 2022-06-01T22:58:43.185583 | 2022-05-11T09:10:02 | 2022-05-11T09:10:02 | 17,680,232 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,555 | rd | LIMCaliforniaSediment.Rd | \name{LIMCaliforniaSediment}
\docType{data}
\alias{LIMCaliforniaSediment}
\title{
Linear inverse model specification for the Santa Monica Basin
sediment food web
}
\description{
Linear inverse model specification for the Santa Monica Basin
(California) sediment food web as in Eldridge and Jackson (1993).
The Santa Monica Basin is a hypoxic-anoxic basin located near California.
The model contains both chemical and biological species.
The foodweb comprises 7 functional compartments and five external
compartments, connected with 32 flows.
Units of the flows are mg /m2/day
The linear inverse model LIMCaliforniaSediment is generated from the file
\file{CaliforniaSediment.input} which can be found in
subdirectory \code{/examples/FoodWeb} of the package directory
In this subdirectory you will find many foodweb example input files
These files can be read using \code{\link{Read}} and their output
processed by \code{\link{Setup}} which will produce a linear inverse
problem specification similar to LIMCaliforniaSediment
}
\usage{
data(LIMCaliforniaSediment)
}
\format{
a list of matrices, vectors, names and values that specify the linear
inverse model problem.
see the return value of \code{\link{Setup}} for more information about
this list
A more complete description of this structures is in vignette("LIM")
}
\author{
Karline Soetaert <karline.soetaert@nioz.nl>
Dick van Oevelen <dick.vanoevelen@nioz.nl>
}
\examples{
CaliforniaSediment <- Flowmatrix(LIMCaliforniaSediment)
plotweb(CaliforniaSediment, main = "Santa Monica Basin Benthic web",
sub = "mgN/m2/day", lab.size = 0.8)
\dontrun{
xr <- LIMCaliforniaSediment$NUnknowns
i1 <- 1:(xr/2)
i2 <- (xr/2+1):xr
Plotranges(LIMCaliforniaSediment, index = i1, lab.cex = 0.7,
sub = "*=unbounded",
main = "Santa Monica Basin Benthic web, Flowranges - part1")
Plotranges(LIMCaliforniaSediment, index = i2, lab.cex = 0.7,
sub = "*=unbounded",
main = "Santa Monica Basin Benthic web, Flowranges - part2")}
}
\references{
Eldridge, P.M., Jackson, G.A., 1993. Benthic trophic dynamics in
California coastal basin and continental slope communities inferred
using inverse analysis. Marine Ecology Progress Series 99, 115-135.
}
\seealso{
browseURL(paste(system.file(package="LIM"), "/doc/examples/Foodweb/", sep=""))
contains "CaliforniaSediment.input", the input file; read this with \code{Setup}
\code{\link{LIMTakapoto}}, \code{\link{LIMRigaSummer}} and many others
}
\keyword{datasets}
|
10038f160aa4e737496e1c8b5077dcf20555717c | 7d5968837bec87fcc42bab82f82db8bfa169e7c7 | /man/author.packages.Rd | b7df1ad8ef3ad6c12f03f1c7c9f064d3e1a54f3f | [] | no_license | liuguofang/figsci | ddadb01fae7c208b4ac3505eed5dc831d7de0743 | 076f7dd70711836f32f9c2118ad0db21ce182ea2 | refs/heads/master | 2021-06-04T19:23:34.065124 | 2020-02-12T04:22:11 | 2020-02-12T04:22:11 | 107,945,277 | 6 | 1 | null | null | null | null | UTF-8 | R | false | false | 487 | rd | author.packages.Rd | \name{author.packages}
\alias{author.packages}
\title{ Search the extended R packages based on an author}
\usage{
author.packages(author,...)
}
\description{
This function can be used to find author's package(s) by means of findFn function of sos package.}
\arguments{
\item{author}{The name of an R package author or developer.}
\item{...}{Further arguments passed to the \code{\link{findFn}} function.}
}
\seealso{
\code{\link{findFn}}.
}
\examples{
pack <- author.packages("Hadley Wickham")
pack
}
|
61717f1abf608382359ce965b9a555f74cc59ae6 | f7a17a0c09546f0b5448921c270cf6dafa899d25 | /R-Programming-Assignment-3/rankhospital.R | 292f524d7380f9cdd05a72ff5f526ba9733d1541 | [] | no_license | daswan1118/Coursera-R-Programming | 9a8016633fe7de3604b387af8a68ab3574271790 | 525e16e5527f8b873673892760ce39bdb28b0bbc | refs/heads/master | 2020-04-06T08:35:05.297253 | 2018-11-13T03:39:13 | 2018-11-13T03:39:13 | 157,308,979 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,408 | r | rankhospital.R | ##### Ranking Hospitals by Outcome in a State #####
## rankhospital(): returns the name of the hospital in `state` whose 30-day
## mortality rate for `condition` has the requested rank.
rankhospital <- function (state, condition, rank = 'best') {
  # Args:
  #   state:     two-letter state abbreviation (validated against state.abb).
  #   condition: one of 'heart attack', 'heart failure', 'pneumonia'.
  #   rank:      numeric rank, 'best' (rank 1) or 'worst' (last rank).
  # Reads 'outcome-of-care-measures.csv' from the working directory.
  # Returns the hospital name, NA_character_ if rank is out of range, or
  # 'invalid state' / 'invalid outcome' on bad input.

  # Fix: validate BEFORE touching the data -- the original validated after
  # subsetting, so an invalid state silently produced an empty result.
  if (!(state %in% state.abb)) return('invalid state')
  if (!(condition %in% c('heart attack', 'heart failure', 'pneumonia'))) return('invalid outcome')

  # Suppress coercion warnings while reading; restore on exit. (The original
  # placed options(warn=0) after return(), so it never ran.)
  old_warn <- options(warn = -1)
  on.exit(options(old_warn), add = TRUE)

  # Read data
  outcome <- read.csv('outcome-of-care-measures.csv', colClasses = 'character')
  names(outcome) <- tolower(names(outcome))

  # Subset to hospital name, state and the mortality column for `condition`.
  col_nm <- paste('hospital.30.day.death..mortality..rates.from.', sub(' ', '.', condition), sep = '')
  outcome <- outcome[, c('hospital.name', 'state', col_nm)]
  # Fix: coerce to numeric BEFORE complete.cases(); the original filtered
  # first, so 'Not Available' rows only became NA afterwards and survived.
  outcome[, col_nm] <- as.numeric(outcome[, col_nm])
  outcome <- outcome[complete.cases(outcome), ]
  outcome <- outcome[outcome$state == state, ]

  # Resolve the requested rank.
  if (rank == 'best') rank <- 1
  if (rank == 'worst') rank <- nrow(outcome)
  rank <- as.numeric(rank)

  # Order by mortality rate, ties broken alphabetically by hospital name.
  outcome <- outcome[order(outcome[, col_nm], outcome$hospital.name), ]
  if (is.na(rank) || rank < 1 || rank > nrow(outcome)) return(NA_character_)
  outcome$hospital.name[rank]
}
|
051c9ffee10a88862dd1929c79978c76aa92b452 | 56444a4057c4603677582510e9a6e23c036d4abf | /man/FilterContextCytosines.Rd | 9670002ec065b43ad9e6b1dd4a6daf3e1148b545 | [] | no_license | grimbough/SingleMoleculeFootprinting | 304ff5d68b2dedc27feea8e83f18f8dcac959c95 | 01ed0ffba3a4e313dd0b18beac77c80db9ab434a | refs/heads/main | 2023-03-14T01:04:49.610972 | 2021-03-03T14:55:01 | 2021-03-03T14:55:01 | 339,756,377 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 487 | rd | FilterContextCytosines.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/context_methylation_functions.r
\name{FilterContextCytosines}
\alias{FilterContextCytosines}
\title{Filter Cytosines in context}
\usage{
FilterContextCytosines(MethGR, genome, context)
}
\arguments{
\item{MethGR}{Granges obj of average methylation}
\item{genome}{BSgenome}
\item{context}{Context of interest (e.g. "GC", "CG",..)}
}
\value{
filtered Granges obj
}
\description{
Filter Cytosines in context
}
|
c78a4f1eee4559b9addc5368c5cdfe21251a6ac8 | 1d50e1db9cdc6140dcbbf4d3ca507082ee54be08 | /PopGenEx1.R | b4ab74bfcf21596e2b313a06383ed81e92536204 | [] | no_license | jelardaquino/PopGen | fb1089c87627bcec33fa16afc380bbd9a0a08055 | 7cf9654927d072d7bc0253a5e6d25f12f5430fe5 | refs/heads/master | 2020-04-21T04:24:57.388486 | 2019-02-12T20:32:07 | 2019-02-12T20:32:07 | 169,312,328 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,262 | r | PopGenEx1.R | pops_before <- replicate(25, sample(c("CC", "CG", "GG"), 990, replace=TRUE), simplify=FALSE)
names(pops_before)<-paste("pop", 1:25, sep="")
# allele frequencies before the bottleneck: 990 diploids -> 1980 allele copies
freq_C_before <- lapply(pops_before, function(x) ((sum(x == "CC")*2) + sum(x == "CG"))/1980)
freq_G_before <- lapply(pops_before, function(x) ((sum(x == "GG")*2) + sum(x == "CG"))/1980)
hist(unlist(freq_C_before), breaks=10)
hist(unlist(freq_G_before))
plot(unlist(freq_C_before), unlist(freq_G_before), pch=16, cex=2, col="turquoise", xlab="Freq C", ylab="Freq G")
# NOTE(review): the lines below use freq_C_after / freq_G_after BEFORE they
# are defined further down -- this reads like an interactive-session
# transcript and only works if the later lines were already run once.
plot(unlist(freq_C_before), type="b", pch=19, col="red", xlab="Population", ylab="Freq of C", ylim=c(0.46, 0.54))
lines(unlist(freq_C_after), type="b", pch=19, col="blue", lty=2)
legend("topright", legend=c("Before", "after"), col=c("red", "blue"), lty=1:2, cex=0.8)
mean(unlist(freq_C_before))
mean(unlist(freq_C_after))
mean(unlist(freq_G_after))
# simulate a bottleneck: sample 742 of 990 individuals without replacement
pops_after <- lapply(pops_before, function(x) sample(x, 742, replace=FALSE))
# allele frequencies after the bottleneck: 742 diploids -> 1484 allele copies
freq_C_after <- lapply(pops_after, function(x) ((sum(x == "CC")*2) + sum(x == "CG"))/1484)
freq_G_after <- lapply(pops_after, function(x) ((sum(x == "GG")*2) + sum(x == "CG"))/1484)
# NOTE(review): this loop discards its result each iteration -- it looks like
# an abandoned draft of the lapply() above.
for(i in 1:25){
  sample(pops_before[[i]], 742, replace=FALSE)
}
length(pops_before)
length(pops_before[[2]])
pops_after
pops_before
|
41532433f53f734548f668528edea0668f0cf454 | bffe065b5dca2a1465a3bfed84f97281093d3fb8 | /R-script/functions.R | ceae49a1d105f8cce0efc27c08b6a8f1ff5cfb90 | [] | no_license | ChristophSchwarzkopp/MultiStressorImpacts | 249877808c0c34887aa2320b31e6eaef10f7aa36 | c8ab0d193d8d01e0c997ac12501f8b9d69cac4ab | refs/heads/master | 2022-11-29T02:58:15.888100 | 2020-08-05T08:51:36 | 2020-08-05T08:51:36 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,509 | r | functions.R | # R functions to estimate and apply the variable transformation
# Remove missing values before running the transformation
# (Section 2 in Guidance document; DOI: 10.13140/RG.2.2.10494.95040)
estimateBC = function(x){
  # function to estimate transformation parameters for continuous variable x
  # Returns the named vector c(gamma, lambda, xT.mean, xT.sd), for use with
  # applyBC() and backBC().
  # NOTE(review): mean()/sd() below are called without na.rm, so x must be
  # free of missing values (see the header note in this file).
  require(car)
  gamma = min(x, na.rm=T) - 0.001 # offset (min value minus a small number)
  x = x - gamma # subtract gamma from x, so that it is strictly positive
  lambda = powerTransform(x~1, family="bcPower")$lambda # estimate lambda of Box-Cox transformation...
  xT = bcPower(x, lambda=lambda) # apply box-cox transform
  xT.mean = mean(xT) # mean of transformed values, for centring
  xT.sd = sd(xT) # sd of transformed values, for scaling
  # return the transformation parameters
  return(c(gamma=gamma, lambda=lambda, xT.mean=xT.mean, xT.sd=xT.sd))
}
applyBC = function(x, P=estimateBC(x)){
  # Transform continuous variable x with the Box-Cox parameters P
  # (c(gamma, lambda, xT.mean, xT.sd), as produced by estimateBC()),
  # then centre and scale the transformed values.
  require(car)
  shifted <- bcPower(x - P[1], P[2])  # subtract the offset, apply Box-Cox
  (shifted - P[3]) / P[4]             # centre and scale
}
backBC = function(xT, P){
  # Invert the scaled/centred Box-Cox transform produced by applyBC().
  #
  # Args:
  #   xT: transformed values (centred and scaled).
  #   P:  parameter vector c(gamma, lambda, xT.mean, xT.sd) from estimateBC().
  # Returns the values on the original scale.
  gamma   <- P[[1]]
  lambda  <- P[[2]]
  xT.mean <- P[[3]]
  xT.sd   <- P[[4]]
  xT.unscaled <- xT * xT.sd + xT.mean  # undo centring and scaling
  if (lambda == 0) {
    # Generalisation: Box-Cox with lambda == 0 is log(x), so its inverse is
    # exp(); the original power formula divided by zero in this case.
    x.original <- exp(xT.unscaled) + gamma
  } else {
    # (lambda*y + 1)^(1/lambda) is the standard Box-Cox inverse; equivalent
    # to the original exp(log(...)/lambda) but written directly.
    x.original <- (lambda * xT.unscaled + 1)^(1 / lambda) + gamma
  }
  return(x.original)
}
|
fc3bdb11a2308b470643e163b1726f0492387172 | 0067835d0512fea1a427f074ef4c956848d613e0 | /RandomForest.R | c6a5e771ba3447592af932aa21d2bbcdb4543251 | [] | no_license | gammalight/Modeling-Code | c1c4ca278852b355ffeda8c11150c02a03263318 | e4a832706f07afb7b4bc43763c70fe2beff6bf18 | refs/heads/master | 2020-04-20T05:09:29.977082 | 2019-03-08T03:29:06 | 2019-03-08T03:29:06 | 168,648,322 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 16,081 | r | RandomForest.R | #this script is used to predict customer churn
#the data was downloaded off of Kaggle
#load packages that I will need to
#read in data
#conduct exploratory analysis
#manipulate data
#build several models
#load necessary packages
library(dplyr)
library(reshape)
library(ggplot2)
library(caret)
library(caretEnsemble)
library(RColorBrewer)
library(pROC)
library(data.table)
library(randomForest)
library(caTools)
library(gtools)
VarSelectME<- function(checkDF){
  # Rank predictors with a quick random forest and return the names of the
  # top N_Vars variables by IncNodePurity. Relies on two globals that the
  # calling script must define: `indvarsc` (character vector of predictor
  # column names) and `N_Vars` (how many top variables to return).
  # `checkDF` must contain the predictor columns plus a numeric DEP column.
  ### paramaters below are auto set - no need to manually code unless you would like to override
  N_trees <- 500 ## number of Random Forest trees; usually enough to converge nicely when doing variable selection only
  # NOTE(fix): the original computed the record counts from the global `Final`
  # instead of the `checkDF` argument, silently ignoring the data passed in.
  Recs<-nrow(checkDF) ## total records in the file
  Recs_Dep1 <-sum(checkDF$DEP) ## how many total records where DEP=1 (dependent variable equals 1)
  Node_Size<-round(50/(Recs_Dep1/Recs)) ## min terminal node size so on average at least 50 DEP=1s are present in each terminal node
  Max_Nodes<-NULL ## maximum number of terminal nodes (NULL = unlimited)
  Sample_Size<-round(.3*Recs) ## iteration sample size
  set.seed(100)
  temp <- randomForest(checkDF[,indvarsc],checkDF$DEP
                       ,sampsize=c(Sample_Size),do.trace=TRUE,importance=TRUE,ntree=N_trees,replace=FALSE,forest=TRUE
                       ,nodesize=Node_Size,maxnodes=Max_Nodes,na.action=na.omit)
  RF_VARS <- as.data.frame(round(importance(temp), 2))
  RF_VARS <- RF_VARS[order(-RF_VARS$IncNodePurity) ,]
  best_vi=as.data.frame(head(RF_VARS,N_Vars))
  topvars <-as.vector(row.names(best_vi))
  ## topvars now contains the top N variables
  return(topvars)
}
###################################################################################################
#####################################################################################
## function to split out the binary attributes
## function to split out the binary attributes
## Return the columns of checkDF that are strictly 0/1 valued, with the
## dependent variable named by depVar always kept first. A column counts as
## binary when its min is 0, its max is 1, and all three quartiles are
## themselves 0 or 1.
createBinaryDF <- function(depVar, checkDF) {
  keep <- c(depVar)
  for (col in names(checkDF)) {
    if (col == depVar) next
    s <- summary(checkDF[, col])
    quartiles_binary <- all(s[c("1st Qu.", "Median", "3rd Qu.")] %in% c(0, 1))
    if (s["Min."] == 0 && s["Max."] == 1 && quartiles_binary) {
      keep <- c(keep, col)
      print(paste("Adding binary: ", col, sep = ""))
    }
  }
  # NB: with a single kept column this drops to a vector, matching the
  # original checkDF[, binaryCols] behaviour.
  checkDF[, keep]
}
#####################################################################################
######################################################################################
### Scaling function
### Scaling function
scaleME <- function(checkDF){
  # Normalise every numeric column of checkDF to [0, 1]:
  #   1. centre and scale to z-scores
  #   2. take a signed cube root to pull in heavy tails
  #   3. min-max ("softmax") rescale each column to the 0-1 range
  require(stats)
  ## center and scale the vars. (The original also passed center=/scale= to
  ## as.data.frame(), where they were silently ignored -- and the scale= value
  ## referenced an undefined `seg`, surviving only via lazy evaluation.)
  checkDF<- as.data.frame(scale(checkDF))
  ## Signed cube root. NOTE(fix): the original wrote x^1/3, which R parses as
  ## (x^1)/3 -- i.e. divide by 3, not a cube root. sign()/abs() keeps the cube
  ## root real for negative z-scores ((-2)^(1/3) would be NaN).
  cube_root<- function(x){sign(x) * abs(x)^(1/3)}
  checkDF<-as.data.frame(lapply(checkDF, cube_root))
  ## min-max rescale: convert all vars to a range of 0 to 1
  for (n in names(checkDF)) {
    rng <- range(checkDF[,n])
    checkDF[,n]<-(checkDF[,n]-rng[1])/(rng[2]-rng[1])
  }
  return(checkDF)
}
#Calculate Logloss
# Mean negative log-likelihood of binary labels DEP given predicted
# probabilities `score`. eps clamps predictions away from 0/1 so log()
# stays finite.
LogLoss <- function(DEP, score, eps=0.00001) {
  clamped <- pmax(pmin(score, 1 - eps), eps)
  pointwise <- DEP * log(clamped) + (1 - DEP) * log(1 - clamped)
  -sum(pointwise) / length(DEP)
}
######################################################################################
#read in the customer churn csv data file
# NOTE(review): absolute Windows path -- will only run on the author's machine.
churnData <- read.csv("C:\\Users\\Kevin Pedde\\Documents\\R\\Work\\CustomerChurn\\customerchurn\\TelcoCustomerChurn.csv")
###############################################################
### Feature Engineering ###
#will get to this later, just want to build some models to get baseline
#ideas for new variables:
# - Phone only
# - Internet only
# - paperless billing and auto pay
#Okay lets create these variables
# Derived Yes/No flags plus a numeric 0/1 response column `churn`.
churnData <- churnData %>%
mutate(PhoneOnly = if_else(PhoneService == 'Yes' & InternetService == 'No', 'Yes', 'No'),
InternetOnly = if_else(PhoneService == 'No' & InternetService != 'No', 'Yes', 'No'),
PhoneInternet = if_else(PhoneService == 'Yes' & InternetService != 'No', 'Yes', 'No'),
PaperlessAutoPay = if_else(PaperlessBilling == 'Yes' &
PaymentMethod %in% c("Bank transfer (automatic)","Credit card (automatic)"), 'Yes', 'No'),
churn = if_else(Churn == 'Yes',1,0))
#first drop all tenure 0 people
churnData <- churnData %>%
select(-customerID) %>% #deselect CustomerID
filter(tenure > 0) %>%
droplevels()
## Create Dummy Variables ##
# One-hot encode every categorical predictor (fullRank = FALSE keeps a column
# per level, so each factor expands to all of its levels).
dmy <- dummyVars(" ~ gender + Partner + Dependents + PhoneService +
MultipleLines + InternetService + OnlineSecurity +
OnlineBackup + DeviceProtection + TechSupport +
StreamingTV + StreamingMovies + Contract + PaperlessBilling +
PaymentMethod + PhoneOnly + InternetOnly + PaperlessAutoPay +
PhoneInternet",
data = churnData,
fullRank = FALSE)
dmyData <- data.frame(predict(dmy, newdata = churnData))
#print(head(dmyData))
#strip the "." out of the column names
colClean <- function(x){ colnames(x) <- gsub("\\.", "", colnames(x)); x }
dmyData <- colClean(dmyData)
#lets combine the new dummy variables back with the original continuous variables
# NOTE(review): positional selection -- per names2 below these positions are
# SeniorCitizen, tenure, MonthlyCharges, TotalCharges and churn; fragile if
# the CSV column order ever changes.
churnDataFinal <- cbind(dmyData, churnData[,c(2,5,18,19,25)])
#lets get a traing and test data set using the createPartition function from Caret
set.seed(420)
inTrain <- createDataPartition(churnDataFinal$churn, p = 4/5, list = FALSE, times = 1)
trainchurnData <- churnDataFinal[inTrain,]
testchurnData <- churnDataFinal[-inTrain,]
# NOTE(review): rename(df, c(old="new")) is the reshape/plyr signature, not
# dplyr's -- it relies on reshape being attached after dplyr (see library
# calls at the top of this file).
inputdf <- rename(trainchurnData, c(churn="DEP"))
inputdf_test <- rename(testchurnData, c(churn="DEP"))
#Get names of columns
names1 <- names(inputdf)
#fix(names1)
# Frozen, cleaned column order used for both train and test frames.
names2 <- c("genderFemale", "genderMale", "PartnerNo", "PartnerYes", "DependentsNo",
"DependentsYes", "PhoneServiceNo", "PhoneServiceYes", "MultipleLinesNo",
"MultipleLinesNophoneservice", "MultipleLinesYes", "InternetServiceDSL",
"InternetServiceFiberoptic", "InternetServiceNo", "OnlineSecurityNo",
"OnlineSecurityNointernetservice", "OnlineSecurityYes", "OnlineBackupNo",
"OnlineBackupNointernetservice", "OnlineBackupYes", "DeviceProtectionNo",
"DeviceProtectionNointernetservice", "DeviceProtectionYes", "TechSupportNo",
"TechSupportNointernetservice", "TechSupportYes", "StreamingTVNo",
"StreamingTVNointernetservice", "StreamingTVYes", "StreamingMoviesNo",
"StreamingMoviesNointernetservice", "StreamingMoviesYes", "ContractMonthtomonth",
"ContractOneyear", "ContractTwoyear", "PaperlessBillingNo", "PaperlessBillingYes",
"PaymentMethodBanktransferautomatic", "PaymentMethodCreditcardautomatic",
"PaymentMethodElectroniccheck", "PaymentMethodMailedcheck", "PhoneOnlyNo",
"PhoneOnlyYes", "InternetOnlyNo", "InternetOnlyYes", "PaperlessAutoPayNo",
"PaperlessAutoPayYes", "PhoneInternetNo", "PhoneInternetYes",
"SeniorCitizen", "tenure", "MonthlyCharges", "TotalCharges",
"DEP")
# Zero-fill any missing values (TotalCharges can be NA in this dataset).
inputdf_1 <- inputdf[names2]
inputdf_1[is.na(inputdf_1)] <- 0
inputdf_test1 <- inputdf_test[names2]
inputdf_test1[is.na(inputdf_test1)] <- 0
#summary(inputdf_1)
################################################################
## 2)split out the binary attributes
## see bottom section for the function called here (createBinaryDF)
## droppedBindf contains the X and DEP variables as well
droppedBinDF <-createBinaryDF("DEP", inputdf_1)
droppedBinDF_test <-createBinaryDF("DEP", inputdf_test1)
## now create the file with all non-binary attributes
# delVar lists the binary columns (including DEP), so mydropvars below keeps
# only the continuous columns of inputdf_1.
delVar <- names(droppedBinDF)
## delVar <- delVar[delVar != "X"] ## Keep X
## delVar <- delVar[delVar != "DEP"] ## Keep DEP
mydropvars <- !((names(inputdf_1)) %in% (delVar))
inputdf2 <- inputdf_1[mydropvars]
inputdf_test2 <- inputdf_test1[mydropvars]
################################################################
## 3)scale the non-binary attributes
## see bottom section for the function called here (scaleME)
## NOTE(review): steps 3 and 4 are currently disabled (commented out), so the
## continuous variables go into the model unscaled.
#inputdf2 <-scaleME(inputdf2)
#inputdf3<- data.frame(scale(inputdf2, center=TRUE, scale=TRUE))
################################################################
## 4)Calculate transformations of non-binary attributes
## see bottom section for the function called here (scaleME)
#trans_vars <-transformME(inputdf2)
################################################################
## 5)Final File: combine all attributes (binary block first, then the
## remaining continuous variables; DEP rides along inside droppedBinDF)
Final <- cbind(droppedBinDF, inputdf2)
Final_test <- cbind(droppedBinDF_test, inputdf_test2)
#Final$DEP <- as.factor(as.character(Final$DEP))
#Final_test$DEP <- as.factor(as.character(Final_test$DEP))
summary(Final_test)
summary(Final)
## drop all interim data
## NOTE(fix): the original removed the misspelled `delvar` (a warning, while
## the real `delVar` survived) and `trans_vars`, which is never created
## because step 4 above is commented out.
rm(droppedBinDF)
rm(inputdf2)
rm(delVar)
rm(mydropvars)
###################################################################################
## Build the Models: Random Forest ##
###################################################################################
##################################################################################
## Create the independent variable string (indvarsc)
## save(Final, file = "Final.RData")
## load("Final.RData")
# indvarsc = every column of Final except the id/response columns; it is read
# as a global by VarSelectME().
myvars <- names(Final) %in% c("Row.names","DEP")
tmp <- Final[!myvars]
indvarsc <- names(tmp)
rm(myvars)
rm(tmp)
# NOTE(review): on a fresh session the objects below do not exist yet, so each
# rm() emits an "object not found" warning (harmless; they exist on re-runs).
rm(decile_report)
rm(auc_out)
rm(RF_VARS)
rm(pred)
rm(rank)
###################################################################################
## determine the top N variables to use using Random Forest - if you want to use all the variables skip this step
## you can think of this technique as similar tostepwise in regression
## manually enter the number of top variables selected you would like returned (must be <= total predictive vars)
N_Vars <- 15
### Run the variable selection procedure below. The final top N variables will be returned
Top_N_Vars<-VarSelectME(Final)
names(Top_N_Vars)
# NOTE(review): fix() opens an interactive editor and will block a
# non-interactive run; the hard-coded vector below then overwrites whatever
# VarSelectME()/fix() produced, freezing a previously selected variable set.
fix(Top_N_Vars)
Top_N_Vars <- c("ContractMonthtomonth", "tenure", "OnlineSecurityNo", "TechSupportNo",
"InternetServiceFiberoptic", "TotalCharges", "MonthlyCharges",
"PaymentMethodElectroniccheck", "ContractTwoyear", "InternetServiceDSL",
"ContractOneyear", "OnlineBackupNo", "DeviceProtectionNo", "OnlineSecurityYes",
"PaperlessBillingYes")
###################################################################################
## build random forest model based on top N variables
### paramaters below are auto set - no need to manually code unless you would like to override
N_trees <- 500 ## enter the number of Random Forest Trees you want built - if you are not sure 500 is usually enough to converge nicely when doing variable selection only
Recs<-length(Final[,1]) ## total records in the file
Recs_Dep1 <-sum(Final$DEP) ## how many total records where DEP=1 (dependent variable equals 1)
Node_Size<-round(50/(Recs_Dep1/Recs)) ## auto calculation for min terminal node size so on average at least 50 DEP=1s are present in each terminal node '
Max_Nodes<-20 ## maximum number of terminal nodes. 20 to 25 is
Sample_Size<-round(.7*Recs) ## iteration sample size - 20% is usually good
set.seed(100)
# DEP is numeric here, so this fits a regression forest; its predictions are
# treated as churn scores below.
Final_RF<- randomForest(Final[,Top_N_Vars],Final$DEP
,sampsize=c(Sample_Size)
,do.trace=TRUE
,importance=TRUE
,ntree=N_trees
,replace=FALSE
,forest=TRUE
,nodesize=Node_Size
,maxnodes=Max_Nodes
,na.action=na.omit)
RF_VARS <- as.data.frame(round(importance(Final_RF), 2))
RF_VARS <- RF_VARS[order(-RF_VARS$IncNodePurity) ,]
#save(Final_RF, file = "Final_RF_2011.RData")
#load("Final_RF_2011.RData")
# Score
# NOTE(review): type="prob" here is an argument to data.frame(), NOT to
# predict() -- it just creates a literal column named "type" that the next
# line drops. The scores are the plain regression predictions.
pred <- data.frame(predict(Final_RF,Final[,Top_N_Vars]),type="prob") ##type= options are response, prob. or votes
pred <- pred[c(-2)]
names(pred) <- "score"
summary(pred)
# Apply Deciles
# 0.1 option makes 10 equal groups (.25 would be 4). negative option (-pred$score) makes the highest score equal to 1
rank <- data.frame(quantcut(-pred$score, q=seq(0, 1, 0.1), labels=F))
names(rank) <- "rank"
# apply the rank
Final_Scored <- cbind(Final,pred,rank)
#Run AUC
#use the two different ways
auc_out <- colAUC(Final_Scored$score, Final_Scored$DEP, plotROC=TRUE, alg=c("Wilcoxon","ROC"))
rocObj <- roc(Final_Scored$DEP, Final_Scored$score)
auc(rocObj)
#Run Decile Report: do average of all model vars, avg DEP and min score, max score and avg score
# NOTE(review): sqldf is used here but library(sqldf) is never loaded at the
# top of this file -- confirm it is attached in the session.
decile_report <- sqldf("select rank, count(*) as qty, sum(DEP) as Responders, min(score) as min_score,
max(score) as max_score, avg(score) as avg_score
from Final_Scored
group by rank")
write.csv(decile_report,"decile_report.csv")
#Calculate the Logloss metric
LogLoss(Final_Scored$DEP,Final_Scored$score)
#0.4132963
#0.4132963
#find the Youden index to use as a cutoff for cunfusion matrix
coords(rocObj, "b", ret="t", best.method="youden") # default
#0.3083464
#Classify row as 1/0 depending on what the calculated score is
#play around with adjusting the score to maximize accuracy or any metric
Final_Scored <- Final_Scored %>%
mutate(predClass = if_else(score > 0.51, 1, 0),
predClass = as.factor(as.character(predClass)),
DEPFac = as.factor(as.character(DEP)))
#Calculate Confusion Matrix
confusionMatrix(data = Final_Scored$predClass,
reference = Final_Scored$DEPFac)
#Now score the test set and find all the metrics (AUC, Logloss, etc)
#now predict on the test set and compare results
preds_test <- data.frame(predict(Final_RF, Final_test))
#preds <- preds[c(-1)]
names(preds_test) <- "score"
summary(preds_test)
# 0.1 option makes 10 equal groups (.25 would be 4). negative option (-pred$score) makes the highest score equal to 1
rank <- data.frame(quantcut(-preds_test$score, q=seq(0, 1, 0.1), labels=F))
names(rank) <- "rank"
predDataFinal_test_rf <- cbind(Final_test, preds_test, rank)
rocObj <- roc(predDataFinal_test_rf$DEP, predDataFinal_test_rf$score)
auc(rocObj)
#0.8461
#Run Decile Report: do average of all model vars, avg DEP and min score, max score and avg score
decile_report_test <- sqldf("select rank, count(*) as qty, sum(DEP) as Responders, min(score) as min_score,
max(score) as max_score, avg(score) as avg_score
from predDataFinal_test_rf
group by rank")
write.csv(decile_report,"decile_report.csv")
#Calculate the Logloss metric
LogLoss(predDataFinal_test_rf$DEP,predDataFinal_test_rf$score)
#0.4095148
#find the Youden index to use as a cutoff for cunfusion matrix
coords(rocObj, "b", ret="t", best.method="youden") # default
#0.3687526
#Classify row as 1/0 depending on what the calculated score is
#play around with adjusting the score to maximize accuracy or any metric
predDataFinal_test_rf <- predDataFinal_test_rf %>%
mutate(predClass = if_else(score > 0.51, 1, 0),
predClass = as.factor(as.character(predClass)),
DEPFac = as.factor(as.character(DEP)))
#Calculate Confusion Matrix
confusionMatrix(data = predDataFinal_test_rf$predClass,
reference = predDataFinal_test_rf$DEPFac)
|
59a617871824eb28a52735b99d3ab0d0afc3df68 | 91afedcaabe90ba77ae4e412f9254eaedcc7663d | /R/robust_smooth.R | 6f8bcf9f5c283e0d50036464783729115a6da61b | [] | no_license | DongyueXie/smashrgen | 6300d84af5815ce2ec9846b7e874dcaf398684c7 | 647039a0f5a1b78328bd2aca06dea4fa65b7bff7 | refs/heads/master | 2023-07-09T12:06:36.613067 | 2023-06-25T11:34:52 | 2023-06-25T11:34:52 | 133,297,527 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,197 | r | robust_smooth.R | #'@title empirical Bayes robust smoothing
#'@import ebnm
#'@description Empirical Bayes robust smoothing: alternately fits a sparse
#'  "outlier" component to the residuals (via ebnm) and a smooth trend to the
#'  outlier-corrected data (via smash_dwt), iterating until the trend stops
#'  changing.
#'@param y numeric data vector to smooth
#'@param sigma2 observation variance (passed to ebnm/smash_dwt as sqrt(sigma2))
#'@param mu_init optional starting trend; defaults to a constant mean(y)
#'@param ebnm_control list of overrides merged into
#'  robust_smooth_ebnm_control_default()
#'@param tol convergence tolerance on the L2 change of the trend
#'@param maxiter maximum number of alternating iterations
#'@return list(smooth_res, outlier_res): the final smash_dwt and ebnm fits
robust_smooth = function(y,sigma2,
                         mu_init=NULL,
                         ebnm_control = list(),
                         tol=1e-5,maxiter=100){
  # merge user overrides into the defaults (keep.null so a caller can
  # explicitly set a field such as g_init back to NULL)
  ebnm_controls = robust_smooth_ebnm_control_default()
  ebnm_controls = modifyList(ebnm_controls,ebnm_control,keep.null = TRUE)
  mu = mu_init
  if(is.null(mu)){
    mu = rep(mean(y),length(y))
  }
  mu_old = mu
  for(i in 1:maxiter){
    # shrink the residuals around the current trend toward 0; the posterior
    # mean is the estimated outlier component
    outlier_res = ebnm(y-mu,sqrt(sigma2),
                       prior_family = ebnm_controls$prior_family,
                       mode = ebnm_controls$mode,
                       scale = ebnm_controls$scale,
                       g_init = ebnm_controls$g_init,
                       fix_g = ebnm_controls$fix_g)
    # re-smooth the outlier-corrected data
    smooth_res = smash_dwt(y-outlier_res$posterior$mean,sqrt(sigma2))
    mu = smooth_res$posterior$mean
    # NOTE(review): base::norm() documents a matrix argument; confirm a plain
    # vector is accepted by the R version in use (else sqrt(sum((.)^2))).
    if(norm(mu-mu_old,'2') < tol){
      break
    }
    mu_old = mu
  }
  return(list(smooth_res=smooth_res,outlier_res=outlier_res))
}
# Default ebnm settings used by robust_smooth() for the outlier component;
# callers override individual fields through its ebnm_control argument.
robust_smooth_ebnm_control_default = function(){
  defaults = list(
    prior_family = "point_laplace",
    mode = 0,
    scale = "estimate",
    g_init = NULL,
    fix_g = FALSE
  )
  return(defaults)
}
|
d48fe900abfec0096ef26dbd446aa76774859c83 | fdf195f36083f010e9a5e81828c93e7741464e3e | /R/gera_figura_legenda_factor.R | acf5b746cc064adcf725d6aa86de1e85790c6ab6 | [] | no_license | cran/spGoogle | ce788f658ee5af81ddcc57165b9db98d9a09f46e | e105758efbb06feeb750d2ac0f9ce4e2673aded7 | refs/heads/master | 2016-09-06T00:47:43.086533 | 2012-07-31T00:00:00 | 2012-07-31T00:00:00 | 17,719,422 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 849 | r | gera_figura_legenda_factor.R | gera_legenda_factor <-
function(brks, cols, savekml) {
# Render a PNG legend ("legenda") for the given category labels (brks) and
# fill colours (cols); returns c(png_path, width, height) as characters.
# When savekml is non-NULL the PNG is written into the current working
# directory (so it can sit next to a saved KML), otherwise into tempdir().
if (!is.null(savekml)){
dest.fig = tempfile(pattern = "legenda", fileext = ".png", tmpdir = getwd())
} else {
dest.fig = tempfile(pattern = "legenda", fileext = ".png")
}
brks <- as.character(brks)
ncat <- length(brks)
#dest.fig <- tempfile(pattern = "legenda", fileext = ".png")
# Size the image from the longest label (at least as wide as "Legenda")
# and the number of categories; 12px per character / per row.
dim1 <- nchar(brks[ncat])
dim2 <- nchar("Legenda")
dim <- ifelse(dim1<=dim2, dim2, dim1)
width = (dim+0.4)*12
height = (ncat+2)*12*1.4
# Transparent-background device; draw an invisible plot so legend() has a
# coordinate system, then place the legend in the centre.
png(filename = dest.fig, width = width, height = height, bg = "#FFFFFF00", pointsize = 10)
# NOTE(review): dim1 measures only the LAST label and par() state `op` is
# never restored -- presumably acceptable since the device is closed below.
op <- par(bg="transparent", mar=c(0, 0, 0, 0))
plot(1,1,col="white",axes=F,col.axis="white",xlab="",ylab="")
legend(
"center",
brks,
fill = c(cols),
cex = 1.2,
bg = "#FFFFFF")
dev.off()
return (c(dest.fig, width, height))
}
|
4a6a6d1880756e7dbe83d2a767e83afb6118de28 | a1cc22bafb4429b53898962b1131333420eddf05 | /example-models/pk2cpt_ode/pk2cpt_ode.init.R | 67d6d3bd8240323ab748417dcb446666a0493dcb | [
"BSD-3-Clause"
] | permissive | metrumresearchgroup/Torsten | d9510b00242b9f77cdc989657a4956b3018a5f3a | 0168482d400e4b819acadbc28cc817dd1a037c1b | refs/heads/master | 2023-09-01T17:44:46.020886 | 2022-05-18T22:46:35 | 2022-05-18T22:46:35 | 124,574,336 | 50 | 18 | BSD-3-Clause | 2023-09-09T06:32:36 | 2018-03-09T17:48:27 | C++ | UTF-8 | R | false | false | 139 | r | pk2cpt_ode.init.R | CL <- 7.4367958406427
# Initial parameter values for the pk2cpt ODE example (CL is assigned on the
# preceding line of the original file). NOTE(review): the names suggest
# standard PK quantities (absorption rate ka, inter-compartmental clearance Q,
# residual sigma, compartment volumes V1/V2) -- confirm against the model file.
ka <- 1.0811298754049
Q <- 28.0799996152587
sigma <- 0.589695154260051
V1 <- 78.4460632446725
V2 <- 68.1255965629187
|
d92f572ba93d4bd4eaa12f2fd711ed43ba99ee15 | 7f72ac13d08fa64bfd8ac00f44784fef6060fec3 | /RGtk2/man/GtkAccessible.Rd | 01ceb38596147f4ea024911c037b7c2469e0cf33 | [] | no_license | lawremi/RGtk2 | d2412ccedf2d2bc12888618b42486f7e9cceee43 | eb315232f75c3bed73bae9584510018293ba6b83 | refs/heads/master | 2023-03-05T01:13:14.484107 | 2023-02-25T15:19:06 | 2023-02-25T15:20:41 | 2,554,865 | 14 | 9 | null | 2023-02-06T21:28:56 | 2011-10-11T11:50:22 | R | UTF-8 | R | false | false | 539 | rd | GtkAccessible.Rd | \alias{GtkAccessible}
\name{GtkAccessible}
\title{GtkAccessible}
\description{Accessibility support for widgets}
\section{Methods and Functions}{
\code{\link{gtkAccessibleConnectWidgetDestroyed}(object)}\cr
}
\section{Hierarchy}{\preformatted{GObject
+----AtkObject
+----GtkAccessible}}
\section{Structures}{\describe{\item{\verb{GtkAccessible}}{
\emph{undocumented
}
}}}
\references{\url{https://developer-old.gnome.org/gtk2/stable/GtkAccessible.html}}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
428e34a50c370e954aeb11c1e21afd5f8194b626 | 416ff0b5405f13630300b3d12cda8b0b01f0666f | /man/str_fo_parse.Rd | 8fa08b52481a22721bfc918d1e9cc0512fbd48ae | [] | no_license | biodatacore/biodatacoreUtils | 4d51b842490469df0330a32eb2998a3b0627613c | 3177e37f58eaf988045e3b08ada54002666437f9 | refs/heads/master | 2021-09-02T20:55:05.294114 | 2018-01-03T22:18:24 | 2018-01-03T22:18:24 | 108,178,354 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 285 | rd | str_fo_parse.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/substitute.R
\name{str_fo_parse}
\alias{str_fo_parse}
\title{Parses character vectors for substitution}
\usage{
str_fo_parse(x)
}
\description{
Parses character vectors for substitution
}
\keyword{internal}
|
f37632cb903260b0cf6c41f8f8805bf1e5df866a | ba8d32cd53ab5446095c68b688812509c2bd2383 | /man/merge_tides.Rd | aaadd4f981c6aaf2942117c1d886a6b5a02215cf | [
"MIT"
] | permissive | mkoohafkan/rremat | d1950d110524b6f8bde57769c1f0f89f690d51bb | 5ee5737372287ac84c9dfe5635b428ab91766184 | refs/heads/master | 2021-07-11T02:49:53.448671 | 2021-07-01T02:17:15 | 2021-07-01T02:17:15 | 37,560,366 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 672 | rd | merge_tides.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rreformat.r
\name{merge_tides}
\alias{merge_tides}
\title{Combine Tide Records}
\usage{
merge_tides(tides, datetimecol = "Date.Time",
heightcol = "Water.Level", sigmacol = "Sigma")
}
\arguments{
\item{tides}{A list of dataframes of tide data.}
\item{datetimecol}{The column name containing timestamps.}
\item{heightcol}{The column name containing water levels.}
\item{sigmacol}{The column name containing sigma values.}
}
\value{
A single dataframe of tide data.
}
\description{
Combine data files produced by \code{download_tides}.
}
\seealso{
download_tides
}
|
1e6b4dfb47f726eb8076624a6a97fb06de77e2f2 | 892c2bae8ccb0b98b9e03d1b95a5d0301be11301 | /run_analysis.R | 2533b843dc8073b3c80b45b3d273e527a8d7be06 | [] | no_license | boukhrisescandon/Getting-and-Cleaning-Data---Course-Project | 1811573bcff73f3d2904b0cf4a1a710fc5794e74 | 19f04307d066e19d09508a7e395a5ddce5b32905 | refs/heads/master | 2016-08-11T20:16:22.703744 | 2015-11-22T20:44:56 | 2015-11-22T20:44:56 | 46,283,990 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,543 | r | run_analysis.R | ## Getting and Cleaning Data - Course Project
## November 16, 2015
######### Clear workspace, set working directory, and initialize needed libraries #########
# NOTE(review): rm(list = ls()) and setwd() make this script hostile to other
# sessions/machines; fine for a one-off course project, not for reuse.
rm(list = ls(all = TRUE))
setwd("~/Desktop/Johns Hopkins - Data Science Certification/03_Getting and Cleaning Data/Course Project")
library(data.table)
library(plyr)
library(markdown)
library(knitr)
######### Load data #########
#Training data
x_train <- read.table("UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("UCI HAR Dataset/train/y_train.txt", header = FALSE)
subject_train <- read.table("UCI HAR Dataset/train/subject_train.txt", header = FALSE)
#Testing data
x_test <- read.table("UCI HAR Dataset/test/X_test.txt", header = FALSE)
y_test <- read.table("UCI HAR Dataset/test/y_test.txt", header = FALSE)
subject_test <- read.table("UCI HAR Dataset/test/subject_test.txt", header = FALSE)
#Features list for the column names
data_features <- read.table("UCI HAR Dataset/features.txt", header = FALSE)
# Stack train on top of test, keeping rows aligned across the three frames.
subject <- rbind(subject_train, subject_test)
features <- rbind(x_train, x_test)
activity <- rbind(y_train, y_test)
#Modifiy the column names
names(subject) <- c("subject")
names(activity) <- c("activity")
names(features) <- data_features$V2
######### Merge the training and test sets into one data set #########
subject_activity <- cbind(subject, activity)
merged <- cbind(features, subject_activity)
######### Only keep mean and standard deviation #########
# Keep only features named like mean() or std(), plus the id columns.
mean_std_names <- data_features$V2[grep("mean\\(\\)|std\\(\\)", data_features$V2)]
include_names <- c(as.character(mean_std_names), "subject", "activity")
merged <- subset(merged, select=include_names)
######### Label the data set with descriptive variable names #########
names(merged) <- gsub("Acc", "Acceleration", names(merged))
names(merged) <- gsub("Mag", "Magnitude", names(merged))
names(merged) <- gsub("Gyro", "Gyroscope", names(merged))
names(merged) <- gsub("^t", "Time", names(merged))
names(merged) <- gsub("^f", "Frequency", names(merged))
names(merged) <- gsub("BodyBody", "Body", names(merged))
######### Create a second independent tidy data set with the average of each variable #########
######### for each activity and each subject. #########
merged_final <- aggregate(. ~subject + activity, merged, mean)
tidy_data <- merged_final[order(merged_final$subject, merged_final$activity), ]
write.table(tidy_data, file = "tidy_data.txt", row.names = FALSE)
######### Create Codebook #########
knit2html("code_book.Rmd");
|
e577bf6cbaab104c03d3773721e3d16fd13e29b3 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/soundgen/examples/getRMS.Rd.R | 5ac7b1b020e4142e7f49b0aaa735b5174c0f707d | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 575 | r | getRMS.Rd.R | library(soundgen)
### Name: getRMS
### Title: RMS amplitude per frame
### Aliases: getRMS
### ** Examples
# Auto-extracted example script for soundgen::getRMS (generated from the
# package's Rd examples); requires library(soundgen) loaded above.
s = soundgen() + .1 # with added DC offset
plot(s, type = 'l')
# long window + overlap -> smooth RMS envelope overlaid in green
r = getRMS(s, samplingRate = 16000,
windowLength = 40, overlap = 50, killDC = TRUE,
col = 'green', lty = 2, main = 'RMS envelope')
# short window = jagged envelope
r = getRMS(s, samplingRate = 16000,
windowLength = 5, overlap = 0, killDC = TRUE,
col = 'green', lty = 2, main = 'RMS envelope')
## Not run:
##D r = getRMS('~/Downloads/temp/032_ut_anger_30-m-roar-curse.wav')
## End(Not run)
|
452d953bc27a822cd5620a1814b4d4092134fb43 | 671d6806a9baad686e1121fbf5d769e61728b399 | /data-raw/cellAge_load.R | 39eeacd67a7db589371063d4d25f320bab0f6973 | [] | no_license | maglab/HAGR-R | 35401e5dab2b8dae2bafbb7ebf8422f40f8a0e10 | 4e83b50142ea4b8c6625a7a86b6005ff3f606ed6 | refs/heads/master | 2023-08-13T21:22:51.849753 | 2021-10-12T12:50:20 | 2021-10-12T12:50:20 | 358,014,995 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 641 | r | cellAge_load.R | # cellAge_load.R
library(tidyverse)
# Read the raw CellAge CSVs from data-raw/ (paths are relative to the
# package root, as is conventional for data-raw scripts).
cellAge_gene_interventions_raw <- readr::read_csv("data-raw/CellAge/gene_interventions.csv")
cellAge_signatures_raw <- readr::read_csv("data-raw/CellAge/signatures.csv")
# Load cellage gene interventions and signatures table
# [,] is a full copy of the interventions table; [2:8] keeps columns 2-8 of
# the signatures table (dropping column 1 -- presumably an index/id column;
# confirm against the CSV).
cellAge_gene_interventions <- cellAge_gene_interventions_raw[,]
cellAge_signatures <- cellAge_signatures_raw[2:8]
# Re-guess column types after subsetting.
cellAge_gene_interventions <- readr::type_convert(cellAge_gene_interventions)
cellAge_signatures <- readr::type_convert(cellAge_signatures)
usethis::use_data(cellAge_gene_interventions,overwrite = TRUE)
usethis::use_data(cellAge_signatures,overwrite = TRUE) |
7805e81b8cce037b1bddf97a5e1e0acf4b761e13 | 2a7655dc0c233967a41b99369eed3eb4a6be3371 | /3-Get_Earth_Observations/Meteorological_variables/Not_main_workflow/convert_grib1to2_function.R | f1b778e684fb21f2b5880ef58e697c8eafee5ab3 | [
"MIT"
convert_grib1to2.fn <- function(this_model.info, this_file_type) {
  # Return GribInfo() metadata for a GRIB file. GRIB1 files are first
  # converted to GRIB2 with the external grb1to2.pl script, since GribInfo
  # is run on the converted file.
  #
  # this_model.info: list whose first element has a $file.name entry.
  # this_file_type:  either "grib1" or "grib2".
  # Returns the GribInfo() result, or NULL when a GRIB1 conversion failed.
  print("Starting convert_grib1to2.fn")
  if (this_file_type == "grib1") { # convert grib1 files to grib2 and then run GribInfo
    # run the script to convert to grib2, e.g.:
    # ./grb1to2.pl namanl_218_20080101_0000_000.grb
    system(paste("./grb1to2.pl",this_model.info[[1]]$file.name,sep = " "))
    grib2_file_name <- paste(this_model.info[[1]]$file.name,".grb2",sep = "")
    if (file.exists(grib2_file_name) == TRUE) { # check whether the converted file was successfully created
      thisGribInfo <- GribInfo(grib.file = grib2_file_name, file.type = "grib2")
    } else {
      # conversion failed: report it and fall through returning NULL
      thisGribInfo <- NULL
      print(paste("*** convert_grib1to2.fn failed to convert ",grib2_file_name," ***"))
    }
  } else if (this_file_type == "grib2") { # already grib2: run GribInfo directly
    thisGribInfo <- GribInfo(grib.file = this_model.info[[1]]$file.name, file.type = this_file_type)
  } else {
    # NOTE(fix): the original called error(), which does not exist in base R;
    # stop() is the correct way to signal the invalid-argument condition.
    stop("Invalid this_file_type. It should be either grib1 or grib2. See convert_grib1to2_function.R")
  }
  return(thisGribInfo)
} # end function
bc6a0fd6bf9b2a554ef22bc4f098195e4cedbaee | d0dbe7f22cbb23e1174e2107fa72d2ef51afc8e3 | /plotDE.R | 94b9479eb9a157d713154eddc9e3900482856708 | [] | no_license | maryHoekstra/CISC500 | 2381332a13e2b852e6009551b98fef42ca217974 | 05f4d8766289f0054a50f5f5e8eb3b4fed5c40bb | refs/heads/master | 2020-03-08T20:52:29.271719 | 2018-04-06T12:52:14 | 2018-04-06T12:52:14 | 128,393,847 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,722 | r | plotDE.R | # PLOT DE
# graph number of differentially expressed genes between baseline and the other time points for each group
# last modified: April 3rd, 2018
# find number of DE genes from one timepoint (earlyDay) to another (lateDay) for a specific group (code)
# Relies on the global filtered.eset and on project helpers getESet(),
# createDesign(), getDiffs() and getEntrezIDs() defined elsewhere in this
# repository; returns the count of differentially expressed genes.
findNumberDE <- function(earlyDay,lateDay,code) {
# subset the expression set to the two timepoints being compared
earlyVsLateESet <- getESet(filtered.eset,earlyDay,lateDay)
design <- createDesign(earlyVsLateESet,earlyDay,lateDay)
# differential-expression results for the requested group ("A"/"B" below)
diffs <- getDiffs(earlyVsLateESet,groupCode=code,design)
geneList <- getEntrezIDs(diffs)
numDE <- length(geneList)
return(numDE)
}
# find number of DE genes between baseline and each timepoint
earlyDay <- "day 1"
code="A"
# group A (control): baseline (day 1) vs each later timepoint; the trailing
# comments record the counts observed on the original run
numDE_A_day3 <- findNumberDE(earlyDay,"day 3",code) #47
numDE_A_day7 <- findNumberDE(earlyDay,"day 7",code) #327
numDE_A_day14 <- findNumberDE(earlyDay,"day 14",code) #30
numDE_A_day21 <- findNumberDE(earlyDay,"day 21",code) #19
numDE_A_day28 <- findNumberDE(earlyDay,"day 28",code) #2
earlyDay <- "day 1"
code="B"
# group B (lactoferrin)
numDE_B_day3 <- findNumberDE(earlyDay,"day 3",code) #230
numDE_B_day7 <- findNumberDE(earlyDay,"day 7",code) #355
numDE_B_day14 <- findNumberDE(earlyDay,"day 14",code) #347
numDE_B_day21 <- findNumberDE(earlyDay,"day 21",code) #110
numDE_B_day28 <- findNumberDE(earlyDay,"day 28",code) #6
# leading 0 = day 1 vs itself (baseline)
numDE_A <- c(0,numDE_A_day3,numDE_A_day7,numDE_A_day14,numDE_A_day21,numDE_A_day28)
numDE_B <- c(0,numDE_B_day3,numDE_B_day7,numDE_B_day14,numDE_B_day21,numDE_B_day28)
# manually enter numbers if they are known
# NOTE(review): these hard-coded vectors overwrite the freshly computed ones
# above, so the plots always show the recorded counts.
numDE_A = c(0,47,327,30,19,2)
numDE_B = c(0,230,355,347,110,6)
# create pretty plots
library(ggplot2)
library(ggthemes)
library(extrafont)
# long-format frame: one row per (group, timepoint)
Timepoint = c("Day 1","Day 3","Day 7","Day 14","Day 21","Day 28","Day 1","Day 3","Day 7","Day 14","Day 21","Day 28")
numDE = c(numDE_A,numDE_B)
Group = c("Control","Control","Control","Control","Control","Control","Lactoferrin","Lactoferrin","Lactoferrin","Lactoferrin","Lactoferrin","Lactoferrin")
df <- data.frame(Group,Timepoint,numDE)
# line plot
title = "Differential expression over time"
# scale_x_discrete(limits=...) forces chronological ordering of the day labels
ggplot(data = df, stat="identity",aes(y = numDE, x = Timepoint,colour = Group,group = Group)) +
geom_line() + geom_point() +
scale_x_discrete(limits=c("Day 1","Day 3","Day 7","Day 14","Day 21","Day 28")) +
ggtitle(title) + labs(y = "Number of Genes") +
theme_minimal() +
theme(plot.title = element_text(hjust = 0.5))
# bar plot
title = "Differential expression over time"
ggplot(data = df,aes(y = numDE, x = Timepoint,fill = Group)) +
geom_bar(stat="identity",position=position_dodge()) +
scale_x_discrete(limits=c("Day 1","Day 3","Day 7","Day 14","Day 21","Day 28")) +
ggtitle(title) + labs(y = "Number of Genes") +
theme_minimal() +
theme(plot.title = element_text(hjust = 0.5))
e55e8e28781bbd77c86b705ec002026b71ae2c36 | 4f8636a29a581a9637d069fe81a9bc3d8d46a56b | /Project5-Capstone/Anomalous/Radhey/Radhey_Random_Forest_Edit2.R | fdda229c76b93cc412e2503e174fe4a583abe315 | [] | no_license | jeperez/bootcamp005_project | 55ecb745469947ded31883703f5f5e6f7abe73e5 | 936fee3e4f7a2b6b1872c20718fe42ac976e3f8a | refs/heads/master | 2021-01-11T07:03:27.124107 | 2016-08-04T21:49:29 | 2016-08-04T21:49:29 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,605 | r | Radhey_Random_Forest_Edit2.R | #EDA
# Exploratory modelling of the NSL-KDD network-intrusion data.
# Ideas noted by the original author: EDA, logistic regression, ridge
# regression, k-means clustering; this script implements random forests
# (plus an XGBoost stub at the bottom).

# NOTE(review): setwd() hard-codes a user-specific path; the script is not
# portable as written.
setwd("~/Documents/Network_Fraud/")

# The raw NSL-KDD files carry no header row; column names come from a
# separate "Field Names" file.
FieldNames <- read.csv("Field Names.csv", header = FALSE,
                       stringsAsFactors = FALSE)
KDD.test <- read.csv("KDDTest+.csv", header = FALSE,
                     stringsAsFactors = FALSE)
KDD.train <- read.csv("KDDTrain+.csv", header = FALSE,
                      stringsAsFactors = FALSE)

column.names <- FieldNames[, 1]   # 41 feature names
colnames(KDD.test) <- column.names
colnames(KDD.train) <- column.names
colnames(KDD.train)[42] <- 'outcome'

# Binary response: 0 = normal traffic, 1 = malicious.
KDD.train$outcome <- as.factor(KDD.train$outcome)
KDD.train$outcome.response <- ifelse(KDD.train$outcome == 'normal', 0, 1)

View(KDD.train)   # 44 cols; 0.465% are malicious
View(KDD.test)

# One-hot encode the three categorical predictors (service, protocol_type,
# flag) and rebuild the training table with the dummy columns in place of
# the original factors.
library(nnet)
service_ <- as.data.frame(class.ind(KDD.train$service))
protocol_type_ <- as.data.frame(class.ind(KDD.train$protocol_type))
flag_ <- as.data.frame(class.ind(KDD.train$flag))
new_ <- cbind(service_, protocol_type_, flag_)   # 84 dummy columns
new.KDD.train <- cbind(duration = KDD.train$duration,
                       new_,
                       KDD.train[, 5:41],
                       outcome.response = KDD.train[, 44])
dim(new.KDD.train)   # [1] 125973 123
View(new.KDD.train)

##############################################################
# Random forests
##############################################################
library(randomForest)
random_col <- c(1:41, 44)

# NOTE(review): `KDD.train.random` is used below but never defined in this
# file -- presumably a subset of the training data built elsewhere; confirm
# before running.
set.seed(0)
KDD.train.random$service <- as.numeric(KDD.train.random$service)
rf.KDD <- randomForest(outcome.response ~ ., data = KDD.train.random,
                       ntree = 100, importance = TRUE)
rf.KDD2 <- randomForest(outcome.response ~ ., data = KDD.train.random,
                        ntree = 500, importance = TRUE)
importance(rf.KDD)
varImpPlot(rf.KDD)
importance(rf.KDD2)
varImpPlot(rf.KDD2)

# Tune mtry (number of variables tried at each split) by out-of-bag error.
set.seed(0)
oob.err <- numeric(18)
slot <- 1
for (mtry in 4:21) {
  fit <- randomForest(outcome.response ~ ., data = KDD.train.random, mtry = mtry)
  oob.err[slot] <- fit$err.rate[500]
  slot <- slot + 1
  cat("We're performing iteration", mtry, "\n")
}

plot(4:21, oob.err, pch = 16, type = "b",
     xlab = "Variables Considered at Each Split",
     ylab = "OOB Error Rate ",
     main = "Random Forest OOB Error Rates\nby # of Variables")

# Refit at the chosen mtry and inspect variable importance.
best_rf <- randomForest(outcome.response ~ ., data = KDD.train.random, mtry = 13)
importance(best_rf)
varImpPlot(best_rf)

######################################################
# Apply the same response transformation to the test set
######################################################
colnames(KDD.test)[42] <- 'outcome'
KDD.test$outcome <- as.factor(KDD.test$outcome)
KDD.test$outcome.response <- ifelse(KDD.test$outcome == 'normal', 0, 1)

###########################################
# Predictions on the test set
###########################################
# NOTE(review): `KDD.test.random` is also undefined in this file.
y_test <- KDD.test[, 44]
yhat <- predict(best_rf, newdata = KDD.test.random)

# (Commented-out PCA exploration kept from the original.)
# c_full=c(1:41)
# fact_vec=c(2,3,4,7,12,14,15,21,22)
# fact_vec_1=c(2,3,4)
# cont_vec=c(1,5,6,8,9,10,11,13,16,17,18,19,20,23:41)
# cont_vec_1=c(1,5,6,8)
# lsm=FactoMineR::PCA(X=KDD.train.random_PCA, scale.unit = TRUE, ncp = 5,
#                     quanti.sup = cont_vec, quali.sup = fact_vec, row.w = NULL,
#                     col.w = NULL, graph = FALSE)

##########################################
# XGBoost (not yet implemented)
##########################################
library(xgboost)
library(caret)
|
138d93f3ac410b130c1a4f1cec594812a4d472db | 33fd29142f89a0b31132081990b34c2e52d84601 | /samples/7_fig4_rc-irt_vs_wordfish_dotplot.R | 42892fc37f0446f0657ea2e2be1858766672c482 | [] | no_license | gpfarina/sym-core-R | 7e18e5aecb238710429bd37fd0f1fdd7c2e4321d | 6cd5a445d1a7c28480640f97a03a388111af0090 | refs/heads/master | 2020-05-02T10:41:01.725872 | 2019-03-27T02:32:52 | 2019-03-27T02:32:52 | 177,904,211 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,684 | r | 7_fig4_rc-irt_vs_wordfish_dotplot.R |
###############################################################################
# Fig. 4: dot plot comparing roll-call IRT ideal points with Wordfish scalings
###############################################################################
# !!! First run the R scripts in "fig1&4_rollcall_irt" and
# "fig3_wordfish_scalings" !!!  They produce the objects loaded below
# (`wfish.energy` and `idealpoints.01.022.de.500w`).
load("./fig1&4_rollcall_irt/fig1&4_rc-irt.RData")
load("./fig3_wordfish_scalings/energy-debate.corpus.RData")

library(ggplot2)

# Merge the Wordfish and IRT scalings on the MP identifier, keeping every
# Wordfish document even when no IRT estimate exists for it.
combined.idealpoints.energy <- merge(wfish.energy,
                                     idealpoints.01.022.de.500w,
                                     by.x = "docs", by.y = "mp_id",
                                     all.x = TRUE)

# Attach one plotting colour per party (rows with other/missing parties keep
# an NA colour, exactly as the original per-party assignments did).
party.colors <- c("CVP"          = "orange",
                  "EVP"          = "turquoise",
                  "FDP-Liberale" = "blue",
                  "GPS"          = "green2",
                  "SP"           = "red",
                  "SVP"          = "darkgreen")
for (party in names(party.colors)) {
  rows <- combined.idealpoints.energy$party_abbreviation.y == party
  combined.idealpoints.energy$color[rows] <- party.colors[[party]]
}

# Draw the dot plot: one point per MP, coloured by party.
ggplot(combined.idealpoints.energy, aes(x = Mean, y = theta)) +
  geom_point(shape = 16, aes(color = party_abbreviation.y)) +
  scale_colour_manual(limits = combined.idealpoints.energy$party_abbreviation.y,
                      values = combined.idealpoints.energy$color) +
  #geom_smooth(method=lm) +
  xlab("IRT ideal points") +
  ylab("Wordfish ideal points") +
  theme(legend.key = element_blank()) +
  theme_bw()
|
ec479ce7d3b604b4242ebfd50488d52b54135bb2 | 1d05b08a44ad53b845a1e773a54be2e51e6aa604 | /Week02/HW02/exercise_3.R | 4b6e18ac463bf0e85135d7302288ded39f7abc12 | [
"MIT"
] | permissive | emiltj/cds-spatial | 7fc14f5b7d6131ee23f8757185bb4477f94c3f0f | 6bd9900e7ce60a27af12c075f85dbaa40f456a7e | refs/heads/main | 2023-04-09T10:03:24.149055 | 2021-04-24T09:38:06 | 2021-04-24T09:38:06 | 337,713,596 | 0 | 0 | MIT | 2021-02-10T12:13:19 | 2021-02-10T12:13:18 | null | UTF-8 | R | false | false | 2,487 | r | exercise_3.R | ##-----------------------------------------------##
## Author: Adela Sobotkova                       ##
## Institute of Culture and Society              ##
## Aarhus University, Aarhus, Denmark            ##
## adela@cas.au.dk                              ##
##-----------------------------------------------##
#### Goals ####
# - Learn about Classification methods
#### Required R libraries ####
# We will use the sf, raster, and tmap packages.
# Additionally, we will use the spData and spDataLarge packages that provide new datasets.
# These packages have been preloaded to the worker2 workspace.
library(sf)
library(raster)
library(tmap)
library(spData)
library(spDataLarge)
#### Data sets ####
# We will use a single data set: `nz`. It is contained by the libraries above.
# It is an sf object with polygons representing the 16 regions of New Zealand.
#### Existing code ####
# Here are some examples of plotting population in New Zealand.
# Your role is to create a map based on the suggestions below,
# selecting the most meaningful classification style.
# Look at NZ population distribution
hist(nz$Population)
# This line of code applies the 'pretty' style, rounding legend numbers. Try different numbers of classes.
tm_shape(nz) + tm_polygons(col = "Population", style = "pretty", n = 4)
# "Jenks" style further smooths over the gaps
tm_shape(nz) + tm_polygons(col = "Population", style = "jenks", n = 5)
# quantile style divides into 5 even groups
tm_shape(nz) + tm_polygons(col = "Population", style = "quantile", n=5)
# Equal interval style divides the distribution into even groups
tm_shape(nz) + tm_polygons(col = "Population", style = "equal", n = 5)
# Write maps above to objects and plot them side by side
# with tmap_arrange() for better comparison.
# NOTE: the `___` tokens below are intentional fill-in-the-blank placeholders
# for students; this line will not parse until they are replaced with the
# four map objects created above.
tmap_arrange(___,___,___,____)
#### Exercise I ####
# 1. What are the advantages and disadvantages of each classification method?
# 2. Choose the best classification and create a map with easily legible legend and all other essentials.
# (Select a suitable color palette from http://colorbrewer2.org/, north arrow, scale, map title,
# legend title, reasonable number of breaks in the classification )
# 3. Which method and how many classes did you end up using to display
# your data? Why did you select that method?
# 4. What principles did you use in arranging the parts of your map layout the way you
# did and how did you apply these principles?
# Your solution
# /Start Code/ #
# /End Code/ #
91bd56dfc0d0de8a3b0d06652d98c1ca9d5a8658 | 66a0ec34e12db82c69fdee5d27c6d5cca2717faa | /best.R | 5e4fc9bf2d079aac7b29ab04e05a008e08b86c83 | [] | no_license | milbak/dsc_p2 | 0a1ae4cdc52c52656cb8ede46dc3564c3ef62cab | daefdcb6881158e1d8407eec74aa3fac84d63986 | refs/heads/master | 2021-01-13T13:56:08.336238 | 2016-11-05T21:50:40 | 2016-11-05T21:50:40 | 72,955,181 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 978 | r | best.R | best <- function(state, outcome) {
## Read outcome data
df <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
## Check that state is valid
if(!is.element(state, state.abb)){
stop("invalid state")
}
## Set outcome column based or stop if invalid
col <- NULL
if(outcome == "heart attack"){
col <- 11
} else if(outcome == "heart failure") {
col <- 17
} else if(outcome == "pneumonia") {
col <- 23
} else {
stop("invalid outcome")
}
## Return hospital name in that state with lowest 30-day death
## rate
## coerce outcome column to numeric
df[, col] <- suppressWarnings(as.numeric(df[, col]))
## subset state, and remove NA's
oc <- df[!is.na(df[, col]) & df[,7] == state, c(2, col)]
## return first result
head(oc[order(oc[,2],oc[,1]),1],1)
} |
6579e3098292e9c9f6a5328b17e4be17683be369 | 20af98473356f07b03bb63c3684c03a00189a18e | /man/plotConcordanceDendrogram.Rd | 3b6d1ab295ec68f1f01c9642973cdd784e122a21 | [] | no_license | changrong1023/benchdamic | bc7ccc46ad012aabe7d62f7ffedf2faf0275e63c | 828eee182e9d2194df0e4e830c2a35a96d55a22c | refs/heads/master | 2023-08-29T10:54:36.593924 | 2021-10-15T15:49:16 | 2021-10-15T15:49:16 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 725 | rd | plotConcordanceDendrogram.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotConcordance.R
\name{plotConcordanceDendrogram}
\alias{plotConcordanceDendrogram}
\title{plotConcordanceDendrogram}
\usage{
plotConcordanceDendrogram(hc, direction = "v", cols)
}
\arguments{
\item{hc}{Hierarchical clustering results produced in
\code{\link{plotConcordance}} function.}
\item{direction}{vertical (default \code{direction = "v"}) or horizontal
(\code{direction = "h"}).}
\item{cols}{A named vector containing the color hex codes.}
}
\value{
a \code{ggplot2} object
}
\description{
Plots the method's dendrogram of concordances.
}
\seealso{
\code{\link{createConcordance}} and \code{\link{plotConcordance}}
}
\keyword{internal}
|
f5926e2e2573719cdef0fe6ac083392cb75c9ad9 | 09ac9a5775ba63160d7a8f712095a99f1f1d90f8 | /R/glayout.R | cd933d228855e2fecd672e878d7ec72d727703fb | [] | no_license | gwidgets3/gWidgetsRGtk2 | 7d12c2527018442d5b7e5072d020f05f7340279c | 76aced9a02f8d3598efa6ae28d761d3e4dfe9570 | refs/heads/master | 2022-11-18T02:52:41.871884 | 2020-07-10T22:14:07 | 2020-07-10T22:14:07 | 278,742,208 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,844 | r | glayout.R | setClass("gLayoutRGtk",
contains="gContainerRGtk",
prototype=prototype(new("gContainerRGtk"))
)
## Toolkit constructor for glayout(): build a GtkTable-backed grid container.
##
## Arguments:
##   toolkit     - the RGtk2 toolkit object (forced so lazy evaluation cannot
##                 defer it past the GTK calls below)
##   homogeneous - should all cells share the same size?
##   spacing     - gap in pixels between rows and between columns
##   container   - parent container; TRUE means "create a new gwindow()"
##   ...         - passed on to add() for the parent container
## Returns the new gLayoutRGtk object, invisibly.
setMethod(".glayout",
          signature(toolkit="guiWidgetsToolkitRGtk2"),
          function(toolkit,
                   homogeneous = FALSE,
                   spacing = 10, # amount (pixels) between row, cols, NULL=0
                   container = NULL, ...
                   ) {
            force(toolkit)
            tbl <- gtkTableNew(homogeneous = homogeneous)
            ## apply the same gap between all rows and all columns
            tbl$SetRowSpacings(spacing)
            tbl$SetColSpacings(spacing)
            obj <- as.gWidgetsRGtk2(tbl)
            ## bookkeeping used by the `[` and `[<-` methods
            tag(obj, "childlist") <- list()
            if (!is.null(container)) {
              ## `container = TRUE` is shorthand for "wrap in a new window".
              ## isTRUE() is safer than `is.logical(container) && container
              ## == TRUE`, which is NA-unsafe and errors on logical vectors
              ## of length > 1 under modern R.
              if (isTRUE(container))
                container <- gwindow()
              add(container, obj, ...)
            }
            invisible(obj)
          })
## S3 coercion: wrap an existing GtkTable widget in a gLayoutRGtk object so
## it can be driven through the gWidgets API.  Extra arguments are accepted
## for interface compatibility and ignored.
as.gWidgetsRGtk2.GtkTable <- function(widget, ...) {
  new("gLayoutRGtk",
      block = widget,
      widget = widget,
      toolkit = guiToolkit("RGtk2"))
}
### The add method is a deliberate no-op stub: children are placed into a
### glayout through the `[<-` method instead, matching the approach used by
### gWidgetstcltk so both toolkits behave the same way.
setMethod(".add",
          signature(toolkit="guiWidgetsToolkitRGtk2", obj="gLayoutRGtk", value="gWidgetRGtk"),
          function(obj, toolkit, value, ...) {
            ## stub -- intentionally empty
          })
## S4 `[` for gLayoutRGtk: retrieve the child widget stored at grid cell
## (i, j).  Delegates to the toolkit-specific .leftBracket implementation.
setMethod("[",
          signature(x="gLayoutRGtk"),
          function(x, i, j, ..., drop=TRUE) {
            .leftBracket(x, x@toolkit, i, j, ..., drop=drop)
          })
## Toolkit-level implementation of `[` for gLayoutRGtk.
## Looks the child widget up in the container's "childlist" tag, which the
## `[<-` method maintains as a list of (x = rows, y = cols, child) records.
## Returns the first child whose recorded span covers cell (i[1], j[1]),
## or NA when the cell is empty.
setMethod(".leftBracket",
          signature(toolkit="guiWidgetsToolkitRGtk2",x="gLayoutRGtk"),
          function(x, toolkit, i, j, ..., drop=TRUE) {
            l <- tag(x, "childlist")
            ## vapply (not sapply): on an empty child list it returns
            ## logical(0), so any() below is simply FALSE -- sapply would
            ## return list() and any() would error.
            ind <- vapply(l, function(comp) {
              i[1] %in% comp$x && j[1] %in% comp$y
            }, logical(1))
            if(any(ind))
              return(l[ind][[1]]$child) # first registered match wins
            else
              NA
          })
## S4 `[<-` for gLayoutRGtk: place `value` (a widget, or a character string
## that gets wrapped in a glabel) at grid cell/range (i, j).  This is how
## the table is populated; delegates to the toolkit-specific
## `.leftBracket<-` method.
setReplaceMethod("[",
                 signature(x="gLayoutRGtk"),
                 function(x, i, j,..., value) {
                   .leftBracket(x, x@toolkit, i, j, ...) <- value
                   return(x)
                 })
## Toolkit-level implementation of `[<-`: attach `value` to the GtkTable at
## rows `i` and columns `j` (either may be a range, giving a spanning
## child).  Layout hints are read from `...`:
##   expand       - let the cell grow with the table (default FALSE)
##   fill         - "", "x", "y" or "both"; default comes from the child's
##                  "default_fill" tag, else "both"
##   anchor/align - alignment within the cell, given in [-1,1]^2 coordinates
## Each placed child is also recorded in the container's "childlist" tag so
## the `[` method can find it again.
setReplaceMethod(".leftBracket",
                 signature(toolkit="guiWidgetsToolkitRGtk2",x="gLayoutRGtk"),
                 function(x, toolkit, i, j, ..., value) {
                   ## default row: append below the current last row
                   if(missing(i))
                     i <- dim(x)[1] + 1
                   ## a column is mandatory; warn the user and bail out
                   if(missing(j)) {
                     cat(gettext("glayout: [ needs to have a column specified."))
                     return(x)
                   }
                   ## check that all is good
                   if(is.character(value)) {
                     ## wrap characters into labels
                     value <- glabel(value,...)
                   }
                   ## widgets
                   tbl <- getWidget(x)
                   child <- getBlock(value)
                   theArgs <- list(...)
                   ## get expand, anchor, fill; `align` is accepted as a
                   ## synonym for `anchor`
                   expand <- getWithDefault(theArgs$expand, FALSE)
                   if(!is.null(theArgs$align))
                     theArgs$anchor <- theArgs$align
                   anchor <- getWithDefault(theArgs$anchor, NULL)
                   if(!is.null(anchor)) { # put in [0,1]^2
                     anchor <- (anchor+1)/2 # [0,1]
                     anchor[2] <- 1 - anchor[2] # flip yalign
                   }
                   default_fill <- getWithDefault(tag(value, "default_fill"), "both")
                   fill <- getWithDefault(theArgs$fill, default_fill) # "", x, y or both
                   ## we do things differently if there is a gtkAlignment for a block:
                   ## its xscale/yscale/xalign/yalign properties encode the
                   ## fill and anchor requests directly
                   if(is(child, "GtkAlignment")) {
                     if(expand && (fill =="both" || fill == "x")) {
                       child['xscale'] <- 1
                     }
                     if(expand && (fill == "both" || fill == "y")) {
                       child['yscale'] <- 1
                     }
                     if(expand && fill == "") {
                       child['xscale'] <- child['yscale'] <- 1
                     }
                     if(!is.null(anchor)) {
                       child['xalign'] <- anchor[1]
                       child['yalign'] <- anchor[2]
                     }
                   } else {
                     ## in gtkstuff
                     setXYalign(child, getWidget(value), anchor)
                   }
                   ## grow the GtkTable if (i, j) lies outside the current grid
                   d <- dim(x)
                   nr <- max(i); nc <- max(j)
                   if( nr > d[1] || nc > d[2])
                     tbl$Resize(max(max(i), nr), max(max(j), nc))
                   if(expand)
                     opts <- c("fill","expand","shrink")
                   else
                     opts <- c("fill")
                   child <- getBlock(value)
                   ## GtkTable attach points are 0-based and half-open,
                   ## hence the min()-1 / max() arithmetic below
                   tbl$Attach(child,
                              min(j)-1, max(j), min(i)-1, max(i),
                              xoptions=opts,yoptions=opts)
                   ## store for [ method
                   l <- tag(x, "childlist")
                   l[[as.character(length(l) + 1)]] <- list(x=i, y=j, child=value)
                   tag(x, "childlist") <- l
                   return(x)
                 })
## inherits delete method for containers
## replaced
## We like visible, return it. Unlike delete it only hides the widget
## setReplaceMethod(".visible",
## signature(toolkit="guiWidgetsToolkitRGtk2",obj="gLayoutRGtk"),
## function(obj, toolkit, ..., value) {
## gwCat(gettext("visible<- is now redundant for glayout in RGtk2"))
## return(obj)
## })
## Report the current grid size of the layout as c(nrow = ..., ncol = ...),
## taken straight from the underlying GtkTable.
setMethod(".dim",
          signature(toolkit="guiWidgetsToolkitRGtk2",x="gLayoutRGtk"),
          function(x,toolkit) {
            gtk.table <- getWidget(x)
            dims <- c(nrow = gtk.table$GetNrows(),
                      ncol = gtk.table$GetNcols())
            dims
          })
|
de036945c6d5199fb4aa4e2f5830d0c1cab1670d | 685506162657fca95a06e96ef5cde375cf7293f7 | /dyntrace/vignettes.R | 6c14bb9dfb62dd4f7eaa0a9f68abcb2e47ec08b2 | [] | no_license | aviralg/promise-dyntracing-experiment | 96e9d0deb1eb8720d33ec3c806ef61818254a18b | c480632189f2c74e1985e7767cd2f96281e9e73f | refs/heads/master | 2021-08-08T04:18:11.901033 | 2017-11-09T14:45:23 | 2017-11-09T14:45:23 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,845 | r | vignettes.R | suppressPackageStartupMessages(library("optparse"))
## Remaining package loads for the tracing driver (optparse is loaded on the
## first line of the file).
suppressPackageStartupMessages(library("dplyr"))
suppressPackageStartupMessages(library("stringr"))
suppressPackageStartupMessages(library("compiler"))
#packages = commandArgs(trailingOnly = TRUE)
#cmd <- "~/workspace/R-dyntrace/bin/R CMD BATCH" #paste(shQuote(file.path(R.home("bin"), "R")))
## NOTE(review): `sys.env` below is commented out, but it is still referenced
## by execute.external.programs() when new.process=TRUE -- re-enable it (or
## remove that code path) before running vignettes in child processes.
#sys.env <- as.character(c("R_KEEP_PKG_SOURCE=yes", "R_ENABLE_JIT=0"))
## Default output root: traces/promises/<timestamp>
root_dir = paste("traces",
                 "promises",
                 format(Sys.time(), "%Y-%m-%d-%H-%M-%S"),
                 sep="/")
## Command-line interface: -c/--command, -o/--output-dir, --compile;
## positional arguments are the names of the packages to trace.
option_list <- list(
  make_option(c("-c", "--command"), action="store", type="character", default="~/workspace/R-dyntrace/bin/R CMD BATCH",
              help="Command to execute", metavar="command"),
  make_option(c("-o", "--output-dir"), action="store", type="character", default=root_dir,
              help="Output directory for results (*.sqlite, etc) [default].", metavar="output_dir"),
  make_option(c("--compile"), action="store_true", default=FALSE,
              help="compile vignettes before execution [default]", metavar="compile")
)
cfg <- parse_args(OptionParser(option_list=option_list), positional_arguments=TRUE)
## Create the output tree: <output-dir>/vignettes holds instrumented code,
## <output-dir>/logs holds per-vignette logs.
instrumented.code.dir <- paste(cfg$options$`output-dir`, "vignettes", sep="/")
dir.create(instrumented.code.dir, recursive = TRUE, showWarnings = FALSE)
log.dir <- paste(cfg$options$`output-dir`, "logs", sep="/")
dir.create(log.dir, recursive = TRUE, showWarnings = FALSE)
## Build the R code prefix that opens a promise dyntracer writing to
## `database_filepath` and starts a dyntrace block.  The traced vignette
## body is appended after this prefix and closed by rdt.cmd.tail().
## `verbose` is interpolated verbatim (TRUE/FALSE) into the generated call.
rdt.cmd.head <- function(database_filepath, verbose=TRUE) {
  paste0("library(promisedyntracer)\n",
         "\n",
         "dyntracer <- create_dyntracer('", database_filepath, "',", verbose, ")\n",
         "dyntrace(dyntracer, {\n")
}
## Build the R code suffix that closes the dyntrace block opened by
## rdt.cmd.head(), tears the tracer down, and drops an 'OK' marker file at
## `path` so the driver can tell that the vignette ran to completion.
rdt.cmd.tail <- function(path) {
  paste0("\n})\n",
         "destroy_dyntracer(dyntracer)\n",
         "write('OK', '", path, "')\n")
}
## Instrument every vignette of every package in `packages`.
##
## Missing packages are installed first (with Suggests/Enhances so their
## vignettes can run).  For each vignette, the extracted R code is wrapped
## between rdt.cmd.head() and rdt.cmd.tail() so that executing the generated
## file traces the vignette into its own sqlite database and drops an .OK
## marker on success.  When --compile is given, each generated file is also
## byte-compiled and a one-line loader script is emitted for it.
##
## Returns a list of character triples c(package, vignette, path-to-run),
## suitable for execute.external.programs().
instrument.vignettes <- function(packages) {
  new_packages <- setdiff(packages, rownames(installed.packages()))
  if (length(new_packages) > 0) {
    install.packages(new_packages,
                     repos='http://cran.us.r-project.org',
                     dependencies = c("Depends",
                                      "Imports",
                                      "LinkingTo",
                                      "Suggests",
                                      "Enhances"))
  }
  i.packages <- 0
  n.packages <- length(packages)
  total.vignettes <- 0
  instrumented.vignette.paths <- list()
  for (package in packages) {
    i.packages <- i.packages + 1
    write(paste("[", i.packages, "/", n.packages, "] Instrumenting vignettes for package: ", package, sep=""), stdout())
    ## column 3 of the vignette() result set holds the vignette names
    result.set <- vignette(package = package)
    vignettes.in.package <- result.set$results[,3]
    i.vignettes = 0
    n.vignettes = length(vignettes.in.package)
    for (vignette.name in vignettes.in.package) {
      ## one sqlite trace + one OK marker per (package, vignette) pair
      tracer.output.path <- paste(cfg$options$`output-dir`, "/data/", package, "-", vignette.name, ".sqlite", sep="")
      tracer.ok.path <- paste(cfg$options$`output-dir`, "/data/", package, "-", vignette.name, ".OK", sep="")
      i.vignettes <- i.vignettes + 1
      total.vignettes <- total.vignettes + 1
      write(paste("[", i.packages, "/", n.packages, "::", i.vignettes, "/", n.vignettes, "/", total.vignettes, "] Instrumenting vignette: ", vignette.name, " from ", package, sep=""), stdout())
      ## locate the extracted R code shipped in the package's doc folder
      one.vignette <- vignette(vignette.name, package = package)
      vignette.code.path <- paste(one.vignette$Dir, "doc", one.vignette$R, sep="/")
      ## already created at startup; this call merely warns if it exists
      dir.create(instrumented.code.dir)
      instrumented.code.path <- paste(instrumented.code.dir, "/", package, "-", vignette.name, ".R", sep="")
      write(paste("[", i.packages, "/", n.packages, "::", i.vignettes, "/", n.vignettes, "/", total.vignettes, "] Writing vignette to: ", instrumented.code.path, sep=""), stdout())
      ## wrap the vignette body (indented two spaces) between the tracer
      ## prologue and epilogue, then write the instrumented file out
      vignette.code <- readLines(vignette.code.path)
      instrumented.code <- c(rdt.cmd.head(tracer.output.path, verbose = FALSE),
                             paste0("  ", vignette.code),
                             rdt.cmd.tail(tracer.ok.path))
      write(instrumented.code, instrumented.code.path)
      write(paste("[", i.packages, "/", n.packages, "::", i.vignettes, "/", n.vignettes, "/", total.vignettes, "] Done instrumenting vignette: ", vignette.name, " from ", package, sep=""), stdout())
      if (cfg$options$compile) {
        ## byte-compile to <name>.Rc and emit a loader script that
        ## loadcmp()s the compiled file via its absolute path
        instrumented.code.path.compiled <- paste(tools::file_path_sans_ext(instrumented.code.path), "Rc", sep=".")
        cmpfile(instrumented.code.path, instrumented.code.path.compiled)
        instrumented.code.path.loader <- paste(tools::file_path_sans_ext(instrumented.code.path), "load", "R", sep=".")
        write(paste("loadcmp('", tools::file_path_as_absolute(instrumented.code.path.compiled), "')", sep=""), file=instrumented.code.path.loader)
        instrumented.vignette.paths[[ total.vignettes ]] <- c(package, vignette.name, instrumented.code.path.loader)
      } else {
        instrumented.vignette.paths[[ total.vignettes ]] <- c(package, vignette.name, instrumented.code.path)
      }
    }
    write(paste("[", i.packages, "/", n.packages, "] Done vignettes for package: ", package, sep=""), stdout())
  }
  instrumented.vignette.paths
}
# instrument.and.aggregate.vignettes <- function(packages) {
# i.packages <- 0
# n.packages <- length(packages)
#
# instrumented.vignette.paths <- list()
#
# for (package in packages) {
# i.packages <- i.packages + 1
#
# write(paste("[", i.packages, "/", n.packages, "] Instrumenting vignettes for package: ", package, sep=""), stdout())
#
# result.set <- vignette(package = package)
# vignettes.in.package <- result.set$results[,3]
#
# instrumented.code.path <- paste(instrumented.code.dir, "/", i.packages, "_", package, ".R", sep="")
# tracer.output.path <- paste(cfg$options$`output-dir`, "/", package, ".sqlite", sep="")
#
# i.vignettes = 0
# n.vignettes = length(vignettes.in.package)
#
# write(rdt.cmd.head(i.vignettes == 1, tracer.output.path), instrumented.code.path, append=FALSE)
#
# for (vignette.name in vignettes.in.package) {
# i.vignettes <- i.vignettes + 1
#
# write(paste("[", i.packages, "/", n.packages, "::", i.vignettes, "/", n.vignettes, "] Appending vignette: ", vignette.name, " from ", package, sep=""), stdout())
#
# one.vignette <- vignette(vignette.name, package = package)
# vignette.code.path <- paste(one.vignette$Dir, "doc", one.vignette$R, sep="/")
#
# write(paste("[", i.packages, "/", n.packages, "::", i.vignettes, "/", n.vignettes, "] Appending vignette to: ", instrumented.code.path, sep=""), stdout())
#
# vignette.code <- readLines(vignette.code.path)
# write(vignette.code, instrumented.code.path, append=TRUE)
#
# write(paste("[", i.packages, "/", n.packages, "::", i.vignettes, "/", n.vignettes, "] Done appending vignette: ", vignette.name, " from ", package, sep=""), stdout())
# }
#
# write(rdt.cmd.tail, instrumented.code.path, append=TRUE)
# instrumented.vignette.paths[[i.packages]] <- c(package, "all_vignettes", instrumented.code.path)
#
# write(paste("[", i.packages, "/", n.packages, "] Done vignettes for package: ", package, sep=""), stdout())
# }
#
# instrumented.vignette.paths
# }
## Run each instrumented vignette produced by instrument.vignettes().
##
## program.list - list of character triples c(package, vignette, path).
## new.process  - if TRUE, launch each file via the configured command in a
##                child process; if FALSE (default), source() it here.
##
## NOTE(review): the new.process=TRUE branch references `sys.env`, which is
## only defined in a commented-out line at the top of this file, and it
## passes the whole command line as system2()'s first argument (system2
## expects the executable and its arguments separately) -- this branch will
## fail as written; confirm before enabling it.
execute.external.programs <- function(program.list, new.process=FALSE) {
  i.programs <- 0
  n.programs <- length(program.list)
  for(program in program.list) {
    i.programs <- i.programs + 1
    ## unpack the (package, vignette, path) triple
    package.name <- program[1]
    vignette.name <- program[2]
    program.path <- program[3]
    write(paste("[", i.programs, "/", n.programs, "] Executing file: ", program.path, sep=""), stdout())
    ## per-vignette stdout/stderr log paths for the child-process variant
    log.out.path <- paste(log.dir, "/", i.programs, "_", package.name, "_", vignette.name, ".out", sep="")
    log.err.path <- paste(log.dir, "/", i.programs, "_", package.name, "_", vignette.name, ".err", sep="")
    if(new.process) {
      cmd.with.args <- paste(cfg$options$command, program.path, log.out.path, log.err.path, sep=" ")
      system2(cmd.with.args, env=sys.env, wait=TRUE)
      write(cmd.with.args, stdout())
    } else {
      ## evaluate in a fresh environment so vignettes cannot clobber this
      ## script's variables
      source(program.path, local=new.env()) #local=attach(NULL))
    }
    write(paste("[", i.programs, "/", n.programs, "] Done executing file: ", program.path, sep=""), stdout())
  }
}
#run <- function(..., separately=TRUE)
#  execute.external.programs((if(separately) instrument.vignettes else instrument.and.aggregate.vignettes)(list(...)))
## Entry point: for every package named on the command line, instrument its
## vignettes and run them in this R session (tracing happens inside each
## generated file).
if (length(cfg$args) > 0)
  execute.external.programs(instrument.vignettes(packages=cfg$args), new.process = FALSE)
|
0bbb22175f7e4ea72efe324dc3123493f1cf129d | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /cran/paws.database/man/rds_modify_custom_db_engine_version.Rd | 4f06ad2e64ac8b4cda242e5c7fc579bf26fa6e48 | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | true | 1,939 | rd | rds_modify_custom_db_engine_version.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rds_operations.R
\name{rds_modify_custom_db_engine_version}
\alias{rds_modify_custom_db_engine_version}
\title{Modifies the status of a custom engine version (CEV)}
\usage{
rds_modify_custom_db_engine_version(
Engine,
EngineVersion,
Description = NULL,
Status = NULL
)
}
\arguments{
\item{Engine}{[required] The DB engine. The only supported values are \code{custom-oracle-ee} and
\code{custom-oracle-ee-cdb}.}
\item{EngineVersion}{[required] The custom engine version (CEV) that you want to modify. This option is
required for RDS Custom for Oracle, but optional for Amazon RDS. The
combination of \code{Engine} and \code{EngineVersion} is unique per customer per
Amazon Web Services Region.}
\item{Description}{An optional description of your CEV.}
\item{Status}{The availability status to be assigned to the CEV. Valid values are as
follows:
\strong{available}
You can use this CEV to create a new RDS Custom DB instance.
\strong{inactive}
You can create a new RDS Custom instance by restoring a DB snapshot with
this CEV. You can't patch or create new instances with this CEV.
You can change any status to any status. A typical reason to change
status is to prevent the accidental use of a CEV, or to make a
deprecated CEV eligible for use again. For example, you might change the
status of your CEV from \code{available} to \code{inactive}, and from \code{inactive}
back to \code{available}. To change the availability status of the CEV, it
must not currently be in use by an RDS Custom instance, snapshot, or
automated backup.}
}
\description{
Modifies the status of a custom engine version (CEV). You can find CEVs to modify by calling \code{\link[=rds_describe_db_engine_versions]{describe_db_engine_versions}}.
See \url{https://www.paws-r-sdk.com/docs/rds_modify_custom_db_engine_version/} for full documentation.
}
\keyword{internal}
|
2af1cc6306bef3c91c21812d0b77527b69ff862c | 43eae47269ee5a073218dcda8eed82e3e18c6312 | /man/dir_name.Rd | c72c4d935c25b8661950cfb7aa964e066f24a1b7 | [] | no_license | wlandau/fbseqStudies | 4e494d25165130f95a983ee64d751ba3fb24bd3d | 0169ac5a00d457a261401f926b37e08587eace64 | refs/heads/main | 2021-01-19T04:39:24.890203 | 2017-10-21T02:36:07 | 2017-10-21T02:36:07 | 45,758,088 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 275 | rd | dir_name.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/util-dir.R
\name{dir_name}
\alias{dir_name}
\title{Function \code{dir_name}}
\usage{
dir_name()
}
\value{
directory name
}
\description{
name a new directory for storing simulations and results
}
|
4cc7cf16550870c5e30814f23f7cca9a8333b4ef | 660e39446906d71751b6d31f3d7597e7397a7ce8 | /David Causeur/Session 2/SLBDsession2.R | d0fe1dd715085e8fe16588b3d15ccfc3708efb66 | [] | no_license | AgroSTAT2122/Elias | bde7bad87828a8972280e64c6d34b23cb5eb396e | 4937a80dae6f806422fd3c1529f4a69c03c44797 | refs/heads/main | 2023-09-04T07:52:05.294544 | 2021-11-10T15:19:59 | 2021-11-10T15:19:59 | 422,130,624 | 0 | 1 | null | null | null | null | ISO-8859-1 | R | false | false | 7,888 | r | SLBDsession2.R |
# Set working directory
# NOTE(review): machine-specific path; adjust (or remove) when running the
# practical on another computer.
setwd("C:/Users/David/Dropbox/ADB_2021/Cours/Session2")
# Required packages
# NOTE(review): install.packages() runs on every execution; installation is
# only needed once per machine (comment these out afterwards).
install.packages("nnet") # Installation is only needed if the package is missing
install.packages("leaps")
install.packages("RcmdrMisc")
install.packages("pls")
install.packages("groupdata2")
install.packages("boot")
# Attach the packages (require() returns FALSE instead of erroring when a
# package is missing; library() would be stricter).
require(nnet) # Multinomial logistic regression
require(leaps) # For regsubsets
require(RcmdrMisc) # For Stepwise
require(pls) # For segments
require(groupdata2) # For fold
require(boot) # For cv.glm
# Regression with a real-valued response
## Import 'invasive probe' dataset: 11 carcass measurements plus LMP (lean
## meat percentage), the response to predict.
pig = read.table("./data/invasive.txt")
str(pig) # Overview of data
dim(pig) # Number of rows and columns in pig
summary(pig) # Provides a columnwise summary of the data table
## Exhaustive search of the best model
### Rank the models according to their RSS (using package leaps)
select = summary(regsubsets(LMP~.,data=pig,nvmax=11))
### Boolean selection matrix for best submodels (one row per submodel size)
select$which
### Best sub-model with one variable
colnames(select$which)[select$which[1,]]
### Best sub-model with two variables
colnames(select$which)[select$which[2,]]
### RSS plot for exhaustive feature selection
plot(1:11,select$rss,type="b",pch=16,xlab="Number of variables in the submodel",
     ylab="Residual sum-of-squares",main="Quality of fit of best submodels",
     cex.lab=1.25,cex.axis=1.25,cex.main=1.25,lwd=2,col="darkgray",bty="l")
grid()
### Equivalent R2 plot
plot(1:11,select$rsq,type="b",pch=16,xlab="Number of variables in the submodel",
     ylab=expression(R^2),main="Quality of fit of best submodels",
     cex.lab=1.25,cex.axis=1.25,cex.main=1.25,lwd=2,col="darkgray",bty="l")
grid()
## Prediction performance of the best submodels
### Best submodel with one explanatory variable
mod = glm(LMP~.,data=pig[,select$which[1,]])
### 10-fold CV PRESS
cvmod = cv.glm(pig[,select$which[1,]],mod,K=10)
cvmod$delta # MSEP
nrow(pig)*cvmod$delta # PRESS
select$rss[1] # RSS
### Cross-validated PRESS for each submodel size (delta[2] is the
### bias-adjusted estimate returned by cv.glm)
press = rep(0,11) # vector of PRESS for best sub-models
for (j in 1:11) {
   mod = glm(LMP~.,data=pig[,select$which[j,]])
   cvmod = cv.glm(pig[,select$which[j,]],mod,K=10)
   press[j] = nrow(pig)*cvmod$delta[2]
}
# PRESS plot for exhaustive feature selection (RSS in gray, CV PRESS in blue)
plot(1:11,select$rss,type="b",pch=16,xlab="Number of variables in the submodel",
     ylab="Residual sum-of-squares",main="Quality of fit of best submodels",
     cex.lab=1.25,cex.axis=1.25,cex.main=1.25,lwd=2,col="darkgray",bty="l")
lines(1:11,press,type="b",col="blue",pch=15,lwd=2)
legend("topright",lwd=2,pch=c(16,15),legend=c("Internal validation","Cross validation"),
       bty="n",cex=1.25,col=c("darkgray","blue"))
grid()
### BIC - AIC (AIC derived from BIC by swapping the penalty term)
bic = select$bic # BIC
aic = bic - (log(nrow(pig))-2)*(2:12) # AIC
# BIC-AIC plot for exhaustive feature selection
plot(1:11,bic,pch=16,bty="l",type="b",xlab="Number of explanatory variables",
     ylab="Information criterion",ylim=range(c(aic,bic)),col="darkgray",
     main="Exhaustive model selection",cex.lab=1.25,cex.axis=1.25,cex.main=1.25,lwd=2)
lines(1:11,aic,type="b",pch=17,lwd=2,col="coral1")
legend("topleft",lwd=2,lty=1,pch=c(16,17),col=c("darkgray","coral1"),bty="n",cex=1.25,legend=c("BIC","AIC"))
grid()
selected = select$which[which.min(bic),] # Indices of selected variables (+ LMP)
bestmod = glm(LMP~.,data=pig[,selected]) # Fits the best submodel
coef(bestmod)
## Prediction performance of the best submodel (minimizing AIC)
## NOTE(review): despite the comment above, the loop below minimizes BIC
## (which.min(bic)); confirm which criterion was intended.
n = nrow(pig) # Sample size
segments = cvsegments(k=10,N=n) # Defines a list of 10 random segments
segments
cvpredictions = rep(0,n) # Initialize a n-vector of predicted LMP
for (k in 1:10) {
   train = pig[-segments[[k]],] # Training dataset
   test = pig[segments[[k]],] # Test dataset
   select = summary(regsubsets(LMP~.,data=train,nvmax=11)) # run the selection on the training set
   bic = select$bic # BIC
   selected = select$which[which.min(bic),] # Indices of selected variables (+ LMP), pick the best BIC
   bestmod = glm(LMP~.,data=train[,selected]) # Fits the best submodel
   cvpredictions[segments[[k]]] = predict(bestmod,newdata=test)
}
PRESS = sum((pig$LMP-cvpredictions)^2)
# PRESS plot: per-size PRESS curve vs. the single-model CV PRESS (blue line)
plot(1:11,press,type="b",pch=16,xlab="Number of variables in the submodel",
     ylab="Residual sum-of-squares",main="Quality of fit of best submodels",
     cex.lab=1.25,cex.axis=1.25,cex.main=1.25,lwd=2,col="darkgray",bty="l")
abline(h=PRESS,col="blue",pch=15,lwd=2)
legend("topright",lwd=2,pch=c(16,16),legend=c("Best submodels","Best submodel"),
       bty="n",cex=1.25,col=c("darkgray","blue"))
grid()
# Regression with a K-class response
## Import coffee data (response: Localisation, the region of origin)
coffee = read.table("./data/coffee.txt")
dim(coffee) # Number of rows and columns in data
str(coffee) # Overview of data
coffee$Localisation = factor(coffee$Localisation) # Convert 'Localisation' into a factor
summary(coffee) # Provides a columnwise summary of the data table (8 first columns)
## ML fit the most complete model for 'Localisation'
mod = multinom(Localisation~.,data=coffee,maxit=200,trace=FALSE)
# ML fit of the logistic model
## Stepwise search of the best model using package RcmdrMisc
## (steps= limits the number of stepwise moves; omitted = run to convergence)
select = stepwise(mod,direction="forward/backward",criterion="AIC",steps=1)
select = stepwise(mod,direction="forward/backward",criterion="AIC",steps=2)
select = stepwise(mod,direction="forward/backward",criterion="AIC",steps=3)
select = stepwise(mod,direction="forward/backward",criterion="AIC")
select = stepwise(mod,direction="forward/backward",criterion="BIC")
## Accuracy for the best sub-models
observed = coffee$Localisation
### Accuracy values for best submodels (in-sample / internal validation)
acc = rep(0,5) # Initialize a vector of accuracy values
for (k in 1:5) {
  select = stepwise(mod,direction="forward/backward",criterion="AIC",steps=k,trace=0)
  predictions = predict(select,type="class")
  acc[k] = mean(predictions==observed)
}
### 10-fold cross-validated accuracy values for best submodels
### (folds balanced on the class variable via groupdata2::fold)
cvacc = rep(0,5) # Initialize a vector of accuracy values
folds = fold(coffee,k=10,cat_col="Localisation")$".folds" # Create balanced segments
folds
cvpredictions = rep("1",nrow(coffee)) # Initialize a vector of predicted classes
for (k in 1:5) {
  select = stepwise(mod,direction="forward/backward",criterion="AIC",steps=k,trace=0)
  for (j in 1:10) {
    train = coffee[folds!=j,]
    test = coffee[folds==j,]
    submod = multinom(formula(select),data=train,trace=FALSE,maxit=200)
    cvpredictions[folds==j] = predict(submod,newdata=test,type="class")
  }
  cvacc[k] = mean(cvpredictions==coffee$Localisation)
}
### Accuracy plot for stepwise feature selection
plot(1:5,acc,type="b",pch=16,xlab="Number of variables in the submodel",
     ylab="Accuracy",main="Quality of fit of best submodels",
     cex.lab=1.25,cex.axis=1.25,cex.main=1.25,lwd=2,col="darkgray",bty="l")
lines(1:5,cvacc,type="b",col="blue",pch=15,lwd=2)
legend("topleft",lwd=2,pch=c(16,15),legend=c("Internal validation","Cross validation"),
       bty="n",cex=1.25,col=c("darkgray","blue"))
grid()
### 10-fold cross-validated accuracy for best submodel
folds = fold(coffee,k=10,cat_col="Localisation")$".folds" # Create balanced segments
folds
cvpredictions = rep("1",nrow(coffee)) # Initialize a vector of predicted classes
for (j in 1:10) {
train = coffee[folds!=j,]
test = coffee[folds==j,]
mod = multinom(Localisation~.,data=train,trace=FALSE,maxit=200)
select = stepwise(mod,direction="forward/backward",criterion="AIC",trace=0)
cvpredictions[folds==j] = predict(select,newdata=test,type="class")
print(paste("Segment ",j,sep=""))
}
mean(cvpredictions==coffee$Localisation)
|
76db7659fe6d8752fc5d81c595b16c4a89aa52aa | 5bccacb147fda0d1dff650916280e13d51f530f5 | /man/echo_class.Rd | a97078b7ccbbe56355d2145cd390f1cf9750c2cc | [
"MIT"
] | permissive | gaoce/raddins | 359d10e1a0f5144c5395f640354ef717f0cc081d | cb511c436808161b7b33932b13daa806def1d081 | refs/heads/master | 2021-01-12T17:06:23.620667 | 2017-05-09T17:53:46 | 2017-05-09T17:53:46 | 69,052,774 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 275 | rd | echo_class.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/inspection.R
\name{echo_class}
\alias{echo_class}
\title{Echo the Class of Object Under Cursor to Console.}
\usage{
echo_class()
}
\description{
Echo the Class of Object Under Cursor to Console.
}
|
75e15fc831e98befb61ef3da1fb9878ba58d1e82 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/SHELF/examples/plotQuartiles.Rd.R | 35e2418cb899fc7373e7fbaf74a24c288601270a | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 395 | r | plotQuartiles.Rd.R | library(SHELF)
### Name: plotQuartiles
### Title: Plot elicted quartiles, median and plausible range for each
### expert
### Aliases: plotQuartiles
### ** Examples
## Not run:
##D l <- c(2, 1, 5, 1)
##D u <- c(95, 90, 65, 40)
##D v <- matrix(c(15, 25, 40,
##D 10, 20, 40,
##D 10, 15, 25,
##D 5, 10, 20),
##D 3, 4)
##D plotQuartiles(vals = v, lower = l, upper = u)
## End(Not run)
|
cb9237c23e9c4dc54635976806155f0a4ec48ee1 | 3c25f49d8592847a741b9324b482eb769721d985 | /R/composite.R | 3286dff9bdbe8cefe88920f39dd71abc2f6b9a80 | [] | no_license | talbano/equate | 5de3d041aab6817dfad9b2fef9a37ca87321aeef | 3583de82faf337c4c9e0651db9293ed8b8a768c5 | refs/heads/master | 2022-12-01T17:23:14.460656 | 2022-12-01T16:12:50 | 2022-12-01T16:12:50 | 44,709,479 | 7 | 4 | null | null | null | null | UTF-8 | R | false | false | 5,169 | r | composite.R | #' Composite Linking and Equating
#'
#' This function creates a composite linking or equating as a combination of
#' two or more other linking or equating functions.
#'
#' Composite linking and equating create a single linking or equating function
#' as a weighted combination of two or more other linking or equating
#' functions. See Holland and Strawderman (2011) for details.
#'
#' @param x for the default method, \code{x} is a matrix of equating functions,
#' with one function per column. Otherwise, \code{x} is a list of equatings,
#' where each element is an object of class \dQuote{\code{\link{equate}}}.
#' @param wc vector of weights for creating the composite. \code{length(wc)}
#' should match either \code{ncol(x)} for the default method or
#' \code{length(x)}.
#' @param name an optional name, used to label the output. If missing, a name
#' will be created using \code{x}.
#' @param p integer specifying the type of circle used to define symmetry.
#' @param symmetric logical, with default \code{FALSE}, indicating whether or
#' not weights \code{wc} should be modified to create symmetric weights. Only
#' supported for composites of linear functions.
#' @param verbose logical, with default \code{TRUE}, indicating whether or not
#' full output should be returned. When \code{FALSE}, only the equated scores
#' are returned.
#' @param \dots further arguments passed to or from other functions.
#' @return For the default method, and when \code{verbose = FALSE}, a vector of
#' composite equated scores is returned. Otherwise, a list of equating output
#' is returned, including output for the composite and each function being
#' combined.
#' @author Anthony Albano \email{tony.d.albano@@gmail.com}
#' @seealso \code{\link{equate}}
#' @references Holland, P. W., and Strawderman, W. E. (2011). How to average
#' equating functions, if you must. In A. A. von Davier (Ed.), Statistical
#' models for test equating, scaling, and linking (pp. 89-107). New York, NY:
#' Springer.
#' @keywords methods
#' @examples
#' # See vignette("equatevignette") for additional examples
#'
#' # Example from the equate help file, without the bootstrapping
#' # Random groups equating for (1) identity, (2) mean,
#' # (3) linear, (4) equipercentile with loglinear
#' # smoothing, and (5) a composite of mean and identity
#' rx <- as.freqtab(ACTmath[, 1:2])
#' ry <- as.freqtab(ACTmath[, c(1, 3)])
#' set.seed(2007)
#'
#' req1 <- equate(rx, ry, type = "i")
#' req2 <- equate(rx, ry, type = "m")
#' req3 <- equate(rx, ry, type = "l")
#' req4 <- equate(rx, ry, type = "e", smooth = "loglin",
#' degrees = 3)
#' req5 <- composite(list(req1, req2), wc = .5, symmetric = TRUE)
#'
#' # Compare equating functions
#' plot(req1, req2, req3, req4, req5[[1]], addident = FALSE)
#'
#' @export composite
# S3 generic; dispatch on class(x): methods default, equate.list, list below.
composite <- function(x, ...) UseMethod("composite")
#' @describeIn composite Default method for a matrix of equating functions,
#' one per column.
#' @export
composite.default <- function(x, wc, ...) {
# Weighted combination of the component functions: matrix product of the
# score matrix (one equating function per column) with the weight vector.
x %*% wc
}
#' @describeIn composite Create composite across functions in
#' \dQuote{\code{equate.list}} object.
#' @export
composite.equate.list <- function(x, wc, name, symmetric = FALSE,
p = 1, verbose = TRUE, ...) {
# Default: equal weight for every component equating function.
if(missing(wc))
wc <- rep(1/length(x), length(x))
if(symmetric) {
# Symmetric weights are only defined for linear-family components
# (Holland & Strawderman, 2011); otherwise fall back to raw weights.
if(!all(sapply(x, function(z) z$type) %in%
c("identity", "mean", "linear", "general"))) {
warning("all components must be linear to create ",
"symmetric weights")
wcs <- wc
symmetric <- FALSE
}
else {
# Rescale the weights by each component's slope so the composite is
# symmetric with respect to the type-p circle.
slopes <- sapply(x, function(z) z$coef[2])
wcs <- (wc*(1 + slopes^p)^-(1/p))/
sum(wc*(1 + slopes^p)^-(1/p))
}
}
else wcs <- wc
# Composite equated scores: weighted combination of the component
# concordance vectors (delegates to the default matrix method).
yx <- composite.default(sapply(x, function(z) z$conc$yx),
wcs)
if(verbose) {
# Full output: build a composite object (metadata borrowed from the
# first component) and return it alongside the original components.
if(missing(name))
name <- paste("Composite Equating:",
strsplit(x[[1]]$name, ": ")[[1]][2])
out <- list(name = name, type = "composite",
design = x[[1]]$design, x = x[[1]]$x, y = x[[1]]$y,
concordance = data.frame(scale = x[[1]]$conc$scale,
yx = yx), wc = wc, symmetric = symmetric)
if(symmetric) out$wcs <- wcs
out <- as.composite(out)
out <- as.equate.list(c(list(out), x))
}
else out <- yx
return(out)
}
#' @describeIn composite Create composite across functions in
#' \dQuote{\code{list}} object.
#' @export
composite.list <- function(x, wc, name, symmetric = FALSE,
p = 1, verbose = TRUE, ...) {
# Validate up front: every element must be an 'equate' object, so the
# error is raised here rather than deep inside as.equate.list().
# vapply() guarantees a logical vector even for empty or degenerate
# input, where sapply() could return a list and defeat all().
if(!all(vapply(x, is.equate, logical(1))))
stop("all elements of 'x' must be class 'equate'")
# Delegate to the equate.list method after conversion.
composite(as.equate.list(x), wc, name,
symmetric, p, verbose, ...)
}
as.composite <- function(x) {
# Tag x as a composite equating: "composite" first so composite-specific
# methods dispatch before the "equate" fallbacks.
structure(x, class = c("composite", "equate"))
}
is.composite <- function(x) {
# TRUE only when "composite" is the *first* (most specific) class,
# mirroring how as.composite() builds the class vector.
identical(class(x)[1], "composite")
}
#' @export
# Print method for composite equatings: shows the name, design, a table of
# summary statistics for x, y, and the composite equated scores (yx), and
# the coefficients when present. Returns x invisibly for piping/chaining.
# NOTE(review): relies on package helpers margin() and as.freqtab(), which
# are defined elsewhere in the package.
print.composite <- function(x, ...) {
cat("\n")
cat(x$name, "\n\n")
cat("Design:", x$design, "\n\n")
# Row-bind summaries of the x scale, y scale, and the composite yx scores
# (yx paired with the x frequencies to form a frequency table).
stats <- rbind(x = summary(margin(x$x)),
y = summary(margin(x$y)),
yx = summary(as.freqtab(cbind(x$concordance[, 2],
c(margin(x$x))))))
cat("Summary Statistics:\n")
print(round(stats, 2))
cat("\n")
if(!is.null(x$coef)) {
cat("Coefficients:\n")
print(round(x$coef, 4))
cat("\n")
}
invisible(x)
}
|
86716f71a7007dec20eb9e1c0ec7a1105297ebc8 | 2d34708b03cdf802018f17d0ba150df6772b6897 | /googleyoutubev3.auto/man/InvideoTiming.Rd | adf7d0e376ab0b74d0627d46ad16b01455742bb3 | [
"MIT"
] | permissive | GVersteeg/autoGoogleAPI | 8b3dda19fae2f012e11b3a18a330a4d0da474921 | f4850822230ef2f5552c9a5f42e397d9ae027a18 | refs/heads/master | 2020-09-28T20:20:58.023495 | 2017-03-05T19:50:39 | 2017-03-05T19:50:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 676 | rd | InvideoTiming.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/youtube_objects.R
\name{InvideoTiming}
\alias{InvideoTiming}
\title{InvideoTiming Object}
\usage{
InvideoTiming(durationMs = NULL, offsetMs = NULL, type = NULL)
}
\arguments{
\item{durationMs}{Defines the duration in milliseconds for which the promotion should be displayed}
\item{offsetMs}{Defines the time at which the promotion will appear}
\item{type}{Describes a timing type}
}
\value{
InvideoTiming object
}
\description{
InvideoTiming Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Describes a temporal position of a visual widget inside a video.
}
|
55df90a95dadea3474b62c551a233b04b29299d7 | 7cf945ec1e65a9e11f415a27972e3bb25f7fa47d | /server.R | 469a0caed350d28e791583353ef88a3184e03e93 | [] | no_license | ThomasCharuel/DataAnalytics_R_Lab_Shiny | b4c7a3861feff551571c28f5f46cae19ee9603e5 | 9a49fef94c03fa6ca83216b892f803b07d76ca6c | refs/heads/master | 2021-08-17T00:00:31.865584 | 2017-11-20T14:59:58 | 2017-11-20T14:59:58 | 111,424,304 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 328 | r | server.R | library(shiny)
library(readxl)
# Shiny server: renders a summary() of the rows of an uploaded Excel file
# that belong to the user id selected in the UI (input$user_id).
shinyServer(function(input, output) {
output$summary <- renderPrint({
# Get input file (NULL until the user uploads one)
inFile <- input$dataset_file
if(is.null(inFile))
return(NULL)
# Load dataset from the temporary upload path
userdata <- read_excel(inFile$datapath)
# Summarise only the rows matching the selected user
summary(userdata[userdata$User==input$user_id, ])
})
})
5253eea766dcc06beca113bee1562dbbc45baff2 | 50bebbcb9bd031384f66e5c96ed2f8e51880930c | /man/retrieves.Rd | ebd6555afc14f729ed2c77c45809fdde59336e74 | [] | no_license | TaliaferroLab/FeatureReachR | 6222fe11af7e01e64d0148633b575c8531154196 | 3ec193c250b3b3414b4cbd3a776a09ec7555da88 | refs/heads/master | 2023-07-10T15:29:55.792272 | 2021-08-12T18:41:31 | 2021-08-12T18:41:31 | 229,128,257 | 3 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,136 | rd | retrieves.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getLengthorGC.R
\name{get_length}
\alias{get_length}
\alias{get_GC}
\title{Get length or GC of each sequence in a DNAStringSet object}
\usage{
get_length(DNAStringSet)
get_GC(DNAStringSet)
}
\arguments{
\item{DNAStringSet}{A DNAStringSet Object. Use
\code{\link[Biostrings]{readDNAStringSet}} on a fasta file to create.}
}
\value{
A data frame with the length or GC content for each sequence in a
DNAStringSet object
}
\description{
These functions return data frames containing the length or GC content for
each sequence in a DNAStringSet object.
}
\section{Functions}{
\itemize{
\item \code{get_length}: the length of each sequence in a
DNAStringSet object and returns a dataframe.
\item \code{get_GC}: both the length and GC content of each
sequence in a DNAStringSet object and returns a dataframe.
}}
\examples{
case_fasta <- Biostrings::readDNAStringSet("example_case.fa")
get_length(case_fasta)
get_GC(case_fasta)
}
\seealso{
\code{\link{write_Sequence}}, \code{\link[Biostrings]{readDNAStringSet}},
\code{\link[Biostrings]{letterFrequency}}
}
|
d332fd6a5d9d064910961ad2db3e6c1c18547017 | 01b60ed02d615d68ed1cf4bd97a6145b36c98a1b | /exploratory/pb_autocor_notbrms.R | a44570ff575d8acf618c82eb6d2260c0ae7b710a | [] | no_license | patdumandan/RodentPhenology | 64f5f4af0a1298bb62b8afd13b0589ce23e23858 | 6114ee75803a3553011d5f15bc7d23199f9458e1 | refs/heads/main | 2023-07-25T11:46:25.764409 | 2023-07-07T16:45:15 | 2023-07-07T16:45:15 | 315,314,816 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,387 | r | pb_autocor_notbrms.R | total_proportion=read.csv("./reconfigured_data/raw_cleaned/reprod_propn_male.csv")
PB_dat_M=total_proportion%>%filter(species=="PB", !(treatment=="spectabs"))
PB_dat_M[is.na(PB_dat_M)] <- 0 #set non-detects to 0
PB_dat_M$trt<-ifelse(PB_dat_M$treatment=="control", 0, 1)
PB_dat_M$years=(PB_dat_M$year-mean(PB_dat_M$year))/(2*sd(PB_dat_M$year)) #standardize year
PBprop=PB_dat_M$proportion
PBrep=PB_dat_M$reproductive
dat_list=list(
N=length(PB_dat_M$month),
y=PB_dat_M$reproductive,
n=PB_dat_M$abundance,
year=PB_dat_M$years,
treatment=PB_dat_M$trt,
mon_cos=PB_dat_M$mon_cos,
mon_sin=PB_dat_M$mon_sin,
Nmon=length(unique(PB_dat_M$month)),
Nsp=length(unique(PB_dat_M$species)))
pbyr_autocor=stan(model_code=
"functions {
matrix cov_matrix_ar1(real ar, real sigma, int nrows) {
matrix[nrows, nrows] mat;
vector[nrows - 1] gamma;
mat = diag_matrix(rep_vector(1, nrows));
for (i in 2:nrows) {
gamma[i - 1] = pow(ar, i - 1);
for (j in 1:(i - 1)) {
mat[i, j] = gamma[i - j];
mat[j, i] = gamma[i - j];
}
}
return sigma^2 / (1 - ar^2) * mat;
}
}
data {
int<lower=1> N; // total number of observations
// vector [N] year;// year
int <lower=0> y[N]; // reproductive indivs
int <lower=0> n[N]; // total males
}
transformed data {
vector[N] se2 = rep_vector(0, N);
}
parameters {
real alpha;
// real year_eff;
real<lower=0> sigma; // residual SD
real <lower=-1,upper=1> phi; // autoregressive effects
real <lower=0, upper=1> pred_repro[N] ;//proportion of reproductive indivs
real <lower=0>psi;//overdispersion param
}
transformed parameters{
vector [N] repro_mu; //so we can add statement describing proportion (not able to do in parameters block)
vector[N] A;
vector [N] B;
//model:
for (i in 1:N){
repro_mu[i]= inv_logit(alpha);
}
A = repro_mu * psi;
B = (1 - repro_mu)* psi;
}
model {
matrix[N, N] res_cov_matrix;
matrix[N, N] Sigma;
res_cov_matrix = cov_matrix_ar1(phi, sigma, N);
Sigma = res_cov_matrix + diag_matrix(se2);
Sigma = cholesky_decompose(Sigma);
//likelihood:
alpha~normal(0,1);
// year_eff~ normal (0,1);
psi~normal(0,1);
sigma ~ cauchy(0,5);
pred_repro ~beta (A,B);
y~ binomial(n, pred_repro);
}
generated quantities {
real pred_y [N];//predictions on proportions
real log_lik [N];// for looic calculations
pred_y = beta_rng(A, B);
for (x in 1:N){
log_lik[x]= beta_lpdf(pred_repro[x]| A[x], B[x]);}
}", data=dat_list, chains=2, iter=300)
# --- Posterior predictive checks for the fitted AR(1) model ---
launch_shinystan(pbyr_autocor) # interactive diagnostics in the browser
saveRDS(pbyr_autocor, "pbyr_autocor1.RDS") # cache the fitted model
pbyr_autocor1=readRDS("pbyr_autocor1.RDS")
yrep=rstan::extract(pbyr_autocor1)$pred_y # posterior predictive draws (iterations x observations)
# Posterior predictive trajectories for control plots in March
con_pb=yrep[,which(PB_dat_M$treatment=="control"& PB_dat_M$month==3)]
con_pbmat=as.matrix(con_pb)
con_pbs=con_pbmat[1:300,] # first 300 posterior draws
matplot(t(con_pbs), type="l", col="grey", main="PB control (March)")
mean_con_pb=apply(con_pb, 2, mean) # posterior mean per observation
con_pb_obs=PB_dat_M%>%filter(treatment=="control"& month==3)
lines(mean_con_pb~c(1:length(mean_con_pb)), col="white")
points(con_pb_obs$proportion, col="black", cex=1 ) # overlay observed proportions
# NOTE(review): 'y' is not defined in this excerpt -- presumably the observed
# response vector; confirm before running this line.
ppc_dens_overlay(y, yrep)
# Durbin-Watson test on residuals (observed minus posterior mean) vs year
require(lmtest)
res=rstan::extract(pbyr_autocor)$pred_y
res1=apply(res,2,mean)
resq=cbind(res1, PB_dat_M$proportion)
str(resq)
resq=as.data.frame(resq)
resq$diff=resq$V2-resq$res1 # residual: observed proportion minus predicted mean
dwtest(resq$diff~PB_dat_M$year) #1.99; P=0.42
b941eb432b0c797a29bf6c0198f2b0077e588238 | 76af661268ac6bad81a21749a03f50b998701019 | /courses/PB - statistics/exercise 1/functions.R | 32f111fd4f0e8218d94e318d0c0bf5f6ad4bc8b2 | [] | no_license | siemionides/learning-data-sience | 8136e3e002c6b13341e463d18322bd272e6cacb5 | 5439c8111054f7b76379aec121d7b0a991b36fa5 | refs/heads/master | 2020-05-21T02:10:14.066505 | 2017-04-12T17:19:12 | 2017-04-12T17:19:12 | 84,557,190 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,242 | r | functions.R | m = function(name, ...){
message(paste(name, ...))
}
descriptive_statistics = function(zmienna, name){
#m('analysis of: ', name)
#sprawdzenie statystyk opisoych
summary(zmienna)
#m("Min.:", summary(zmienna)['Min.'])
#m("1st Qu.:", summary(zmienna)['1st Qu.'])
#message(paste("Median:", summary(zmienna)['Median']))
#message(paste("Mean:", summary(zmienna)['Mean']))
#message(paste("3rd Qu.:", summary(zmienna)['3rd Qu.']))
#message(paste("Max.:", summary(zmienna)['Max.']))
m('variance', var(zmienna))
m('standard dev', sd(zmienna))
# check modality
# skoścność z pakietu e1071
m('skewness', skewness(zmienna), '(jeżeli dodatnie to prawy brzeg jest dłuższy, jeżeli ujemne - to lewy)')
# kurtoza z pakietu e1071
m('kurtoza',kurtosis(zmienna), '+ : wystaje ponad normalny; - : poniżej normalnego')
# sprawdź czy zmienne mają rozkład normalny (jezeli p-value < 0.05, to NIE JEST normalny)
m('shapiro: p-value: ',shapiro.test(zmienna)[2], '(if p-value < 0.05 - NOT NORMAL)')
#quartz()
# histogram
#par(mfrow=c(1,2))
#hist(zmienna, plot = TRUE, main = paste('histogram:',name))
#skrzynka z wąsami
# quartz()
#boxplot(zmienna, main = paste('boxplot:',name))
}
|
30299c6d3fddb0d45c7ce3ed530098f6f2613b8e | 52cf2b700d2271b3346580b342252498762fc652 | /Chicago/man/estimateBrownianComponent.Rd | 9b27f6bd8464355cdd3edbf53f583405d4176b75 | [] | no_license | dovetail-genomics/chicago | 985e077782f7bcad5e8c8e65c62fdebe62a5aff0 | 81bbe3164b012a96f17af84aee04daade794a8c3 | refs/heads/main | 2023-06-15T17:57:23.506628 | 2021-07-03T20:00:22 | 2021-07-03T20:00:22 | 379,648,353 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,519 | rd | estimateBrownianComponent.Rd | \name{estimateBrownianComponent}
\alias{estimateBrownianNoise}
\alias{estimateBrownianComponent}
\title{
Estimate Brownian background component.
}
\description{
Estimates the dispersion, and adds a a \code{Bmean} column giving the expected number of Brownian reads.
Usually, the dispersion is not calculated on the full dataset - rather, a subsample of baits is taken, and the dispersion is calculated on that. The number of baits used is taken from \code{brownianNoise.subset} (with an \code{NA} value meaning that the entire dataset is used, and no subsampling is performed).
(Note that the alias \code{estimateBrownianNoise()} is provided for back-compatibility.)
}
\usage{
estimateBrownianNoise(cd)
}
\arguments{
\item{cd}{A \code{chicagoData} object.}
}
\section{Warning}{
The object \code{intData(x)} is updated by reference. Thus, \code{intData(cd)} will be altered. See vignette for further information.
}
\value{
An object of class \code{chicagoData}.
}
\author{
Mikhail Spivakov, Jonathan Cairns, Paula Freire Pritchett
}
\seealso{
\code{\link{chicagoPipeline}}
}
\examples{
data(cdUnitTest)
##modifications to cdUnitTest, ensuring it uses correct design directory
designDir <- file.path(system.file("extdata", package="Chicago"), "unitTestDesign")
cdUnitTest <- modifySettings(cd=cdUnitTest, designDir=designDir)
##make cdUnitTest use the full subset of baits
cdUnitTest <- modifySettings(cd=cdUnitTest, settings=list(brownianNoise.subset=NA))
cdUnitTest <- estimateBrownianComponent(cdUnitTest)
}
|
3c8ae8e4e65442db36c1471d69a6e946ae6acd46 | 9b321a21075bd95826406adfa7f0905211a298c4 | /man/simpleCap.Rd | 3b05edcc90fc71679ce5805f2c7799b7f4f604d5 | [] | no_license | ColoradoDemography/ProfileDashboard | e1db02af72924db6b81a4811f514d4a77423f000 | 0743aa1575f460510eddca786029e2335e50a002 | refs/heads/master | 2021-08-08T15:29:49.894394 | 2018-10-09T21:27:13 | 2018-10-09T21:27:13 | 111,599,362 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 337 | rd | simpleCap.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simpleCap.R
\name{simpleCap}
\alias{simpleCap}
\title{simpleCap converts strings to proper case,}
\usage{
simpleCap(x)
}
\arguments{
\item{x}{input string}
}
\value{
String formatted in Proper Case
}
\description{
stolen from Stackoverflow.
}
|
4180452717ef9c6e1751bcc62ef23202b9f3a08a | 8130e4802356a44450750d6f006c780fde71c64a | /统计实验/赶火车问题.R | 21e1bf73d928beb91dad7f83086246c1e81faf88 | [] | no_license | chenbingshun98/bsdzp | ba40ab069a4a9abb06abdd8b619b153a87bd132f | e5404f88fd93d6bb752f237429e896207f6074e3 | refs/heads/master | 2021-10-12T05:41:59.539543 | 2021-10-04T06:02:29 | 2021-10-04T06:02:29 | 246,439,915 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,159 | r | 赶火车问题.R | windows(7, 3)
prb = replicate(100, { #括号内程序重复100次
x = sample(c(0, 5, 10), 1, prob = c(0.7, 0.2, 0.1))
y = sample(c(28, 30, 32, 34), 1, prob = c(0.3, 0.4, 0.2, 0.1))
plot(0:40, rep(1, 41), type = "n", xlab = "time", ylab = "",
axes = FALSE)
axis(1, 0:40)
r = rnorm(1, 30, 2)
points(x, 1, pch = 15)
i = 0
while (i <= r) {
i = i + 1
segments(x, 1, x + i, 1)
if (x + i >= y)
points(y, 1, pch = 19)
Sys.sleep(0.1)
}
points(y, 1, pch = 19)
title(ifelse(x + r <= y, "poor... missed the train!", "Bingo!
catched the train!"))
Sys.sleep(4)
x + r > y
})
mean(prb)
library(tidyverse)
tidy_data <- tibble(
x = replicate(
100,sample(c(0, 5, 10), 1, prob = c(0.7, 0.2, 0.1))
),
y = replicate(
100,sample(c(28, 30, 32, 34), 1, prob = c(0.3, 0.4, 0.2, 0.1))
)
)
tidy_data
tidy_data <- tidy_data %>% mutate(r=rnorm(100, 30, 2)) %>%
mutate(bool=if_else(x+r>y,1,0))
tidy_data %>% pull(bool) %>% mean()
tidy_data %>%
summarise(
across(bool,.names="{.fn}_{.col}",list(平均=mean))
)
tidy_data %>%
summarise(
across(bool,.names="{.fn}_{.col}",~mean(.))
)
?summarise
|
d8a5032a0beaa0d0c50fc8cfeb3389134cc38255 | 07dce07a38713513603901a3daa5dd23d264a093 | /server.R | a627a9c2257ff28414663329105d527389a1a9ff | [] | no_license | avonholle/int-and-conf | 614ae4210df031a3559e7e81b28d21d011598029 | 5da68a23c74e589cb21b6fb67f8540f2136b7ea4 | refs/heads/master | 2021-03-12T21:35:04.627508 | 2015-09-20T12:25:24 | 2015-09-20T12:25:24 | 31,473,633 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,585 | r | server.R | library(shiny)
library(stargazer)
library(contrast)
library(Epi)
library(reshape)
library(ggplot2)
library(reshape2)
library(diagram)
# Simulate a binary-outcome dataset for a logistic model with exposure x,
# covariate z, and (optionally) confounding of z by x and an x:z
# interaction in the outcome model.
#
# ns: number of people in sample
# beta0: coefficient for intercept
# beta1: coefficient for x term
# beta2: coefficient for z term
# beta3: coefficient for interaction term between x and z
# confound: should z depend on x (TRUE) or be independent (FALSE)?
# interact: should the x:z interaction enter the linear predictor?
#
# Returns a data.frame with columns y (outcome), x (exposure), z (covariate).
created <- function(ns, beta0, beta1, beta2, beta3, confound=FALSE, interact=FALSE){
# Exposure x: Bernoulli(0.5) draw for each subject.
x.1 <- sample(c(0,1), ns, replace=TRUE, prob=c(0.5,0.5))
ez <- rnorm(ns, mean=0, sd=0.01) # random error term for z
# Covariate z: if confound, z depends on x through a logistic model;
# otherwise a 50/50 Bernoulli draw.
# NOTE: ifelse(rep(confound, ns), ...) evaluates *both* branches, which
# keeps the RNG stream identical regardless of which branch is used.
z.1 <- ifelse(rep(confound,ns),
rbinom(ns, 1, expit(-(round(3*x.1 + ez, 1)))),
sample(c(0,1), ns, replace=TRUE, prob=c(0.5,0.5))
)
e4 <- rnorm(ns, mean=0, sd=0.01) # random error term on the linear predictor
# Linear predictor, with or without the multiplicative interaction term.
lp <- ifelse(rep(interact, ns),
beta0 + beta1*x.1 + beta2*z.1 + beta3*x.1*z.1 + e4,
beta0 + beta1*x.1 + beta2*z.1 + e4)
pr <- expit(lp) # inverse-logit: probability that y = 1
y <- rbinom(ns, 1, pr) # Bernoulli outcome with subject-specific probability
data.frame(y=y, x=x.1, z=z.1)
}
expit <- function(x) {
# Inverse-logit (logistic) transform: maps the real line into (0, 1).
1 / (1 + exp(-x))
}
shinyServer(function(input, output) {
withMathJax()
# take parameters entered with the slider bar and use to simulate data
ldata <- reactive({
created(input$n1, input$beta0, input$beta1, input$beta2, input$beta3, input$conf, input$interact)
})
# get a summary of the simulated data
output$table1 <- renderTable(print(ldata()[1:10,]))
# get 2x2 tables by strata
# This method doesn't work in putting the object into the server.R
# #########################################################
# tbl.1 <- reactive({
# by(ldata(), ldata()$z, function(x) twoby2(table(x$y, x$x)))
# })
#
# output$twobytwo.z0 <- renderTable({ tbl.1()$"0"$table })
# output$twobytwo.z1 <- renderTable({ tbl.1()$"1"$table })
# alternate way of getting two by two table
output$table1alt <- renderText ({
(with(ldata(), table(z, y, x)))
}) # doesn't work so well
# alternate way of getting two by two table
output$table2alt <- renderTable ({
dcast(ldata(), z + y ~ x, value.var="z", fun.aggregate=length)
})
# Use simulated data to run logistic regression
results1 <- reactive({
glm( y ~ x*z, family="binomial", data=ldata() )
})
# use simulated data to run logistic regression without interaction -- just crude estimate
# ##################################################################
results.none <- reactive({
glm( y ~ x , family="binomial", data=ldata() )
})
res.none.check <- reactive({
res.none <- glm( y ~ x , family="binomial", data=ldata() )
o.x.crude <- contrast(res.none,
list(x = 1),
list(x = 0)) # difference in log odds of x=1 and x=0
return(list(results.none=res.none, odds.x.crude=o.x.crude))
})
output$check.odds.crude.2 <- renderText({
stargazer(res.none.check()$odds.x.crude$Contrast, type="html")
})
output$check.odds.crude.3 <- renderText({
stargazer(res.none.check()$results.none, type="html")
})
compare <- reactive({
odds.x.crude <- contrast(results.none(),
list(x = 1),
list(x = 0)) # difference in log odds of x=1 and x=0
odds.x.z0 <- contrast(results1(),
list(x=1, z=0),
list(x=0, z=0))
odds.x.z1 <- contrast(results1(),
list(x=1, z=1),
list(x=0, z=1))
return(list(crude.odds=odds.x.crude, z0.odds=odds.x.z0, z1.odds=odds.x.z1))
})
output$compare.odds.crude <- renderText(
paste("Crude odds is:",
round(exp(compare()$crude.odds$Contrast), digits=2))
)
output$check.odds.crude <- renderText({
stargazer(results.none(), type="html")
})
output$compare.odds.z0 <- renderText(
paste("Odds at strata z=0 is:",
round(exp(compare()$z0.odds$Contrast), 2))
)
output$compare.odds.z1 <- renderText(
paste("Odds at strata z=1 is:",
round(exp(compare()$z1.odds$Contrast), 2))
)
# Output results to a table
# ############################
output$summary <- renderTable({
summary(results1())
})
output$summary.2 <- renderText({
stargazer(results1(), type="html")
})
# plot odds
# ########################
# extract data
odds <- reactive({
odds.4.00 <- contrast(results1(), a = list(x = 0, z=0))
odds.4.01 <- contrast(results1(), a = list(x = 0, z=1))
odds.4.10 <- contrast(results1(), a = list(x = 1, z=0))
odds.4.11 <- contrast(results1(), a = list(x = 1, z=1))
do <- cbind.data.frame(
type=c("00", "01", "10", "11"),
rbind.data.frame( odds.4.00[c(3:6)],
odds.4.01[c(3:6)],
odds.4.10[c(3:6)],
odds.4.11[c(3:6)]))
do$type <- factor(do$type, labels=c("x=0 and z=0",
"x=0 and z=1",
"x=1 and z=0",
"x=1 and z=1"))
return(do)
})
# Make plot
output$oddsplot <- renderPlot({
ggplot(odds(), aes(y=Contrast, x=type)) +
geom_point() +
geom_errorbar(aes(ymin=Lower, ymax=Upper), width=0.2) +
# scale_y_log10() +
geom_hline(yintercept = 0, linetype=2) +
coord_flip() +
labs(list(
title = 'log(odds) by group',
x = "Groups",
y = "log(odds)")) +
theme_bw()
})
# extract data for 2nd iteration of plot -- with odds ratios
# .........................................................
# Make a plot of marginal odds ratios and crude odds ratio
# ..............................................
# extract data
odds.2 <- reactive({
# get estimate of odds ratios with se for intervals in plot
odds.crude <- contrast(results.none(),
list(x = 1),
list(x = 0)) # difference in log odds of x=1 and x=0
odds.z0 <- contrast(results1(),
list(x = 1, z=0),
list(x = 0, z=0)) # difference in log odds of x=1 and x=0
odds.z1 <- contrast(results1(),
list(x = 1, z=1),
list(x = 0, z=1)) # difference in log odds of x=1 and x=0
odds.2 <- cbind.data.frame(
type=c( "Strata, z=0", "Crude", "Strata, z=1"),
rbind.data.frame( odds.z0[c(2:5)],
odds.crude[c(1:4)],
odds.z1[c(2:5)])
)
odds.2 = within(odds.2, {
OR.lower = exp(Contrast-1.96*SE)
OR = exp(Contrast)
OR.upper = exp(Contrast+1.96*SE)
})
odds.2$type = factor(odds.2$type, levels=c("Strata, z=0",
"Crude",
"Strata, z=1")) # change order of type
return(odds.2)
})
# Make 2nd plot
output$oddsplot.2 <- renderPlot({
ggplot(odds.2(), aes(y=OR, x=type)) +
geom_point() +
geom_errorbar(aes(ymin=OR.lower, ymax=OR.upper), width=0.2) +
scale_y_log10() +
geom_hline(yintercept = 0, linetype=2) +
coord_flip() +
labs(list(
title = 'Odds ratios by strata and crude in model with multiplicative interaction.',
x = "Groups",
y = "Odds ratios")) +
theme_bw()
})
# Create a dag to include near model
# see http://shiny.rstudio.com/articles/images.html
output$myImage <- renderImage({
# A temp file to save the output.
# This file will be removed later by renderImage
outfile <- tempfile(fileext='.png')
# Generate the PNG
png(outfile, width=400, height=300)
# revised version to match what I need for the html
par(mar = c(0.5, 0.5, 0.5, 0.5))
openplotmat()
elpos <- coordinates (c(1, 2), mx = 0.1, my = -0.1)
straightarrow(from = elpos[1, ], to = elpos[2, ], lty = 1, lcol = 1, arr.width=0.5, arr.length=0.5, arr.type="triangle")
straightarrow(from = elpos[2, ], to = elpos[3, ], lty = 1, lcol = 1, arr.width=0.5, arr.length=0.5, arr.type="triangle")
straightarrow(from = elpos[1, ], to = elpos[3, ], lty = 1, lcol = 1, arr.width=0.5, arr.length=0.5, arr.type="triangle")
textrect (elpos[1,], 0.05, 0.05, lab = "z", cex = 1.5)
textrect (elpos[2,], 0.05, 0.05, lab = "x", cex = 1.5)
textrect (elpos[3,], 0.05, 0.05, lab = "y", cex = 1.5)
dev.off()
# Return a list containing the filename
list(src = outfile,
contentType = 'image/png',
width = 400,
height = 300,
alt = "DAG")
}, deleteFile = TRUE)
# output odds ratios to table by strata of x and z
# ###########################################################
## odds and 95% CI
# ########################
tableodds <- reactive({
odds.4.00 <- contrast(results1(), a = list(x = 0, z=0))
odds.4.01 <- contrast(results1(), a = list(x = 0, z=1))
odds.4.10 <- contrast(results1(), a = list(x = 1, z=0))
odds.4.11 <- contrast(results1(), a = list(x = 1, z=1))
odds <- cbind.data.frame(
type=c("00", "01", "10", "11"),
rbind.data.frame(
odds.4.00[c(3:6)],
odds.4.01[c(3:6)],
odds.4.10[c(3:6)],
odds.4.11[c(3:6)])) # this is estimate of log odds
odds$divide <- exp(odds$Contrast -odds$Contrast[1]) # this is odds ratio (log difference in log odds relative to odds of 00, considered baseline)
# make groups for or
odds$xvar <- ifelse(odds$type %in% c("10", "11"), 1 ,0)
odds$zvar <- ifelse(odds$type %in% c("01", "11"), 1 ,0)
# both estimates are differences in log odds. take exp of qty to get odds ratio
# make a 2 by 2 table of odds ratios
# ##################################
dcast(odds, xvar~zvar, value.var="divide", fun.aggregate=mean)
})
output$to.1 <- renderTable({
tableodds()
})
# ###############################################
# Get an icr value based on odds ratios
# make ICR
# #####################
icr <- reactive({
or.01 <- contrast(results1(), list(x = 0, z=1),
list(x = 0, z=0))
or.10 <- contrast(results1(), list(x = 1, z=0),
list(x = 0, z=0))
or.11 <- contrast(results1(), list(x = 1, z=1),
list(x = 0, z=0))
icr.1 = exp(or.11$Contrast) - exp(or.10$Contrast) - exp(or.01$Contrast) + 1
icr.1
expected.11 <- exp(or.10$Contrast) + exp(or.01$Contrast) - 1
return(list(icr.1=icr.1, expected.11 = expected.11, or.11=or.11$Contrast))
})
output$texticr <- renderText({
paste("The ICR is: ", round(icr()$icr.1,digits=2))
})
output$texticr.2 <- renderText({
paste("The expected OR for '11' group is: ", round(icr()$expected.11, digits=2),
", and the observed OR for '11' group (with no confounding) is: ", round(exp(icr()$or.11), digits=2))
})
# Output parameter values
# ##############################
output$textn <- renderText({
paste("The sample size is: ", input$n1)
})
output$text0 <- renderUI({
x<-input$beta0
y<-input$beta1
z<-input$beta2
z2 <- input$beta3
withMathJax(
sprintf("\\(
\\beta_0 = %.02f ,
\\beta_1 = %.02f ,
\\beta_2 = %.02f ,
\\beta_3 = %.02f \\)", x, y, z, z2)
)
})
output$text0i <- renderUI({
x <- exp(input$beta0)
y <- exp(input$beta1)
z <- exp(input$beta2)
z2 <- exp(input$beta3)
withMathJax(
sprintf("\\(
exp(\\beta_0) = %.02f = \\text{odds of y at x=0 and z=0,}
\\)",
x))
})
output$text1i <- renderUI({
y <- exp(input$beta1)
withMathJax(
sprintf("\\(
exp(\\beta_1) = %.02f = \\text{odds ratio for x=1 vs x=0 with no interaction,}
\\)",
y))
})
output$text2i <- renderUI({
z <- exp(input$beta2)
withMathJax(
sprintf("\\(
exp(\\beta_2) = %.02f = \\text{odds ratio for z=1 vs z=0 with no interaction,}
\\)",
z))
})
output$text3i <- renderUI({
z2 <- exp(input$beta3)
withMathJax(
sprintf("\\(
exp(\\beta_3) = %.02f = \\text{interaction term}
\\)",
z2))
})
output$textc <- renderText({
paste("Confounding status is: ", input$conf)
})
output$texti <- renderText({
paste("Interaction status is: ", input$interact)
})
#output$eqn2 <- renderText("y = β_1*x")
# see http://shiny.rstudio.com/gallery/mathjax.html
# have to be careful with font sizes.
output$eqn1 <- renderUI({
withMathJax(
helpText('\\( \\text{logit(p) = } \\left(\\frac{p}{1-p}\\right) \\text{ = } \\beta_0 + \\beta_1 \\cdot x + \\beta_2 \\cdot z + \\beta_3 \\cdot x \\cdot z\\)')
)
})
output$eqn2 <- renderUI({
withMathJax(
helpText('\\( \\text{logit(p) = } \\left(\\frac{p}{1-p}\\right) \\text{ = } \\beta_0 + \\beta_1 \\cdot x + \\beta_2 \\cdot z
\\)')
)
})
output$textconf <- renderUI({
withMathJax(
helpText('\\( \\text{logit(z) = 0 + 3}\\cdot\\text{x} \\)'
))
})
}) |
679d0c39bc53fa6e994d7cd47a3be1c300790a50 | a3da395d683014c2f04a4491f5cf3214076a82f6 | /Extracting locations.R | e3ec13753647565caf1ae25eb2012991aad43ca8 | [] | no_license | VetMomen/UBM | f2f9ba780a8ce09aba3a2c64c29ba06d93e49a37 | d4af7f433144b4f7bf62335ee8e4ad6bb1997495 | refs/heads/master | 2020-04-02T02:25:25.759831 | 2019-03-30T13:44:54 | 2019-03-30T13:44:54 | 151,887,463 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 367 | r | Extracting locations.R | gpx<-read_html(x = './data sets/locations.gpx')
Names<-gpx%>%html_nodes(xpath = '//wpt/name')%>%html_text()
loc<-gpx%>%html_nodes(xpath = '//wpt')%>%html_attrs()%>%data.frame()%>%t()
rownames(loc)<-NULL
Locations<-data.frame(Names,loc)
Sys.setlocale('LC_ALL','Arabic')
writeWorksheetToFile(file = './data sets/Locations.xlsx',data = Locations,sheet = 'locations')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.