blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5e6185937fd38446d3bef13a2f885d83d1e3999f | 435bc43332fbfda1be7d5e0ffde618114409adee | /r/关系网6.R | 88a71f5bfb74b960afe8b106b66e550ce465987e | [] | no_license | saffronzhang/REmotionSIR | cb81925a8d0b761b40c5f3820538720a8f64c256 | 3cc38b3fa1923a1633ea8ef1b582d95c6a828e3e | refs/heads/master | 2021-01-06T20:35:36.557598 | 2017-08-07T03:01:14 | 2017-08-07T03:01:14 | 99,526,595 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 355 | r | 关系网6.R | init.data <- read.csv(file = paste("/Users/xu/Desktop/1.csv", sep = ""), header = F)
data <- data.frame(from = init.data[,2], to = init.data[,1])
g <- init.igraph(data, dir = F,rem.multi = T)
svg(filename = paste("/Users/xu/Desktop/1.svg",width=200,height= 200))
plot(g, vertex.size = 1, layout= layout.fruchterman.reingold, vertex.label = NA)
dev.off()
|
7cf0f0adfb2d3b8630f61040a9a877e1d8340260 | b2f61fde194bfcb362b2266da124138efd27d867 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1+A1/Database/Miller-Marin/fpu/fpu-10Xh-error01-uniform-depth-23/fpu-10Xh-error01-uniform-depth-23.R | 57a14fa5322ee2043d0000fc634c795e620c33d0 | [] | no_license | arey0pushpa/dcnf-autarky | e95fddba85c035e8b229f5fe9ac540b692a4d5c0 | a6c9a52236af11d7f7e165a4b25b32c538da1c98 | refs/heads/master | 2021-06-09T00:56:32.937250 | 2021-02-19T15:15:23 | 2021-02-19T15:15:23 | 136,440,042 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 89 | r | fpu-10Xh-error01-uniform-depth-23.R | cc3c32f4b3e07a2ad50fc0ebef57c1e3 fpu-10Xh-error01-uniform-depth-23.qdimacs 619393 1654101 |
b5dac81336e75139708680208361d5d4f9538898 | 8dda6c31530411c6328481a3b63ac0bb27320741 | /R scripts/code_for_week8_part2.R | 0e46b300510cffd8f719173ecf870b2f6ab4280e | [
"MIT"
] | permissive | guhanavel/DSA1101-Introduction-to-Data-Science | fc1ccf855a2313a9ab8e04a03094cd9031755ae3 | 2c5e7f1ef53c2cac77ab3ad20c6d9e2a78e2ac15 | refs/heads/master | 2022-04-17T16:18:39.661539 | 2020-03-17T15:24:39 | 2020-03-17T15:24:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 523 | r | code_for_week8_part2.R | library("rpart") # load libraries
library("rpart.plot")
play_decision <- read.table("DTdata.csv",header=TRUE,sep=",")
head(play_decision)
fit <- rpart(Play ~ Outlook + Temperature + Humidity + Wind,
method="class",
data=play_decision,
control=rpart.control(minsplit=1),
parms=list(split='information'))
rpart.plot(fit, type=4, extra=2)
#Prediction
newdata <- data.frame(Outlook="rainy", Temperature="mild",
Humidity="high", Wind=FALSE)
predict(fit,newdata=newdata,type="prob")
predict(fit,newdata=newdata,type="class") |
511c5aac1736e01403e6dd3f62936420264b7ba5 | 2e5bcb3c8028ea4bd4735c4856fef7d6e46b5a89 | /man/isSnpChip.AffymetrixCdfFile.Rd | 3442376d4ef41caa1f4b29fd536fa493d3eec8ff | [] | no_license | HenrikBengtsson/aroma.affymetrix | a185d1ef3fb2d9ee233845c0ae04736542bb277d | b6bf76f3bb49474428d0bf5b627f5a17101fd2ed | refs/heads/master | 2023-04-09T13:18:19.693935 | 2022-07-18T10:52:06 | 2022-07-18T10:52:06 | 20,847,056 | 9 | 4 | null | 2018-04-06T22:26:33 | 2014-06-15T03:10:59 | R | UTF-8 | R | false | false | 971 | rd | isSnpChip.AffymetrixCdfFile.Rd | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% AffymetrixCdfFile.SNPs.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{isSnpChip.AffymetrixCdfFile}
\alias{isSnpChip.AffymetrixCdfFile}
\alias{AffymetrixCdfFile.isSnpChip}
\alias{isSnpChip,AffymetrixCdfFile-method}
\title{Static method to check if a chip is a mapping (SNP) chip}
\description{
Static method to check if a chip is a mapping (SNP) chip.
}
\usage{
\method{isSnpChip}{AffymetrixCdfFile}(this, ...)
}
\arguments{
\item{...}{Not used.}
}
\value{
Returns \code{\link[base:logical]{TRUE}} if the chip type refers to a SNP array, otherwise \code{\link[base:logical]{FALSE}}.
}
\author{Henrik Bengtsson}
\seealso{
For more information see \code{\link{AffymetrixCdfFile}}.
}
\keyword{internal}
\keyword{methods}
|
b696e9f4871daef8c56e122865e22d3ecfdfbba4 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/gets/examples/printtex.Rd.R | 17e1674e86de0b995a573bb43293f4a447af546a | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 619 | r | printtex.Rd.R | library(gets)
### Name: printtex
### Title: Generate LaTeX code of an estimation result
### Aliases: printtex
### Keywords: Statistical Models Time Series Econometrics Financial
### Econometrics
### ** Examples
##simulate random variates, estimate model:
y <- rnorm(30)
mX <- matrix(rnorm(30*2), 30, 2)
mymod <- arx(y, mc=TRUE, ar=1:3, mxreg=mX)
##print latex code of estimation result:
printtex(mymod)
##add intercept, at the end, to regressor matrix:
mX <- cbind(mX,1)
colnames(mX) <- c("xreg1", "xreg2", "intercept")
mymod <- arx(y, mxreg=mX)
##set intercept location to 3:
printtex(mymod, intercept=3)
|
7956e643d1b93a87f90074c6fef8c8396ab4e69a | bb2ed5d41d05ed3e2b5b0b9375705a8412e006cd | /man/angle.Rd | be5a032f43641af052ed9bf791358fea1f58a53d | [] | no_license | cran/press | 1a155ff65de1a4f4f3604dd52092073fd3556873 | ce9a27232d0314e3cb562be03788b9e449efed8f | refs/heads/master | 2016-09-06T10:31:46.961260 | 2011-04-26T00:00:00 | 2011-04-26T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 818 | rd | angle.Rd | \name{angle}
\alias{angle}
\title{angle}
\description{A function for calculating bond angle and 1-3 distance for any pdb file.}
\usage{angle(id,pdb)}
\details{
An internal function, to do calculations of virtual angles and virtual 1-3 distances for an uploaded protein structure data.
}
\arguments{
\item{id}{a character string to describe the protein name, sometimes of length 4 and refer the protein ID as shown in PDB }
\item{pdb}{a .pdb file downloaed from PDB or generated by users}
}
\value{
\item{Angle}{ a numeric matrix including the columns for the computed virtual angles, res1-res3 distance, and the columns for the corresponding residue name}
}
\author{Yuanyuan Huang, Stephen Bonett, and Zhijun Wu}
\examples{
id<-"1ABA"
pdb<-read.pdb(id)
angle(id,pdb)
}
\keyword{internal function virtual bond angle}
|
ba59bd850394260fd0d4f74d095158ba3395b14f | 29585dff702209dd446c0ab52ceea046c58e384e | /pastecs/R/plot.turnpoints.R | 6401ba81dcadf701bc41eabeabd6e163d3bfc5b3 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 594 | r | plot.turnpoints.R | "plot.turnpoints" <-
function(x, level=0.05, lhorz=TRUE, lcol=2, llty=2, type="l", xlab="data number", ylab=paste("I (bits), level = ", level*100, "%", sep=""), main=paste("Information (turning points) for:",x$data), ...) {
# The next function actually draws the graph
turnpoints.graph <- function(X, Level, Lhorz, Lcol, Llty, Type, Xlab, Ylab, Main, Sub, ...) {
plot(X$tppos, X$info, type=Type, xlab=Xlab, ylab=Ylab, main=Main, ...)
abline(h=-log(Level, base=2), lty=Llty, col=Lcol)
}
invisible(turnpoints.graph(x, level[1], lhorz, lcol, llty, type, xlab, ylab, main, ...))
}
|
4816a3088ff100680dc3b50fd1713ca5e6023cdf | 97ae070890cec937092422654cbad941773f63ad | /man/pmmlTransformations.NormDiscreteXform.Rd | d4535c30adb527f906a2372b6c8034f63e672051 | [] | no_license | cran/pmmlTransformations | e98bd242edf6aec468d9d97e6c92c784bae31874 | 839fc91fa9bc3e84e3aceef76aacbafe0d422989 | refs/heads/master | 2021-01-10T20:36:17.055136 | 2019-06-11T21:20:04 | 2019-06-11T21:20:04 | 17,698,616 | 2 | 2 | null | null | null | null | UTF-8 | R | false | false | 3,574 | rd | pmmlTransformations.NormDiscreteXform.Rd | \name{NormDiscreteXform}
\alias{NormDiscreteXform}
\title{
Normalize discrete values in accordance to the PMML element:\cr \bold{NormDiscrete}
}
\description{
Define a new derived variable for each possible value of a categorical variable. Given a categorical
variable \bold{catVar} with possible discrete values \bold{A} and \bold{B}, this will create 2
derived variables \bold{catVar_A} and \bold{catVar_B}. If, for example, the input value of \bold{catVar}
is \bold{A} then \bold{catVar_A} equals 1 and
\bold{catVar_B} equals 0.
}
\usage{
NormDiscreteXform(boxdata, xformInfo=NA,
inputVar=NA, mapMissingTo=NA, ...)
}
\arguments{
\item{boxdata}{
the wrapper object obtained by using the WrapData function on the raw data.
}
\item{xformInfo}{
specification of details of the transformation: the name of the input variable to be transformed.
}
\item{inputVar}{
the input variable name in the data on which the transformation is to be applied
}
\item{mapMissingTo}{value to be given to the transformed variable if the value of the input variable is missing.}
\item{\dots}{
further arguments passed to or from other methods.
}
}
\details{
Given an input variable, \bold{InputVar} and \bold{missingVal}, the desired value of the transformed variable
if the input variable value is missing, the NormDiscreteXform command including all
optional parameters is in the format:
xformInfo="inputVar=input_variable, mapMissingTo=missingVal"
There are two methods in which the input variable can be referred to. The first method is to use its
column number; given the \bold{data} attribute of the \bold{boxData} object, this would be the order at
which the variable appears. This can be indicated in the format "column#". The second method is to refer
to the variable by its name.
The \bold{xformInfo} and \bold{inputVar} parameters provide the same information. While either one may be used
when using this function, at least one of them is required. If both parameters are given, the \bold{inputVar}
parameter is used as the default.
The output of this transformation is a set of transformed variables, one for each possible value of
the input variable. For example, given possible values of the input variable \bold{val1}, \bold{val2}, ... these
transformed variables are by default named \bold{InputVar_val1}, \bold{InputVar_val2}, ...
}
\value{
R object containing the raw data, the transformed data and data statistics.
}
\author{
Tridivesh Jena, Zementis, Inc.
}
\seealso{
\code{\link{WrapData}}
}
\examples{
# Load the standard iris dataset, already available in R
data(iris)
# First wrap the data
irisBox <- WrapData(iris)
# Discretize the "Species" variable. This will find all possible
# values of the "Species" variable and define new variables. The
# parameter name used here should be replaced by the new preferred
# parameter name as shown in the next example below.
#
# "Species_setosa" such that it is 1 if
# "Species" equals "setosa", else 0;
# "Species_versicolor" such that it is 1 if
# "Species" equals "versicolor", else 0;
# "Species_virginica" such that it is 1 if
# "Species" equals "virginica", else 0
irisBox <- NormDiscreteXform(irisBox,inputVar="Species")
# Exact same operation performed with a different parameter name.
# Use of this new parameter is the preferred method as the previous
# parameter will be deprecated soon.
irisBox <- WrapData(iris)
irisBox <- NormDiscreteXform(irisBox,xformInfo="Species")
}
\keyword{ manip }
|
3655be583ef977d67601180b99d859f0ff64fb4f | e4c44366d606a27749d0d230e316ef46cf41d6af | /ImmoDaten/Immo_SAR_benchmark_v2.R | e0949e8c93952183c72de243440c67fa46de85cc | [] | no_license | JohannesJacob/APA-SatelliteImages | fe53a34b0301a05a9618f8f206743bce10104242 | 8487188ae1d3e2e8cb4da5ee5d2df4a96bc14853 | refs/heads/master | 2020-04-05T12:09:02.217440 | 2017-08-31T13:40:12 | 2017-08-31T13:40:12 | 95,221,425 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,206 | r | Immo_SAR_benchmark_v2.R | # SAR benchmark code mit lagsarlm function
library(spdep)
df_street_noNA <- read.csv("Immo_preProcessed.csv")
df_street_noNA <- df_street_noNA[,-1]
# Convert data frame to a spatial object
spdf_street <- SpatialPointsDataFrame(coords = df_street_noNA[, c("lng", "lat")],
proj4string = CRS("+proj=longlat +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0"),
data = df_street_noNA)
coords <- coordinates(spdf_street)
# get distance matrix
IDs <- row.names(as(spdf_street, "data.frame"))
Sy8_nb <- knn2nb(knearneigh(coords, k = 1), row.names = IDs)
dsts <- unlist(nbdists(Sy8_nb, coords))
max_1nn <- max(dsts)
nb.1.5 <- dnearneigh(coords, d1 = 0, d2 = 1.5 * max_1nn, row.names = IDs)
knn.10 <- knearneigh(coords, k = 10)
knn.10 <- knn2nb(knn.10, row.names = IDs)
# plotting results
plot(nb2listw(knn.10, style="W"), coords)
# calculate matrix
#COL.lag.eig <- lagsarlm(price ~.,
# data=df_street_noNA[, -c(1:5,81,92,118:121,128,130,134,137:140,143,146,149,152,155:158,
# 126, 162,165,168,171,174,177:180,186,189,192:199)],
# nb2listw(knn.10, style="W"), method="eigen", quiet=FALSE)
#summary(COL.lag.eig, correlation=TRUE)
# df without multi-collinaer variables
df_fix <- df_street_noNA[, -c(1:5,81,92,118:121,128,130,134,137:140,143,146,149,152,155:158,
126, 162,165,168,171,174,177:180,186,189,192:199)]
write.csv(df_fix, "Immo_fix.csv")
# model definition
#simple ols
lm_model <- lm(price ~.,
data=df_fix)
#SAR model
spdf_sar <- lagsarlm(price ~.,
data=df_fix,
nb2listw(knn.10, style="W"), tol = 1.0e-30)
summary(spdf_sar)
summary.sarlm(spdf_sar, Nagelkerke = T) # Spatial Lag is significant and some of the features
# Checking whether we have auto-correlation
moran.mc(summary(spdf_sar)$residuals, nb2listw(knn.10, style="W"), 999)
# Yes, we have autocorrelation with a significance at the 1% level
# Save the model
saveRDS(spdf_sar, "models/sar_knn10.rds")
saveRDS(spdf_sar, "models/sar_delauney.rds")
saveRDS(lm_model, "models/simple_ols.rds")
|
069822e4778ad8c1b22a1a457f9dbddbe4de53f0 | 5e09e974799ee70418fcdaef875d7de122c93a49 | /plot4.r | 8bc9d26e05e2db80c8d72e512c60bc885688f087 | [] | no_license | kevinpatricksmith/ExData_Plotting1 | cdccb4bf77cc457a2c4384f628df72eeec1e4305 | d5a8bfe739f4ba9e3c64e544bfd7a627f4bb63e2 | refs/heads/master | 2021-01-12T21:52:45.458159 | 2015-03-07T17:04:38 | 2015-03-07T17:07:20 | 23,742,543 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,730 | r | plot4.r | ##########################################
# Reading the data and selecting a subset
##########################################
dataFileName <- "household_power_consumption.txt"
hpc <- read.delim(dataFileName, sep=";", na.strings=c("?"))
# We are only interested in data for two dates
hpcSubset <- subset(hpc, hpc$Date == "1/2/2007" | hpc$Date == "2/2/2007")
# Convert the Date and Time variables to a combined DateTime variable
hpcSubset$DateTime <- strptime(paste(hpcSubset$Date,hpcSubset$Time), "%d/%m/%Y %H:%M:%S")
########################################
# Construct the plot on the 480x480 png
# This avoids cut-offs with legends etc.
########################################
png("plot4.png", width = 480, height = 480)
# Grid of 2 x 2 plots
par(mfrow=c(2,2))
# Top left Plot
plot(hpcSubset$DateTime, hpcSubset$Global_active_power, type="l", ylab="Global Active Power (kilowatts)", xlab="")
# Top right plot
plot(hpcSubset$DateTime, hpcSubset$Voltage, type="l", ylab="Voltage", xlab="datetime")
# Bottom left plot
xlim <- range(c(hpcSubset$DateTime))
ylim <- range(c(hpcSubset$Sub_metering_1, hpcSubset$Sub_metering_2, hpcSubset$Sub_metering_3))
plot(hpcSubset$DateTime, hpcSubset$Sub_metering_1, type="l", ylab="Energy sub metering", xlab="", ylim=ylim)
lines(hpcSubset$DateTime, hpcSubset$Sub_metering_2, col="red")
lines(hpcSubset$DateTime, hpcSubset$Sub_metering_3, col="blue")
legend("topright", c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), col=c("black","red","blue"), lty=c(1,1,1), cex=0.75)
# Bottom right plot
plot(hpcSubset$DateTime, hpcSubset$Global_reactive_power, type="l", ylab="Global_reactive_power", xlab="datetime")
#################
# Save PNG file
#################
dev.off() |
d820003df2a374dc760b69f579e4b3123cae3659 | cb4830d307b9d076cbf80aa4a6db570b41dfa3c4 | /LearningR/Clase9/Modulo VII_EJERCICIOS.R | 007777d4da356592ce4cc5249f6df2a9fd8cc779 | [] | no_license | saulo-valdivia-v/BigDataAndAnalytics | e613d52188135842b1a7caf682bb17927971b144 | 5ec16fc467f48a4bf0897a2477fd68dbaaee6a9d | refs/heads/main | 2023-07-01T13:34:13.983630 | 2021-08-07T15:45:56 | 2021-08-07T15:45:56 | 393,715,668 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 457 | r | Modulo VII_EJERCICIOS.R | #Ejercicio Modulo VII: RMarkdown
#Convierte en documento HTML los ejercicios del dia 10.12.2020 con RMarkdown
encuesta <- read.table("encuesta.dat", header=T, sep="\t",dec=',')
#A?ade funciones generales en el metadata
#1.1. Haz un an?lisis que ponga a prueba si la fluidez verbal en espa?ol
#puede predecir la fluidez verbal en ingles
#Crea chunks para cada uno de los apartados de los analisis
#Proporciona una interpretacion escrita de los resultados |
6908d63c867987634805cf76e15833b68a3608be | 1cd0a4fb4fd5cf694b80927f1a652b20154cec27 | /man/enaUtility.Rd | 1417d44ddf34918d96850d935ceca2bfaa50d6fc | [] | no_license | enaVerse/enaR | 2b72e5e20a5c0f456eb2eca445706927513c9cc1 | 4ab6d0c1b7ef092f35c6936c181a7e8fc17021ce | refs/heads/master | 2021-07-04T07:24:32.270078 | 2017-03-10T20:47:55 | 2017-03-10T20:47:55 | 105,017,727 | 0 | 0 | null | 2017-09-27T12:51:13 | 2017-09-27T12:51:13 | null | UTF-8 | R | false | true | 4,475 | rd | enaUtility.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/enaUtility.R
\name{enaUtility}
\alias{enaUtility}
\title{enautility --- utility analysis of a flow network
INPUT = network object
OUTPUT = list of utility statistics}
\usage{
enaUtility(x, type = c("flow", "storage"), eigen.check = TRUE,
balance.override = FALSE, tol = 10)
}
\arguments{
\item{x}{a network object. This includes all weighted flows into and out of
each node. For the storage utility analysis this must also include the
amount of energy--matter stored at each node (biomass).}
\item{type}{Determines whether the flow or storage utility analysis is
returned.}
\item{eigen.check}{LOGICAL: should the dominant eigenvalue be checked. Like
enaFlow and enaStorage analyses, enaUtility analysis considers the utility
propigated over path lengths ranging for zero to infinity. For utility
analysis to work properly, the path sequence must converge. enaUtility
checks to see if the utility path sequence is convergent by finding the
dominant eigenvalue of the direct utility matrix. If this eigenvalue is
less than 1, the sequence is convergent and the analysis can be applied; if
the dominant eigenvalue is greater than one, then the anlysis cannot be
applied. By default, the function will not return utility values if the
eigenvalue is larger than one; however, if eigen.check is set to FALSE, then
the function will be applied regardless of the mathematic validity.}
\item{balance.override}{LOGICAL: should model balancing be ignored.
enaUtility assumes that the network model is at steady-state. The default
setting will not allow the function to be applied to models not at
steady-state. However, when balance.override is set to TRUE, then the
function will work regardless.}
\item{tol}{The integral utility matrix is rounded to the number of digits
specified in tol. This approximation eleminates very small numbers
introduced due to numerical error in the ginv function. It does not
eliminate the small numerical error introduced in larger values, but does
truncate the numbers.}
}
\value{
\item{D}{Direct flow utility intensity matrix. (fij-fji)/Ti for
i,j=1:n} \item{U}{Nondimensional integral flow utility} \item{Y}{Dimensional
integral flow utility} \item{ns}{If type is set to 'flow', this is a list of
flow utility network statistics including: the dominant eigenvalue of D
(lambda\_1D), flow based network synergism (synergism.F), and flow based
network mutualism (mutualism.F).} \item{DS}{Direct storage utility intensity
matrix. (fij-fji)/xi for i,j=1:n} \item{US}{Nondimensional integral storage
utility} \item{YS}{Dimensional integral storage utility} \item{ns}{If type
is set to 'storage', this is a list of storage utility network statistics
including: the dominant eigenvalue of DS (lambda_1DS), storage based network
synergism (synergism.S), and storage based network mutualism (mutualism.S).}
}
\description{
M. Lau | July 2011
---------------------------------------------------
enautility --- utility analysis of a flow network INPUT = network object
OUTPUT = list of utility statistics
}
\details{
M. Lau | July 2011 ---------------------------------------------------
enautility --- utility analysis of a flow network INPUT = network object
OUTPUT = list of utility statistics
M. Lau | July 2011 ---------------------------------------------------
Utility Analysis of Ecological Networks
Performs the flow and storage based utility analysis developed for
input-output network models of ecosystems. It returns a set of matrices for
the direct and integral utilities as well as a set of utility based network
statistics.
}
\examples{
data(troModels)
U <- enaUtility(troModels[[6]], type = "flow", eigen.check = FALSE)
attributes(U)
US <- enaUtility(troModels[[6]], type = "storage", eigen.check = FALSE)
}
\references{
Fath, B.D. and Patten, B.C. 1998. Network synergism: emergence
of positive relations in ecological systems. Ecol. Model. 107:127--143.
Fath, B.D. and Borrett, S.R. 2006. A Matlab function for Network Environ
Analysis. Environ. Model. Soft. 21: 375--405.
Patten, B.C. 1991. Network ecology: Indirect determination of the
life-environment relationship in ecosystems. In: Higashi, M. and Burns, T.
(eds). Theoretical Studies of Ecosystems: The Network Perspective. Cambridge
University Press. New York.
}
\seealso{
\code{\link{enaFlow},\link{enaStorage},\link{enaMTI}}
}
\author{
Matthew K. Lau Stuart R. Borrett
}
|
e01835d154d854e7e4096b23235e3b5f8723308f | 4bd57b8501d4326ecc06c1d1ea499935e1668d95 | /MASH-dev/SeanWu/MBITES/man/mbites_pReFeed_batch.Rd | 435f5726b2aca313c1870eb95bc4e08dfab20d43 | [] | no_license | aucarter/MASH-Main | 0a97eac24df1f7e6c4e01ceb4778088b2f00c194 | d4ea6e89a9f00aa6327bed4762cba66298bb6027 | refs/heads/master | 2020-12-07T09:05:52.814249 | 2019-12-12T19:53:24 | 2019-12-12T19:53:24 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 649 | rd | mbites_pReFeed_batch.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MBITES-Oogenesis.R
\name{mbites_pReFeed_batch}
\alias{mbites_pReFeed_batch}
\title{MBITES: Probability of Refeeding as Function of Egg Batch Size}
\usage{
mbites_pReFeed_batch()
}
\description{
Probability to re-enter blood feeding cycle after incomplete blood feeding given by \eqn{ \frac{2+rf_{b}}{1+rf_{b}}-\frac{e^{rf_{a}\times \frac{batch}{batch_{max}}}}{rf_{b}+e^{rf_{a}\times \frac{batch}{batch_{max}}}} }
This models mosquito propensity to take more blood if the egg batch is too small.
\itemize{
\item This method is bound to \code{MosquitoFemale$pReFeed()}
}
}
|
41178603f05cdcff8f3adefa0bfa255112501757 | 205bad3b5b7aeb85300e947a8db358b9536cb7b1 | /ensg/miashs/install.R | cf26fa0ef076986d5f7cd05e3b7617fab6e595fe | [] | no_license | philippe-preux/philippe-preux.github.io | 744e93a59076a74caf5aeec02fedb383257592f6 | 1e34ad659c214b215134177d8e01f3c4052bfef1 | refs/heads/master | 2023-08-31T03:37:21.383038 | 2023-08-18T13:00:36 | 2023-08-18T13:00:36 | 150,579,919 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,780 | r | install.R | paquets.a.installer.pour.pp <- c ("abind",
"acepack",
"ada",
"akima",
"alabama",
"animation",
"anim.plots",
"ape",
"aplpack",
"arules",
"autoencoder",
"bayesm",
"bdsmatrix",
"BH",
"bigmemory.sri",
"bitops",
"blowtorch",
"Bolstad",
"brew",
"cairoDevice",
"car",
"caTool",
"chron",
"CircStats",
"classInt",
"clue",
"cluster.datasets",
"clusterGeneration",
"coda",
"codetools",
"colorout",
"colorspace",
"combinat",
"cubature",
"darch",
"data.table",
"date",
"deepnet",
"Defaults",
"deldir",
"DEoptim",
"deSolve",
"DiagrammeR",
"dichromat",
"digest",
"discretization",
"doParallel",
"DPpackage",
"dr",
"e1071",
"eco",
"elasticnet",
"ElemStatLearn",
"ellipse",
"energy",
"evaluate",
"faraway",
"fda",
"fdrtool",
"FeatureHashing",
"fields",
"flsa",
"FNN",
"foba",
"foreach",
"formatR",
"Formula",
"gam",
"gamlss.data",
"gbm",
"gclus",
"gdata",
"gee",
"genalg",
"getopt",
"ggplot2",
"glasso",
"glmpath",
"GPArotation",
"gpclib",
"gplots",
"gridBase",
"gridExtra",
"gsl",
"gtable",
"gtools",
"gWidgets",
"HDclassif",
"highr",
"HistData",
"htmltools",
"htmlwidgets",
"httpuv",
"igraph",
"ineq",
"irlba",
"isa2",
"Iso",
"ISwR",
"iterators",
"jpeg",
"jsonlite",
"Kendall",
"kernlab",
"klaR",
"knitr",
"kohonen",
"KRLS",
"labeling",
"lars",
"lattice",
"latticeExtra",
"lava",
"leaps",
"LearnBayes",
"LiblineaR",
"linprog",
"lmeSplines",
"lmtest",
"locfit",
"LogicReg",
"logspline",
"lpSolve",
"lubridate",
"magrittr",
"maps",
"maptools",
"markdown",
"Matrix",
"matrixcalc",
"MatrixModels",
"maxLik",
"mboost",
"mcmc",
"MCMCpack",
"mda",
"mime",
"minqa",
"misc3d",
"miscTools",
"mitools",
"mix",
"mlbench",
"mlmRev",
"mnormt",
"MNP",
"modeltools",
"MPV",
"multcomp",
"multicool",
"multicore",
"munsell",
"mvnmle",
"mvtnorm",
"neuralnet",
"NMFN",
"nnls",
"nodeHarvest",
"nor1mix",
"nortest",
"np",
"numDeriv",
"nws",
"onion",
"optimbase",
"optimsimplex",
"optparse",
"outliers",
"pamr",
"partDSA",
"pbivnorm",
"pcaPP",
"penalized",
"penalizedLDA",
"permute",
"pkgmaker",
"plm",
"plotmo",
"plotrix",
"pls",
"plyr",
"pmml",
"png",
"polspline",
"polycor",
"PolynomF",
"ppls",
"ProDenICA",
"prodlim",
"profr",
"proto",
"proxy",
"pscl",
"pspline",
"psy",
"psych",
"qrnn",
"quadprog",
"quantmod",
"quantreg",
"quantregForest",
"R6",
"RandomFields",
"RandomFieldsUtils",
"randomForest",
"randtoolbox",
"RANN",
"RArcInfo",
"rARPACK",
"RColorBrewer",
"Rcpp",
"RcppArmadillo",
"RcppEigen",
"Rcsdp",
"rCUR",
"RCurl",
"rda",
"readxl",
"recommenderlab",
"recommenderlabBX",
"registry",
"relations",
"relaxo",
"relimp",
"reshape",
"reshape2",
"rFerns",
"rggobi",
"rgl",
"Rglpk",
"RGtk2",
"rlecuyer",
"rmeta",
"R.methodsS3",
"rngtools",
"rngWELL",
"R.oo",
"RRF",
"Rsolnp",
"rstudioapi",
"Rsymphony",
"RUnit",
"R.utils",
"rworldmap",
"SAENET",
"sandwich",
"scales",
"scatterplot3d",
"SDDA",
"segmented",
"SenSrivastava",
"seriation",
"sets",
"sfsmisc",
"shapefiles",
"shiny",
"slam",
"sm",
"smoothSurv",
"som",
"sos",
"sp",
"spam",
"sparseLDA",
"SparseM",
"spdep",
"splancs",
"spls",
"stabs",
"statmod",
"stepPlr",
"stringi",
"stringr",
"strucchange",
"subselect",
"superpc",
"svd",
"svmpath",
"synchronicity",
"tcltk2",
"TeachingDemos",
"tensorA",
"TH.data",
"timeDate",
"timeSeries",
"tis",
"tkrplot",
"tm",
"TraMineR",
"treelet",
"tripack",
"truncnorm",
"truncreg",
"TSA",
"tseries",
"tsne",
"TSP",
"TTR",
"tweedie",
"urca",
"vcd",
"vegan",
"VGAM",
"WriteXLS",
"xgboost",
"XML",
"xtable",
"xts",
"yaImpute",
"yaml",
"Zelig",
"zipfR",
"zoo")
paquets.a.installer.pour.ss <- c ("tidyverse", "nycflights13", "gapminder", "Lahman")
la.liste.des.paquets.a.installer <- c (paquets.a.installer.pour.pp, paquets.a.installer.pour.ss)
#install.packages (la.liste.des.paquets.a.installer, dep = T,
# lib = "/usr/local/lib/R/site-library",
# repos = "http://cran.univ-paris1.fr/")
|
9a57a15c0e55c7d3fdd54a9979e0783697ae808e | 0c7c74342fd5a89b06457bc123983ed26a054ff6 | /practice/day21secA (1).R | 7a1b386982a9758eecd3425866e30c0a9044b6d8 | [] | no_license | jessica7890/R-visualization- | d4b3ef02fc361c5281a2644f284afc08a6f77990 | c13cfbf215909c3074b766c5fa7df802f78667ba | refs/heads/master | 2022-09-10T04:46:42.856432 | 2020-05-26T21:17:45 | 2020-05-26T21:17:45 | 267,121,512 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,938 | r | day21secA (1).R | # ----Day21--Week 07------#
# More Maps, Shapefiles #
#-------------------------#
library(tidyverse)
library(maps)
#------ Data Preparation -----------#
world_map_data <- map_data("world")
library(gapminder)
country2007 <- gapminder %>%
filter(year==2007)
##############################
# 1. Nesting data with tidyr #
##############################
## What if we store the data a little differently, put all outline in single row next to state name?
## - problem: we have never put a complex object into a data cell before, but tibble can handle it
world_data <- world_map_data %>%
group_by(region) %>%
nest()
world_data$data[[2]] # outlines
test <- world_map_data %>%
filter(region=="Afghanistan")
head(world_data$data[[which(world_data$region=="Turkey")]])
?which
head(test)
head(world_data$data[[2]])
world_data$data[[2]]$long # looks like values are rounded, but they arn't
world_all <- left_join(world_data,country2007, by=c("region"="country"))
head(world_all)
## The gain here is that the storage size is much smaller without the redundancy of
## having the gapminder values repeated for each boundary outline.
?unnest
head(unnest(world_all))
head(unnest(world_all,cols="data"))
library(ggthemes)
lifeExpmap <- ggplot() +
geom_polygon(aes(x=long, y=lat,
fill=lifeExp, group=group),
data=unnest(world_all,cols=c(data)))+
coord_map(xlim=c(-180,180)) + # What's wrong with it?
# issue with the coord_map(), see https://stackoverflow.com/questions/30360830/map-in-ggplot2-visualization-displaying-bug/30463740#30463740
theme_map()+
theme(legend.position = c(0.05,0.1))
lifeExpmap
#######################################
# 2. Adding layer of labels to states #
#######################################
?sort
sort(unique(world_all$lifeExp))
world_lab <- world_all %>%
filter(lifeExp>81.702) %>%
unnest(data) %>% #"data", c(data)
summarise(life=round(lifeExp[1],1),#round(var,#of decimal points)
long=mean(range(long)),
lat=mean(range(lat)))
?range
world_lab
lifeExpmap+
geom_text(aes(x=long,y=lat,label=life),data=world_lab)
##################################################
# 3. Heatmap with approximate geographic layouts #
##################################################
## using statebins package
#install.packages("statebins")
library(statebins)
?statebins
?statebins_continuous
states_stats <- read.csv("http://kmaurer.github.io/documents/data/StateStatsBRFSS.csv")
# Downloaded from Dr. Maurer's github page.
str(states_stats)
# Needs state names with capitalized first letter or as postal abbreviation
states_stats$StateName <- str_to_title(states_stats$StateName)
# make binned state plot
statebins_continuous(states_stats,
state_col = "",
value_col = "")
# More examples:
# http://rstudio-pubs-static.s3.amazonaws.com/332155_761cb4672f7644d290084eca9c195ed5.html
|
bf26efd2f7c8ef99bf948131e675c8e08197d2e6 | 951c6a55c992858bc2f10e0744aedd0d190899b7 | /tests/testthat.R | 9f606e06a4f360a01c70454013fc14babf135678 | [] | no_license | Lucaweihs/RIM | 9820f217750cce98e983e6a1ef70a820ebd0c626 | 8957a8d612978e8816dffdcc14ed7d5ffc73e99e | refs/heads/master | 2020-12-26T04:49:21.429208 | 2017-11-11T23:30:15 | 2017-11-11T23:30:15 | 35,631,745 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 50 | r | testthat.R | library(testthat)
library(RIM)
# Run every testthat test under tests/testthat/ for the RIM package.
test_check("RIM")
|
5b529ef3602fd28aef8f7ef44939a1e206ab3f07 | 4b0e2bea643405f9a1f8799538c4c4ecf60e8013 | /Workshop/scripts/ep03-working-with-data.R | 7ea38d7926839a12d9589f66a246d6921cb59e1c | [] | no_license | sattwell/2020-09-21-intro-to-r-workshop | e3123255f4b2b6830c20cd8984c8566f6ec1334e | 17fead2f1706d31b9fbe3543b15cad7dc8617127 | refs/heads/master | 2022-12-22T15:21:06.224531 | 2020-09-25T02:53:42 | 2020-09-25T02:53:42 | 297,182,788 | 0 | 0 | null | 2020-09-20T23:36:07 | 2020-09-20T23:36:06 | null | UTF-8 | R | false | false | 6,052 | r | ep03-working-with-data.R | #####################
# MANIPULATING DATA #
# using #
# TIDYVERSE #
#####################
#
#
# Based on: https://datacarpentry.org/R-ecology-lesson/03-dplyr.html
# Data is available from the following link (we should already have it)
download.file(url = "https://ndownloader.figshare.com/files/2292169",
destfile = "data_raw/portal_data_joined.csv")
#---------------------
# Learning Objectives
#---------------------
# Describe the purpose of the dplyr and tidyr packages.
# Select certain columns in a data frame with the dplyr function select.
# Select certain rows in a data frame according to filtering conditions with the dplyr function filter .
# Link the output of one dplyr function to the input of another function with the ‘pipe’ operator %>%.
# Add new columns to a data frame that are functions of existing columns with mutate.
# Use the split-apply-combine concept for data analysis.
# Use summarize, group_by, and count to split a data frame into groups of observations, apply summary statistics for each group, and then combine the results.
# Describe the concept of a wide and a long table format and for which purpose those formats are useful.
# Describe what key-value pairs are.
# Reshape a data frame from long to wide format and back with the pivot_wider and pivot_longer commands from the tidyr package.
# Export a data frame to a .csv file.
#----------------------
#------------------
# Lets get started!
#------------------
install.packages("tidyverse")
library(tidyverse)
#dplyr and tidyr
#load the dataset
surveys <- read_csv("data_raw/portal_data_joined.csv")
#check structure
str(surveys)
#trying it out
#---------------------------
#test
#sample
#-------------------------
#-----------------------------------
# Selecting columns & filtering rows
#-----------------------------------
select(surveys, plot_id,species_id,weight)
select(surveys, -record_id, -species_id)
#filter for a particular year
filter(surveys, year==1995)
surveys_1995 <- filter(surveys, year==1995)
surveys2 <- filter(surveys, weight < 5 )
surveys_sml <- select(surveys2, species_id,sex, weight)
#combined the two
surveys_sml <-select(filter(surveys, weight < 5), species_id, sex, weight)
#-------
# Pipes
#-------
# The pipe --> %>%
#Shortcut --, Ctrl+ shift+ m or command + shift + m
surveys %>%
filter(weight <5) %>%
select(species_id, sex, weight)
#assigned to surveys_sml
# Assign the piped result to surveys_sml.
# Bug fix: the original line ended with a stray trailing `<-`, which is a
# syntax error (an assignment arrow with nothing to assign).
surveys_sml <- surveys %>%
  filter(weight < 5) %>%
  select(species_id, sex, weight)
#-----------
# CHALLENGE
#-----------
# Using pipes, subset the ```surveys``` data to include animals collected before 1995 and
# retain only the columns ```year```, ```sex```, and ```weight```.
surveys_1995 <- surveys %>%
filter(year < 1995) %>%
select(year, sex, weight)
#ordering your columns DOES matter
#--------
# Mutate
#--------
surveys%>%
mutate(weight_kg = weight/ 1000,
weight_lb =weight * 2.2)
surveys_weights <-surveys%>%
mutate(weight_kg = weight/ 1000,
weight_lb =weight * 2.2)
# Preview the first rows of the transformed data.
# Bug fix: head() must receive the pipeline's result via %>% — a bare head()
# on its own line is called with no arguments and errors (see the correct
# pattern used further below with filter/mutate %>% head()).
surveys %>%
  mutate(weight_kg = weight / 1000,
         weight_lb = weight * 2.2) %>%
  head()
# Preview the last rows of the transformed data.
# Bug fix: tail() must receive the pipeline's result via %>% — a bare tail()
# on its own line is called with no arguments and errors.
surveys %>%
  mutate(weight_kg = weight / 1000,
         weight_lb = weight * 2.2) %>%
  tail()
surveys %>%
filter(!is.na(weight)) %>%
mutate(weight_kg= weight/ 1000) %>%
head()
filter(length !="")
#-----------
# CHALLENGE
#-----------
# Create a new data frame from the ```surveys``` data that meets the following criteria:
# contains only the ```species_id``` column and a new column called ```hindfoot_cm``` containing
# the ```hindfoot_length``` values converted to centimeters. In this hindfoot_cm column,
# there are no ```NA```s and all values are less than 3.
# Hint: think about how the commands should be ordered to produce this data frame!
surveys_new <-surveys %>%
filter(!is.na(hindfoot_length)) %>%
mutate(hindfoot_cm = hindfoot_length / 10) %>%
filter(hindfoot_cm < 3) %>%
select(species_id, hindfoot_cm)
#---------------------
# Split-apply-combine
#---------------------
#-----------
# CHALLENGE
#-----------
# 1. How many animals were caught in each ```plot_type``` surveyed?
# 2. Use ```group_by()``` and ```summarize()``` to find the mean, min, and max hindfoot length
# for each species (using ```species_id```). Also add the number of observations
# (hint: see ```?n```).
# 3. What was the heaviest animal measured in each year?
# Return the columns ```year```, ```genus```, ```species_id```, and ```weight```.
#-----------
# Reshaping
#-----------
#-----------
# CHALLENGE
#-----------
# 1. Spread the surveys data frame with year as columns, plot_id as rows,
# and the number of genera per plot as the values. You will need to summarize before reshaping,
# and use the function n_distinct() to get the number of unique genera within a particular chunk of data.
# It’s a powerful function! See ?n_distinct for more.
# 2. Now take that data frame and pivot_longer() it again, so each row is a unique plot_id by year combination.
# 3. The surveys data set has two measurement columns: hindfoot_length and weight.
# This makes it difficult to do things like look at the relationship between mean values of each
# measurement per year in different plot types. Let’s walk through a common solution for this type of problem.
# First, use pivot_longer() to create a dataset where we have a key column called measurement and a value column that
# takes on the value of either hindfoot_length or weight.
# Hint: You’ll need to specify which columns are being pivoted.
# 4. With this new data set, calculate the average of each measurement in each year for each different plot_type.
# Then pivot_wider() them into a data set with a column for hindfoot_length and weight.
# Hint: You only need to specify the key and value columns for pivot_wider().
#----------------
# Exporting data
#----------------
|
661a39e8a5d9ea9b7c6f0e9f7da1057f455bec66 | 50736499067f7ceba0955914c44f16f9c98d6413 | /AlkoholAnaliza/ASkripta.R | 278c0b54b123d5e5fe7c245fdce53663f5881340 | [] | no_license | NovakTesic/AnalizaR | 7c0cec8ad67afd69f8252c6904f8734e5e1d43ad | 595cb5658d60f62aa56c5892b12d3b5fe2b20d32 | refs/heads/master | 2022-04-03T13:31:27.863268 | 2020-02-17T08:41:06 | 2020-02-17T08:41:06 | 237,653,528 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 16,601 | r | ASkripta.R | #### OPŠTE INFORMACIJE O BAZI ####
# Baza podataka je preuzeta sa sledećeg linka:
# https://github.com/fivethirtyeight/data/tree/master/alcohol-consumption
# U datoj bazi možemo pronaći podatke o potrošnji alkohola u 193 zemlje tokom 2010. godine.
# Potrošnja alkohola je iskazana kroz četiri varijable:
# 1. prosečna potrošnja piva po glavi stanovnika,
# koja je izražena kroz broj konzumiranih limenki piva (cans of beer),
# u ovoj anlizi ova varijabla će nositi naziv potrošnja limenki piva;
# 2. prosečna potrošnja vina po glavi stanovnika,
# koja je izražena kroz broj konzumiranih čaša vina(glasses of wine),
# u ovoj anlizi ova varijabla će nositi naziv potrošnja čaša vina;
# 3. prosečna potrošnja žestokih pića po glavi stanovnika
# koja je izražena kroz broj konzumiranih čašica žestokih pića (shots of spirits),
# u ovoj anlizi ova varijabla će nositi naziv potrošnja čašicia žestokih pića:
# 4. prosečan unos čistog alkohola po glavi stanovnika, koji je izražen u litrima,
# i u ovoj analizi ova varijabla će nositi naziv unos čistog alkohola.
# Važno je napomenuti da su čaše, čašice i limenke samo standardizovane mere
# za uobičajeni način konzumiranja datih pića a ne pravi podaci o načinu njihovog konzumiranja.
# Primera radi ako je neko popio dve litre piva to je iskazano preko 4 limenke,
# bez obzira na to da li je to pivo u realnosti konzumirano putem krigle, flaše, ili limenke.
#### UČITAVANJE I UPOZVAVANJE SA KARATERISTIKAMA BAZE ####
# originalna baza
orig_baza <- read.csv("data/drinks.csv", stringsAsFactors = FALSE)
# baza za sređivanje
baza_n <- orig_baza
# Upoznavanje sa osnovnim karateristikama baze
ncol(baza_n)
nrow(baza_n)
tail(baza_n, 10)
head(baza_n, 10)
summary(baza_n)
str(baza_n)
##### SREĐIVANJE BAZE ####
# 1. Provera prisutnosti nedostajućih vrednosti.
is.na(baza_n) # klasičan način
sum(is.na(baza_n)) # pregledniji način, ako je vrednost nula onda ih nema
# Očigledno da nema nedostajućih vrednosti u NA formatu.
# Ali to ne znači da one stvarno ne postoje.
# Sve opservacije koje imaju vrednost nula,
# su potencijalno nedostajuće vrednosti ili rezultat lošeg merenja.
# Primera radi teško je zamisliti zemlju u kojoj stvarno nema nikakve potrošnje alkohola,
# i gde nijedan njen stanovnik ne konzumira ni kap piva, vina ili žestokih pića.
# Čak i kad bi postojala zabrana konzumiranja pića,
# to ne znači da bi se ona nužno poštovala u realnim okolnostima.
# Zbog toga smatram je potrebno eliminisati sve observacije tog tipa.
# 2. Proces eliminacije
baza1 <- replace(baza_n, baza_n == 0, NA)
baza <- na.omit(baza1)
summary(baza)
# Prvo su zamenjene sve 0 vrednosti sa NA vrednostima.
# Nakon toga je primenjena funkcija na.omit,
# koja eliminiše sve observacije koje imaju vrednos NA.
# Na ovaj način je eliminisatno 38 zemalja,
# sa potencijalno "problematičim vrednostima".
# Broj elimnisanih zemalja može delovati kao preobiman,
# ali ne treba zaboraviti da da je,
# ova baza obuhvatila skoro sve zemlje sveta i da samim tim
# imamo idalje prilično dobar uzorak,
# u kome zasigurno nema nedostajućih vrednosti.
#### ZANIMLJIVOSTI ####
# U ovom delu su istaktnuti određeni zanimlijvi podaci,
# koji nisu rezultat ozbiljne statističke analize,
# već prostog izvlačenja podataka iz baze.
# Pa samim tim nemaju status istraživačkog nalaza,
# već zanimlijve, a možda i korisne infomracije.
# Za sve podatke koji će u ovom delu biti istaknuti,
# se podrazumeva da je potrošnja računata po glavi stanovnika.
# 1. Gde je najviše konzumiran alkohol tokom 2010. godine,
# U Srbiji, Bugarskoj ili Rusiji?
baza[baza$country == "Serbia", ]
baza[baza$country == "Bulgaria", ]
baza[baza$country == "Russian Federation", ]
# Očigledno je najviše konzumiran u Rusiji (11.5 litara čistog alkohola),
# u kojoj dominira potrošnja čašica žestokih pića (326).
# Mada je zanimljiv podatak da je u Srbiji
# konzumirano znatno više limenki piva (283) i čaša vina (127),
# u odnosu na Rusiju (247/73) i Bugarsku (231/94).
# 2. U kojoj zemlji je najviše konzumirano pivo tokom 2010. godine?
baza[(which.max(baza$beer_servings)), c(1,2)]
# Odgovor je pomalo začuđujući, reč je o Namibiji.
# 3. U kojoj zemlji je najviše konzumirano vino tokom 2010. godine?
baza[(which.max(baza$wine_servings)), c(1,4)]
# Odgovor ne bi trebalo da nas čudi, reč je o Francuskoj koja je poznata po vinima.
# 4. U kojoj zemlji su najviše konzumirana žestoka pića tokom 2010. godine?
baza[(which.max(baza$spirit_servings)), c(1,3)]
# Odgovor isto može da bude začućujući, pošto je reč o Grenadi.
# 5. U kojoj zemlji je najviše konzumiran alkohol tokom 2010. godine.
baza[(which.max(baza$total_litres_of_pure_alcohol)), c(1,5)]
# Reč je o Belorusiji.Odgovor je verovatno očekivan.
# 6. Lista zemalja sa najmanje konzumiranim pivom tokom 2010. godine.
baza[baza$beer_servings == 1, c(1,2)]
# 7. Lista zemalja sa najmanje konzumiranim vinom tokom 2010.godine.
baza[baza$wine_servings == 1, c(1,4)]
# 8. Lista zemalja u kojoj su najmanje konzumirana žestoka pića tokom 2010. godine.
baza[baza$spirit_servings == 1, c(1,3)]
# 9. Za kraj možemo videti u kojoj zemlji je najmanje konzumiran alkohol tokom 2010. godine?
baza[(which.min(baza$total_litres_of_pure_alcohol)), c(1,5)]
# Najmanje je konzumiran na Komorima.
#### ISTRAŽIVAČKA PITANJA ####
# 1. Da li postoji statistički značajna veza između:
# - potrošnje: limenki piva i čaša vina
# - potrošnje: čašica žestokih pića i limenki piva
# - potrošnje: čaša vina i čašica žestokih pića
# 2. Koliko dobro mere potrošnje: limenki piva, čaša vina i čašica žestokih pića,
# predviđaju ukupan unos čistog alkohola.
# Za prvo istraživačko pitanje koristiće se korelacioni testovi,
# a za drugo metod višestruke regresije.
#### PROVERA NORMALNOSTI DISTRIBUCIJE ####
# Ali pre primene pomenutih statističkih tehnika,
# treba proveriti normalnost distribucija datih varijabli,
# radi odabira adekvatnih stastistničkih testova.
# To radimo pomoću Shapiro–Wilk testa.
shapiro.test(baza$beer_servings)
shapiro.test(baza$spirit_servings)
shapiro.test(baza$wine_servings)
shapiro.test(baza$total_litres_of_pure_alcohol)
# p vrednost za sve varijable je znanto manja od 0.01
# usled čega odbacujemo nultu hipotezu u korist alterativne,
# i dolazimo do zakjučka da date varijable nemaju normalnu distribuciju,
# pa je poželjno koristiti neparametraske tehnike tamo gde je to moguće.
## Histogrami ##
# Pomoću histograma možemo grafički
# predstaviti distibuciju datih varijabli,
# kao bismo imali stekli što bolji uvid o njima.
## Instalacija i pozivanje ggplot paketa ##
# Instalacija nije neophodna ako je paket već instaliran,
# zato je i data u vidu komentara a ne komande, u narednom redu:
# install.packages("ggplot2")
# Isti princip će važiti za svako instaliranje paketa u ovoj analizi.
# pozivanje paketa je obavezan korak
library(ggplot2)
# 1. Histogram potrošnje lmenki piva.
ggplot(baza, aes( x = baza$beer_servings))+
geom_histogram(aes(y = ..density..), bins = 12, colour = "white", fill = "grey75") +
geom_density(aes(y = ..density..), colour = "blueviolet") +
ggtitle("Potrošnja piva u 2010. godini") +
xlab("broj konzumiranih limenki piva")+
ylab("gustina")
# Ovde možemo zapaziti da je distribucija zakrivljena ulevo,
# iz čega možemo izvesti zaključak,
# da je u velikom broju zemalja potrošnja limenki piva niska.
# 2. Histogram potrošnje čaša vina.
ggplot(baza, aes(x = baza$wine_servings))+
geom_histogram(aes(y = ..density..), bins = 12, colour = "white", fill = "grey75") +
geom_density(aes(y = ..density..), colour = "darkred") +
ggtitle("Potrošnja vina u 2010. godini") +
xlab("broj konzumiranih čaša vina") +
ylab("gustina")
# Sličan je slučaj kao i sa pivom,
# samo što je u ovom slučaju zakrivljenje još intezivnije.
# Iz čega možemo zaključiti da je potrošnja čaša vina
# u još većm broju zemalja niska nego što je to slučaj sa pivom.
# 3. Histogram potrošnje čašica žestokih pića.
ggplot(baza, aes(x = baza$spirit_servings)) +
geom_histogram(aes(y = ..density..), bins=12, colour = "white", fill = "grey75") +
geom_density(aes(y =..density..), colour = "dodgerblue1") +
ggtitle("Potrošnja žestokih pića u 2010. godini") +
xlab("broj konzumiranih čašica žestokih pića") +
ylab("gustina")
# Isti slučaj kao i sa vinom i pivom,
# distribucija je zakrivljena ulevo
# što znači da u većini zemalja imamo,
# nisku potrošnu čašica žestokog pića.
# 4. Histogram unosa čistog alkohola.
ggplot(baza, aes(x = baza$total_litres_of_pure_alcohol)) +
geom_histogram(aes(y = ..density..), bins = 12, colour = "white", fill = "grey75") +
geom_density(aes(y = ..density..), colour = "slateblue") +
ggtitle("Unos čistog alkohola u 2010. godini") +
xlab("čist alkohol izražen u litrima") +
ylab("gustina")
# Zakrivljenje je znatno manje izraženo nego u predhodnim varijablama.
# U ovom slučaju imamo velki broj,
# kako zemalja sa niskim vrednostima unosa čistog alkohola,
# tako i zemalja sa srednjim vrednostima (opseg 5-10),
# dok je najmanji broj zemalja sa izrazito visokoim vrednostima ove varijalbe.
##### KORELACIJA #####
pvs <- data.frame(baza$beer_servings, baza$spirit_servings, baza$wine_servings)
# Matrica za korelaciju koja je sastavljena od
# varijabli nad kojima želimo da primenimo korelacione testove.
# Instalacija i pozivanje paketa za korelaciju
# install.packages("Hmisc")
library("Hmisc")
## Korelacioni testovi nad matricom ##
rcorr(as.matrix(pvs), type = c("spearman"))
n <- rcorr(as.matrix(pvs), type = c("spearman"))
print(n$P, digits = 15) # tačniji prikaz p vrednosti
## Nalazi korelacionih testova ##
# 1. Veza između potrošnje limenki piva i potrošnje čaša vina,
# istražena je pomoću Spirmanovog ro koeficijenta korelacije,
# izračunata je pozitivna korelacija srednje jačine između dve promenjive,
# r = 0.62, n = 155, p < 0,01
# a to znači da sa porastom potrošnje limenki piva,
# raste i potrošnja čaša vina.
# 2. Veza između potrošnje čašica žestokog pića i potrošnje limenki piva
# istražena je pomoću Spirmanovog ro koeficijenta korelacije,
# izračunata je pozitivna korelacija srednje jačine između dve promenjive,
# r = 0.50, n = 155, p < 0,01
# a to znači da sa porastom potrošnje limenki piva,
# raste i potrošnja čašica žestokih pića.
# 3. Veza između čaša vina i čašica žestokog pića
# istražena je pomoću Spirmanovog ro koeficijenta korelacije,
# izračunata je pozitivna korelacija srednje jačine između dve promenjive,
# r = 0.39, n = 155, p < 0,01
# a to znači da sa porastom potrošnje čaša vina,
# raste i potrošnja čašica žestokih pića.
## Vizualizacija korelacije u ggplot-u (dijagrami raspršenosti) ##
# Instalacija i pozivanje ggplot-a
# install.packages("ggplot2")
library(ggplot2)
# Prikaz dijagrama
# 1. Dijagram korelacije potrošnje čaša vina i limenki piva
ggplot(baza, aes(x = baza$beer_servings, y = baza$wine_servings)) +
geom_point(size = 3, shape = 2, colour = "blue") +
ggtitle("Korelacija potrošnje limenki piva i čaša vina") +
xlab("potrošnja limenki piva") +
ylab("potrošnja čaša vina")
# 2. Dijagram korelacije potrošnje čašica žestokih pića i limenki piva
ggplot(baza, aes(x = baza$beer_servings, y = baza$spirit_servings)) +
geom_point(size = 3, shape = 1, colour = "red3")+
ggtitle("Korelacija potrošnje limenki piva i čašica žestokih pića") +
xlab("potrošnja limenki piva") +
ylab("potrošnja čašica žestokih pića")
# 3. Dijagram korelacije potrošnje čaša vina i čašica žestokih pića
ggplot(baza, aes(x = baza$wine_servings, y = baza$spirit_servings)) +
geom_point(size = 3, shape = 0, colour = "purple") +
ggtitle("Korelacija potrošnje čaša vina i čašica žestokih pića") +
xlab("potrošnja čaša vina") +
ylab("potrošnja čašica žestokih pića")
## Vizualizacija u ggcorrplot-u (kvadrati i krugovi) ##
# Instaliranje i pozivanje datog paketa paketa
# install.packages("ggcorrplot")
library(ggcorrplot)
#Sređivanje imena kolona i matrica za korelaciju
names(pvs) = c("potrošnja krigli piva", "potrošnja čašica žestokih pića", "potrošnja čaša vina")
viz <- cor(pvs, method = "spearman")
#Vizalizacija korelacije
ggcorrplot(viz, lab = TRUE) # Prvi način, kvadrat
ggcorrplot(viz, method = "circle") # drugi način krug
#### REGRESIJA ####
# Korelacioni testovi predstavljaju
# prvi korak za izradu,regresionih modela
# Zato prvo treba napraviti korelacionu matricu
# a nakon toga sprovesti korelacione testove.
reg <- data.frame(baza$total_litres_of_pure_alcohol, baza$beer_servings, baza$wine_servings, baza$spirit_servings)
# Instalacija i pozivanje paketa za korelaciju
# install.packages("Hmisc")
library("Hmisc")
## Korelacioni testovi ##
rcorr(as.matrix(reg), type = c("spearman"))
m <-rcorr(as.matrix(reg), type = c("spearman"))
print(m$P, digits = 5)
## Nalazi ##
# Spirmanov ro koeficijent korelacije,
# je pokazao da postoji pozitivna korelacija između varijable
# ukupnog unosa unosa čistog alkohola i svih drugih varijabli potrošnje alkohola.
# Najjača je korelacija sa potrošnjom limenki piva (r = 0.85).
# Dok je sa potrošnjom čaša vina(r = 0.70)
# i čašica žestokog pića(r = 0.66) ona umerene jačine.
# Iz svega pomenutog možemo izvesti zaključak
# da sa porastom potrošnje limenki piva, čaša vina i čašica žestokih pića,
# raste i ukupan unos čistog alkohola.
# Ovakav nalaz je logičan i očekivan.
# Sve pomenute veze su statistički značajne p < 0.01
# Iz ovakvih nalaza možemo zaključiti da je najsvrsihsodnije
# napraviti tri regresiona modela: jedan sa svim varijablama,
# drugi sa pivom i vinom i treći samo sa pivom.
# Izrada regresionih modela:
# 1. Prvi model, sve tri varijable:
Model1 <- lm(baza$total_litres_of_pure_alcohol ~ baza$wine_servings + baza$spirit_servings + baza$beer_servings )
summary(Model1)
# Model je odličan, pošto objašnjava 88% varjabiliteta ukupnog unosa čistog alkohla.
# r^2 = 0.88, F = 385, p < 0.01 (za ceo model i sve koeficijente)
# 2. Drugi model, bez žestokih pića:
Model2 <- lm (baza$total_litres_of_pure_alcohol ~ baza$wine_servings + baza$beer_servings)
summary(Model2)
# I ovaj model je prilično dobar pošto objašnjava 75% varjabiliteta
# ukupnog unosa čistog alkholoa, sa dve varijable
# r^2 = 0.75, F = 234.5, p < 0.01 (za ceo model i sve koeficijente)
# 2.1. Vizualizacija drugog modela
ggplot(Model2,aes(y=baza$total_litres_of_pure_alcohol,x=baza$beer_servings, color=baza$wine_servings)) +
geom_point(size = 4) +
stat_smooth(method = "lm", se = FALSE, colour = "red", size = 1.3 )+
ggtitle("Model broj 2") +
xlab("potrošnja limenki piva") +
ylab("ukupan unos čistog alkohola") +
labs(color = "potrošnja čaša vina")
# 3. Treći model, samo limenke piva:
Model3 <- lm (baza$total_litres_of_pure_alcohol ~ baza$beer_servings)
summary(Model3)
# Ovaj model je takođe veoma dobar pošto objašnjava 66 % varjabiliteta
# ukupnog unosa čistog alkohola sa samo jednom varijablom
# r^2 = 0.66, F = 304.9, p < 0.01 (za ceo model i koeficijente)
# 3.1. Vizualizacija trećeg modela
ggplot(Model3, aes(y = baza$total_litres_of_pure_alcohol, x = baza$beer_servings)) +
geom_point(size = 3, color = "azure4" )+
stat_smooth(method = "lm",se = FALSE) +
ggtitle("Model broj 3") +
xlab("potrošnja limenki piva") +
ylab("ukupna unos čistog alkohola")
# Zaključak
# Sva tri modela su odlična ali je prvi ipak najbolji,
# jer objašnjava najveći procenat varijabiteta ukupnog unosa čistog alkohola.
# Ovaj model pokazuje da su
# varijable potrošnje čaša vina, limenki piva i čašica žestokog pića,
# odlčni prediktori vrednosti varijable ukupnog unosa čistog alkohola.
# Takav nalaz je očekivan pošto je reč o najpopularnijim vrstama pića.
#### ZAVRŠETAK ANALIZE ####
# Analizu sproveo: Novak Tešić
# GitHub profil: https://github.com/NovakTesic |
3238d3a32705fe5b7e760a6b93b0931b14df56fa | 250647d597c52cd832aeed3452f66af2146fa86f | /r_scripts_NoSNP/ada_cross_validation_noSNP.R | 4a15566c5a92c02f945c30978357378dc5a05c2c | [
"BSD-2-Clause"
] | permissive | zzygyx9119/somaticseq | 7fa79ec07db17d5b8bc1153e75c9703a952c8328 | 52e5f1d7cb5c22095cde13cfa56fd4f0bf79d412 | refs/heads/master | 2021-05-15T09:40:28.859946 | 2017-10-09T22:25:22 | 2017-10-09T22:25:22 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,829 | r | ada_cross_validation_noSNP.R | ### To start, set the input filenames in "Main (entry point)" section.
require("ada")
args <- commandArgs(TRUE)
filename = args[1]
numTrueCalls = as.integer( args[2] )
##### Main (entry point)
# Train and test filenames
train_filename = filename
test_filename = filename
#train_filename <- ""
test_filename <- ""
# If one filename is set, data is splitted to test/train randomly
data_filename <- train_filename
train_filename = ""
test_filename = ""
print(test_filename)
print(train_filename)
if (data_filename != "") {
data <- read.table(data_filename, header=TRUE)
index <- 1:nrow(data)
train_index <- sample(index, trunc(length(index)/2))
test_data_ <- data[-train_index,]
train_data_ <- data[train_index,]
}
if (test_filename != "") {
test_data_ = read.table(test_filename, header=TRUE)
}
if (train_filename != "") {
train_data_ = read.table(train_filename, header=TRUE)
}
train_data <- train_data_
test_data <- test_data_
if (FALSE) {
print("Updating missing values...")
train_data <- set_missing_values(train_data)
test_data <- set_missing_values(test_data)
}else {
train_data <- train_data[,-c(1, 2, 3, 4, 5)]
test_data <- test_data[,-c(1, 2, 3, 4, 5)]
}
test_data[,'TrueVariant_or_False'] <- NULL
if (FALSE) {
print("Updating features...")
train_data <- update_features(train_data, 0.3)
test_data <- update_features(test_data, 0.3)
}
train_data[,'REF'] <- NULL
train_data[,'ALT'] <- NULL
test_data[,'REF'] <- NULL
test_data[,'ALT'] <- NULL
# Do not use dbsnp information
train_data[,'if_dbsnp'] <- NULL
train_data[,'BAF'] <- NULL
train_data[,'COMMON'] <- NULL
train_data[,'G5'] <- NULL
train_data[,'G5A'] <- NULL
test_data[,'if_dbsnp'] <- NULL
test_data[,'BAF'] <- NULL
test_data[,'COMMON'] <- NULL
test_data[,'G5'] <- NULL
test_data[,'G5A'] <- NULL
if (FALSE) {
model_formula <- as.formula( TrueVariant_or_False ~
((if_MuTect_ + if_VarScan2_ + if_JointSNVMix2_ + if_SomaticSniper_ +
if_MuTect_if_JointSNVMix2 + if_MuTect_if_SomaticSniper + if_JointSNVMix2_if_SomaticSniper +
if_MuTect_if_JointSNVMix2_if_SomaticSniper) +
(SNVMix2_Score + Sniper_Score +
if_dbsnp + BAF + COMMON + G5 + G5A + # Probably no need for G5/G5A
N_VAQ +
T_VAQ + T_MQ0 + T_MLEAF +
N_StrandBias + N_BaseQBias + N_MapQBias + N_TailDistBias +
T_StrandBias + T_BaseQBias + T_MapQBias + T_TailDistBias +
N_AMQ_REF + N_AMQ_ALT + N_BQ_REF + N_BQ_ALT + N_MQ +
T_AMQ_REF + T_AMQ_ALT + T_BQ_REF + T_BQ_ALT + T_MQ +
#N_DP_large +
T_DP_small + T_DP_large +
N_ALT_FREQ_FOR + N_ALT_FREQ_REV + N_ALT_STRAND_BIAS +
T_ALT_FREQ_FOR + T_ALT_FREQ_REV + T_ALT_STRAND_BIAS )))
} else {
model_formula <- as.formula(TrueVariant_or_False ~ .)
}
print("Fitting model...")
ada.model <- ada(model_formula, data = train_data, iter = 500)
print(ada.model)
#pdf("varplot.pdf")
#varplot(ada.model)
#dev.off()
#pdf("iterplot.pdf")
#plot(ada.model, TRUE, TRUE)
#dev.off()
print("Computing prediction values...")
ada.pred <- predict(ada.model, newdata = test_data, type="both")
# Print results out:
if (TRUE) {
for (threshold in seq(0,1, .01)) {
cat("threshold: ", threshold, "\t")
ada_predicted_call <- ada.pred$prob[,2] > threshold
# Sensitivity
# numTrueCalls <- 14194 # stage4 indel
# numTrueCalls <- 8292 # stage3 indel
# numTrueCalls <- 16268 # stage4 snv
# numTrueCalls <- 7903 # stage3 snv
# numTrueCalls <- 4332 # stage2 snv
# numTrueCalls <- 3537 # stage1 snv
num_true_positives_predicted <- sum(ada_predicted_call[ada_predicted_call == test_data_[,'TrueVariant_or_False']])
num_all_positive_predictied <- sum(ada_predicted_call)
Sensitivity <- num_true_positives_predicted / numTrueCalls
cat("Recall: ", Sensitivity , "\t")
# Specificity
Specificity <- num_true_positives_predicted / num_all_positive_predictied
cat("Precision: ", Specificity, "\t")
cat("DREAM_Accuracy: ", (Specificity + Sensitivity)/2, "\t")
cat("F1: ", ( 2 * num_true_positives_predicted / ( numTrueCalls + num_all_positive_predictied ) ), "\n")
}
}
# Write predicted values to output file
if(FALSE){
test_data_output <- cbind(test_data_, SCORE = ada.pred$prob[,2])
write.table(test_data_output, row.names = FALSE, sep="\t", na = "nan", file = paste( "ADA", filename, sep = "."), quote=FALSE)
}
#source("script/log_regression.R")
|
57182ddcf1c5c464469009d6858b0202ffa7ee35 | d0a9e3485a92c02d7407a91ffe063705572f1576 | /R/atoms.R | 9e5a3310bb282cc1ebdb65a26b27533bb5bbba31 | [
"MIT"
] | permissive | dobinyim/rUMLS | d28761301a3c7f34cecfb9352fe9631458a2c05f | 0ad7989528597cdc52019d1e9776ad1b24537d86 | refs/heads/master | 2023-04-10T15:36:58.605962 | 2018-02-26T21:48:06 | 2018-02-26T21:48:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,819 | r | atoms.R | get_atom_rels <- function(AUI){
}
#' Get familial relationships for a given Atom.
#'
#' @param AUI The AUI of interest.
#' @param type The type of familial relationship. This can be one of
#'   \code{"ancestors"}, \code{"descendants"}, \code{"parents"},
#'   \code{"children"}. See the possible endpoints from the
#'   \href{https://documentation.uts.nlm.nih.gov/rest/atoms/}{UMLS}.
#' @return A list of results. These are of UMLS class \code{Atom}.
#' @export
#' @examples
#' # Get parents of atom A8345234
#' parents <- get_atom_parents("A8345234")
get_atom_family <- function(AUI, type) {
  # Validate the endpoint name early so a typo fails with a clear R error
  # instead of an opaque HTTP error from the UMLS REST service.
  type <- match.arg(type, c("ancestors", "descendants", "parents", "children"))
  # Page through every result page and parse each page of Atom records.
  exhaust_search(FUN = get_atom_family_page, PARSER = parse_results, AUI = AUI, type = type)
}
# Fetch one page of familial relationships for an atom from the UMLS REST
# API. Internal helper driven by get_atom_family() through exhaust_search().
get_atom_family_page <- function(AUI, type, pageNumber = 1, pageSize = 25) {
  endpoint <- paste("rest/content/current/AUI", AUI, type, sep = "/")
  query_params <- list(
    ticket = get_service_ticket(get_TGT()),
    pageNumber = pageNumber,
    pageSize = pageSize
  )
  GET(restBaseURL, path = endpoint, query = query_params)
}
#' @rdname get_atom_family
#' @export
# Convenience wrapper: parents of the given atom.
get_atom_parents <- function(AUI) get_atom_family(AUI, type = "parents")
#' @rdname get_atom_family
#' @export
# Convenience wrapper: children of the given atom.
get_atom_children <- function(AUI) get_atom_family(AUI, type = "children")
#' @rdname get_atom_family
#' @export
# Convenience wrapper: ancestors of the given atom.
get_atom_ancestors <- function(AUI) get_atom_family(AUI, type = "ancestors")
#' @rdname get_atom_family
#' @export
# Convenience wrapper: descendants of the given atom.
get_atom_descendants <- function(AUI) get_atom_family(AUI, type = "descendants")
#' Get information about a given atom.
#' @param AUI The AUI of interest.
#' @return Information about the atom. This is of UMLS class \code{Atom}.
#' @export
#' @examples
#' # Get information about atom A8345234
#' info <- get_atom_info("A8345234")
get_atom_info <- function(AUI) {
  # A fresh service ticket is required for every UMLS REST request.
  ticket <- get_service_ticket(get_TGT())
  GET(
    restBaseURL,
    path = paste0("rest/content/current/AUI/", AUI),
    query = list(ticket = ticket)
  )
}
|
0ad67df171050da2a5a33133f184891c3d10931a | 6e800ec948de2d96bd11288a3ba06611d0395865 | /R/select_k_features_FP.R | 8ba64081961b4047037274b73cb3b55b773a2540 | [] | no_license | xf15/NDTr | 874945172b306c1e4ac92bce4fdc39d8b73880b0 | 79a3b269cbe518cd4a3fcca95861e65755e7f40f | refs/heads/master | 2021-09-26T14:45:18.857285 | 2018-10-30T23:24:16 | 2018-10-30T23:24:16 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,218 | r | select_k_features_FP.R | #' A feature preprocessor (FP) that reduces the data to the best k features
#'
#' This feature prerpocessor object find the k most selective features using an ANOVA on the training data.
#' The proprocessor then eleminates all other features in both the training and test sets. This preprocessor
#' can also eliminate the best k features.
#' This object uses \href{https://cran.r-project.org/web/packages/R6/vignettes/Introduction.html}{R6 package}
#'
#'
#' @section select_k_features_FP:
#'
#' \describe{
#' \item{\code{select_k_features_FP$new(num_site_to_use, num_sites_to_exclude)}}{
#' This constructor uses num_site_to_use of the best sites as found via an ANOVA.
#' Additionally, it can eliminate the best num_sites_to_exclude to use sites again
#' using an ANOVA. If both num_site_to_use and num_sites_to_exclude are set, then
#' num_sites_to_exclude will first be eliminated and then the next num_site_to_use will
#' be kept. If successful, will return a new \code{select_k_features_FP} object.
#' }}
#'
#' @section Methods:
#' \describe{
#' \item{\code{preprocess_data}}{
#' Like all FP objects, this method finds parameters on the training set and then applies them
#' to the training and test sets. For select_k_features_FP, the parameters found on the training set are
#' the sites that are the most selective, and these sites are then kept and/or eliminated on training and
#' test sets.
#' }}
#'
#'
#'
#' @import R6
#' @export
select_k_features_FP <- R6Class("select_k_features_FP",
  public = list(

    # Number of top-ranked sites to keep (NA = keep all remaining sites).
    num_site_to_use = NA,
    # Number of top-ranked sites to drop before selecting (NA = drop none).
    num_sites_to_exclude = NA,

    # Store whichever of the two selection parameters were supplied.
    initialize = function(num_site_to_use, num_sites_to_exclude) {
      if (!missing(num_site_to_use)) {
        self$num_site_to_use <- num_site_to_use
      }
      if (!missing(num_sites_to_exclude)) {
        self$num_sites_to_exclude <- num_sites_to_exclude
      }
    },

    # Rank sites with a one-way ANOVA of each site's values against the class
    # labels (computed on the training data only), then keep/drop sites by
    # rank and apply the same site selection to training and test sets.
    preprocess_data = function(train_data, test_data){
      if (is.na(self$num_site_to_use) && is.na(self$num_sites_to_exclude)) {
        stop('Either num_site_to_use or num_sites_to_exclude must be set prior to calling the preprocess_data method')
      }

      # Hand-rolled one-way ANOVA over all sites at once; this is much faster
      # than calling anova(lm(...)) separately for every site.
      group_sizes <- train_data %>% group_by(labels) %>% summarize(n = n())
      num_sites <- dim(train_data)[2] - 1
      num_groups <- dim(group_sizes)[1]

      # Mean of each site within each class.
      by_group <- split(train_data[, 1:num_sites], train_data$labels)
      group_means <- t(sapply(by_group, function(one_group) apply(one_group, 2, mean)))

      # Per-site between-group (model), total, and residual sums of squares.
      model_ss <- apply(sweep(scale(group_means, scale = FALSE)^2, 1, group_sizes$n, FUN = "*"), 2, sum)
      total_ss <- apply(scale(select(train_data, -labels), scale = FALSE)^2, 2, sum)
      resid_ss <- total_ss - model_ss

      df_between <- num_groups - 1
      df_within <- dim(train_data)[1] - num_groups
      f_stats <- (model_ss/df_between)/(resid_ss/df_within)
      all_pvals <- pf(f_stats, df1 = df_between, df2 = df_within, lower.tail = FALSE)

      # Site indices ordered from most to least selective (smallest p first).
      sites_to_use <- sort(all_pvals, index.return = TRUE)$ix

      # Optionally drop the most selective sites first...
      if (!is.na(self$num_sites_to_exclude)) {
        sites_to_use <- sites_to_use[(self$num_sites_to_exclude + 1):num_sites]
      }
      # ...then keep only the top num_site_to_use of whatever remains.
      if (!is.na(self$num_site_to_use)) {
        sites_to_use <- sites_to_use[1:self$num_site_to_use]
      }

      # Reduced data sets; the test set additionally keeps its time column.
      list(train_data = cbind(train_data[sites_to_use], labels = train_data$labels),
           test_data = cbind(test_data[sites_to_use], labels = test_data$labels, time = test_data$time))
    }
  )
)
99b02830b129b4d1c4cdc5bdc11dd5366a5d84cd | b3f0fbf6b1ed95ab6013bba25d1a98b0296d5f9b | /man/tpf.Rd | b5573c8f19bceca083f22fdaed0eb0c31b7b5530 | [] | no_license | bdshaff/conversionpath | 346a090bf1e60275213dea9061e928e9c9725fda | 20b4bd2b27929f2dfa41edd0cc76196ac35cf232 | refs/heads/master | 2023-06-28T16:24:35.967347 | 2021-08-06T17:12:02 | 2021-08-06T17:12:02 | 387,070,617 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 316 | rd | tpf.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tpf.R
\name{tpf}
\alias{tpf}
\title{Touch-point Frequency per Path}
\usage{
tpf(paths, touchpoint)
}
\arguments{
\item{paths}{paths}
\item{touchpoint}{touchpoint}
}
\value{
numeric vector
}
\description{
Touch-point Frequency per Path
}
|
37c6176f04b63d6dae0742ecc00383a936be4394 | 0e2c5c23478b065d59c06576b7bf6afed950cd92 | /auxilliary.R | 784bc265d575450bc33b64308069a6f762228d72 | [] | no_license | chandarb/quadratic_voting | 9d415ef1d3efbd92ded829b82da129e383cb1859 | 13663166b6b0f2d24f03052a1dc57fb9893c79de | refs/heads/master | 2021-01-21T17:10:15.579280 | 2018-01-29T08:24:39 | 2018-01-29T08:24:39 | 91,938,338 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,492 | r | auxilliary.R | ### Auxilliary functions
# Utility function given v,u, and G
# Returns u scaled by 1 - G(-v), minus the quadratic cost v^2.
util.fn <- function(v, u, G) {
  win_weight <- 1 - G(-v)
  u * win_weight - v^2
}
# first order condition for utility function
# First-order condition of the utility function: v - u * g(-v) / 2.
# NA density values are floored to a tiny positive constant so root
# finders are not fed missing values.
foc <- function(v, u, g) {
  density_vals <- g(-v)
  density_vals[is.na(density_vals)] <- 10^-12
  v - (u * density_vals / 2)
}
# solves first order condition to get optimal vote level
# Solve the first-order condition for the optimal vote level at utility x,
# searching the bracket [lower, upper] with the density function g.
u.root <- function(x, lower, upper, g) {
  uniroot(foc, c(lower, upper), u = x, g = g)$root
}
# bisection method to solve for roots
# allows vectorized root solving
# see http://r.789695.n4.nabble.com/vectorized-uni-root-td4648920.html
# Vectorized bisection root finder: element i of lower/upper brackets the
# i-th root, and f is evaluated on the whole vector at once. Adapted from
# http://r.789695.n4.nabble.com/vectorized-uni-root-td4648920.html
#
# Args:
#   f          vectorized function; called as f(x, ...).
#   lower, upper  numeric vectors of bracket endpoints (equal length).
#   ...        additional arguments forwarded to f.
#   numiter    maximum number of bisection iterations.
#   tolerance  convergence threshold on abs(f(mid)) for every element.
# Returns: list with midpoints, function values, final brackets and the
#   iteration count n.
bisection <- function(f, lower, upper, ..., numiter = 100,
                      tolerance = .Machine$double.eps^0.25) {
  stopifnot(length(lower) == length(upper))
  flower <- f(lower, ...)
  fupper <- f(upper, ...)
  for (n in seq_len(numiter)) {
    mid <- (lower + upper) / 2
    fmid <- f(mid, ...)
    # `&&`, not `&`: both operands are scalars here. The n > 1 guard
    # (carried over from the original) forces at least two iterations
    # before convergence can be declared.
    if (all(abs(fmid) < tolerance) && (n > 1)) break
    # TRUE where the midpoint lies on the same side of the root as `lower`;
    # move that endpoint to the midpoint, otherwise move `upper`.
    samesign <- ((fmid < 0) & (flower < 0)) | ((fmid >= 0) & (flower >= 0))
    lower  <- ifelse(samesign,  mid,  lower)
    flower <- ifelse(samesign,  fmid, flower)
    upper  <- ifelse(!samesign, mid,  upper)
    fupper <- ifelse(!samesign, fmid, fupper)
  }
  return(list(mid = mid, fmid = fmid, lower = lower, upper = upper,
              flower = flower, fupper = fupper, n = n))
}
# vectorized method to interpolate votes for the sample utility matrix
# assigns same vote as nearest utility value in the utility grid
# Vectorised vote interpolation: each sample utility is assigned the vote of
# the nearest utility value in `ugrid` (whose optimal votes are `vgrid_0`),
# with a correction around the discontinuity `dc` so that no utility is
# rounded across it. Returns a matrix with N-1 columns.
sample_votes = function(ugrid, vgrid_0, sample_u, N, dc){
  grid_min <- ugrid[1]
  grid_len <- length(ugrid)
  grid_max <- ugrid[grid_len]
  # Linear map from a utility value to its (fractional) index in ugrid.
  slope <- (grid_len - 1) / (grid_max - grid_min)
  intercept <- 1 - (slope * grid_min)
  utilities <- as.vector(sample_u)
  # Round each sample utility to the nearest grid index.
  idx <- round(utilities * slope + intercept)
  if (!is.na(dc)) {
    # Fractional index of the discontinuity, and the nearest grid index at
    # or below it.
    dc_idx <- dc * slope + intercept
    dc_floor <- floor(dc_idx)
    if (dc_idx - dc_floor > .5) {
      # Utilities at or below dc that rounded upward across the
      # discontinuity: pull them back down.
      idx[(idx == (dc_floor + 1)) & (utilities <= dc)] <- dc_floor
    } else {
      # Utilities above dc that rounded downward across the discontinuity:
      # push them back up.
      idx[(idx == dc_floor) & (utilities > dc)] <- dc_floor + 1
    }
  }
  # Clamp to the valid index range of the grid.
  idx[idx < 1] <- 1
  idx[idx > grid_len] <- grid_len
  matrix(vgrid_0[idx], ncol = N - 1)
}
86c049cc3b0b2c751616321d5a15b7445af29460 | 5c7cac880f610051afd2b23c4a60409269538cf8 | /man/calcul_ratio.Rd | b2279c0388817fd5bc3390f062b74afd7cf77dcb | [] | no_license | dominiqueemmanuel/kmino.typologie | 4c954e92691ec6a51afbc34c6581652518a33f6f | 8fe1c70ab615586b287fe56434a325e5594d6071 | refs/heads/master | 2021-01-19T19:47:11.593086 | 2017-04-16T22:13:09 | 2017-04-16T22:13:09 | 88,445,497 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 404 | rd | calcul_ratio.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calcul_ratio.r
\name{calcul_ratio}
\alias{calcul_ratio}
\title{Une fonction permettant de calculer des ratios par rapport à des sur-populations.}
\usage{
calcul_ratio(data, var, by, new_name = NULL, ...)
}
\description{
Une fonction permettant de calculer des ratios par rapport à des sur-populations.
}
\examples{
#Rien
}
|
87a89075bca50205f1b6be021cba0cecf467b899 | 3fd7571c606b6e467a1bd750282b1f994b9094dc | /test_get_report.R | 5a182f47b3a07b103edaf9c9ae796cdd83aea33c | [] | no_license | phuong-ha/gambia | f304c2ecd9be944c6f2800da06ed47a381611439 | ec0ccc5bed0910de080a60753bbd7e10d80b9140 | refs/heads/master | 2020-03-12T08:21:13.959893 | 2018-04-22T02:59:13 | 2018-04-22T02:59:13 | 130,525,914 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,006 | r | test_get_report.R | ##START_HERE
#* @get /getReport
#* @html
# Download the Rmd source for an RTA markdown report, render it, and return
# the rendered HTML as raw bytes.
#
# Args:
#   site       project site identifier, resolved via RTA::get_project_url().
#   report_id  identifier of the markdown report to render.
#   filename   name of the rendered output file (default "report.html").
#   params     reserved; currently unused -- TODO pass through to
#              rmarkdown::render(params = ...) or drop it.
getReport <- function(site, report_id, filename, params) {
  if (missing(site)) {
    stop("argument 'site' is missing, with no default", call. = FALSE)
  }
  if (missing(report_id)) {
    stop("argument 'report_id' is missing, with no default", call. = FALSE)
  }
  if (missing(filename)) filename <- "report.html"
  if (missing(params)) params <- list()
  # Location of the report's Rmd source within the RTA project.
  create_report_url <- function(site, report_id) {
    paste0(RTA::get_project_url(site), "/markdown/", report_id, "/rmd_files/mdscript.Rmd")
  }
  rmd <- create_report_url(site = site, report_id = report_id)
  temp_report <- file.path(tempdir(), "report1.Rmd")
  curl::curl_download(rmd, temp_report)
  # Render into the session temp directory under the requested filename.
  # (Previously this wrote to a hardcoded user-specific absolute path, which
  # fails on any other machine and silently ignored `filename`.)
  output_file <- rmarkdown::render(
    temp_report,
    output_file = file.path(tempdir(), filename),
    envir = new.env(parent = globalenv())
  )
  readBin(output_file, "raw", file.info(output_file)$size)
}
##END_HERE
|
fec763d1499e0ce499c588a01bb9ffefd63830e4 | 691bfa00e6fea304710d6becea1b57989b591210 | /tests/testthat.R | f2ec6019f601bafd958a9889543ccce388d89077 | [] | no_license | jepusto/scdhlm | e85c665b80e7173cfba76e248caee33a851af89d | 0da6cdc94a775654592662e63d99634406564ac4 | refs/heads/main | 2023-08-21T16:36:21.010293 | 2023-08-04T21:13:54 | 2023-08-04T21:13:54 | 15,191,793 | 6 | 1 | null | 2023-03-29T12:49:36 | 2013-12-14T19:47:17 | R | UTF-8 | R | false | false | 56 | r | testthat.R | library(testthat)
library(scdhlm)
test_check("scdhlm")
|
aae8e6f977ba70c252889745e5a1046263fa9950 | 3fa5d2b705a1078b0fe099ff15a41955a18e81b6 | /RScripts/siham/test2.R | 113858b3dfc9a7367868f02325b342430409834e | [] | no_license | CodesByMass/Rambam-Hospital_EDA | 4eb61b464877e1cd52599cc637c3bed356714254 | eaf19646ebcbd3fa5a7776fdd6da7263955692b1 | refs/heads/master | 2020-12-20T00:59:43.111711 | 2020-04-26T22:36:59 | 2020-04-26T22:36:59 | 235,907,005 | 0 | 0 | null | 2020-01-23T23:30:16 | 2020-01-23T23:30:08 | null | UTF-8 | R | false | false | 1,871 | r | test2.R |
#### porcentage des patients passée par les argances par rapport à leurs age ###
# Share of emergency-department visits (entry_group = '1') by patient age
# bracket, drawn as a pie chart.
library(dbConnect)

# FIXME: database credentials are hardcoded in source -- move them to a
# config file or environment variables before sharing this script.
con <- dbConnect(MySQL(), dbname = 'hopital', user = 'root',
                 password = 'Racheletmoi2', host = 'localhost')

# Age brackets [lower, upper) matching the original queries, plus chart
# labels. (The first bracket gains an explicit age_years>=0 bound, which is
# equivalent to the original plain age_years<11 unless negative ages exist.)
lowers <- c(0, 11, 21, 32, 43, 54, 65, 76, 87)
uppers <- c(11, 21, 32, 43, 54, 65, 76, 87, 100)
M <- c("0-10","11-21","22-32","33-43","44-54","55-65","66-76","77-87","88-100")

# One count per bracket. (The original computed percentages r11..r100 from an
# undefined total `r` and then applied `$`COUNT(*)`` to plain numerics, which
# errors; raw counts are all pie() needs since prop.table() derives shares.)
H <- numeric(length(M))
for (k in seq_along(M)) {
  H[k] <- dbGetQuery(con, sprintf(
    "SELECT COUNT(*) FROM visits WHERE entry_group='1' AND age_years>=%d AND age_years<%d",
    lowers[k], uppers[k]))$`COUNT(*)`
}

pie(H, labels = paste(M, "\n", round(prop.table(H)*100, 1), "%"),
    radius = 1.08, cex = 0.8,
    main = " porcentage des patients passée par les urgances \n par tranche d'âge ")

# on.exit() only has an effect inside a function; disconnect explicitly.
dbDisconnect(con)
|
c743c3848009b4b5ad58199cda5c331928350a82 | 258d1e7a20cb697f8521101fb8720182f940bad3 | /Vergence/Transientbatchsave.R | 7b83c88d2e54754278faca22a0b3c1d6e9ff7f74 | [] | no_license | AdamPallus/NPH-Analysis | ea1122d5f33725cfbd82e07b98767f766d862fae | 59eadccce9118b37a5c9cd1087238885ec894f96 | refs/heads/master | 2021-01-19T07:06:12.515171 | 2018-05-11T21:59:00 | 2018-05-11T21:59:00 | 43,463,190 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,601 | r | Transientbatchsave.R |
# For each saccade index after the first (starts at 2 -- presumably skipping
# the first detected saccade; confirm that is intentional), build a diagnostic
# plot of the traces around the movement and save it to disk. The lag() pairs
# re-compute lev-rev with one eye's trace shifted by 1-9 samples, drawn with
# distinct linetypes. (lev/rev and lep/rep look like left/right eye velocity
# and position -- TODO confirm against the data prep code.)
for (sac in 2:nsac){
  ga<- ggplot(filter(zz,sacnum==goodsacs[sac]))+
    # geom_area(aes(time,sdf),alpha=1/10)+
    geom_line(aes(counter,(lev-rev)-100),color='darkblue',alpha=1)+
    geom_line(aes(counter,(lev-rev)),color='green',alpha=1)+
    geom_line(aes(counter,enhance.velocity-100),size=2,color='darkblue')+
    geom_line(aes(counter,(lep-rep)*5-100),color='darkgreen')+
    geom_line(aes(counter,lev-lag(rev,4)),color='red',linetype=2)+
    geom_line(aes(counter,lag(lev,4)-rev),color='blue',linetype=2)+
    geom_line(aes(counter,lev-lag(rev,3)),color='red',linetype=3)+
    geom_line(aes(counter,lag(lev,3)-rev),color='blue',linetype=3)+
    geom_line(aes(counter,lev-lag(rev,2)),color='red',linetype=4)+
    geom_line(aes(counter,lag(lev,2)-rev),color='blue',linetype=4)+
    geom_line(aes(counter,lev-lag(rev,1)),color='red',linetype=5)+
    geom_line(aes(counter,lag(lev,1)-rev),color='blue',linetype=5)+
    geom_line(aes(counter,lev-lag(rev,6)),color='red',linetype=6)+
    geom_line(aes(counter,lag(lev,6)-rev),color='blue',linetype=6)+
    geom_line(aes(counter,lev-lag(rev,9)),color='red',linetype=9)+
    geom_line(aes(counter,lag(lev,9)-rev),color='blue',linetype=9)+
    geom_point(aes(plep*10+200,100+plepV*10),color='blue',alpha=1/2)+
    geom_point(aes(prep*10+200,100+prepV*10),color='red',alpha=1/2)+
    geom_point(aes(200,100),size=3)
  # Pass the plot explicitly: ggsave() defaults to the last *displayed*
  # plot, and `ga` is never printed inside this loop, so the original call
  # could save the wrong figure.
  ggsave(paste('TestDemo-',sac,'.PNG',sep=''), plot = ga)
}
a1671c2cba80befedfdab4930e64397f1809a30b | 5b722119d1b1ca9df17a2914a4db2d35f73b5490 | /Projects/Presidential Speeches/analyze_presidential_speeches.r | 767ea211c78c0281096cce3f8e0f5f526f02e209 | [
"CC-BY-4.0"
] | permissive | vishalbelsare/Public_Policy | 1d459eba9009e7183fa266d3bb9d4dd0d6dacddc | 4f57140f85855859ff2e49992f4b7673f1b72857 | refs/heads/master | 2023-03-29T05:01:10.846030 | 2021-01-13T21:52:45 | 2021-01-13T21:52:45 | 311,356,474 | 0 | 0 | NOASSERTION | 2021-04-04T20:12:17 | 2020-11-09T14:00:21 | null | UTF-8 | R | false | false | 3,575 | r | analyze_presidential_speeches.r |
library(tidyverse)
library(quanteda)
library(topicmodels)
library(tm)
library(tidytext)
library(textmineR)
library(data.table)
library(gridExtra)
# SnowballC
setwd("~/Public_Policy/Projects/Presidential Speeches")
# Load the scraped campaign documents, drop empty ones, parse the document
# date, and de-duplicate by URL.
all_campaign_docs_stacked = readRDS('data/all_campaign_docs_stacked.rds') %>% 
  filter(
    str_trim(content) != ''
  ) %>% 
  mutate(
    date_clean = as.Date(date, format = '%B %d, %Y')
  ) %>% 
  data.table() %>% 
  unique(by = 'url')

# The three people with the most documents dated 2016.
# BUG FIX: this previously filtered `all_campaign_docs_stacked_sub`, which is
# not defined until the next statement (it only ran against a stale object
# from a prior interactive session); filter the full table instead.
top_2016_candidates = filter(all_campaign_docs_stacked, year(date_clean) == 2016) %>% 
  pull(person_name) %>% 
  table() %>% 
  sort() %>% 
  tail(3)

all_campaign_docs_stacked_sub = filter(all_campaign_docs_stacked, person_name %in% names(top_2016_candidates))
# Stop words: the standard English list, the SMART list, and the transcript
# artifact "applause".
the_stop_words = c(stopwords::stopwords("en"), 
                   stopwords::stopwords(source = "smart"), 'applause')
# Document-term matrix over the selected candidates' documents: unigrams
# only, Porter-stemmed, with the custom stop words removed.
dtm <- CreateDtm(doc_vec = all_campaign_docs_stacked_sub$content, 
                 doc_names = all_campaign_docs_stacked_sub$url,
                 ngram_window = c(1, 1),
                 stopword_vec = the_stop_words,
                 stem_lemma_function = function(x) SnowballC::wordStem(x, "porter"))
# Fit a 10-topic LDA model.
the_lda = LDA(dtm, k = 10)
# Top 15 terms per topic by beta (per-topic term weight), with terms
# reordered within each topic so the faceted bar chart sorts correctly.
the_lda_tidy = tidy(the_lda, matrix = 'beta') %>%
  group_by(topic) %>%
  top_n(15, beta) %>%
  ungroup() %>%
  arrange(topic, -beta) %>%
  mutate(
    term = reorder_within(term, beta, topic)
  )
# Per-document topic weights (gamma), joined back to each document's speaker.
the_lda_tidy_doc = tidy(the_lda, matrix = 'gamma') %>%
  inner_join(all_campaign_docs_stacked_sub %>% select(url, person_name), by = c('document' = 'url'))
# Average topic weight per speaker and topic.
counts_by_topic = group_by(the_lda_tidy_doc, topic, person_name) %>%
  summarize(
    avg_gamma = mean(gamma)
  )
# Stacked bars of average gamma by topic, filled by speaker.
main_plot = ggplot(counts_by_topic, aes(factor(topic), avg_gamma, fill = person_name)) + 
  geom_bar(stat = 'identity', colour = 'black')
# Top terms for each topic, one facet per topic.
sub_plot = ggplot(the_lda_tidy, aes(term, beta)) +
  facet_wrap(~topic, ncol = 3, scales = 'free') +
  geom_bar(stat = 'identity') +
  scale_x_reordered() +
  coord_flip()
grid.arrange(main_plot, sub_plot, heights = unit(c(3, 6), 'in'))
#
# Scratch notes kept from the tm package's Reuters corpus example (not run):
# reut21578 <- system.file("texts", "crude", package = "tm")
# reuters <- VCorpus(DirSource(reut21578, mode = "binary"),
#                    readerControl = list(reader = readReut21578XMLasPlain))
# 
# as.VCorpus(all_campaign_docs_stacked$content)
# ?VCorpus
# a = VCorpus(all_campaign_docs_stacked$content)
# reuters <- tm_map(reuters, stripWhitespace)
# reuters <- tm_map(reuters, content_transformer(tolower))
# reuters <- tm_map(reuters, removeWords, stopwords("english"))
# tm_map(reuters, stemDocument)
# dtm <- DocumentTermMatrix(reuters)
# inspect(dtm)
# Interactive inspection of the fitted LDA object.
names(the_lda)
the_lda
# All topic-term weights, largest beta first.
ap_topics <- tidy(the_lda, matrix = "beta") %>% arrange(-beta)
ap_topics
# Readability scores for every document. (Three separate passes over the
# corpus; textstat_readability also accepts several measures in one call.)
coleman.liau.grade = 
  textstat_readability(all_campaign_docs_stacked$content, 
                       measure = 'Coleman.Liau.grade')
ELF = 
  textstat_readability(all_campaign_docs_stacked$content, 
                       measure = 'ELF')
Flesch = 
  textstat_readability(all_campaign_docs_stacked$content, 
                       measure = 'Flesch')
coleman.liau.grade %>% head()
# Attach the score columns ([[2]] = second column of each result) and drop
# the raw document text.
all_campaign_docs_stacked$coleman.liau.grade = coleman.liau.grade[[2]]
all_campaign_docs_stacked$ELF = ELF[[2]]
all_campaign_docs_stacked$Flesch = Flesch[[2]]
all_campaign_docs_stacked$content = NULL
# Grade level over time, windowed to 2000-2020.
# NOTE(review): `x =` relies on partial matching to coord_cartesian()'s
# xlim argument -- spell out xlim= for clarity.
ggplot(all_campaign_docs_stacked, aes(date_clean, coleman.liau.grade)) +
  geom_point() +
  stat_smooth() +
  coord_cartesian(x = c('2000-01-01', '2020-01-01') %>% as.Date())
head(all_campaign_docs_stacked)
# Mean grade level per speaker (View() opens the interactive data viewer).
group_by(all_campaign_docs_stacked, person_name) %>% 
  summarize(
    mean_grade_level = mean(coleman.liau.grade)
  ) %>% View()
|
6d1f1e9f3a0d53c39f078ca228cba2c964673b35 | b0bdd9332eceb12533f4f0f16d2827c1f24c153b | /R/set_post_dir.R | d6a428723272f75d99f83985e295188ebc4ba14a | [] | no_license | eustin/jekyfy | 1eb8fc3d5a010fefbd98dcb3c2e1abfcac4d0322 | 37aaa5a87e928956c4ed9722e28c98fa0f5030d9 | refs/heads/master | 2020-04-17T19:44:18.241081 | 2019-07-06T00:17:35 | 2019-07-06T00:17:35 | 166,876,582 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 89 | r | set_post_dir.R | #' @export
set_post_dir <- function(post_dir) {
  # Record the blog post directory in the BLOG_POST_DIR environment
  # variable; returns Sys.setenv's invisible success flag.
  invisible(Sys.setenv(BLOG_POST_DIR = post_dir))
}
|
5d57b7c45794d9d27190ab5b60d1dbb178f7096d | 400b426c3e3b56b34c38c71d473df223089906ab | /R/is-predictive.R | 86e1dfd4b47846565289774febd520162067c71c | [] | no_license | poissonconsulting/poiscon | fcea48c3e994ff86dfd7cc521aba1842ebb24ce3 | 97007c1f318cfebb21905b8f42e74486984a1970 | refs/heads/master | 2021-06-11T18:47:30.563459 | 2021-02-12T22:56:24 | 2021-02-12T22:56:24 | 12,257,120 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 646 | r | is-predictive.R | #' @title Is Predictive
#'
#' @description
#' Tests whether a prediction data.frame is informative i.e.,
#' has variation in estimate and/or lower/upper.
#'
#' @param x data.frame of predictions to test informative
#' @return logical scalar indicating whether informative
#' @export
is_predictive <- function(x) {
  assert_that(is.data.frame(x))
  assert_that(all(c("estimate", "lower", "upper") %in% colnames(x)))
  # An empty prediction frame cannot be informative.
  if (nrow(x) == 0)
    return(FALSE)
  # Informative iff any of the three columns varies across rows, i.e. some
  # value differs from that column's first entry. The loop replaces three
  # copy-pasted if-blocks; as before, NA values propagate into the if()
  # condition and raise an error rather than being silently treated as
  # constant.
  for (col in c("estimate", "lower", "upper")) {
    if (!all(x[[col]] == x[[col]][1]))
      return(TRUE)
  }
  FALSE
}
|
a7022ba7f9b4bad1fe8e5f2835c77b553be2f34a | c67d7ed2877bbe77bfb1f889ecb121c2153fc201 | /Postproc_code/L227/Stoichiometry/ChlvsP_analysis.R | 60fc888e93e8f1c7f5019c31aad37480714c971b | [] | no_license | biogeochemistry/MyLake_Lake-227 | 732d87f7ee4d1fc75165b85ded3953fd79f18cd5 | d7a11d19e74038c14e97eee398f2e290c0e53535 | refs/heads/master | 2023-08-01T10:50:19.952041 | 2021-08-18T13:30:35 | 2021-08-18T13:30:35 | 106,003,639 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,034 | r | ChlvsP_analysis.R |
# Relationship between suspended P (Susp.P) and chlorophyll-a (chla) in the
# L227 epilimnion during the ice-free season (May-October).
# BUG FIX: filter()/mutate()/%>% below come from dplyr, which was never
# loaded (only ggplot2, and only mid-script); load both up front.
library(dplyr)
library(ggplot2)
setwd("/Users/krsalkgu/Documents/SourceTree/Lake227/Postproc_code/L227/Stoichiometry")
L227chemistry <- read.csv("./IISDELA_L227_chemistry_cleaned_1969-2016.csv")
L227chemistry$Date <- as.Date(L227chemistry$Date, format = "%Y-%m-%d")
L227chemistry <- mutate(L227chemistry, Month = format(L227chemistry$Date, "%m"))
L227chemistry$Month <- as.numeric(L227chemistry$Month)
# Epilimnion samples from May-October, dropping sentinel codes (-1111, -200
# look like missing-value flags -- confirm against the data dictionary) and
# values >= 100.
L227chemistry_epi_icefree <-
  L227chemistry %>%
  filter(Stratum == "EPI") %>%
  filter(Month > 4 & Month < 11) %>%
  filter(Susp.P != -1111 & Susp.P != -200 & Susp.P < 100) %>%
  filter(chla != -1111 & chla != -200 & chla < 100)
# Chlorophyll-a vs suspended P, without and with a month term.
PPvschl <- lm(L227chemistry_epi_icefree$chla ~ L227chemistry_epi_icefree$Susp.P)
summary(PPvschl)
PPvschlbymonth <- lm(L227chemistry_epi_icefree$chla ~ L227chemistry_epi_icefree$Susp.P + L227chemistry_epi_icefree$Month)
summary(PPvschlbymonth)
ggplot(L227chemistry_epi_icefree, aes(x = Susp.P, y = chla, color = Month)) +
  geom_point(size = 0.5) +
  geom_smooth(method = "lm", color = "black", se = FALSE)
|
76885e767f60578e93ccf35101c04c0ea606fecc | c97ae65ab40d030f56a3f72f138cefc7936a30b1 | /svm.R | 2ec6a68133977ae0bf8a6db050a9c8334d28e90b | [] | no_license | abhisara/AnalyticsEdge | b021248e875b04406362fb8227daefa4144d3edd | a8d86858778e8c59da0752cf1b2588e74a175204 | refs/heads/master | 2021-01-01T19:10:46.270011 | 2015-06-22T19:19:14 | 2015-06-22T19:19:14 | 34,015,417 | 0 | 1 | null | 2015-06-22T19:19:14 | 2015-04-15T19:56:02 | R | UTF-8 | R | false | false | 1,242 | r | svm.R | #SVM model for the data.
#Don't use cost 100. Takes forever.
for( i in seq(6,18,3)){
cost = 5 * (i)
kernel = 'radial'
spl = sample.split(dtmTrain$Pop , SplitRatio = 0.7)
train.data = subset(dtmTrain, spl == TRUE)
test.data = subset(dtmTrain , spl == FALSE)
svm.mdl = svm(Pop ~ ., data = train.data , kernel = kernel , cost = cost)
svm.pred = predict(svm.mdl , newdata = test.data)
svm.pred = ifelse(svm.pred < 0, 0, svm.pred)
svm.pred = ifelse(svm.pred > 1, 1, svm.pred)
RMSES_sum = sum(sqrt((svm.pred - test.data$Pop)^2))
print(paste( 'Cost is ' , cost , 'RMSE is ', RMSES_sum))
}
#crossvalidation for svm
set.seed(1)
tune.out = tune(svm, Pop ~ . , data = dtmTrain , kernel = 'radial' , ranges = list(cost = c(0.001,
0.01, 0.1, 1, 5, 10 , 20 )))
c = rbind(dtmTrain[,seq(11,18)] , dtmTest[,seq(10, 17)])
l.test = c[6533:8402,] #Matching the levels
l.t = cbind(dtmTest[,seq(1,9)], l.test)
svm.mdl = svm( Pop ~ . , data = dtmTrain , kernel = 'radial' ,cost = 37)
svm.pred = predict(svm.mdl , newdata = l.t )
svm.pred = ifelse(svm.pred < 0 , 0, svm.pred)
sub$Probability1 = svm.pred
write.csv(sub, 'sub3.csv', row.names = FALSE, quote = FALSE)
|
7432b91d59dffd0bc0cd0ea0f0e5c521cf96cd2e | ab6a33040183119ac2328d1286dca1555dcc5ea7 | /cachematrix.R | b8d3ef406b4064784f6f40cdce1c86ca77443bdf | [] | no_license | CeriMitchell/ProgrammingAssignment2 | 75a3a35d2e65e955ef2c8ccdff844496ec02cc26 | 91a992378184de42220bf15c707fca802e8303ed | refs/heads/master | 2021-01-21T15:56:58.902354 | 2016-03-21T12:20:45 | 2016-03-21T12:20:45 | 54,260,018 | 0 | 0 | null | 2016-03-19T10:23:50 | 2016-03-19T10:23:50 | null | UTF-8 | R | false | false | 1,549 | r | cachematrix.R | ## Caching the Inverse of a Matrix
## Matrix inversion is usually a costly computation and there may be some
## benefit to caching the inverse of a matrix rather than compute it repeatedly.
## FUNCTION: "makeCacheMatrix" creates a speical "matrix" that can cache
## its inverse.
## Here are a pair of functions that are used to create a special object to store
## a matrix and cache its inverse.
## Build a matrix wrapper that can memoise its inverse. Returns a list of
## accessors: set/get for the matrix itself and setinverse/getinverse for
## the cached inverse. Replacing the matrix via set() invalidates the cache.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # new matrix, old inverse no longer valid
    },
    get = function() x,
    setinverse = function(inverse) cached_inverse <<- inverse,
    getinverse = function() cached_inverse
  )
}
## The following function calculates the inverse of the speical "matrix" created
## with the function above. However, it first checks to see if the inverse has
## already been calculated. If so, it gets the inverse from the cache and skips
## the computation. Otherwise, it calculates the inverse of the data and sets
## the value of the inverse in the cache via the setinverse function.
## Return the inverse of the special "matrix" produced by makeCacheMatrix,
## computing it at most once: a cached inverse is returned immediately
## (announced with a console message); otherwise the inverse is computed
## via solve(), stored back into the cache, and returned.
cacheSolve <- function(x, ...) {
  inv <- x$getinverse()
  if (is.null(inv)) {
    inv <- solve(x$get(), ...)
    x$setinverse(inv)
  } else {
    message("getting cached data")
  }
  inv
}
|
6d2aa5017e0cd1c89d14bcc7612a8ee9786c61f1 | a441ac887d892f3999f1051df80989e3bea04941 | /find_behaviorSeqs (Autosaved).R | 2e43af0a20e825a094f4d5bbe4c103eb3af28312 | [] | no_license | FernaldLab/_code | bdad38d072fe3a9a09ba8e5c98591c0caaf73173 | 46a08137095c4c0d8fbe046ea1d740b61714ad47 | refs/heads/master | 2016-09-06T14:14:35.280437 | 2015-07-07T19:43:16 | 2015-07-07T19:43:16 | 38,710,635 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,711 | r | find_behaviorSeqs (Autosaved).R | # count number of times a given sequence is observed in a vector of behaviors
# Count non-overlapping occurrences of a behavior sequence in a vector of
# single-character behavior codes.
#
# Args:
#   behaviorVec: character vector, one behavior code per element.
#   seq: the target sequence as one string, e.g. "abc".
# Returns: list(count = number of non-overlapping matches,
#               pos   = start indices of those matches).
#
# Rewritten to drop the eval(parse(text = ...)) construction of the match
# condition, and to fix an off-by-one in the overlap guard: the original's
# sentinel firstpos = c(0) placed positions 1..len-1 inside a phantom
# "previous match", so any match starting at position 1 was silently skipped.
.countBehaviorSequences = function (behaviorVec, seq) {
  pattern <- strsplit(seq, '')[[1]]
  len <- length(pattern)
  count <- 0
  pos <- numeric(0)
  last_start <- -Inf  # no previous match yet
  n_starts <- max(0L, length(behaviorVec) - len + 1L)
  for (b in seq_len(n_starts)) {
    if (b < last_start + len) {
      next  # would overlap the previously counted match
    }
    if (all(behaviorVec[b:(b + len - 1)] == pattern)) {
      count <- count + 1
      pos <- c(pos, b)
      last_start <- b
    }
  }
  return(list(count = count, pos = pos))
}
# shuffle behavior vector and count occurences of a given sequence to generate a null distribution
# Null distribution for a sequence: shuffle the behavior vector `runs` times
# and record how often `seq` occurs in each shuffle. Prints progress every
# 1000 runs.
.generateNull = function (behaviorVec, seq, runs=1000) {
  vapply(1:runs, function(r) {
    if (r %% 1000 == 0) {
      cat('run ', r, '\n', sep='')
    }
    .countBehaviorSequences(behaviorVec=sample(behaviorVec), seq=seq)$count
  }, numeric(1))
}
# compute a pvalue for how often a given sequence is observed
# Permutation p-value for over-representation of `seq` in `behaviorVec`:
# the fraction of shuffled vectors containing at least as many matches as
# the observed data. Optionally draws the null histogram with the observed
# count marked by a dashed line.
.computeOverrepPval = function (behaviorVec, seq, runs=1000, plot=T) {
  observed <- .countBehaviorSequences(behaviorVec=behaviorVec, seq=seq)
  null_counts <- .generateNull(behaviorVec=behaviorVec, seq=seq, runs=runs)
  p_value <- sum(null_counts >= observed$count) / runs
  if (plot) {
    hist(null_counts, col='grey', border='darkgrey',
         main=paste('p=', p_value, sep=''), xlab='')
    abline(v=observed$count, lty='dashed')
  }
  list(pval=p_value, nullDist=null_counts, actual=observed)
}
# generate a vector of all possible behavior sequences of a given length
### missing some, e.g. no bb if b only appears once in data ###
# Enumerate candidate behavior sequences of length `len` from all position
# combinations of the observed vector, returned as sorted, de-duplicated
# collapsed strings. (A homogeneous run like "bb" only appears if that
# behavior occurs at least `len` times -- see the note above.)
.getPossibleCombos = function (behaviorVec, len) {
  combos <- unique(combn(behaviorVec, len, simplify=F))
  sort(vapply(combos, paste, character(1), collapse=''))
}
# Run the over-representation test for every possible sequence of length
# `len`, printing each sequence as it is processed. Returns an unnamed
# two-element list: a matrix of count / expected count / p-value rows sorted
# by ascending p-value (ties broken by descending count), and the full list
# of per-sequence test results.
.getOverrepCombos = function (behaviorVec, len, runs) {
  possibleSeqs <- .getPossibleCombos(behaviorVec=behaviorVec, len=len)
  results <- vector("list", length(possibleSeqs))
  names(results) <- possibleSeqs
  for (s in seq_along(possibleSeqs)) {
    cat(possibleSeqs[s], ' ')
    results[[s]] <- .computeOverrepPval(behaviorVec=behaviorVec,
                                        seq=possibleSeqs[s], runs=runs, plot=F)
  }
  summary_mat <- cbind(count=sapply(results, function(f) f$actual$count),
                       mean_exp=sapply(results, function(f) mean(f$nullDist)),
                       pval=sapply(results, function(f) f$pval))
  list(summary_mat[order(summary_mat[,3], -summary_mat[,1]), ], results)
}
################################################################################
# Permutation test over all possible length-`len` sequences at once: each of
# `runs` shuffles of behaviorVec is scored against every candidate sequence,
# yielding one null count distribution per sequence. Returns
# list(pvals = data frame of count / expected / p-value rows sorted by
# ascending p-value (ties: descending count), nullDists = the runs-by-
# sequences data frame of permuted counts). Prints progress every 10 runs.
.getOverrepCombos2 = function (behaviorVec, len, runs) {
  candidate_seqs <- .getPossibleCombos(behaviorVec=behaviorVec, len=len)
  null_counts <- as.data.frame(matrix(ncol=length(candidate_seqs), nrow=runs))
  names(null_counts) <- candidate_seqs
  for (r in seq_len(runs)) {
    if (r %% 10 == 0){cat('Run ', r, '\n', sep='')}
    shuffled <- sample(behaviorVec)
    null_counts[r, ] <- vapply(candidate_seqs, function(s) {
      .countBehaviorSequences(behaviorVec=shuffled, seq=s)$count
    }, numeric(1))
  }
  results <- as.data.frame(matrix(ncol=3, nrow=length(candidate_seqs),
                                  dimnames=list(candidate_seqs, c('count','mean_exp','pval'))
                                  )
                           )
  for (s in seq_along(candidate_seqs)) {
    observed <- .countBehaviorSequences(behaviorVec=behaviorVec, seq=candidate_seqs[s])$count
    expected <- mean(null_counts[, s])
    p <- sum(null_counts[, s] >= observed) / runs
    results[s, ] <- c(observed, expected, p)
  }
  list(pvals=results[order(results[,3], -results[,1]), ], nullDists=null_counts)
}
061d7ba4cf007ddeb38098daedfa3e28d10bec09 | 7f72ac13d08fa64bfd8ac00f44784fef6060fec3 | /RGtk2/man/gtkPageSetupNewFromKeyFile.Rd | 4f4fa9983395e6cb65e8662541157789374ea1f7 | [] | no_license | lawremi/RGtk2 | d2412ccedf2d2bc12888618b42486f7e9cceee43 | eb315232f75c3bed73bae9584510018293ba6b83 | refs/heads/master | 2023-03-05T01:13:14.484107 | 2023-02-25T15:19:06 | 2023-02-25T15:20:41 | 2,554,865 | 14 | 9 | null | 2023-02-06T21:28:56 | 2011-10-11T11:50:22 | R | UTF-8 | R | false | false | 1,043 | rd | gtkPageSetupNewFromKeyFile.Rd | \alias{gtkPageSetupNewFromKeyFile}
\name{gtkPageSetupNewFromKeyFile}
\title{gtkPageSetupNewFromKeyFile}
\description{Reads the page setup from the group \code{group.name} in the key file
\code{key.file}. Returns a new \code{\link{GtkPageSetup}} object with the restored
page setup, or \code{NULL} if an error occurred.}
\usage{gtkPageSetupNewFromKeyFile(key.file, group.name, .errwarn = TRUE)}
\arguments{
\item{\verb{key.file}}{the \verb{GKeyFile} to retrieve the page_setup from}
\item{\verb{group.name}}{the name of the group in the key_file to read, or \code{NULL}
to use the default name "Page Setup". \emph{[ \acronym{allow-none} ]}}
\item{.errwarn}{Whether to issue a warning on error or fail silently}
}
\details{Since 2.12}
\value{
A list containing the following elements:
\item{retval}{[\code{\link{GtkPageSetup}}] the restored \code{\link{GtkPageSetup}}}
\item{\verb{error}}{return location for an error, or \code{NULL}. \emph{[ \acronym{allow-none} ]}}
}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
0c0e80472d32891a7299045153bcbb03d520b83a | 02ede3d4e0e67c2df8145a87d09f9f992e99cbe2 | /man/copy_number_density.Rd | abc4b80a31640d4a4e0b71e7484a95f9e6bedbff | [
"MIT"
] | permissive | crukci-bioinformatics/rascal | 1c663f6234d010071f631ede5a24b3d2c9131356 | 8471278a3376bd3f10d713474bceea595c8a3e11 | refs/heads/master | 2023-08-21T00:33:03.681406 | 2021-10-20T08:01:26 | 2021-10-20T08:01:26 | 374,957,938 | 20 | 3 | null | null | null | null | UTF-8 | R | false | true | 1,233 | rd | copy_number_density.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{copy_number_density}
\alias{copy_number_density}
\title{Copy number density estimation}
\usage{
copy_number_density(
copy_numbers,
min_copy_number = NULL,
max_copy_number = NULL,
n = 512
)
}
\arguments{
\item{copy_numbers}{a numeric vector of relative copy number values.}
\item{min_copy_number}{the lower copy number value in the range of values to
estimate the density.}
\item{max_copy_number}{the upper copy number value in the range of values to
estimate the density.}
\item{n}{the number of equally-spaced points for which the density will be
estimated (a smaller number may be returned if \code{min_copy_number} and/or
\code{max_copy_number} are specified).}
}
\value{
a data frame with copy number and density columns
}
\description{
Obtain density estimates for the distribution of the given copy number
values.
}
\examples{
data(copy_number)
copy_number <- copy_number[copy_number$sample == "X17222", ]
density <- copy_number_density(copy_number$segmented)
density <- copy_number_density(copy_number$segmented,
min_copy_number = 0,
max_copy_number = 2.5)
}
|
ac39d77bed6056c165122cfbabead068c25c4366 | d68dce332f3972a749dacf549a8ef14d7f2332bb | /R/desmodels.r | 740d0eeae8990aa0d6dd7e43227cbb549d11d675 | [] | no_license | openfields/dnmix | 31a6ce20a86a06f1eedfbed5f4788413f9aa04e1 | fe94f0c104cd88757dfc796f429b9ad63dc087c3 | refs/heads/master | 2021-01-20T02:53:13.342366 | 2017-10-13T18:39:55 | 2017-10-13T18:39:55 | 101,339,999 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 16,392 | r | desmodels.r | library(jagsUI)
# zero-inflated
sink("zi1.jags")
cat("
model {
omega ~ dunif(0, 1) # zero inflation parameter
beta0 ~ dnorm(0,.1) # intercept
beta1 ~ dnorm(0,.1) # pc1
beta2 ~ dnorm(0,.1) # pc2
beta3 ~ dnorm(0,.1) # pc3
beta4 ~ dnorm(0,.1) # RT
beta5 ~ dnorm(0,.1) # T1
beta6 ~ dnorm(0,.1) # centrality parameter
b4 ~ dnorm(0,.1) # pc1 - detection prob
b5 ~ dnorm(0,.1) # pc2 - detection prob
p0~dnorm(0,.1) # int - detection prob
for(i in 1:nsites){
p[i]<- p0 + b4*cov1[i] + b5*cov2[i]# could have covariates here
mu[i,1]<- p[i]
mu[i,2]<- p[i]*(1-p[i])
mu[i,3]<- p[i]*(1-p[i])*(1-p[i])
pi0[i]<- 1 - mu[i,1]-mu[i,2]-mu[i,3]
pcap[i]<-1-pi0[i]
for(j in 1:3){
muc[i,j]<-mu[i,j]/pcap[i]
}
# 1. model part 1: the conditional multinomial
y[i,1:3] ~ dmulti(muc[i,1:3],ncap[i])
# 2. model for the observed count of uniques
ncap[i] ~ dbin(pcap[i],N[i])
# 3. abundance model
z[i] ~ dbern(omega)
N[i] ~ dpois(lam.eff[i])
lam.eff[i] <- z[i]*lambda[i]
log(lambda[i])<- beta0 + beta1*cov1[i] + beta2*cov2[i] + beta3*cov3[i] + beta4*cov4[i] + beta5*cov5[i]+ beta6*cov6[i]
# fit stats
for (j in 1:3){
eval[i,j] <- p[i]*N[i]
E[i,j] <- pow((y[i,j] - eval[i,j]),2)/(eval[i,j]+0.5)
y.new[i,j] ~ dbin(p[i], N[i])
E.new[i,j] <- pow((y.new[i,j] - eval[i,j]),2)/(eval[i,j]+0.5)
}
}
# sum model fit stats
fit <- sum(E[,])
fit.new <- sum(E.new[,])
}
",fill=TRUE)
sink()
# random site effects
# Write the random-site-effect model to file "re1.jags".
# As above, the cat(" ... ") string is JAGS code written verbatim.
# This variant drops the zero-inflation part (omega/z commented out inside the
# model) and the three PC abundance covariates, replacing them with a normal
# site-level random intercept alpha[j] with half-uniform sd prior sd.alpha.
# Detection and the conditional-multinomial observation model are identical
# to zi1.jags. Note: omega, beta1-beta3 and z do NOT exist in this model, so
# they must not be monitored when fitting it (see parameters.re below).
sink("re1.jags")
cat("
model {
for(j in 1:nsites){
alpha[j] ~ dnorm(0, tau.alpha)
}
tau.alpha <- 1/(sd.alpha*sd.alpha)
sd.alpha ~ dunif(0,5)
#omega ~ dunif(0, 1) # zero inflation parameter
beta0 ~ dnorm(0,.1) # intercept
beta4 ~ dnorm(0,.1) # RT
beta5 ~ dnorm(0,.1) # T1
beta6 ~ dnorm(0,.1) # centrality parameter
b4 ~ dnorm(0,.1) # pc1 - detection prob
b5 ~ dnorm(0,.1) # pc2 - detection prob
p0~dnorm(0,.1) # int - detection prob
for(i in 1:nsites){
p[i]<- p0 + b4*cov1[i] + b5*cov2[i]# could have covariates here
mu[i,1]<- p[i]
mu[i,2]<- p[i]*(1-p[i])
mu[i,3]<- p[i]*(1-p[i])*(1-p[i])
pi0[i]<- 1 - mu[i,1]-mu[i,2]-mu[i,3]
pcap[i]<-1-pi0[i]
for(j in 1:3){
muc[i,j]<-mu[i,j]/pcap[i]
}
# 1. model part 1: the conditional multinomial
y[i,1:3] ~ dmulti(muc[i,1:3],ncap[i])
# 2. model for the observed count of uniques
ncap[i] ~ dbin(pcap[i],N[i])
# 3. abundance model
#z[i] ~ dbern(omega)
N[i] ~ dpois(lambda[i])
#lam.eff[i] <- z[i]*lambda[i]
log(lambda[i])<- beta0 + beta4*cov4[i] + beta5*cov5[i]+ beta6*cov6[i] + alpha[i]
# fit stats
for (j in 1:3){
eval[i,j] <- p[i]*N[i]
E[i,j] <- pow((y[i,j] - eval[i,j]),2)/(eval[i,j]+0.5)
y.new[i,j] ~ dbin(p[i], N[i])
E.new[i,j] <- pow((y.new[i,j] - eval[i,j]),2)/(eval[i,j]+0.5)
}
}
# sum model fit stats
fit <- sum(E[,])
fit.new <- sum(E.new[,])
}
",fill=TRUE)
sink()
# load data
# Merged site-level dataset for both species. NOTE(review): the count columns
# are selected by position; confirm 10:12 and 26:28 still match the CSV layout.
read.csv('./data/dfadma_merge.csv', header=TRUE) -> desdata
# Three-pass count matrices (one column per pass); the JAGS models above
# expect exactly 3 columns, matching y[i,1:3].
desdata[,10:12]-> y.dfus
desdata[,26:28]-> y.dmon
# Missing D. monticola counts are treated as zero captures.
y.dmon[is.na(y.dmon)] <- 0
# -------------------------------------------------------------------------
# fit models for D. fuscus: local habitat + centrality + network structure
# -------------------------------------------------------------------------
nsites <- dim(y.dfus)[1]    # number of sampled sites
ncap.df<-apply(y.dfus,1,sum)   # total D. fuscus captures per site
ymax.df<-ncap.df               # used below to seed initial abundance N
# initial values
inits.df <- function(){
  # Random starting values for each MCMC chain fitting the D. fuscus models.
  # N must start at or above the observed captures (ncap) or the binomial
  # likelihood for ncap is impossible, hence ymax.df + 1.
  # BUG FIX: the zero-inflation indicators z were hard-coded as rep(1, 53);
  # use nsites so the inits stay valid if the site list changes.
  list(p0 = runif(1), beta0 = runif(1, -1, 1), N = ymax.df + 1,
       z = rep(1, nsites))
}
# parameters to monitor
# `parameters` matches the zero-inflated model (zi1.jags); `parameters.re`
# matches the random-effect model (re1.jags), which has no omega or beta1-3.
parameters <- c("N","p0","beta0","beta1","beta2","beta3","beta4","beta5","beta6","omega","b4","b5","fit","fit.new")
parameters.re <- c("N","alpha","p0","beta0","beta4","beta5","beta6","b4","b5","fit","fit.new")
# mcmc settings
# NOTE(review): the autojags() calls below pass n.thin = 2 and rely on
# autojags defaults for burn-in/iterations, so nthin, nb and ni defined here
# appear to be unused -- confirm intent.
nthin<-3
nc<-3
nb<-10000
ni<-30000
t1 <- Sys.time()
# Fit the zero-inflated ("zi1") and random-site-effect ("re1") models for
# D. fuscus once per network-centrality covariate: betweenness ("bcns") and
# closeness at upstream/downstream weight ratios 1:1 through 1:10.
# DRY FIX: this loop replaces 22 near-identical autojags() calls; the objects
# it creates (data.dfa.bc, dfa.zi.bc, dfa.re.bc, data.dfa.c1, dfa.zi.c1, ...)
# keep the original names via assign(). The original bc fits omitted
# Rhat.limit; 1.1 is the autojags default, so passing it explicitly
# everywhere does not change behaviour.
dfa.cov6.cols <- c(bc = "bcns", c1 = "c1n", c2 = "c2n", c3 = "c3n", c4 = "c4n",
                   c5 = "c5n", c6 = "c6n", c7 = "c7n", c8 = "c8n", c9 = "c9n",
                   c10 = "c10n")
for (lab in names(dfa.cov6.cols)) {
  # Bundle the response, sample sizes and standardized covariates for JAGS;
  # only cov6 (the centrality measure) changes between fits.
  dat <- list(y = y.dfus, nsites = nsites, ncap = ncap.df,
              cov1 = scale(desdata$PC1), cov2 = scale(desdata$PC2),
              cov3 = scale(desdata$PC3), cov4 = scale(desdata$rt),
              cov5 = scale(desdata$t1),
              cov6 = scale(desdata[[dfa.cov6.cols[[lab]]]]))
  assign(paste0("data.dfa.", lab), dat)
  assign(paste0("dfa.zi.", lab),
         autojags(dat, inits.df, parameters, "zi1.jags", n.chains = nc,
                  n.thin = 2, parallel = TRUE, Rhat.limit = 1.1))
  assign(paste0("dfa.re.", lab),
         autojags(dat, inits.df, parameters.re, "re1.jags", n.chains = nc,
                  n.thin = 2, parallel = TRUE, Rhat.limit = 1.1))
}
rm(dat, lab)  # drop loop temporaries so the workspace matches the original
# ----------------------------------------------------------------------------
# fit models for D. monticola: local habitat + centrality + network structure
# ----------------------------------------------------------------------------
# initial values
ncap.dm<-apply(y.dmon,1,sum)   # total D. monticola captures per site
ymax.dm<-ncap.dm               # used below to seed initial abundance N
inits.dmzi <- function(){
  # Chain inits for the zero-inflated model; N starts at or above the
  # observed captures so the binomial likelihood for ncap is possible.
  # BUG FIX: z was hard-coded as rep(1, 53); use nsites instead so the inits
  # stay valid if the number of sites changes.
  list(p0 = runif(1), beta0 = runif(1, -1, 1), N = ymax.dm + 1,
       z = rep(1, nsites))
}
# NOTE(review): inits.dm (no z element) is not used by any visible fitting
# call below (they all pass inits.dmzi) -- possibly intended for re1.jags;
# confirm before removing.
inits.dm <- function(){
  list(p0 = runif(1), beta0 = runif(1, -1, 1), N = ymax.dm + 1)
}
# Fit the same model pair for D. monticola, one fit per centrality covariate,
# keeping the original max.iter = 300000 cap on total iterations.
# DRY FIX: replaces 22 near-identical autojags() calls; the created objects
# (data.dma.bc, dma.zi.bc, dma.re.bc, data.dma.c1, ...) keep their names.
dma.cov6.cols <- c(bc = "bcns", c1 = "c1n", c2 = "c2n", c3 = "c3n", c4 = "c4n",
                   c5 = "c5n", c6 = "c6n", c7 = "c7n", c8 = "c8n", c9 = "c9n",
                   c10 = "c10n")
for (lab in names(dma.cov6.cols)) {
  dat <- list(y = y.dmon, nsites = nsites, ncap = ncap.dm,
              cov1 = scale(desdata$PC1), cov2 = scale(desdata$PC2),
              cov3 = scale(desdata$PC3), cov4 = scale(desdata$rt),
              cov5 = scale(desdata$t1),
              cov6 = scale(desdata[[dma.cov6.cols[[lab]]]]))
  assign(paste0("data.dma.", lab), dat)
  assign(paste0("dma.zi.", lab),
         autojags(dat, inits.dmzi, parameters, "zi1.jags", n.chains = nc,
                  n.thin = 2, parallel = TRUE, Rhat.limit = 1.1,
                  max.iter = 300000))
  # BUG FIX: the original monitored `parameters` (which includes omega and
  # beta1-beta3) when fitting re1.jags, but those nodes do not exist in the
  # random-effect model; monitor `parameters.re`, as in the D. fuscus section.
  assign(paste0("dma.re.", lab),
         autojags(dat, inits.dmzi, parameters.re, "re1.jags", n.chains = 3,
                  n.thin = 2, parallel = TRUE, Rhat.limit = 1.1,
                  max.iter = 300000))
}
rm(dat, lab)  # drop loop temporaries so the workspace matches the original
t2 <- Sys.time()
|
a5cedd7b2b3a50b59db1e9598a8173fb23d2879d | 106fea99b04a023ff7f5878bbb0c86bafeca741a | /GEvT_gallery/ui.R | 43e7d6fd5dee26f6b3a7403e02936fd19ed7c869 | [] | no_license | amcrisan/GEvT | e4242edc1b3cc0ec98d97b68c8bc38b79cafc92e | e94c513fc9d4d2487a23c595a0c6143eefd0982a | refs/heads/master | 2021-01-11T20:47:02.788864 | 2017-06-20T23:08:28 | 2017-06-20T23:08:28 | 79,182,345 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,062 | r | ui.R | library(shiny)
library(shinyAce)
library(dplyr)
## Function from Joe Cheng
## https://gist.github.com/jcheng5/5913297
# Build a "More..." link that opens a Bootstrap popover with the given title
# and HTML content. Returns a shiny tagList containing a one-time <head>
# payload (popover activation script + CSS) and the anchor tag itself.
# Adapted from Joe Cheng: https://gist.github.com/jcheng5/5913297
helpPopup <- function(title, content,
                      placement = c('right', 'top', 'left', 'bottom'),
                      trigger = c('click', 'hover', 'focus', 'manual')) {
  # Resolve the (possibly partially matched) option vectors to one value each.
  chosen_placement <- match.arg(placement, several.ok = TRUE)[1]
  chosen_trigger <- match.arg(trigger, several.ok = TRUE)[1]

  # singleton() ensures the activation script/CSS is emitted only once per
  # page even if helpPopup() is used several times.
  popover_setup <- singleton(
    tags$head(
      tags$script("$(function() { $(\"[data-toggle='popover']\").popover()})"),
      tags$style(type = "text/css", ".popover{max-width:500px; position: fixed;}")
    )
  )

  popover_link <- tags$a(
    href = "#", class = "btn btn-link",
    `data-toggle` = "popover", `data-html` = "true",
    title = title, `data-content` = content, `data-animation` = TRUE,
    `data-placement` = chosen_placement,
    `data-trigger` = chosen_trigger,
    "More..."
  )

  tagList(popover_setup, popover_link)
}
# Top-level UI: a sidebar of "What"/"How" filters (the selectors themselves
# are rendered server-side via uiOutput) and a main panel with the catalog
# table and a figure-detail tab. The commented-out tabPanels are retained
# from earlier iterations of the prototype.
shinyUI(fluidPage(
  tags$head(includeScript(file.path("www", "js", "app.js"))#,
            #includeScript(file.path("www", "js", "google-analytics.js"))
  ),
  includeCSS(file.path("www", "css", "app.css")),
  titlePanel("GEviT Prototype"),
  sidebarLayout(
    sidebarPanel(
      id = "sidepanel",
      width = 3,
      h3("About"),
      includeMarkdown("about.md"),
      # Popover with extended background text (see helpPopup above).
      tags$div(id = "popup",
               helpPopup(strong("Additional Information"),
                         includeMarkdown("about-extended.md"),
                         placement = "right", trigger = "click")),
      br(),
      h3("What"),
      uiOutput("whatLevelOne"),
      selectizeInput(inputId="selectWhatLevelTwo",label = "What - Level 2",choices="Show All",selected="Show All",multiple=TRUE),
      br(),
      h3("How"),
      uiOutput("How")
    ),
    mainPanel(
      width = 9,
      tabsetPanel(
        id = "tabset",
        tabPanel("Catalog", tableOutput("mytable")),
        # tabPanel("Figure & Code",
        #          fluidRow(
        #            column(width = 5, imageOutput("figImage", height = "auto")),
        #            column(width = 7,
        #                   aceEditor("fig_and_code",
        #                             value = "Please select a figure" ,
        #                             readOnly = TRUE, height = "450px"),
        #                   htmlOutput("link")))),
        tabPanel("Figure",
                 #br(),
                 #fluidRow(
                 # actionButton("showAnnotations", "Show Annotations")#,
                 #actionButton("annotateGo", "Edit or Add Annotations Tags")
                 #),
                 br(),
                 htmlOutput("figPaper_info"),
                 #htmlOutput("figPaper_annotation"),
                 br(),
                 imageOutput("figImage_only", height = "100%"),
                 br(),
                 h4("GEviT Terms"),
                 dataTableOutput("codeTable"))#,
        #tabPanel("Annotate",htmlOutput("annotate_interface"))#,
        #tabPanel("Paper Info",htmlOutput("figPaper_info"))
        #tabPanel("Code", htmlOutput("code_only"))
      )
    )
  )
))
|
59a9500a1f4f58a140a24de5880538c8f81811ad | 8bda73fe8941318ad0d96f5629fd16c7d8cd5a43 | /Check identity AN.R | 0d21985770ada9004fcb7b9e1d69c839e192356e | [] | no_license | pygmy83/identity---little-owl | 4712eb4eaaca9325e17f202ece66e874c263f463 | bb509d2fd745228fe9470e831e6ac2f397bb136b | refs/heads/master | 2020-03-19T11:43:21.405536 | 2018-06-07T12:55:31 | 2018-06-07T12:55:31 | 136,472,525 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,252 | r | Check identity AN.R | ### PACKAGES NEEDED ######################################################################
##########################################################################################
# Packages required by the workflow below; any that are missing are installed
# first, then all are attached. `out` keeps the logical status that require()
# returns for each package (TRUE = attached successfully).
packages.needed <- c("seewave", "signal", "tuneR", "monitoR","warbleR", "tcltk2", "scales", "testit")
out <- lapply(packages.needed, function(y) {
  # Only contact the repository for packages that are not installed yet.
  if (!y %in% installed.packages()[, "Package"]) install.packages(y)
  # FIX: spell out TRUE; `T` is an ordinary variable that can be reassigned.
  require(y, character.only = TRUE)
})
### MAIN CODE ############################################################################
##########################################################################################
# Interactive workflow: each step depends on the previous one having run.
list_templates <- ChooseTemplates() # choose folder with templates (at least 3 files required in current version)
#templateCutoff(list_templates) <- rep(20,length(templateCutoff(list_templates)))
DetectCalls(list_templates) # detect sound events (calls) with the templates selected in previous step
AnalyzeCalls() # automatic analysis of detected sound events; spectrograms along with measurements are plotted and user enters "y" (measurements are ok) or "n" (there is no call or measurements do not track call properly)
CompareCalls() # plot the measurements from two males / localities in one figure for comparison
PlotCalls() # NOTE(review): undocumented in the original; presumably plots the analyzed calls -- confirm
|
80f4a1698ba11ab0fd86e0a31debcf0e64ab4a0c | ae485055546a565d92909b31b2ac3e10a61a3a27 | /ui.R | b3e4404d1ee895df9edd4a5c5307cdfd467ec8d2 | [] | no_license | RekhaDS/DataProducts | a3b1be09a517da79317969d588bfbf27fd1ad943 | 884fef4e4ce7812e1f2d448bf6781db40c97c5c9 | refs/heads/master | 2021-01-10T16:10:14.009594 | 2015-10-24T07:23:11 | 2015-10-24T07:23:11 | 44,856,537 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,190 | r | ui.R | ##This is a simple web application mainly for Kid-centives, i.e: to inspire their best behavior. But everyone
## in the family participates. Each family member starts with $1 in their account.
##Any time someone gets a ticket 0.35 cents is deducted from their account and
##each reward is rewarded by increasing 0.65 cents in their account.
library(shiny)
# UI definition: a dropdown to pick a family member plus Ticket/Reward action
# buttons; the balance updates are handled server-side (see the header
# comment at the top of this file for the dollar amounts).
shinyUI(fluidPage(
  ##Choose name from the dropdown
  title = 'Kid-centives',
  sidebarLayout(
    sidebarPanel(
      selectInput(
        'chooseName', 'Family Member names', choices = c("Jack",
                                                         "Thomas","Dad",
                                                         "Mom"),
        selectize = FALSE
      ),
      ## Action button for a ticket or a reward
      fluidRow(
        column(3,div(style="display:inline-block",actionButton("ticket", "Ticket"), style="float:right")),
        column(6,div(style="display:inline-block",actionButton("reward", "Reward"), style="float:right")))
    ),
    mainPanel(
      helpText('The table below shows the total dollar amount for each family member'),
      # Rendered server-side as output$table.
      column(6,
             tableOutput('table')
      )
    )
  )
))
c43721a26c0748444113d669a089f43cd8dee88a | 1145da6e1c01508c5d59f52f285702002f92e2d8 | /Dynamic Input test/app.R | d229f9334da1aa1220982fd193b03d98ba826786 | [] | no_license | SSEngland/Shiny | 096bb3997aaace06ef8788e742978aa00198e5fb | 9a015ffe47a8c26fb612be5c7dfd9db1bb3f0f50 | refs/heads/master | 2020-03-22T23:39:22.185665 | 2018-07-17T14:55:08 | 2018-07-17T14:55:08 | 140,821,814 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,378 | r | app.R | library(shiny)
# UI: the requested number of variants drives two dynamically rendered
# output blocks (output$variants / output$lastVariant in the server function).
ui<-shinyUI(pageWithSidebar (
  headerPanel( "Portfolio Returns"),
  sidebarPanel(
    # NOTE(review): value is the string "3" rather than the number 3; confirm
    # a numeric default was not intended here.
    numericInput("assets", label = "Enter Number of variants in Experiment", value="3")
  ),
  mainPanel(
    uiOutput("variants"),
    uiOutput("lastVariant"))
))
# Server: renders a name + weight input pair for every variant except the
# last; the last variant's weight is derived so that all weights sum to 100.
server<-shinyServer( function(input, output, session) {
  output$variants <- renderUI({
    numAssets <- as.integer(input$assets)
    # BUG FIX: seq_len() instead of 1:(numAssets-1), which would iterate over
    # c(1, 0) and render bogus inputs when only one variant is requested.
    lapply(seq_len(numAssets - 1), function(i) {
      list(tags$p(tags$u(h4(paste0("Variant ", i, ":")))),
           textInput(paste0("variant", i), label = "Variant Name", value = paste0("Variant ", i, " name...")),
           numericInput(paste0("weight", i),
                        label = "Proportion allocated (0 - 100)", value = 0)
      )
    })
  })
  output$lastVariant <- renderUI({
    numAssets <- as.integer(input$assets)
    # Remaining allocation = 100 minus the other variants' weights.
    # BUG FIX: initialize x before the loop and use seq_len(); the original
    # `for (j in 1:(numAssets-1))` set x inside the loop and, when
    # numAssets == 1, looped over c(1, 0) and read a non-existent "weight0".
    x <- 100
    for (j in seq_len(numAssets - 1)) {
      x <- x - input[[paste0("weight", j)]]
    }
    tagList(
      tags$p(tags$u(h4(paste0("Variant ", numAssets, ":")))),
      textInput(paste0("variantFinal"), label = "Variant Name", value = paste0("Variant ", numAssets, " name...")),
      tags$p(tags$b("Proportion allocated (0 - 100)")),
      helpText(paste0(x))
    )
  })
})
shinyApp(ui=ui, server=server) |
e96f1835e5e5853b9984a042202bb4dfee4a7218 | cd2f27faac9571f15afaf4c63e90d001b7ed33de | /data-raw/prepare_ilo_test_data.R | c0df38ac7fdfe197fba4b4ff85f1bfd96752b75a | [
"MIT"
] | permissive | EnergyEconomyDecoupling/MWTools | 2430ad483b9bd759088e0a79572ca691ce05e9e4 | a3488a24a850d7e2338307446b66961ec3feb68a | refs/heads/master | 2023-09-04T13:03:10.451579 | 2023-08-20T09:30:56 | 2023-08-20T09:30:56 | 308,628,241 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 644 | r | prepare_ilo_test_data.R | ilo_working_hours_data_path <- PFUSetup::get_abs_paths(version = "v1.2")[["ilo_working_hours_data_path"]]
# Locate the full ILO employment dataset via the project's path helper.
ilo_employment_data_path <- PFUSetup::get_abs_paths(version = "v1.2")[["ilo_employment_data_path"]]
# Subset each dataset to Great Britain ("GBR") only, to keep fixtures small.
ilo_working_hours_test_data <- readr::read_rds(ilo_working_hours_data_path) |>
  dplyr::filter(ref_area == "GBR")
ilo_employment_test_data <- readr::read_rds(ilo_employment_data_path) |>
  dplyr::filter(ref_area == "GBR")
# Write the test fixtures shipped with the package.
# NOTE(review): write.csv() always prepends a row-name column; confirm the
# test readers expect that extra column.
write.csv(x = ilo_working_hours_test_data, file = "inst/extdata/test_data/test_ilo_working_hours_data.csv")
write.csv(x = ilo_employment_test_data, file = "inst/extdata/test_data/test_ilo_employment_data.csv")
|
4ddf2bb6f7a1540f86db945b092fd6717801c8fd | defc646bb56990743469ae0bffa9cc99252c4b8f | /man/AddParSetting.Rd | 49cb865f311b8dbbace180faa44bd7d18f79d0ef | [] | no_license | feiyoung/PRECAST | b1abe112a9d9df173f1e7e9259e92c19a33ea05b | b866f4b940e5815b2f85e8a8f8b2d0824809db39 | refs/heads/main | 2023-06-08T01:01:41.235579 | 2023-06-05T02:10:15 | 2023-06-05T02:10:15 | 500,674,213 | 7 | 3 | null | null | null | null | UTF-8 | R | false | false | 699 | rd | AddParSetting.Rd | \name{AddParSetting}
\alias{AddParSetting}
\title{Add model settings for a PRECASTObj object}
\description{
The main interface function provides several PRECAST submodels, so a model setting is required to be specified in advance for a PRECASTObj object.
}
\usage{
AddParSetting(PRECASTObj, ...)
}
\arguments{
\item{PRECASTObj}{a PRECASTObj object created by \link{CreatePRECASTObject}.}
\item{...}{other arguments to be passed to the \link{model_set} function.}
}
\details{
Nothing
}
\value{
Return a revised PRECASTObj object.
}
\author{
Wei Liu
}
\note{
nothing
}
\seealso{
None
}
\examples{
data(PRECASTObj)
PRECASTObj <-AddParSetting(PRECASTObj)
PRECASTObj@parameterList
} |
3d41ea4eec5954d150c99a2c3efb6f35f7696122 | 4d216630e99eda5974b2655baf8928ca7da754bd | /scripts/ed2/figures/simulation_example.R | 73455c425ef39b5e37782bc1e1fda349e7a634ab | [] | no_license | ashiklom/edr-da | 467861ec61cd8953eb272e2844414a522db7268f | b092600954b73fa064300c6e7b21d0413d115b94 | refs/heads/master | 2021-07-12T18:59:20.190169 | 2021-04-12T14:00:17 | 2021-04-12T14:00:17 | 71,824,349 | 2 | 5 | null | 2018-02-01T13:29:03 | 2016-10-24T19:26:27 | R | UTF-8 | R | false | false | 2,415 | r | simulation_example.R | library(ggplot2)
library(purrr)
library(dplyr)
library(forcats)
library(redr)
library(PEcAnRTM)
library(PEcAn.ED2)
import::from(lubridate, as_date, year, month, mday)
import::from(progress, progress_bar)
import::from(imguR, imgur, imgur_off)
import::from(tidyr, unnest, spread)
ens_dir <- "ensemble_outputs/msp_hf20180402"
date <- "2009-07-02"
ens <- 1
# Run EDR for one site/date/ensemble member.
# date: date string used to locate the history file; site: site code used as
# a pattern to find the site directory under ens_dir; ens: ensemble member
# index; pb: optional progress bar, ticked on exit (even on error).
# Returns whatever run_edr_date() returns for the matched run directory.
run_edr_site <- function(date, site, ens_dir, ens = 1, pb = NULL) {
  # Advance the progress bar no matter how this function exits.
  on.exit(if (!is.null(pb)) pb$tick())

  matched_sites <- list.files(ens_dir, site, full.names = TRUE)
  stopifnot(length(matched_sites) == 1)

  run_dir <- file.path(matched_sites, sprintf("ens_%03d", ens))
  stopifnot(file.exists(run_dir))
  out_dir <- file.path(run_dir, "out")
  stopifnot(file.exists(out_dir))

  # Require exactly one history file for the requested date (existence check).
  matched_history <- list.files(out_dir, date, full.names = TRUE)
  stopifnot(length(matched_history) == 1)

  ed2in <- read_ed2in(file.path(run_dir, "ED2IN"))
  ed2in$RK4_TOLERANCE <- 1e-5
  run_edr_date(date, ed2in, readRDS(file.path(run_dir, "trait_values.rds")))
}
# Site codes to simulate, one per line in the input file.
sites <- readLines("other_site_data/selected_sites")
pb <- progress_bar$new(total = length(sites))
# Run EDR once per site; safely() captures per-site errors instead of aborting.
site_out <- map(
  sites, safely(run_edr_site),
  date = date, ens = ens, ens_dir = ens_dir, pb = pb
)
# Keep only the successful runs (result is NULL when a site errored).
s2 <- transpose(site_out)
spec <- s2$result %>% setNames(sites) %>% discard(is.null)
# Long format: one row per site x wavelength (400-2500 nm).
tidyspec <- spec %>%
  imap(~tibble(site = .y, waves = 400:2500, refl = .x)) %>%
  bind_rows()
# Full-spectrum reflectance figure, uploaded to imgur; i2$link is the URL.
i1 <- imgur("png", width = 5, height = 5, units = "in", res = 300)
ggplot(tidyspec) +
  aes(x = waves, y = refl, color = fct_reorder(site, refl, max, .desc = TRUE)) +
  geom_line() +
  labs(color = "Site code", x = "Wavelength", y = "Reflectance") +
  scale_color_brewer(palette = "Dark2") +
  theme_bw() +
  theme(legend.position = c(0.95, 0.95), legend.justification = c(1, 1))
i2 <- imgur_off(i1)
i2$link
# Summarize spectra per site with spec2landsat() using the sensor.rsr
# response functions (presumably band convolution -- confirm).
data("sensor.rsr")
lsat <- tidyspec %>%
  group_by(site) %>%
  summarize(data = list(spec2landsat(refl))) %>%
  unnest()
l7 <- lsat %>%
  filter(landsat == "landsat7")
# Landsat-7 band reflectance figure, also uploaded to imgur.
i1 <- imguR::imgur("png", width = 5, height = 5, units = "in", res = 300)
ggplot(l7) +
  aes(x = wavelength, y = value, color = fct_reorder(site, value, max, .desc = TRUE)) +
  geom_line() +
  labs(color = "Site code", x = "Wavelength", y = "Reflectance") +
  scale_color_brewer(palette = "Dark2") +
  theme_bw() +
  theme(legend.position = c(0.95, 0.95), legend.justification = c(1, 1))
i2 <- imguR::imgur_off(i1)
i2$link
|
b57997a98fcd84c6e9d30acdc5be2b0652568c69 | 64b780d93dfea46200ab6d17baa23e43a719eec5 | /man/data_its.Rd | 210212fa67aae27937011690a2d5d48c822a5629 | [
"Artistic-2.0"
] | permissive | mengdy0217/yyeasy | 06a90c121e2894f8448242865a109f0dee57849b | 422d95dfb95f368769dfe71895aa9fece5b6ce5a | refs/heads/master | 2023-08-21T15:12:33.681990 | 2021-10-15T05:12:55 | 2021-10-15T05:12:55 | 417,439,100 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 234 | rd | data_its.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{data_its}
\alias{data_its}
\alias{its}
\title{ITS data}
\description{
This is fungal microbial diversity data.
}
\keyword{data}
|
0876f4caad2043a105374d4493bc63c2f1809f9d | 2f6d7a99ce3155d2c635c39013a0da1418208b40 | /man/loadGithub.Rd | 60c5c71cedff0731b12005a69090ddf9175ee80d | [
"MIT"
] | permissive | oganm/ogbox | c75eb1d8f4df00be214731e085e6c19e141992cc | ba99a46487836af5ab4fb5b013bc92cf35ad8e95 | refs/heads/master | 2020-04-04T07:07:27.383911 | 2019-07-29T23:00:12 | 2019-07-29T23:00:12 | 37,562,559 | 5 | 1 | null | null | null | null | UTF-8 | R | false | true | 621 | rd | loadGithub.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sourceWeb.R
\name{loadGithub}
\alias{loadGithub}
\title{Load an Rdata file from a URL}
\usage{
loadGithub(githubPath, branch = "master", envir = parent.frame(),
token = NULL)
}
\arguments{
\item{githubPath}{character. username/repository/pathToFile}
\item{branch}{which branch to source from}
\item{envir}{the environment where the data should be loaded.}
}
\value{
A character vector of the names of objects created, invisibly.
}
\description{
Load an Rdata file from a URL
}
\seealso{
\code{\link{loadURL}}, \code{\link{sourceGithub}}
}
|
f5866a0b160dc16243c5bedcbddfa81b5c107a6e | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/fs/examples/path_sanitize.Rd.R | 352ba2ffb7d6750b9e56a944fcb84a5cc227e64d | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 280 | r | path_sanitize.Rd.R | library(fs)
### Name: path_sanitize
### Title: Sanitize a filename by removing directory paths and invalid
### characters
### Aliases: path_sanitize
### ** Examples
# potentially unsafe string
# "\u0001" is a control character and "~/" resolves to the home directory;
# both should be neutralized by path_sanitize() (see ?path_sanitize).
str <- "~/.\u0001ssh/authorized_keys"
path_sanitize(str)
path_sanitize("..")
|
68e7a4760e9f8836d42f7f5a217af655c9ae74af | 626f48ccf21f6fbbce20dfccb39374f497a2e79a | /NeuralNetwork1/R/Neurualnetwork.R | da744d2fb08ca554416e7a725b08da85412a5497 | [] | no_license | gotlr/CS499-machinelearning | cf8f5bca5f759726984c60fee4fe63c855bd4cb3 | 1e508fb5167b64041271ebfefe6d436f214eb509 | refs/heads/master | 2020-05-03T19:22:05.558837 | 2019-04-10T06:45:10 | 2019-04-10T06:45:10 | 178,783,035 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,493 | r | Neurualnetwork.R | #'Neural networks for regression and binary classification
#'
#'Training by using a neural network with gradient descent
#'(real numbers for regression, probabilities for binary classification).
#'
#'@param X.mat (feature matrix, n_observations x n_features)
#'@param y.vec (label vector, n_observations x 1)
#'@param max.iterations (int scalar > 1)
#'@param step.size
#'@param n.hidden.units (number of hidden units)
#'@param is.train (logical vector of size n_observations,
#'TRUE if the observation is in the train set, FALSE for the validation set)
#'
#'@return pred.mat (n_observations x max.iterations matrix of predicted values or n x k)
#'@return W.mat:final weight matrix(n_features+1 x n.hidden.units or p+1 x u)
#'@return v.vec: final weight vector (n.hidden.units+1 or u+1).
#'@return predict(testX.mat):
#'a function that takes a test features matrix and returns a vector of predictions
#' (real numbers for regression, probabilities for binary classification)
#' The first row of W.mat should be the intercept terms;
#' the first element of v.vec should be the intercept term.
#'
#' @export
#'
#' @examples
#' data(ozone, package = "ElemStatLearn")
#' y.vec <- ozone[, 1]
#' X.mat <- as.matrix(ozone[,-1])
#' num.train <- dim(X.mat)[1]
#' num.feature <- dim(X.mat)[2]
#' X.mean.vec <- colMeans(X.mat)
#' X.std.vec <- sqrt(rowSums((t(X.mat) - X.mean.vec) ^ 2) / num.train)
#' X.std.mat <- diag(num.feature) * (1 / X.std.vec)
#' X.scaled.mat <- t((t(X.mat) - X.mean.vec) / X.std.vec)
NNetIterations <- function(X.mat,y.vec,max.iterations,step.size,n.hidden.units,is.train){
#NNetIterations <- function(X.mat,y.vec,max.iterations,step.size,n.hidden.units){
if(!all(is.matrix(X.mat),is.numeric(X.mat))){
stop("X.mat must be a numberic matrix!")
}
if (!all(is.vector(y.vec), is.numeric(y.vec),length(y.vec) == nrow(X.mat))) {
stop("y.vec must be a numeric vector of the same number of rows as X.mat!")
}
if(!all(max.iterations>=1, is.integer(max.iterations))){
stop("max.iterations must be an interger greater or equal to 1!")
}
if(!all(is.numeric(step.size), 0<step.size, step.size<1)){
stop("step.size must be a number between 0 and 1!")
}
if(!all(n.hidden.units>=1, is.integer(n.hidden.units))){
stop("n.hidden.units must be an interger greater or equal to 1!")
}
if(!all(is.logical(is.train), length(is.train)==nrow(X.mat))){
stop("is.train must be a logical vector of the same number of rows as X.mat!")
}
if(length(unique(y.vec))==2){is.binary = 1
}else{is.binary = 0}
n.observations <- nrow(X.mat)
n.features <- ncol(X.mat)
#find(split) the train set and validation set
train.index = which(is.train==TRUE)
validation.index = which(is.train!=TRUE)
X.train = X.mat[train.index,]
y.train = y.vec[train.index]
X.validation = X.mat[validation.index,]
y.validation = y.vec[validation.index]
#compute a scaled input matrix, which has mean=0 and sd=1 for each column
X.scaled.train = scale(X.train,center = TRUE,scale = TRUE)
X.scaled.validation = scale(X.validation,center = TRUE,scale = TRUE)
X.scaled.mat = scale(X.mat,center = TRUE,scale = TRUE)
pred.mat = matrix(0,n.observations, max.iterations)
v.mat = matrix(runif((n.features+1)*n.hidden.units),n.features+1,n.hidden.units)
w.vec = runif(n.hidden.units+1)
w.gradient=rep(0,n.hidden.units+1)
v.gradient=matrix(0,n.features+1,n.hidden.units)
sigmoid = function(x){
return(1/(1+exp(-x)))
}
desigmoid=function(x){
return(sigmoid(x)/(1-sigmoid(x)))
}
for(iteration in 1:max.iterations){
X.a.mat = (cbind(1,X.scaled.train))%*%v.mat
X.z.mat = sigmoid(X.a.mat)
#X.b.vec = X.z.mat %*% v.vec + interception.vec
X.b.vec = as.numeric((cbind(1,X.z.mat)) %*% w.vec)
#z.temp = X.z.mat * (1-X.z.mat)
if(is.binary){
##binary classification
#pred.mat[train.index,iteration] = sigNoid(cbind(1,sigmoid(cbind(1,X.scaled.train)%*%v.mat))%*%w.vec)
pred.mat[,iteration] = sigmoid(cbind(1,sigmoid(cbind(1,X.scaled.mat)%*%v.mat))%*%w.vec)
y.tilde.train = y.train
y.tilde.train[which(y.tilde.train==0)] = -1 # change y into non-zero number
delta.w = -y.tilde.train*sigmoid(-y.tilde.train*X.b.vec)
delta.v = delta.w * (X.z.mat * (1-X.z.mat)) * matrix(w.vec[-1],nrow(X.z.mat * (1-X.z.mat)) , ncol(X.z.mat * (1-X.z.mat)))
}else{
##if regression
#pred.mat[train.index,iteration] = cbind(1,sigmoid(cbind(1,X.scaled.train)%*%v.mat))%*%w.vec
#pred.mat[validation.index,iteration] = cbind(1,sigmoid(cbind(1,X.scaled.validation)%*%v.mat))%*%w.vec
pred.mat[,iteration] = cbind(1,sigmoid(cbind(1,X.scaled.mat)%*%v.mat))%*%w.vec
delta.w = X.b.vec - y.train
delta.v = delta.w * (X.z.mat * (1-X.z.mat)) * matrix(w.vec[-1],nrow(X.z.mat * (1-X.z.mat)) , ncol(X.z.mat * (1-X.z.mat)))
#delta.v = diag(as.vector(delta.w))%*%desigmoid(X.a.mat)%*%diag(as.vector(w.vec[-1]))
#
}
w.gradient = (t(cbind(1,X.z.mat))%*%delta.w)/n.observations
v.gradient = (t(cbind(1,X.scaled.train))%*%delta.v)/ n.observations
w.vec = w.vec - step.size*as.vector(w.gradient)
v.mat = v.mat - step.size*v.gradient
}
result.list = list(
pred.mat = pred.mat,
v.mat = v.mat,
w.vec = w.vec,
prediction = function(testX.mat){
if(is.binary){
prediction.vec = sigmoid(cbind(1,sigmoid(cbind(1,testX.mat)%*%v.mat))%*%w.vec)
}else{
prediction.vec = cbind(1,sigmoid(cbind(1,testX.mat)%*%v.mat))%*%w.vec
}
return (prediction.vec)
}
)
return(result.list)
}
#' a function using nerual network through cross validation
#'
#' use K-fold cross validation based on the folds IDs provided in fold.vec(randomly)
#'
#' for each validarion/train split, use NNetIterations to compute the predictions
#' for all observations
#'
#' compute mean.validation.loss.vec, which is a vector(with max.iterations elements)
#' of mean validation loss over all K folds
#'
#' comput mean.train.loss.vec, analogous to above but for the train data
#'
#' minimize the mean validation loss to determine selected.steps,
#' the optimal number of steps/iterations
#'
#' finally use NNetIteration(max.iterations=selected.steps) on the whole training data set
#'
#' @param X.mat : n x p
#' @param y.vec : vector n x 1
#' @param fold.vec : number of validation/training sets
#' fold.vec = samole(1:n.folds,length(y.vec))
#' @param max.iterations
#' @param step.size
#' @n.hidden.units
#' @n.folds = 4
#'
#' @return mean.validation.loss
#' @return mean.train.loss.vec
#' @return selected.steps
NNetEarlyStoppingCV <-
function(X.mat, y.vec,fold.vec,max.iterations,step.size,n.hidden.units,n.folds = 4){
#fold.vec = sample(rep(1:n.folds), length(y.vec),TRUE) in test file
mean.train.loss.vec = rep(0,max.iterations)
mean.validation.loss.vec = rep(0,max.iterations)
is.train = rep(TRUE,length(y.vec))
for(fold.number in 1:n.folds){
is.train[which(fold.vec == fold.number)] = FALSE
is.train[which(fold.vec != fold.number)] = TRUE
#X.scaled.mat = scale(X.train,center = TRUE,scale = TRUE)
#
train.index = which(is.train==TRUE)
validation.index = which(is.train!=TRUE)
X.train = X.mat[train.index,]
y.train = y.vec[train.index]
X.validation = X.mat[validation.index,]
y.validation = y.vec[validation.index]
return.list = NNetIterations(X.mat,y.vec,max.iterations,step.size,n.hidden.units,is.train)
prediction.train = return.list$pred.mat[train.index,]
prediction.validation = return.list$pred.mat[validation.index,]
mean.train.loss.vec = mean.train.loss.vec + colMeans(abs(prediction.train - y.train))
mean.validation.loss.vec = mean.train.loss.vec + colMeans(abs(prediction.validation - y.validation))
}
mean.train.loss.vec = mean.train.loss.vec / 4
mean.validation.loss.vec = mean.validation.loss.vec / 4
selected.steps = which.min(mean.validation.loss.vec)
result.list = list(
mean.train.loss.vec = mean.train.loss.vec,
mean.validation.loss.vec = mean.validation.loss.vec,
selected.steps = selected.steps
)
return(result.list)
}
|
7e13cb3086aec86196876af6efd6170a03751fd3 | ca90f55bcf23d4dd7580ff25f6fa342956e00cfc | /Gramener Case Study/Gramener Case Study.R | 0bd972692e51d251477475c8983329757d47dc8c | [] | no_license | ashsureshwin/Data-Science-Projects-in-R | 1b1035006c5cd30be5993f6ad7a709c2971d23a8 | a40f86853c288ecad2bde8fa9e2411d257e12dc0 | refs/heads/master | 2020-03-26T23:54:21.349390 | 2018-10-02T04:44:04 | 2018-10-02T04:44:04 | 145,576,757 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 21,319 | r | Gramener Case Study.R | ###################################################
# EDA Case Study Assignment #
###################################################
# Group members
# D Mruthyunjaya Kumar (Facilitator) - Roll Number - DDA1730298
# Dharmanandana Reddy Pothula
# Ashwin Suresh
# Manohar Shanmugasundaram
# Import the required libraries
library(ggplot2)
library(tidyr)
library(dplyr)
library(stringr)
library(gridExtra)
library(caret)
library(PerformanceAnalytics)
# Read the data from the data file provided for the case study
loan <- read.csv("loan.csv", stringsAsFactors = FALSE)
###############################
# Data Cleaning
###############################
# 1. Select only the required fields into a new data frame
# After analysis, we identified the following field neccessary fields to be considered for this case study
# and all the other fields are ignored.
# The logic for the column rejection are below.
# 1. Ignored all the columns only have 'NA' values
#------------------------------------------------------------------------------
# Treatment of Na and selection of columns #
#------------------------------------------------------------------------------
# check the distribution of NA
barplot(colMeans(is.na(loan)))
# Remove columns with NA more than 20%
dat1 <- loan[, colMeans(is.na(loan)) <= .2]
dim(dat1)
barplot(colMeans(is.na(dat1)))
# Remove Zero and Near Zero-Variance columns as they cannot impact the other variables
nzv <- nearZeroVar(dat1)
dat2 <- dat1[, -nzv]
dim(dat2)
barplot(colMeans(is.na(dat2)))
# 2. Ignored all the columns which are related to customer payments (since these details will not help for this analysis)
# 3. Ignored the other fields like zip_code, emp_title, URL, etc, as these not related to this analysis
# Selecting the subset of records after removing the above mentioned variables
loan_dt <- subset(dat2, select = c(loan_amnt, term, int_rate, grade, sub_grade, emp_length,
home_ownership, annual_inc, verification_status, loan_status, dti, pub_rec,
total_acc, open_acc, purpose, installment, revol_util, revol_bal))
barplot(colMeans(is.na(loan_dt))) # NA are completely removed
dim(loan_dt)
# 2. Check for duplicate records.
nrow(unique(loan_dt))
### Result - No duplicates records found, since the unique record count matches the total count
###########################################
# Outlier Identification and removal #
###########################################
# box plot to check for outliers
loan_dt %>%
filter(!is.na(emp_length)) %>%
ggplot(aes(x=emp_length, y=annual_inc)) + geom_boxplot() +
theme(axis.text.x = element_text(angle = 90, hjust = 1))
# Remove the outlier from the dataset
loan_dt<-loan_dt[!(loan_dt$annual_inc >= 1000000.0 & loan_dt$annual_inc <= 6000000.0),]
# box plot after removing the outliers
loan_dt %>%
filter(!is.na(emp_length)) %>%
ggplot(aes(x=emp_length, y=annual_inc)) + geom_boxplot() +
theme(axis.text.x = element_text(angle = 90, hjust = 1))
###########################################
# Univarite & Derived Metrics Analysis #
###########################################
###########################
# Correlation Analysis #
###########################
#-----------------------------------------------------------------------------------
#filter data for defaulters
defaulters <- loan_dt %>% filter(loan_dt$loan_status == "Charged Off")
numeric_data<-defaulters[sapply(defaulters,is.numeric)]
chart.Correlation(numeric_data, histogram=TRUE, pch=23,main="corr_Hist_scatter_density for Defaulters")
#correlation matrix
corr_df<-as.data.frame(cor(numeric_data))
print(corr_df)
View(corr_df)
# Data cleaning - Remove additional characters texts from the below variables for numeric analysis and grouping
loan_dt$int_rate = as.numeric(gsub("\\%", "", loan_dt$int_rate))
loan_dt$term = as.numeric(gsub("\\months", "", loan_dt$term))
loan_dt$revol_util = as.numeric(gsub("\\%", "", loan_dt$revol_util))
#######################################
# Derived Variables #
#######################################
# 1. Derving a new column for default or not, based on the loan status.
# This field will be useful for applying correlation
default_flag <- function(loan_status){
if(loan_status=="Charged Off"){
out = 1
}else{
out = 0
}
return(out)
}
# invoke the function using lapply
loan_dt$default <- lapply(loan_dt$loan_status,default_flag)
# convert the field to numeric
loan_dt$default <- as.numeric(loan_dt$default)
##################################################
# 2. Creating a bin as below for the interest rate
# Group Interest rate
# ----- -------------
# Low int_rate < 10
# Medium int_rate >=10 and < 15
# High int_rate >= 15
# Initialise the variable
int_rate_grp <- function(int_rate){
if (int_rate < 10){
out = "Low"
}else if(int_rate >= 10 & int_rate < 15){
out = "Medium"
}else if(int_rate >= 15){
out = "High"
}
return(out)
}
# invoke the function using lapply
loan_dt$int_rate_group <- lapply(loan_dt$int_rate,int_rate_grp)
loan_dt$int_rate_group <- as.character(loan_dt$int_rate_group)
##############################################
# 3. Create a bin based on the customer income
# Income group Annual income
# ------------- --------------
# <=25 thousand <= 25000
# 25 to 50 thousand > 25000 and <= 50000
# 50 to 75 thousand > 50000 and <= 75000
# 75 to 1 million > 75000 and <= 100000
# 1 to 2 million > 100000 and <= 200000
# 2 to 10 million > 200000 and <= 1000000
# 10 to 60 million > 1000000 and <= 6000000
loan_dt$annual_inc_grp <- cut((loan_dt$annual_inc),
breaks=c(0,25000,50000,75000,100000,200000,1000000,6000000),
labels=c("<=25 thousand","25 to 50 thousand","50 to 75 thousand","75 to 1 million","1 to 2 million",
"2 to 10 million","10 to 60 million"),include.lowest=T, na.rm = TRUE)
##############################################
# 4. Create a bin based on the installment
# Installment Group Installment
# ----------------- --------------
# <=200 <= 200
# 200 to 500 > 200 and <= 500
# 500 to 750 > 500 and <= 750
# 750 to 1000 > 750 and <= 1000
# 1000 to 1500 > 1000 and <= 1500
loan_dt$installment_grp <- cut((loan_dt$installment),
breaks=c(0,200,500,750,1000,1500),
labels=c("<=200","200 to 500","500 to 750","750 to 1000","1000 to 1500"),
include.lowest=T, na.rm = TRUE)
##############################################
# 5. Create a bin based on the dti
# dti group dti
# ----------------- --------------
# <=5 <= 5
# 5 to 10 > 5 and <= 10
# 10 to 15 > 10 and <= 15
# 15 to 20 > 15 and <= 20
# 20 to 25 > 20 and <= 25
# 25 to 30 > 25 and <= 30
loan_dt$dti_grp <- cut((loan_dt$dti),
breaks=c(0,5,10,15,20,25,30),
labels=c("<=5","5 to 10","10 to 15","15 to 20","20 to 25","25 to 30"),
include.lowest=T, na.rm = TRUE)
##############################################
# 6. Create a bin based on the revol_util
# revol util grp revol_util
# ----------------- --------------
# <=25 <= 25
# 25 to 50 > 25 and <= 50
# 50 to 75 > 50 and <= 75
# 55 to 100 > 75 and <= 100
loan_dt$revol_util_grp <- cut((loan_dt$revol_util),
breaks=c(0,25,50,75,100),
labels=c("<=25","25 to 50","50 to 75","75 to 100"),
include.lowest=T, na.rm = TRUE)
#######################################
# Univariate Analysis #
#######################################
############### Univariate Analysis on CATEGORICAL VARIABLES ##########
############### 1. BAR plor for term ####################################
ggplot(data=loan_dt,aes(as.factor(term))) +
geom_bar(color=I('black'),fill=I('#56B4E9')) +
ggtitle("Bar Plot for Term") +
geom_text(stat='count',aes(label=..count..),vjust=-1,size=3)
# Insight from plot - The more loans are with 36 months term
############### 2. BAR plot for home ownership ####################################
ggplot(data=loan_dt,aes(home_ownership)) +
geom_bar(color=I('black'),fill=I('#56B4E9')) +
ggtitle("Bar Plot for Home Ownership") +
geom_text(stat='count',aes(label=..count..),vjust=-1,size=3) + labs(x = "home ownership") +
theme(axis.text.x = element_text(angle = 90, hjust = 1))
# Insight from plot - The major home ownership is with 'Mortgage' & 'Rent' almost 90%
############### 3. BAR plot for grade ####################################
ggplot(data=loan_dt,aes(grade)) +
geom_bar(color=I('black'),fill=I('#56B4E9')) +
ggtitle("Bar Plot for Grade") + geom_text(stat='count',aes(label=..count..),vjust=-1)
# Insight from plot - The Grades 'A', 'B', 'C' and 'D' have more loans
############### 4. BAR plot for Employment Length ####################################
ggplot(data=loan_dt,aes(emp_length)) + geom_bar(color=I('black'),fill=I('#56B4E9'))+
ggtitle("Bar Plot for Employment Length") + geom_text(stat='count',aes(label=..count..),vjust=-1,size=3) +
theme(axis.text.x = element_text(angle = 90, hjust = 1))
# Insight from plot - More laons are taken by employees with 0 to 5 years and 10+ years of experience
############### BAR plot for Purpose ####################################
ggplot(data=loan_dt,aes(purpose))+geom_bar(color=I('black'),fill=I('#56B4E9'))+
ggtitle("Bar Plot for Purpose")+geom_text(stat='count',aes(label=..count..),vjust=-1,size=3) +
theme(axis.text.x = element_text(angle = 90, hjust = 1))
# Insight from plot - The loan purposes of 'debt_consolidation', 'credit_card', 'Other' and 'small_business'
# have move loans.
############### BAR plot for Verificaiton status ####################################
ggplot(data=loan_dt,aes(verification_status)) + geom_bar(color=I('black'),fill=I('#56B4E9')) +
ggtitle("Bar Plot for Purpose") + geom_text(stat='count',aes(label=..count..),vjust=-1)
# Insight from plot - The verification status 'Not Verified' have more loans but this is not a significant number
##########################################################################
# BIVARIATE ANALYSIS - CATEGORICAL & CONTINUOUS VARIABLES #
##########################################################################
# 1. BAR plot for Annual Income group ~ loan default
loan_dt %>%
ggplot(aes(x=annual_inc_grp, fill=as.factor(default))) +
geom_bar(position = 'dodge') +
labs(x = "Annual Income Group") + labs(y = "Loan Count") +
theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
scale_fill_discrete(name="Loan Default", breaks=c("0", "1"), labels=c("Non defaulter", "Defaulter"))
# Insight from plot - The more loans are taken by customers with an annual income of 25 thousand and 75 thousand
# and hence the number defaulters are more in this income group.
# 2. BAR plot for grade ~ loan default
ggplot(loan_dt, aes(x=grade, fill=as.factor(default))) + geom_bar(position = 'dodge') +
scale_fill_discrete(name="Loan Default",breaks=c("0", "1"),labels=c("Non defaulter", "Defaulter"))
# Insight from plot - The more number of defaulters are with grades 'B', 'C' and 'D'.
# 3. BAR plot for sub grade ~ loan default
ggplot(loan_dt, aes(x=sub_grade, fill=as.factor(default))) +
theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
geom_bar(position = 'dodge') +
scale_fill_discrete(name="Loan Default",breaks=c("0", "1"),labels=c("Non defaulter", "Defaulter"))
# Insight from plot - This confirms the sub grades within grade 'B', 'C' and 'D' have more dafaulters
#######################################################################################
### Note: Here on all the plots and analysis will be done on the defaulter subset, ###
### where default == 1 (loan_status = 'Charged Off') ###
#######################################################################################
# 4. BAR plot for Grade ~ Interest Rate Group for the defaulters
loan_dt %>%
filter(default == 1) %>%
ggplot(aes(x=grade,fill=as.factor(int_rate_group))) + geom_bar() +
ggtitle("Grade ~ Interest Rate Group") + scale_fill_discrete(name="Interest Rate Group")
# Insight from plot - Interest Rate Group 'High' and 'Low' are having defaulter loans in grades 'B', 'C' and 'D'.
# Hence, these 2 variables are definitely driver variables for defaulter indentification.
# 5. BAR plot for Purpose ~ Interest Rate Group for the defaulters
loan_dt %>%
filter(default == 1) %>%
ggplot(aes(x=purpose,fill=int_rate_group)) +
geom_bar(stat="count",position = "dodge",col="black") +
ggtitle("Purpose ~ Interest Rate Group") +
theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
scale_fill_discrete(name="Interest Rate Group")
# Insight from plot - The loan purposes of 'debt_consolidation', 'credit_card', 'Other' and 'small_business'
# have move loans. Hence this variable with these values are a driver for default identification
# 6. BAR plot for Home Ownership ~ Interest Rate Group for the defaulters
loan_dt %>%
filter(default == 1) %>%
ggplot(aes(x=home_ownership,fill=int_rate_group)) +
geom_bar(stat="count",position = "dodge",col="black") +
ggtitle("Home Ownership ~ Interest Rate Group") +
theme(axis.text.x = element_text(angle = 90, hjust = 1))
# Insight from plot - The major home ownership is with 'Mortgage' & 'Rent' almost 90%
# 7. BAR plot for Loan Amount ~ Interest Rate Group for the defaulters
loan_dt %>%
filter(default == 1) %>%
ggplot() + geom_bar(aes(x = loan_amnt, fill = int_rate_group), stat = "bin", position = "stack", bins = 30) +
ggtitle("Loan Amount ~ Interest Rate Group") + scale_fill_discrete(name="Interest Rate Group")
# Insight from plot - Loan amount from 100 to 25000 have more defaulters
# 8. BAR plot for Monthly Installments ~ Home Ownership for the defaulters
loan_dt %>%
filter(default == 1) %>%
ggplot() + geom_bar(aes(x = installment, fill = home_ownership), stat = "bin", position = "stack", bins = 30) +
ggtitle("Installments ~ Home Ownership") + scale_fill_discrete(name="Home Ownership")
# Insight from plot - The major home ownership is with 'Mortgage' & 'Rent' and installments between 100 and 500
# have more defaulters
# 10 Bar plot for pub_rec ~ default
ggplot(loan_dt, aes(x=pub_rec, fill=as.factor(default))) + geom_bar(position = 'dodge')
# Insight from plot - Customer with zero public records are having more defaults. This implies bank already rejects
# customer with any public record history for loan application
#########################################
# Analysis for driver variables #
#########################################
loan_dt %>%
filter(default == 1 & emp_length != 'n/a') %>%
ggplot(aes(x=annual_inc_grp, fill=as.factor(emp_length))) +
geom_bar(position = 'dodge') +
ggtitle("Annual Income Group ~ Employee length for defaulters") +
labs(x = "Annual Income Group") + labs(y = "Default Count") +
theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
scale_fill_discrete(name="Employee Length")
# Insight from plot - Annual income 25 to 50 thousand & 50 to 70 thousand are the major driver for default.
# Additionally employee length of 0 to 5 and 10+ are have more default records
loan_dt %>%
filter(default == 1) %>%
ggplot(aes(x=purpose,fill=int_rate_group)) +
geom_bar(stat="count",position = "dodge") +
ggtitle("Purpose ~ Interest Rate Group for defaulters") +
theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
scale_fill_discrete(name="Interest Rate Group")
# Insight from plot - The loan purposes of 'debt_consolidation', 'credit_card', 'Other' and 'small_business'
# have move loans with interest rate 'Medium' and 'High' bins. Hence these variables can be considered as driver variables
# for defaulter identification
loan_dt %>%
filter(default == 1) %>%
ggplot(aes(x=grade, fill=grade)) +
geom_bar() +
ggtitle("Grade analysis for defaulters")
# Insight from plot - The Grades B', 'C' and 'D' have more defaulters. Hence this can considered for
# defaulter identification
loan_dt %>%
filter(default == 1) %>%
ggplot(aes(x=sub_grade, fill=sub_grade)) +
geom_bar() +
ggtitle("Grade analysis for defaulters") +
theme(axis.text.x = element_text(angle = 90, hjust = 1))
# Insight from plot - The sub grades of 'B', 'C' and 'D' have more defaulters.
loan_dt %>%
filter(default == 1) %>%
ggplot(aes(x=emp_length)) +
geom_bar(fill='blue') +
ggtitle("Employee length for defaulters") +
theme(axis.text.x = element_text(angle = 90, hjust = 1))
# Insight from plot - Employee length of 0 to 5 and 10+ are have more default records
loan_dt %>%
filter(default == 1) %>%
ggplot(aes(x=annual_inc_grp,fill=installment_grp)) +
geom_bar(position = 'dodge') +
ggtitle("Installment ~ Annual income for defaulters") +
labs(x = "Annual Income Group") +
scale_fill_discrete(name="Installment Group") +
theme(axis.text.x = element_text(angle = 90, hjust = 1))
# Insight from plot - Installment with 200 to 750 have more defaulters. Hence this variable can be considered
# for the defaulter indentiifcation.
loan_dt %>%
filter(default == 1) %>%
ggplot(aes(x=annual_inc_grp,fill=as.factor(term))) +
geom_bar(position = 'dodge') + scale_fill_discrete(name="Term") +
ggtitle("Annual income ~ term for defaulters") + labs(x = "Annual Income Group") +
theme(axis.text.x = element_text(angle = 90, hjust = 1))
# Insight from plot - Term 36 has more defaulters for annual income 25 to 50k and near equal for 50 to 75k for 36
# and 60 months term
# Bar plot for Home Ownership ~ Interest Rate Group for the defaulters
loan_dt %>%
filter(default == 1) %>%
ggplot(aes(x=home_ownership,fill=int_rate_group)) + geom_bar(stat="count",position = "dodge",col="black") +
ggtitle("Home Ownership ~ Interest Rate Group")
# Bar plot for Home Ownership ~ Installments for the defaulters
loan_dt %>%
filter(default == 1) %>%
ggplot(aes(x=installment,fill=home_ownership)) + geom_bar(stat = "bin",bins=30, position = "stack") +
ggtitle("Installment ~ Home Ownership ") + scale_fill_discrete(name="Home Ownership")
###########################################
# Plots for final analysis and conclusion
##########################################
plot1 <- loan_dt %>%
filter(default == 1 & annual_inc >= 0 & annual_inc <= 100000) %>%
ggplot(aes(x=annual_inc)) +
ggtitle("Annual income") + labs(x = "Annual Income") +
geom_histogram(fill='brown',bins = 30)
plot2 <- loan_dt %>%
filter(default == 1) %>%
ggplot(aes(x=int_rate)) +
ggtitle("Interest rate") + labs(x = "Interest rate") +
geom_histogram(fill='brown',bins = 20)
plot3 <- loan_dt %>%
filter(default == 1) %>%
ggplot(aes(x=installment)) +
ggtitle("Installement") + labs(x = "Installment") +
geom_histogram(fill='brown',bins = 70)
plot4 <- loan_dt %>%
filter(default == 1) %>%
ggplot(aes(x=grade, fill=grade)) +
geom_bar() +
ggtitle("Grade analysis")
plot5 <- loan_dt %>%
filter(default == 1 & emp_length != 'n/a') %>%
ggplot(aes(x=emp_length)) +
geom_bar(fill='blue') + labs(x = "Employee Length") +
ggtitle("Employee Length") +
theme(axis.text.x = element_text(angle = 90, hjust = 1))
plot6 <- loan_dt %>%
filter(default == 1) %>%
ggplot(aes(x=purpose)) +
geom_bar(fill='blue') + theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
ggtitle("Purpose")
plot7 <- loan_dt %>%
filter(default == 1 & (!is.na(revol_util_grp))) %>%
ggplot(aes(x=purpose,fill=revol_util_grp)) +
geom_bar() + scale_fill_discrete(name="Revolving Util Group") +
ggtitle("Purpose ~ Revolving Util") +
theme(axis.text.x = element_text(angle = 90, hjust = 1))
plot8 <- loan_dt %>%
filter(default == 1) %>%
ggplot(aes(x=home_ownership,fill=int_rate_group)) + geom_bar(stat="count",position = "dodge",col="black") +
ggtitle("Home Ownership ~ Interest Rate Group")
# Display all the plots in the single page
grid.arrange(plot1,plot2,plot3,plot4,plot5,plot6,plot7,plot8, ncol = 3, nrow=3, top = "EDA analysis for defaulters")
######################### END ####################
|
b16337bfda1433e49e88c5f47bee96fd847788a4 | 8d40916cbb1f3ec4958b999a4d90015f7dc69ad8 | /Tutorial_3/Code/Part_1.r | 7732fdaa1a66af2ca417a9c22a777137b4d7350a | [] | no_license | sambittarai/Big-Data-and-Machine-Learning-in-Finance-and-Economics-ECON2333- | 398766a9b6cb5cff02738df1940110a479ffbcbe | 3e9812ee81c4a03eb50dfd52d3df9a80450176c1 | refs/heads/main | 2023-02-12T09:55:59.419761 | 2021-01-13T08:43:34 | 2021-01-13T08:43:34 | 328,915,267 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,058 | r | Part_1.r | #Set the working directory
#Change bck slash to forward slash, if needed
setwd("H:/Tut3")
library(ISLR)
View(Carseats)
summary(Carseats$ShelveLoc) #3 categories
str(Carseats$ShelveLoc) #is a factor (Categorical variable)
attach(Carseats)
str(ShelveLoc)
#Lets regress sales on all other variables plus some interaction terms
lm.fit = lm(Sales ~ . + Income:Advertising + Price:Age, data = Carseats)
summary(lm.fit)
contrasts(ShelveLoc) #Tells us how the factor is converted to dummy variables
# ******* WRITING FUNCTION / ALSO LOOPS **********
#Lets make a function that does y=x^2 for us
myxsquared = function(x){
y = x^2
print("The Calculation is done... the result is ...")
return(y)
}
myxsquared(99)
#Lets make a function that loads libraries for us
loadlibraries = function(){
library(ISLR)
library(MASS)
print("The libraries ISLR and MASS are now loaded")
}
loadlibraries()
#Lets do a loop
mydata = c(10, 15, 8, 2, 1, 2)
N = length(mydata)
myoutput = rep(0, N) #rep - repeat
for (i in 1:N){
myoutput[i] = mydata[i]^2
}
|
fc9557e1bab9600679adc87a26527d635b9ecccb | 14eb2308ee5a6d020466a09e47715b84518dd325 | /man/LoadMultivar.Rd | 049bf5f23dc4bacd0d874c883326544d7ba03f3f | [] | no_license | TimKroeger13/AWIPackage | 19b2b12fb48692854c0fec53411a5a3909567dc8 | fcc44258331031e912c0dec327621a73a025c39b | refs/heads/main | 2023-04-08T16:47:49.305768 | 2021-04-05T14:14:38 | 2021-04-05T14:14:38 | 340,125,960 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 921 | rd | LoadMultivar.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LoadMultivar.R
\name{LoadMultivar}
\alias{LoadMultivar}
\title{LoadMultivar}
\usage{
LoadMultivar(
data,
sep = ",",
DataAsPercent = T,
SartOfData = 4,
Comparison = 2
)
}
\arguments{
\item{data}{Name of the dataset in CSV format.}
\item{sep}{Separator in the CSV.}
\item{DataAsPercent}{Bool indicating whether data should be convertet to percent or not.}
\item{SartOfData}{Number indicating the Collum where the Diatom data stats.}
\item{Comparison}{Number indicating the Collum where the Depth or Age is located.}
}
\value{
Returns a Dataframe.
}
\description{
Loads Diatom data for Multivariate statistics
}
\note{
This function has only been developed for the Alfred Wegener Institute Helmholtz Centre for Polar and Marine Research and should therefore only be used in combination with their database.
}
\author{
Tim Kröger
}
|
23ad1493a8fad11f2383ca5108c1ced72873e2cc | e979752c4498c5edf47791d8b7eaafb2730524bf | /sim20032009/figs/proj13abr/plottingSAT.R | 3593c2401cad132d36e67719a82d9134569ee1af | [] | no_license | ClaudiaGEscribano/aod_and_PV | 0bf4d6c7398351aebdef7b9f9538246c5ee2bd49 | 77eaa4e454ce4ec4ec784795e2e89b8945bc1737 | refs/heads/master | 2021-03-27T15:41:09.158241 | 2018-12-10T14:58:39 | 2018-12-10T14:58:39 | 83,782,692 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 20,492 | r | plottingSAT.R | library(raster)
library(rasterVis)
library(maps)
library(maptools)
library(mapdata)
library(rgdal)
## load the cmsaf daily data.
## datos del satélite en lat/lon
SIS <- stack("../data/SAT/SISdm20032009_med44.nc", varname='SIS')
idx <- seq(as.Date("2003-01-01"), as.Date("2009-12-31"), 'day')
SIS <- setZ(SIS, idx)
latsis <- init(SIS, v='y')
lonsis <- init(SIS, v='x')
## raster de la máscara tierra/mar. La proyección de esta máscara es LCC.
mycrs <- CRS("+proj=lcc +lat_1=43 +lat_2=43 +lat_0=43 +lon_0=15 +k=0.684241 +units=m +datum=WGS84 +no_defs")
mascara <- raster("masque_terre_mer.nc", varname='zon_new')
maslat <- raster("masque_terre_mer.nc", varname='lat')
maslon <- raster("masque_terre_mer.nc", varname='lon')
pmaslat <- rasterToPoints(maslat)
pmaslon <- rasterToPoints(maslon)
maslonlat <- cbind(pmaslon[,3], pmaslat[,3])
# Specify the lonlat as spatial points with projection as long/lat
maslonlat <- SpatialPoints(maslonlat, proj4string = CRS("+proj=longlat +datum=WGS84"))
maslonlat
extent(maslonlat)
pmaslonlat <- spTransform(maslonlat, CRSobj = mycrs)
# Take a look
pmaslonlat
extent(pmaslonlat)
projection(mascara) <- mycrs
extent(mascara) <- extent(pmaslonlat)
## Una vez que tenemos el raster de la máscara con la extensión y la proyección bien definida, proyectamos el raster dl satélite en lat lon a la nueva proyección.
newproj <- projectExtent(mascara, mycrs)
SISproy <- projectRaster(SIS, newproj)
SISproy <- setZ(SISproy, idx)
## Compute yearly means for plotting.
year <- function(x) as.numeric(format(x, '%y'))
## Aggregate the daily satellite stack into yearly means (one layer per year).
SISy <- zApply(SISproy, by=year, fun='mean')
## Multi-year (2003-2009) mean of the satellite radiation.
SISym <- mean(SISy)
## Keep land cells only: mask value 0 marks sea in masque_terre_mer.nc.
SISym <- mask(SISym, mascara, maskvalue=0)
## Build a graticule (meridian/parallel lines) for plotting in the LCC CRS.
library(graticule)
lons <- seq(-20, 50, by=10)
lats <- seq(25, 55, by=5)
## optionally, specify the extents of the meridians and parallels
## here we push them out a little on each side
xl <- range(lons) + c(-0.4, 0.4)
yl <- range(lats) + c(-0.4, 0.4)
## build the lines with our precise locations and ranges
grat <- graticule(lons, lats, proj = mycrs,
                  xlim = xl, ylim = yl)
## Labels, anchored along the second meridian and second parallel.
labs <- graticule_labels(lons, lats,
                         xline = lons[2],
                         yline = lats[2],
                         proj = mycrs)
## Split the labels into longitude and latitude subsets for plotting.
labsLon <- labs[labs$islon,]
labsLat <- labs[!labs$islon,]
## Overlay country borders on the map.
## NOTE(review): `crs.lonlat` was used below but never defined in this script;
## define it explicitly as the geographic CRS used throughout.
crs.lonlat <- CRS("+proj=longlat +datum=WGS84")
ext <- as.vector(extent(projectExtent(SISym, crs.lonlat)))
#boundaries <- map('worldHires', fill=TRUE, exact=FALSE, xlim=ext[1:2], ylim= ext[3:4], plot=FALSE)
#boundaries$names
boundaries <- map('worldHires', fill=TRUE, exact=FALSE, plot=FALSE)
## Keep only the country part of composite names like "France:Corsica".
IDs <- vapply(strsplit(boundaries$names, ":"), function(x) x[1], character(1))
## BUG FIX: the original line ended in stray text
## (`... proj4string=mycrs) CRS(projection(SISproy)))`), a syntax error.
## map() coordinates are lon/lat, so build the polygons in the geographic
## CRS first and then reproject them to the LCC grid for overlay.
boundaries_ll <- map2SpatialPolygons(boundaries, IDs = IDs,
                                     proj4string = crs.lonlat)
boundaries_sp <- spTransform(boundaries_ll, mycrs)
## Coerce polygons to lines for drawing on top of levelplot().
## The original `as(SpatialLines, boundaries_sp)` had the arguments
## reversed (the author noted "no funciona"); sp's coercion signature is
## as(object, "TargetClass").
border <- as(boundaries_sp, "SpatialLines")
## Yearly-mean satellite map with graticule overlay.
pdf("media_sat_anual.pdf")
## Display the raster
levelplot(SISym) +
## and the graticule
layer(sp.lines(grat)) +
layer(sp.text(coordinates(labsLon),
txt = parse(text = labsLon$lab),
adj = c(1.1, -0.25),
cex = 0.6)) +
layer(sp.text(coordinates(labsLat),
txt = parse(text = labsLat$lab),
adj = c(-0.25, -0.25),
cex = 0.6))
dev.off()
## MODEL (C-AER simulation): daily surface downwelling shortwave radiation.
rsds <- stack("../data/C-AER/rsds_day_20032009.nc")
idx <- seq(as.Date("2003-01-01"), as.Date("2009-12-31"), 'day')
rsds <- setZ(rsds, idx)
## Rebuild the model raster's geometry from its stored lon/lat coordinate layers:
rsdslat <- raster("../data/C-AER/rsds_day_20032009.nc", varname='lat')
rsdslon <- raster("../data/C-AER/rsds_day_20032009.nc", varname='lon')
prsdslat <- rasterToPoints(rsdslat)
prsdslon <- rasterToPoints(rsdslon)
rsdslonlat <- cbind(prsdslon[,3], prsdslat[,3])
# Specify the lonlat as spatial points with projection as long/lat
## FIX: the original passed maslonlat (the mask's coordinates) here, discarding
## the rsdslonlat matrix built just above — a copy-paste slip; the analogous
## C-NO block below correctly uses its own coordinate matrix.
rsdslonlat <- SpatialPoints(rsdslonlat, proj4string = CRS("+proj=longlat +datum=WGS84"))
rsdslonlat
extent(rsdslonlat)
prsdslonlat <- spTransform(rsdslonlat, CRSobj = mycrs)
# Take a look
prsdslonlat
extent(prsdslonlat)
## Assign the projected extent to the model stack.
extent(rsds) <- extent(prsdslonlat)
## Yearly means of the C-AER simulation, masked like the satellite data.
rsdsy <- zApply(rsds, by=year, fun='mean')
rsdsYm <- mean(rsdsy)
rsdsYm <- mask(rsdsYm, mascara, maskvalue=0)
pdf("rsds_caer_yearlyMean_20032009.pdf")
levelplot(rsdsYm) +
## and the graticule
layer(sp.lines(grat)) +
layer(sp.text(coordinates(labsLon),
txt = parse(text = labsLon$lab),
adj = c(1.1, -0.25),
cex = 0.6)) +
layer(sp.text(coordinates(labsLat),
txt = parse(text = labsLat$lab),
adj = c(-0.25, -0.25),
cex = 0.6))
dev.off()
## YEARLY DIFFERENCE BETWEEN THE MODEL (C-AER) AND THE SATELLITE:
diferencia_caer_sat <- rsdsYm-SISym
diferencia_sat_caer <- SISym -rsdsYm
pdf("diferencia_rsds_caer_sat_yearlyMean_20032009.pdf")
levelplot(diferencia_caer_sat, par.settings=RdBuTheme) +
## and the graticule
layer(sp.lines(grat)) +
layer(sp.text(coordinates(labsLon),
txt = parse(text = labsLon$lab),
adj = c(1.1, -0.25),
cex = 0.6)) +
layer(sp.text(coordinates(labsLat),
txt = parse(text = labsLat$lab),
adj = c(-0.25, -0.25),
cex = 0.6))
dev.off()
## Relative differences, each normalised by its own reference field.
dif_rel_caer_sat <- diferencia_caer_sat/rsdsYm
dif_rel_sat_caer <- diferencia_sat_caer/SISym
pdf("dif_rel_rsds_caer_sat_yearlyMean_20032009.pdf")
levelplot(dif_rel_caer_sat, par.settings=RdBuTheme) +
## and the graticule
layer(sp.lines(grat)) +
layer(sp.text(coordinates(labsLon),
txt = parse(text = labsLon$lab),
adj = c(1.1, -0.25),
cex = 0.6)) +
layer(sp.text(coordinates(labsLat),
txt = parse(text = labsLat$lab),
adj = c(-0.25, -0.25),
cex = 0.6))
dev.off()
## DIFFERENCE BETWEEN THE SATELLITE AND THE C-NO SIMULATION
rsdsno <- stack("../data/C-NO/rsds_no_day_20032009.nc")
idx <- seq(as.Date("2003-01-01"), as.Date("2009-12-31"), 'day')
rsdsno <- setZ(rsdsno, idx)
## Rebuild the model raster's geometry from its stored lon/lat layers:
rsdsnolat <- raster("../data/C-NO/rsds_no_day_20032009.nc", varname='lat')
rsdsnolon <- raster("../data/C-NO/rsds_no_day_20032009.nc", varname='lon')
prsdsnolat <- rasterToPoints(rsdsnolat)
prsdsnolon <- rasterToPoints(rsdsnolon)
rsdsnolonlat <- cbind(prsdsnolon[,3], prsdsnolat[,3])
# Specify the lonlat as spatial points with projection as long/lat
rsdsnolonlat <- SpatialPoints(rsdsnolonlat, proj4string = CRS("+proj=longlat +datum=WGS84"))
rsdsnolonlat
extent(rsdsnolonlat)
prsdsnolonlat <- spTransform(rsdsnolonlat, CRSobj = mycrs)
# Take a look
prsdsnolonlat
extent(prsdsnolonlat)
extent(rsdsno) <- extent(prsdsnolonlat)
## Yearly means of the C-NO simulation.
rsdsyno <- zApply(rsdsno, by=year, fun='mean')
rsdsYmno <- mean(rsdsyno)
rsdsYmno <- mask(rsdsYmno, mascara, maskvalue=0)
## Yearly-mean differences between the C-NO simulation and the satellite.
diferencia_cno_sat <- rsdsYmno-SISym
diferencia_sat_cno <- SISym - rsdsYmno
pdf("diferencia_rsds_cno_sat_yearlyMean_20032009.pdf")
levelplot(diferencia_cno_sat, par.settings=RdBuTheme) +
## and the graticule
## FIX: removed the stray leading "+" this line originally carried; after the
## trailing "+" above it made the expression parse as a unary plus on the layer.
layer(sp.lines(grat)) +
layer(sp.text(coordinates(labsLon),
txt = parse(text = labsLon$lab),
adj = c(1.1, -0.25),
cex = 0.6)) +
layer(sp.text(coordinates(labsLat),
txt = parse(text = labsLat$lab),
adj = c(-0.25, -0.25),
cex = 0.6))
dev.off()
## Relative differences for the C-NO comparison.
dif_rel_cno_sat <- diferencia_cno_sat/rsdsYmno
dif_rel_sat_cno <- diferencia_sat_cno/SISym
pdf("dif_rel_rsds_cno_sat_yearlyMean_20032009.pdf")
levelplot(dif_rel_cno_sat, par.settings=RdBuTheme) +
## and the graticule
layer(sp.lines(grat)) +
layer(sp.text(coordinates(labsLon),
txt = parse(text = labsLon$lab),
adj = c(1.1, -0.25),
cex = 0.6)) +
layer(sp.text(coordinates(labsLat),
txt = parse(text = labsLat$lab),
adj = c(-0.25, -0.25),
cex = 0.6))
dev.off()
## RELATIVE DIFFERENCES COMBINED IN A SINGLE FIGURE
s <- stack(diferencia_caer_sat, diferencia_cno_sat)
names(s) <- c("CAER-SAT","CNO-SAT")
s1 <- stack(diferencia_sat_caer, diferencia_sat_cno)
names(s1) <- c("SAT-CAER","SAT-CNO")
## Diverging palette centred on zero: derive class breaks from the data range,
## then map each break midpoint onto a symmetric RdBu colour ramp.
div.pal <- brewer.pal(n=11, 'RdBu')
rng <- range(s1[], na.rm=TRUE)
nInt <- 13
inc0 <- diff(rng)/nInt
n0 <- floor(abs(rng[1])/inc0)
inc <- abs(rng[1])/(n0+1/2)
n1 <- ceiling((rng[2]/inc-1/2)+1)
breaks <- seq(rng[1],by=inc,length=n0+1+n1)
idxx <- findInterval(s1[], breaks, rightmost.closed=TRUE)
mids <-tapply(s1[], idxx,median)
mx <- max(abs(breaks))
## Map a value x in [-mx, mx] onto the diverging colour ramp (0.5 = centre).
break2pal <- function(x,mx,pal){
y <- 1/2*(x/mx+1)
rgb(pal(y), maxColorValue=255)
}
divRamp <-colorRamp(div.pal)
pal <- break2pal(mids, mx, divRamp)
pdf("dif_rel_caer_cno_sat20032009.pdf")
levelplot(s, par.settings=rasterTheme(region=pal)) +
## and the graticule
layer(sp.lines(grat)) +
layer(sp.text(coordinates(labsLon),
txt = parse(text = labsLon$lab),
adj = c(1.1, -0.25),
cex = 0.6)) +
layer(sp.text(coordinates(labsLat),
txt = parse(text = labsLat$lab),
adj = c(-0.25, -0.25),
cex = 0.6))
dev.off()
pdf("dif_rel_sat_caer_cno_20032009.pdf")
levelplot(s1, par.settings=rasterTheme(region=pal)) +
## and the graticule
layer(sp.lines(grat)) +
layer(sp.text(coordinates(labsLon),
txt = parse(text = labsLon$lab),
adj = c(1.1, -0.25),
cex = 0.6)) +
layer(sp.text(coordinates(labsLat),
txt = parse(text = labsLat$lab),
adj = c(-0.25, -0.25),
cex = 0.6))
dev.off()
## The three yearly means side by side.
yearlyMean <- stack(SISym, rsdsYm, rsdsYmno)
names(yearlyMean) <- c("SAT","C-AER","C-NO")
pdf("rsds_yearly_mean20032009.pdf")
levelplot(yearlyMean) +
## and the graticule
layer(sp.lines(grat)) +
layer(sp.text(coordinates(labsLon),
txt = parse(text = labsLon$lab),
adj = c(1.1, -0.25),
cex = 0.6)) +
layer(sp.text(coordinates(labsLat),
txt = parse(text = labsLat$lab),
adj = c(-0.25, -0.25),
cex = 0.6))
dev.off()
## ANNUAL CYCLE: monthly climatologies for satellite and both simulations.
library(zoo)
month <- function(x) as.numeric(format(x, '%m'))
SISm <- zApply(SISproy, by=month, fun='mean')
names(SISm) <- month.abb
SISm <- mask(SISm, mascara, maskvalue=0)
## Annual cycle of C-AER.
rsdsm <- zApply(rsds, by=month, fun='mean')
names(rsdsm) <- month.abb
rsdsm <- mask(rsdsm, mascara, maskvalue=0)
## Annual cycle of C-NO.
rsdsnom <- zApply(rsdsno, by=month, fun='mean')
names(rsdsnom) <- month.abb
rsdsnom <- mask(rsdsnom, mascara, maskvalue=0)
## Plot each monthly climatology (12 panels per figure).
pdf("ciclo_anual_rsds_caer_20032009.pdf")
levelplot(rsdsm) +
## and the graticule
layer(sp.lines(grat)) +
layer(sp.text(coordinates(labsLon),
txt = parse(text = labsLon$lab),
adj = c(1.1, -0.25),
cex = 0.3)) +
layer(sp.text(coordinates(labsLat),
txt = parse(text = labsLat$lab),
adj = c(-0.25, -0.25),
cex = 0.3))
dev.off()
pdf("ciclo_anual_rsds_cno_20032009.pdf")
levelplot(rsdsnom) +
## and the graticule
layer(sp.lines(grat)) +
layer(sp.text(coordinates(labsLon),
txt = parse(text = labsLon$lab),
adj = c(1.1, -0.25),
cex = 0.3)) +
layer(sp.text(coordinates(labsLat),
txt = parse(text = labsLat$lab),
adj = c(-0.25, -0.25),
cex = 0.3))
dev.off()
pdf("ciclo_anual_rsds_sat_20032009.pdf")
levelplot(SISm) +
## and the graticule
layer(sp.lines(grat)) +
layer(sp.text(coordinates(labsLon),
txt = parse(text = labsLon$lab),
adj = c(1.1, -0.25),
cex = 0.3)) +
layer(sp.text(coordinates(labsLat),
txt = parse(text = labsLat$lab),
adj = c(-0.25, -0.25),
cex = 0.3))
dev.off()
## Differences of the two simulations against the satellite and each other:
## DIF C-AER-SAT / DIF C-NO-SAT
## Relative difference of the annual cycle, with the satellite as reference:
rel_dif_cicloAnual_sat_caer<- (SISm - rsdsm)/SISm
## Diverging palette (same construction as for the yearly figures above).
div.pal <- brewer.pal(n=11, 'RdBu')
rng <- range(rel_dif_cicloAnual_sat_caer[], na.rm=TRUE)
nInt <- 13
inc0 <- diff(rng)/nInt
n0 <- floor(abs(rng[1])/inc0)
inc <- abs(rng[1])/(n0+1/2)
n1 <- ceiling((rng[2]/inc-1/2)+1)
breaks <- seq(rng[1],by=inc,length=n0+1+n1)
idxx <- findInterval(rel_dif_cicloAnual_sat_caer[], breaks, rightmost.closed=TRUE)
mids <-tapply(rel_dif_cicloAnual_sat_caer[], idxx,median)
mx <- max(abs(breaks))
## Map a break midpoint onto the diverging ramp (redefined identically each time).
break2pal <- function(x,mx,pal){
y <- 1/2*(x/mx+1)
rgb(pal(y), maxColorValue=255)
}
divRamp <-colorRamp(div.pal)
pal <- break2pal(mids, mx, divRamp)
pdf("rel_dif_cicloAnual_sat_caer.pdf")
levelplot(rel_dif_cicloAnual_sat_caer, par.settings=rasterTheme(region=pal)) +
## and the graticule
layer(sp.lines(grat)) +
layer(sp.text(coordinates(labsLon),
txt = parse(text = labsLon$lab),
adj = c(1.1, -0.25),
cex = 0.3)) +
layer(sp.text(coordinates(labsLat),
txt = parse(text = labsLat$lab),
adj = c(-0.25, -0.25),
cex = 0.3))
dev.off()
## Relative difference against the C-NO simulation.
rel_dif_cicloAnual_sat_cno<- (SISm - rsdsnom)/SISm
rng <- range(rel_dif_cicloAnual_sat_cno[], na.rm=TRUE)
nInt <- 13
inc0 <- diff(rng)/nInt
n0 <- floor(abs(rng[1])/inc0)
inc <- abs(rng[1])/(n0+1/2)
n1 <- ceiling((rng[2]/inc-1/2)+1)
breaks <- seq(rng[1],by=inc,length=n0+1+n1)
idxx <- findInterval(rel_dif_cicloAnual_sat_cno[], breaks, rightmost.closed=TRUE)
mids <-tapply(rel_dif_cicloAnual_sat_cno[], idxx,median)
mx <- max(abs(breaks))
break2pal <- function(x,mx,pal){
y <- 1/2*(x/mx+1)
rgb(pal(y), maxColorValue=255)
}
divRamp <-colorRamp(div.pal)
pal <- break2pal(mids, mx, divRamp)
pdf("rel_dif_cicloAnual_sat_cno.pdf")
levelplot(rel_dif_cicloAnual_sat_cno, par.settings=rasterTheme(region=pal)) +
## and the graticule
layer(sp.lines(grat)) +
layer(sp.text(coordinates(labsLon),
txt = parse(text = labsLon$lab),
adj = c(1.1, -0.25),
cex = 0.3)) +
layer(sp.text(coordinates(labsLat),
txt = parse(text = labsLat$lab),
adj = c(-0.25, -0.25),
cex = 0.3))
dev.off()
## Both annual cycles plotted together:
s <- stack(rel_dif_cicloAnual_sat_caer, rel_dif_cicloAnual_sat_cno)
rng <- range(s[], na.rm=TRUE)
nInt <- 13
inc0 <- diff(rng)/nInt
n0 <- floor(abs(rng[1])/inc0)
inc <- abs(rng[1])/(n0+1/2)
n1 <- ceiling((rng[2]/inc-1/2)+1)
breaks <- seq(rng[1],by=inc,length=n0+1+n1)
idxx <- findInterval(s[], breaks, rightmost.closed=TRUE)
mids <-tapply(s[], idxx,median)
mx <- max(abs(breaks))
break2pal <- function(x,mx,pal){
y <- 1/2*(x/mx+1)
rgb(pal(y), maxColorValue=255)
}
divRamp <-colorRamp(div.pal)
pal <- break2pal(mids, mx, divRamp)
pdf("rel_dif_cicloAnual_sat_caer_cno.pdf")
levelplot(s, par.settings=rasterTheme(region=pal)) +
## and the graticule
layer(sp.lines(grat)) +
layer(sp.text(coordinates(labsLon),
txt = parse(text = labsLon$lab),
adj = c(1.1, -0.25),
cex = 0.3)) +
layer(sp.text(coordinates(labsLat),
txt = parse(text = labsLat$lab),
adj = c(-0.25, -0.25),
cex = 0.3))
dev.off()
## Clamp extreme negative relative differences before re-plotting.
## NOTE(review): the original comment said "below -1" but the code clamps at -0.8.
s[s[] < -0.8] <- -0.8
pdf("rel_dif_cicloAnual_sat_caer_cnoFiltered.pdf")
levelplot(s, par.settings=rasterTheme(region=pal)) +
## and the graticule
layer(sp.lines(grat)) +
layer(sp.text(coordinates(labsLon),
txt = parse(text = labsLon$lab),
adj = c(1.1, -0.25),
cex = 0.3)) +
layer(sp.text(coordinates(labsLat),
txt = parse(text = labsLat$lab),
adj = c(-0.25, -0.25),
cex = 0.3))
dev.off()
## Also clamp the per-simulation comparisons (at -1 here):
rel_dif_cicloAnual_sat_cno[rel_dif_cicloAnual_sat_cno[] < -1] <- -1
pdf("rel_dif_cicloAnual_sat_cnoFiltered.pdf")
levelplot(rel_dif_cicloAnual_sat_cno, par.settings=rasterTheme(region=pal)) +
## and the graticule
layer(sp.lines(grat)) +
layer(sp.text(coordinates(labsLon),
txt = parse(text = labsLon$lab),
adj = c(1.1, -0.25),
cex = 0.3)) +
layer(sp.text(coordinates(labsLat),
txt = parse(text = labsLat$lab),
adj = c(-0.25, -0.25),
cex = 0.3))
dev.off()
rel_dif_cicloAnual_sat_caer[rel_dif_cicloAnual_sat_caer[] < -1] <- -1
pdf("rel_dif_cicloAnual_sat_caerFiltered.pdf")
levelplot(rel_dif_cicloAnual_sat_caer, par.settings=rasterTheme(region=pal)) +
## and the graticule
layer(sp.lines(grat)) +
layer(sp.text(coordinates(labsLon),
txt = parse(text = labsLon$lab),
adj = c(1.1, -0.25),
cex = 0.3)) +
layer(sp.text(coordinates(labsLat),
txt = parse(text = labsLat$lab),
adj = c(-0.25, -0.25),
cex = 0.3))
dev.off()
## Bias of each simulation over the annual cycle, and the difference of biases
## (how much the two simulations disagree relative to the satellite).
biasRsds <- SISm- rsdsm
biasRsdsno <- SISm - rsdsnom
desviacion <- biasRsds -biasRsdsno
rng <- range(desviacion[], na.rm=TRUE)
nInt <- 13
inc0 <- diff(rng)/nInt
n0 <- floor(abs(rng[1])/inc0)
inc <- abs(rng[1])/(n0+1/2)
n1 <- ceiling((rng[2]/inc-1/2)+1)
breaks <- seq(rng[1],by=inc,length=n0+1+n1)
idxx <- findInterval(desviacion[], breaks, rightmost.closed=TRUE)
mids <-tapply(desviacion[], idxx,median)
mx <- max(abs(breaks))
break2pal <- function(x,mx,pal){
y <- 1/2*(x/mx+1)
rgb(pal(y), maxColorValue=255)
}
divRamp <-colorRamp(div.pal)
pal <- break2pal(mids, mx, divRamp)
pdf("desviacion_CicloAnual_sat_caer_cno_20032009.pdf")
levelplot(desviacion, par.settings=rasterTheme(region=pal)) +
## and the graticule
layer(sp.lines(grat)) +
layer(sp.text(coordinates(labsLon),
txt = parse(text = labsLon$lab),
adj = c(1.1, -0.25),
cex = 0.3)) +
layer(sp.text(coordinates(labsLat),
txt = parse(text = labsLat$lab),
adj = c(-0.25, -0.25),
cex = 0.3))
dev.off()
## Same figure with the default levelplot palette.
pdf("desviacion_CicloAnual_sat_caer_cno_20032009default.pdf")
levelplot(desviacion) +
## and the graticule
layer(sp.lines(grat)) +
layer(sp.text(coordinates(labsLon),
txt = parse(text = labsLon$lab),
adj = c(1.1, -0.25),
cex = 0.3)) +
layer(sp.text(coordinates(labsLat),
txt = parse(text = labsLat$lab),
adj = c(-0.25, -0.25),
cex = 0.3))
dev.off()
## Relative bias deviation, normalised by the satellite field.
desviacionrel <- (biasRsds -biasRsdsno)/SISm
rng <- range(desviacionrel[], na.rm=TRUE)
nInt <- 13
inc0 <- diff(rng)/nInt
n0 <- floor(abs(rng[1])/inc0)
inc <- abs(rng[1])/(n0+1/2)
n1 <- ceiling((rng[2]/inc-1/2)+1)
breaks <- seq(rng[1],by=inc,length=n0+1+n1)
idxx <- findInterval(desviacionrel[], breaks, rightmost.closed=TRUE)
mids <-tapply(desviacionrel[], idxx,median)
mx <- max(abs(breaks))
break2pal <- function(x,mx,pal){
y <- 1/2*(x/mx+1)
rgb(pal(y), maxColorValue=255)
}
divRamp <-colorRamp(div.pal)
pal <- break2pal(mids, mx, divRamp)
pdf("desviacionrel_CicloAnual_sat_caer_cno_20032009default.pdf")
levelplot(desviacionrel) +
## and the graticule
layer(sp.lines(grat)) +
layer(sp.text(coordinates(labsLon),
txt = parse(text = labsLon$lab),
adj = c(1.1, -0.25),
cex = 0.3)) +
layer(sp.text(coordinates(labsLat),
txt = parse(text = labsLat$lab),
adj = c(-0.25, -0.25),
cex = 0.3))
dev.off()
|
8695e2724951e7aeffe2ee072018d3be6179f2bf | 5e37ee5e60def89a5eb4c9e938994a88c15c55d8 | /Project1-ExploreVis/yabinfan_/ui.R | b3062882f8ee36b640d31f007b675f679421fbbc | [] | no_license | xiyuansun/bootcamp009_project | 2c079c94d2339a61397d5ea8765bf4bbc7a5f5b7 | 53bad9ea33d665db222bfa4a38a92580d811b53d | refs/heads/master | 2020-05-02T18:08:48.813998 | 2017-06-02T15:30:42 | 2017-06-02T15:30:42 | 178,120,031 | 1 | 0 | null | 2019-03-28T03:33:46 | 2019-03-28T03:33:45 | null | UTF-8 | R | false | false | 5,469 | r | ui.R | # This is the user-interface definition of a Shiny web application.
library(shiny)
library(shinythemes)
## Shiny UI: a four-tab navbar app (interactive leaflet map, word-cloud
## explorer, director table, and documentation). Output/input IDs referenced
## here (map, yearbar, scorebar, wordcloud, tbl, ...) are rendered in server.R;
## `Genres` is assumed to be defined in global scope (e.g. global.R) — confirm.
navbarPage(
  title = 'NYC Movie Spot',
  id = 'nav',theme = shinytheme("flatly"),
  tabPanel(
    'Interactive Map',
    div(
      class = "outer",
      tags$head(# Include the custom CSS
        includeCSS("styles.css"),
        includeScript("gomap.js")),
      # Create the full-screen map interface
      leafletOutput("map", width =
                      "100%", height = "100%"),
      # Shiny versions prior to 0.11 should use class="modal" instead.
      absolutePanel(
        id = "controls", class = "panel panel-default", fixed = FALSE,
        draggable = TRUE, top = 10, left =30 , right ="auto" , bottom = "auto",
        width = 300, height = "auto",
        h2(img(src = "videocam.png", height = 40),
           "Movies in NYC"),
        checkboxGroupInput("Genres", h4(img(src = 'gen.png', height = 40), "Select the Genres:"),choices = Genres,
                           selected = Genres),
        helpText("You can see more movie information by click the color circle on the map"),
        sliderInput(
          "Year",
          h4("Year"),
          min = 1945,
          max = 2006,
          value = c(1945, 2006)
        ),
        sliderInput(
          "Score",
          h4("IMDB Score"),
          min = 5.2,
          max = 9.0,
          value = c(5.2, 9.0)
        )
      ),
      # Secondary draggable panel with the two summary bar charts.
      absolutePanel(id = "controls", class = "panel panel-default", fixed = FALSE,
                    draggable = TRUE, top = 280, left ="auto" , right =20 , bottom = "auto",
                    width = 350, height = "auto",
                    plotOutput("yearbar",height = 200),
                    plotOutput("scorebar",height = 200)
      ),
      # The origins of the dataset (citation footer).
      tags$div(
        id = "cite",
        'Data was provide by ',
        tags$em('New York City Office of Film, Theatre, and Broadcasting '),
        ' and Chuan Sun,who scraped data from the IMDB website'
      )
    )
  ),
  tabPanel("Movie Explorer",
           fluidRow(
             column(3,
                    h2("NYC Movie Theme"),
                    br(),
                    br(),
                    sliderInput("rfreq",
                                h4("Minimum Frequency:"),
                                min = 1, max = 20, value = c(1,20)),
                    sliderInput("rmax",
                                h4("Maximum Number of Words:"),
                                min = 1, max = 200, value = c(1,200))),
             column(9,
                    h3("What are the words the Directors use the most in the movie title,"),
                    h3("when they film the movies in NYC? "),
                    plotOutput("wordcloud",height=500)
             )
           )),
  tabPanel(
    "Find your Film",
    fluidRow(column(3,
                    sliderInput(
                      "dn",
                      h4("Top n Directors"),
                      min = 1,
                      max = 50,
                      value = (1)
                    )
    ),
    column(6, DT::dataTableOutput("tbl")))),
  tabPanel("Documentation",fluidRow(column(4,h3(img(src = "videocam.png", height = 40),"Welcome to NYC Movie Spot"),br(),
                                           p("The web application NYC Movie Spot is a tool designed to aid the visualization,
                                             analysis and exploration of movies that have been filmed in New York City for the past
                                             several decades. This app was built with R and Shiny and it is designed so that any movie
                                             lovers can use it."),br(),
                                           h4("Data Source:"),
                                           p(a("NYC Open Data ", href= "https://data.cityofnewyork.us/Business/Filming-Locations-Scenes-from-the-City-/qb3k-n8mm")),
                                           p(a("IMDB 5000", href= "https://www.kaggle.com/deepmatrix/imdb-5000-movie-dataset")),br(),
                                           h4("About the Author"),
                                           p("Author: Yabin Fan"),
                                           p("Email: yfan19@jhu.edu"),
                                           p("Linkedin:", a("Click Here", href = "https://www.linkedin.com/in/yabin-fan-626858105/")),br(),
                                           p("Suggested citation: NYC Movie Locations Guide 2017:
                                             A web application for the visualization and exploration of NYC movies,
                                             Version 1.0, Yabin Fan")),
                                    column(6,h2(img(src ="harry.png")))))
)
|
1e58bb685b4c921cd7b88c0e8dba23fdaa90c562 | bf833eb498ee0e4e7f285e4bdd1cda9b80dd1b6c | /Map/map.R | 8503f36f34b0e316123d4140a665333e31c0606d | [] | no_license | ppflrs/TM_Cyanophages | 45f6746319c28b76277360187d2868ac109493d2 | 583d721ac72b105d48762dc061d297fb3e4e920e | refs/heads/master | 2020-03-17T17:47:18.817847 | 2018-05-17T17:51:18 | 2018-05-17T17:51:18 | 133,802,036 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,432 | r | map.R | suppressMessages(library(dplyr))
library(tidyr)
library(maps)
library(ggplot2)
library(ggthemes)
# From the anvi'o summary MAGs-SUMMARY/bins_across_samples/relative_abundance.txt
df.abundance <- read.csv("./relative_abundance.txt", sep = "\t")
# Keep only the metadata columns needed to place each dataset on the map.
df.tara_metadata <- read.csv("../data/TARA_metadata.csv")[ ,c("dataset", "Latitude_Start", "Longitude_Start", "Station", "fraction")]
# Wide -> long: one row per (bin, dataset) with its relative abundance.
abundance <- gather(df.abundance, dataset, rel_abundance, -bins)
abundance <- inner_join(abundance, df.tara_metadata)
abundance <- abundance %>% filter(fraction != "GIRUS")
# FIX: removed the unused `gg <- ggplot()` object the original created and never used.
wrld <- map_data("world")
# Map window (longitude/latitude limits).
xlims <- c(-155, 70)
ylims <- c(-50, 50)
p <- ggplot()
p <- p + theme(panel.background = element_rect(fill = NA),
               panel.border = element_rect(colour = "#000000",
                                           size = 1,
                                           linetype = "solid",
                                           fill = NA),
               axis.title = element_blank(),
               axis.ticks.x = element_blank(),
               axis.text.x = element_blank(),
               axis.text.y = element_text(),
               axis.ticks.y = element_line(),
               legend.position = "bottom",
               legend.background = element_rect(fill = "white", colour = "black"),
               legend.key = element_rect(fill = NA))
# Draw the continents as dark grey polygons.
p <- p + geom_polygon(data = wrld, aes(x = long, y = lat, group = group), colour = "#4d4d4d", fill = "#4d4d4d")
# Plot stations where the bin was not detected (small black dots).
neg_map <- p + geom_point(data = abundance %>% filter(rel_abundance == 0),
                          shape = 21,
                          colour = "black", fill = "black",
                          size = 0.5,
                          aes(x = Longitude_Start, y = Latitude_Start)
)
# Add stations with detection, sized by relative abundance (red circles).
p <- neg_map + geom_point(data = abundance %>%
                            filter(rel_abundance > 0),
                          shape = 21,
                          colour = "#b21616", fill = "#e84646",
                          aes(x = Longitude_Start, y = Latitude_Start, size = rel_abundance)
)
# Facet by size fraction; theme_map() strips axes, coord_fixed crops the window.
p <- p + facet_wrap(~fraction) + coord_quickmap() + theme_map()
p <- p + coord_fixed(xlim = xlims, ylim = ylims)
p
|
ba202396f1d0fb9c3094cc8aafd0fb54572721b2 | 61f8b925a36449c34e3aa98eac20e8b6f616b8ca | /DESeq2.R | 444c8fe3e6ad35ddd45b2b48fd20d23901206e9d | [] | no_license | collinsjw/Zcchc8-KO-pervasive-transcripts | 03107689bb385061e8ad21fc2e11666530756f5f | daf0251c18b61e813aaa4e6f99fec681f2c262ba | refs/heads/main | 2023-03-01T20:58:29.606666 | 2021-02-07T19:21:47 | 2021-02-07T19:21:47 | 333,975,915 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,602 | r | DESeq2.R | library(DESeq2)
library(tidyverse)
library(biomaRt)
library(RColorBrewer)
library(pheatmap)

################################################
# DESeq2 on features from SIMS Zcchc8 KO cells #
################################################

# Get list of raw read files assuming files are in working directory; set working directory as needed
setwd("/Your/raw/read/files/location/")
# Create raw count files if needed. If you already have a filtered raw count file skip to the read.delim step below.
vm24.file.names <- list.files(getwd(), full.names = FALSE)
# Read each raw count file and bind them column-wise into one data frame.
vm24.af <- lapply(vm24.file.names, read.table, sep = "\t", header = FALSE)
vm24.af <- as.data.frame(vm24.af)
# Subset to the count matrix for DESeq2: drop the first 4 summary rows and keep
# the gene-ID column (1) plus every 4th column, which holds the counts.
vm24.af <- vm24.af[5:55475, c(1, seq(4, 76, by = 4))]
# Build the vector of sample names used as the new column names.
wt <- paste0("WT", sprintf("%02.0f", 2:4))
bt <- paste0("Bt", sprintf("%02.0f", 1:4))
zc <- paste0("Zc", sprintf("%02.0f", 1:12))
samples <- c("gene_ID", bt, wt, zc)
# Rename columns. These are the raw counts.
vm24.af <- rename_at(vm24.af, vars(colnames(vm24.af)), ~samples)
write.table(vm24.af, file = "/Your/file/name", row.names = F, col.names = T, quote = F, sep = "\t")
# Keep features with >5 reads in at least one sample.
# FIX: the original used filter_all(), which also applied `. > 5` to the
# character gene_ID column; "ENSMUSG..." > 5 coerces to a string comparison
# that is always TRUE, so the filter silently kept every row. The numeric
# count columns must be tested while gene_ID is excluded.
vm24.af <- filter_at(vm24.af, vars(-gene_ID), any_vars(. > 5))
write.table(vm24.af, file = "/Your/file/name", row.names = F, col.names = T, quote = F, sep = "\t")
# Get the filtered read count file (or use the one created above).
# Important: the gene IDs must become the row names (row.names = 1) or DESeq2
# will not work properly.
rc <- read.delim(file = "/Filtered/raw/count/file", row.names = 1)
# Set up colData for DESeq2: one row per sample, with its experimental group.
# Group order must match the column order of rc (4 BtKO, 3 WT, 12 ZcKO).
groups <- c(rep("BtKO", 4), rep("WT", 3), rep("ZcKO", 12))
setup <- data.frame(ensemble_id = colnames(rc), group = groups, row.names = 1, stringsAsFactors = F)
# Make the DESeq2 data set.
dds <- DESeqDataSetFromMatrix(countData = rc, colData = setup, design = ~group)
# Run DESeq2.
dds <- DESeq(dds)
# Generate a normalized counts data frame.
dds <- estimateSizeFactors(dds)
nc <- as.data.frame(counts(dds, normalized = T))
# Fetch pairwise results for each contrast of interest.
ZvW_res <- results(dds, contrast = c("group", "ZcKO", "WT"))
ZvW_df <- as.data.frame(ZvW_res, stringsAsFactors = F)
ZvB_res <- results(dds, contrast = c("group", "ZcKO", "BtKO"))
ZvB_df <- as.data.frame(ZvB_res, stringsAsFactors = F)
BvW_res <- results(dds, contrast = c("group", "BtKO", "WT"))
BvW_df <- as.data.frame(BvW_res, stringsAsFactors = F)
# Annotate Ensembl IDs with gene names via biomaRt (requires network access).
mus = useMart("ENSEMBL_MART_ENSEMBL", dataset = "mmusculus_gene_ensembl")
gene_ids <- getBM(attributes = c("external_gene_name", "ensembl_gene_id_version", "description"),
filters = "ensembl_gene_id_version",
values = rownames(rc),
mart = mus)
ZvW_df <- merge(ZvW_df, gene_ids, by.x = 0, by.y = "ensembl_gene_id_version")
write.table(ZvW_df, file = "/Your/file/name", sep = "\t", col.names = T, row.names = F, quote = F)
ZvB_df <- merge(ZvB_df, gene_ids, by.x = 0, by.y = "ensembl_gene_id_version")
write.table(ZvB_df, file = "/Your/file/name", sep = "\t", col.names = T, row.names = F, quote = F)
BvW_df <- merge(BvW_df, gene_ids, by.x = 0, by.y = "ensembl_gene_id_version")
write.table(BvW_df, file = "/Your/file/name", sep = "\t", col.names = T, row.names = F, quote = F)
######################################## Multi Dimensional Scaling ########################################
# Variance-stabilize, compute sample-to-sample distances, and plot a 2-D MDS.
rld <- varianceStabilizingTransformation(dds, blind=FALSE)
sampleDists <- dist(t(assay(rld)))
sampleDistMatrix <- as.matrix(sampleDists)
mds <- data.frame(cmdscale(sampleDistMatrix))
mds <- cbind(mds, as.data.frame(colData(rld)))
ggplot(mds, aes(X1,X2, color = group)) + geom_point(size=3)
######################################## Heatmaps ########################################
# Example of how to make heatmaps from the normalized counts.
genes_nc <- as.matrix(nc)
# K-means clustering (k = 2) to order the heatmap rows by cluster.
clusters <- pheatmap(genes_nc, scale = "row", kmeans_k = 2)
names(clusters$kmeans)
clusterDF <- as.data.frame(factor(clusters$kmeans$cluster))
colnames(clusterDF) <- "Cluster"
OrderByCluster <- genes_nc[order(clusterDF$Cluster), ]
# Custom diverging heatmap colors (blue -> grey -> red).
cust.color <- colorRampPalette(c("navy", "royalblue", "#c5c9c7", "whitesmoke", "firebrick", "red"))(n = 299)
pheatmap(OrderByCluster,
scale="row", show_rownames = FALSE, cluster_rows = FALSE, color = cust.color)
|
56c17bb0ce8303018bdb298b3c645270fa69449a | 286e3600ec082719bfcae28d47869add203bc875 | /install.R | 57786570eeac417463b365bc603e342c3cb867ed | [] | no_license | crazyhottommy/r | decec5afb75f572f78bdd9bae13438f11d32a2fe | b10f3b0a91f200741d84a5f8b14f5532f15db8e2 | refs/heads/master | 2020-03-22T01:02:20.154482 | 2018-10-23T21:09:15 | 2018-10-23T21:09:15 | 139,279,146 | 0 | 0 | null | 2018-06-30T20:51:24 | 2018-06-30T20:51:23 | null | UTF-8 | R | false | false | 151 | r | install.R | install.packages("tidyverse")
install.packages("rmarkdown")
install.packages('Seurat')
# NOTE(review): sourcing biocLite.R is deprecated and no longer served for
# R >= 3.5; BiocManager::install("SIMLR") is the current equivalent — confirm
# the target R version before changing.
source("https://bioconductor.org/biocLite.R")
biocLite("SIMLR")
|
ace3effe22b99348d73330f640662dfff343753e | 445bff301e34df034e5066021d8e1a90d76125dd | /data-raw/dev-prep.R | 851fc026e9338c004b17ef1e2b9be7ac19f1537d | [] | no_license | p-will-b/tsydirectr | f70ba3cbe885e275118598986648b5fd280df1f6 | 89b390611a3770e597acf8201aeb0d71ba0fc21f | refs/heads/master | 2023-04-26T10:01:33.850319 | 2021-05-17T03:28:27 | 2021-05-17T03:28:27 | 368,036,472 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,276 | r | dev-prep.R | library(devtools)
library(usethis)
library(desc)
# One-time package scaffolding for tsydirectr.
# CREDIT TO COLIN FAY FOR EASY SET UP :) https://colinfay.me/build-api-wrapper-package-r/
# Remove the default DESCRIPTION so it can be rebuilt from scratch.
unlink("DESCRIPTION")
# Create and clean a fresh description object.
my_desc <- description$new("!new")
# Set your package name
my_desc$set("Package", "tsydirectr")
# Set your name
my_desc$set("Author", "person('p-will-b', role = c('cre', 'aut'))")
# Remove some author fields
my_desc$del("Maintainer")
# Set the version
my_desc$set_version("0.0.1")
# The title of your package
my_desc$set(Title = "tsydirectr")
# The description of your package
my_desc$set(Description = "An R wrapper for the Treasury Direct API.")
# The urls
my_desc$set("URL", "https://www.github.com/p-will-b/tsydirectr")
my_desc$set("BugReports", "http://www.github.com/p-will-b/tsydirectr/issues")
# Save everything back to DESCRIPTION.
my_desc$write(file = "DESCRIPTION")
# MIT licence, code of conduct, and lifecycle badge.
use_mit_license(name = "p-will-b")
use_code_of_conduct()
use_lifecycle_badge("Experimental")
use_news_md()
# Declare the runtime dependencies in DESCRIPTION's Imports field.
use_package("httr")
use_package("jsonlite")
use_package("curl")
use_package("attempt")
use_package("purrr")
use_package("dplyr")
use_package("stringr")
# Normalise/clean the DESCRIPTION file formatting.
use_tidy_description()
|
2cad80058346298f0de78ab9c3581961018a0fa2 | 67db7e4bb5e71ee1c8a0123deaa7568696dacfae | /R/BMDadj.r | 03d4c57b30c8bfb7e8724038232147f6dbf4c5f0 | [] | no_license | DoseResponse/medrc | ece93f90e18b578612f32d57c8636a933841153e | bc36df514ad68d6e3f29ec4c740a563605231819 | refs/heads/master | 2021-09-27T10:32:28.992866 | 2018-11-08T01:07:07 | 2018-11-08T01:07:07 | 106,538,359 | 12 | 4 | null | null | null | null | UTF-8 | R | false | false | 1,939 | r | BMDadj.r | #' Adjusted ED response levels for BMD estimation based on medrc or glsdrc models
#'
#' Calculates adjusted response levels for estimation of the BMD
#'
#' @param object an medrc, glsdrc, or drc model object
#' @param respLev a numeric vector containing the benchmark response levels (in percent)
#' @param bmd benchmark dose estimation (smallest dose resulting in a probability of an abnormal response)
#' @param background probability of an abnormal response
#' @return A vector/matrix of adjusted response levels, one column per element of respLev.
#'
#' @keywords htest
BMDadjresp <- function (object, respLev, bmd = c("additional", "extra"), background = 0.05){
  bmd <- match.arg(bmd)
  ## For "extra" risk the response levels are rescaled by the non-background fraction.
  if (bmd[1] == "extra")
    respLev <- respLev * (1 - background)
  lenRL <- length(respLev)
  ## FIX: seq_len() instead of 1:lenRL, so an empty respLev yields an empty
  ## result instead of iterating over c(1, 0).
  sapply(seq_len(lenRL), function(i) {
    cobj <- object$parmMat
    prnames <- rownames(cobj)
    if (class(object)[1] == "drc") {
      ## drc objects carry parameter names separately from the matrix.
      prnames <- unique(object$parNames[[2]])
      rownames(cobj) <- prnames
    }
    ## Lower (c) and upper (d) asymptotes; a missing "c" defaults to 0.
    cvals <- cobj[prnames == "c", ]
    if (length(cvals) == 0) {
      cvals <- 0
    }
    dvals <- cobj[prnames == "d", ]
    ## Recycle scalars so the asymptote vectors have matching length.
    if (length(cvals) == 1 & length(dvals) > 1)
      cvals <- rep(cvals, length(dvals))
    if (length(dvals) == 1 & length(cvals) > 1)
      dvals <- rep(dvals, length(cvals))
    ## Guarantee c <= d per curve (swap pairs that come out inverted).
    if (any(cvals > dvals)) {
      tempd <- apply(cbind(cvals, dvals), 1, function(x) sort(x))
      cvals <- tempd[1, ]
      dvals <- tempd[2, ]
    }
    ## Dispatch on model class to fetch the residual standard deviation.
    if (class(object)[1] == "medrc") {
      varcorr <- VarCorr(object)
      return(100 * (qnorm(1 - background) - qnorm(1 - (background + respLev[i]/100))) * as.numeric(varcorr[attr(varcorr, "dimnames")[[1]] %in% "Residual", 2])/(dvals - cvals))
    }
    if (class(object)[1] == "glsdrc") {
      return(100 * (qnorm(1 - background) - qnorm(1 - (background + respLev[i]/100))) * object$fit$sigma/(dvals - cvals))
    }
    if (class(object)[1] == "drc") {
      return(100 * (qnorm(1 - background) - qnorm(1 - (background + respLev[i]/100))) * summary(object)$rseMat[1, 1]/(dvals - cvals))
    }
  })
}
f91f253973963028c290c09e165f578d128fceef | bce305270dc536fd905897be0612e19970a7f6d0 | /Main_EDA.R | 821aabc35d03e627680682156413760bb57f242a | [] | no_license | bpawlow/Fantasy-Football-Analysis | 4999890199e966b2712d3e2b6b6334faba0b5e26 | 72a1ff87fdc2c6a569c593f9bc37951011130772 | refs/heads/master | 2023-02-10T19:23:28.192701 | 2021-01-04T06:00:40 | 2021-01-04T06:00:40 | 323,184,448 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,783 | r | Main_EDA.R | # Loaded Packages & Other R Scripts----
library(tidyverse)
library(modelr)
library(splines)

### Loading Data Collection R Script & Data Cleaning R Script
source("data_collection.R")
source("data_cleaning.R")
source("Overall_rankings_EDA.R")

# Fantasy Talent for NFL Teams Over The Last 5 Years----
# For QBs, RBs, WRs, and TEs based on total fantasy points.
# FIX: the original filtered on fant_pos == "Wr" (mixed case) while every other
# position code is uppercase, silently excluding all wide receivers; %in% with
# the uppercase codes replaces the chained `|` comparisons.
team_fantasy_prod <- complete_ff_data %>%
  filter(fant_pos %in% c("RB", "WR", "TE", "QB"), year > 2014) %>%
  mutate(
    fant_pt = replace_na(fant_pt, 0)
  ) %>%
  group_by(tm, year) %>%
  summarize(
    total_ff_pts = sum(fant_pt)
  ) %>%
  # Drop multi-team rows ("2TM"/"3TM") that are not real franchises.
  filter(tm != "2TM" & tm != "3TM")

# Smoothed total fantasy points per team over the 5-year window.
team_fantasy_prod %>%
  ggplot(aes(x = year, y = total_ff_pts)) +
  geom_smooth(size = 1) +
  facet_wrap(~tm , scales="free")
#Assessing trends of growth or decay using various models
#Teams selected based on apparent visual patterns:
#BAL (Exponential trend)
#CIN, DAL, HOU, MIA, MIN, NOR, NWE, PHI, SFO, WAS (Linear trends)
#Linear trends
# Scatter + per-team OLS fit for the teams that looked linear above.
team_fantasy_prod %>%
  filter(tm == "CIN" | tm == "DAL" | tm == "HOU" | tm == "MIA" | tm == "MIN" |
           tm == "NOR" | tm == "NWE" | tm == "PHI" | tm == "SFO" | tm == "WAS") %>%
  ggplot(aes(x = year, y = total_ff_pts)) +
  geom_point(size = 1) +
  facet_wrap(~tm, scales = "free") +
  stat_smooth(method = "lm", col = "red")
#Exponential Trend with BAL
# Fit a linear model on log2(points); back-transforming the predictions
# (2 ^ log_ff_pts) gives the exponential curve on the original scale.
bal_fantasy_prod <- team_fantasy_prod %>%
  filter(tm == "BAL") %>%
  mutate(log_ff_pts = log2(total_ff_pts))
model <- lm(log_ff_pts ~ year, data = bal_fantasy_prod)
grid <- bal_fantasy_prod %>%
  data_grid(year) %>%
  add_predictions(model, "log_ff_pts") %>%
  mutate(total_ff_pts = 2 ^ log_ff_pts)
# Overlay the exponential fit (red) on the smoothed observed series.
ggplot(bal_fantasy_prod, aes(x = year, y = total_ff_pts)) +
  geom_smooth(size = 1) +
  geom_line(data = grid, colour = "red", size = 1)
# Residuals (on the log2 scale) over time to eyeball model adequacy.
bal_fantasy_prod %>%
  add_residuals(model) %>%
  ggplot(aes(year, resid)) +
  geom_line()
### Scratch Work ###
# #Fit a loess model to BAL data
# exp_mod <- loess(total_fp_pts ~ year, data = bal_fantasy_prod)
#
# #Add predictions and residuals to BAL data
# mod_data <- bal_fantasy_prod %>%
# add_predictions(model = exp_mod, var = "pred_loess") %>%
# add_residuals(exp_mod, "resid_loess")
#
# mod_data %>%
# ggplot(aes(x = year)) +
# geom_line(aes(y = total_fp_pts), size = 1) +
# geom_line(aes(y = pred_loess), color = "red")
#The top 10 teams sustaining the most top-25 players, in total at each position
#(duplicate players included)
#Over the last 20 years
# Counts player-seasons with a positional rank <= 25, aggregated per team.
complete_ff_data %>%
  filter(pos_rank <= 25) %>%
  group_by(tm) %>%
  summarize(top25_players = n()) %>%
  unique() %>%
  mutate(avg_num_players = top25_players / 20) %>%
  arrange(desc(top25_players)) %>%
  head(10) %>%
  ggplot(aes(x = reorder(tm, top25_players), y = top25_players)) +
  geom_bar(stat="identity", width=.5, fill="blue") +
  coord_flip()
#Over the last 5 years
complete_ff_data %>%
  filter(pos_rank <= 25, year > 2014) %>%
  group_by(tm) %>%
  summarize(top25_players = n()) %>%
  unique() %>%
  mutate(avg_num_players = top25_players / 5) %>%
  arrange(desc(top25_players)) %>%
  head(10) %>%
  ggplot(aes(x = reorder(tm, top25_players), y = top25_players)) +
  geom_bar(stat="identity", width=.5, fill="cornflowerblue") +
  coord_flip()
#Worst teams for fantasy production over the last 20 years
# Same aggregation, sorted ascending; results printed rather than plotted.
complete_ff_data %>%
  filter(pos_rank <= 25, tm != "2TM" & tm != "3TM") %>%
  group_by(tm) %>%
  summarize(top25_players = n()) %>%
  mutate(avg_num_players = top25_players / 20) %>%
  unique() %>%
  arrange(top25_players) %>%
  head(10)
#Worst teams for fantasy production over the last 5 years
complete_ff_data %>%
  filter(pos_rank <= 25, year > 2014, tm != "2TM" & tm != "3TM") %>%
  group_by(tm) %>%
  summarize(top25_players = n()) %>%
  mutate(avg_num_players = top25_players / 5) %>%
  unique() %>%
  arrange(top25_players) %>%
  head(10)
### Scratch Work ###
# View(complete_ff_data %>%
# filter(fant_pos == "RB" | fant_pos == "Wr" | fant_pos == "TE") %>%
# mutate(
# fant_pt = replace_na(fant_pt, 0)
# ) %>%
# group_by(tm, year) %>%
# summarize(
# total_fp_pts = sum(fant_pt)
# ) %>%
# filter(tm != "2TM" & tm != "3TM")) %>%
# head(80) %>%
# ggplot(aes(x = year, y = total_fp_pts, color = tm)) +
# geom_line(size = 1)
#
# complete_ff_data %>%
# filter(fant_pos == "RB" | fant_pos == "Wr" | fant_pos == "TE") %>%
# mutate(
# fant_pt = replace_na(fant_pt, 0)
# ) %>%
# group_by(tm, year) %>%
# summarize(
# total_fp_pts = sum(fant_pt)
# ) %>%
# filter(tm != "2TM" & tm != "3TM") %>%
# slice(81:160) %>%
# ggplot(aes(x = year, y = total_fp_pts, color = tm)) +
# geom_line(size = 1)
#
#
# View(complete_ff_data %>%
# filter(fant_pos == "RB" | fant_pos == "Wr" | fant_pos == "TE") %>%
# group_by(tm, year) %>%
# summarize(
# total_fp_pts = sum(fant_pt)
# ) %>%
# filter(tm != "2TM" & tm != "3TM"))
#
# View(complete_ff_data %>%
# filter(fant_pos == "RB" | fant_pos == "Wr" | fant_pos == "TE") %>%
# mutate(
# tm = str_replace_all(tm, "STL", "LAR"),
# fant_pt = replace_na(fant_pt, 0)
# ) %>%
# mutate(
# tm = str_replace_all(tm, "SDG", "LAC")
# ) %>%
# group_by(tm, year) %>%
# summarize(
# total_fp_pts = sum(fant_pt)
# ))
# Player Analysis and Possible Trends----
# Assessing fantasy potential of young players
# Top 5 players (by total fantasy points since 2018) at each position,
# restricted to players aged <= 24. Note: `player = player` inside
# summarize() relies on dplyr carrying a constant per group.
top_young_players <- complete_ff_data %>%
  filter(year >= 2018, age <= 24) %>%
  group_by(ply_code) %>%
  summarize(
    player = player,
    fant_pos = fant_pos,
    tot_games = sum(g),
    total_ff_pts = sum(fant_pt),
    avg_ff_pts = total_ff_pts / tot_games
  ) %>%
  unique() %>%
  drop_na() %>%
  group_by(fant_pos) %>%
  slice_max(order_by = total_ff_pts, n = 5) %>%
  select(-ply_code)
top_young_players %>%
  filter(fant_pos == "QB")
top_young_players %>%
  filter(fant_pos == "RB")
top_young_players %>%
  filter(fant_pos == "WR")
top_young_players %>%
  filter(fant_pos == "TE")
#Most Risky QBs (most interceptions and fumbles)
# Turnovers = fumbles + interceptions, summed per player since 2015.
complete_ff_data %>%
  filter(year >= 2015, fant_pos == "QB") %>%
  group_by(ply_code) %>%
  summarize(player = player,
            tot_games = sum(g),
            tot_turnovers = sum(fmb) + sum(int),
            avg_tos_per_game = tot_turnovers / tot_games) %>%
  unique() %>%
  ungroup(ply_code) %>%
  select(-ply_code) %>%
  arrange(desc(tot_turnovers)) %>%
  head(10) %>%
  ggplot(aes(x = reorder(player, tot_turnovers), y = tot_turnovers)) +
  geom_bar(stat="identity", width=.5, fill="tomato3") +
  coord_flip()
# Positional Trends----
# Each chunk below draws points (jittered) with a linear trend line for
# players with positive value-based-draft (vbd > 0) at one position.
#QB Production
complete_ff_data %>%
  filter(vbd > 0, fant_pos == "QB") %>%
  ggplot(aes(x = year, y = fant_pt)) +
  geom_point(size = 1) +
  geom_jitter() +
  stat_smooth(method = "lm", col = "red")
#Are more QBs rushing than before?
complete_ff_data %>%
  filter(vbd > 0, fant_pos == "QB") %>%
  ggplot(aes(x = year, y = rush_yds)) +
  geom_point(size = 1) +
  geom_jitter() +
  stat_smooth(method = "lm", col = "red")
#Are more QBs passing than before?
complete_ff_data %>%
  filter(vbd > 0, fant_pos == "QB") %>%
  ggplot(aes(x = year, y = pass_yds)) +
  geom_point(size = 1) +
  geom_jitter() +
  stat_smooth(method = "lm", col = "red")
#QBs are passing more!
#RB Production
complete_ff_data %>%
  filter(vbd > 0, fant_pos == "RB") %>%
  ggplot(aes(x = year, y = fant_pt)) +
  geom_point(size = 1) +
  geom_jitter() +
  stat_smooth(method = "lm", col = "red")
#Are RBs racking up more receiving yards?
complete_ff_data %>%
  filter(vbd > 0, fant_pos == "RB") %>%
  ggplot(aes(x = year, y = rec_yds)) +
  geom_point(size = 1) +
  geom_jitter() +
  stat_smooth(method = "lm", col = "red")
#WR Production
complete_ff_data %>%
  filter(vbd > 0, fant_pos == "WR") %>%
  ggplot(aes(x = year, y = fant_pt)) +
  geom_point(size = 1) +
  geom_jitter() +
  stat_smooth(method = "lm", col = "red")
#For PPR leagues (Points Per Reception additional scoring)
complete_ff_data %>%
  filter(vbd > 0, fant_pos == "WR") %>%
  ggplot(aes(x = year, y = ppr)) +
  geom_point(size = 1) +
  geom_jitter() +
  stat_smooth(method = "lm", col = "red")
#TE Production
complete_ff_data %>%
  filter(vbd > 0, fant_pos == "TE") %>%
  ggplot(aes(x = year, y = fant_pt)) +
  geom_point(size = 1) +
  geom_jitter() +
  stat_smooth(method = "lm", col = "red")
complete_ff_data %>%
  filter(vbd > 0, fant_pos == "TE") %>%
  ggplot(aes(x = year, y = rec_yds)) +
  geom_point(size = 1) +
  geom_jitter() +
  stat_smooth(method = "lm", col = "red")
#Receiving vs rushing for fantasy production
# How fantasy points scale with yardage, by yardage type (>500 yds only).
#rushing
complete_ff_data %>%
  filter(rush_yds > 500) %>%
  ggplot(aes(x = rush_yds, y = fant_pt)) +
  geom_point(size = 1) +
  stat_smooth(method = "lm", col = "red")
#receiving
complete_ff_data %>%
  filter(rec_yds > 500) %>%
  ggplot(aes(x = rec_yds, y = fant_pt)) +
  geom_point(size = 1) +
  stat_smooth(method = "lm", col = "blue")
#Highest average VBD for top 10 players, at each position, over time
# FIX: the original used the scalar short-circuit operator `||` inside
# dplyr::filter(). `||` is not vectorized — on R < 4.3 it silently used only
# the first element of each comparison (wrong rows kept), and on R >= 4.3 it
# errors on length > 1 inputs. Use %in% (equivalently, elementwise `|`).
data1 <- complete_ff_data %>%
  group_by(fant_pos, year) %>%
  slice_max(order_by = vbd, n = 15) %>%
  filter(fant_pos %in% c("RB", "WR"))
data2 <- complete_ff_data %>%
  group_by(fant_pos, year) %>%
  slice_max(order_by = vbd, n = 10) %>%
  filter(fant_pos %in% c("QB", "TE"))
vbd_data <- bind_rows(data1, data2)
# Trimmed-mean VBD per position-year, smoothed over time.
vbd_data %>%
  group_by(year, fant_pos) %>%
  summarize(avg_vbd = mean(vbd, trim = 0.1)) %>%
  ggplot(aes(x = year, y = avg_vbd)) +
  geom_smooth(aes(color = fant_pos), size = 1, se = FALSE)
#CONCLUSION: RBs are the most valuable to target during fantasy drafts (non-ppr)
|
#' dictset: An implementation for Dictionary and Set data types.
#'
#' @docType package
#' @name dictset
#'
#' @import stats
#' @import digest
#' @import utils
NULL
#' Check whether an object is a single character string
#' @param x an R object to test
#' @return \code{TRUE} if \code{x} is a length-one character vector,
#'   \code{FALSE} otherwise
#' @export
isString <- function(x){
  length(x) == 1 && is.character(x)
}
#' Compute the internal hash key for an R object
#' @param obj R object used as a dictionary/set key
#' @return \code{obj} itself when it is a single string, otherwise its MD5
#'   digest computed via \code{digest::digest()}
#' @details The original implementation used \code{ifelse()}, which is a
#'   vectorized construct and the wrong tool for a scalar condition (it also
#'   strips attributes from its result). A plain \code{if}/\code{else} is the
#'   idiomatic scalar form.
#' @export
key_hash <- function(obj) {
  if (isString(obj)) obj else digest::digest(obj)
}
#' Check if a Dictionary/Set is empty (S3 generic)
#' @param x a Dictionary or Set object
#' @param ... passed on to methods
#' @return logical scalar
#' @export
isEmpty <- function(x, ...) UseMethod('isEmpty')
#' Check if a Dictionary/Set contains an element (S3 generic)
#' @param x a Dictionary or Set object
#' @param ... passed on to methods (the key/element to look up)
#' @return logical scalar
#' @export
contains <- function(x, ...) UseMethod('contains')
#' Get an element from a Dictionary (S3 generic)
#' @param x a Dictionary object
#' @param ... passed on to methods (the key to look up)
#' @return the stored value
#' NOTE(review): exporting a generic named `get` masks `base::get`, and no
#' `get.default` method is defined here, so `get()` on non-Dictionary objects
#' would fail to dispatch. Consider renaming or adding a default method.
#' @export
get <- function(x, ...) UseMethod('get')
#' Put an element into a Dictionary/Set (S3 generic)
#' @param x a Dictionary or Set object
#' @param ... passed on to methods (key/value or element)
#' @return the modified object, invisibly
#' @export
put <- function(x, ...) UseMethod('put')
#' Copy a Dictionary/Set (S3 generic)
#' @param x a Dictionary or Set object
#' @param ... passed on to methods
#' @return an independent clone of \code{x}
#' @export
copy <- function(x, ...) UseMethod('copy')
#' List all keys in a Dictionary (S3 generic)
#' @param x a Dictionary object
#' @param ... passed on to methods
#' @return the stored keys
#' @export
keys <- function(x, ...) UseMethod('keys')
#' List all values in a Dictionary/Set (S3 generic)
#' @param x a Dictionary or Set object
#' @param ... passed on to methods
#' @return the stored values
#' @export
vals <- function(x, ...) UseMethod('vals')
#' Remove an element from a Dictionary/Set and return its value (S3 generic)
#' @param x a Dictionary or Set object
#' @param ... passed on to methods
#' @return the value of the popped element
#' @export
pop <- function(x, ...) UseMethod('pop')
#' Short string description of an R object
#' @param x R object
#' @return a one-line string: quoted value for single strings, the value for
#'   other scalars, "class(length)" for vectors, "class(nrow, ncol)" for
#'   matrices, the class name for other atomics, and "<class: 0x...>" for
#'   everything else
#' @export
toStr <- function(x){
  if (is.vector(x)){
    if (length(x)==1){
      # Scalar: quote strings, otherwise rely on toString().
      if (is.character(x)) return(paste0('"', x, '"')) else return(toString(x))
    } else {
      # Multi-element vector: summarize as class(length).
      return(paste0(class(x), '(', length(x), ')'))
    }
  } else if (is.matrix(x)){
    # class of the first cell plus dimensions, e.g. "numeric(3, 2)".
    return(paste0(class(x[1,1]), '(', dim(x)[1], ', ', dim(x)[2], ')'))
  } else if (is.atomic(x)){
    return(class(x))
  } else {
    # Fall back to the object's memory address pulled from the printed output
    # of .Internal(inspect(x)).
    # NOTE(review): .Internal(inspect(...)) is an unsupported internal API and
    # the fixed substring(.., 2, 17) assumes a specific print layout — fragile
    # across R versions/platforms; confirm before relying on this branch.
    address <- substring(capture.output(.Internal(inspect(x)))[1],2,17)
    return(paste0('<',class(x),': 0x', sub(' .*', '', address),'>'))
  }
}
########### Dictionary ##############
#' Construct a Dictionary object
#' @param keys a list/vector of keys; elements may be of any class and need
#'   not share a class.
#' @param vals a list/vector of values, one per key; any classes allowed.
#' @return (invisibly) a new Dictionary, i.e. a hashed environment of class
#'   "Dictionary" populated with the given key/value pairs
#' @export
Dictionary <- function(keys=NULL, vals=NULL){
  if (length(keys) != length(vals)) {
    stop('The length of keys must be equal to the length of vals.')
  }
  d <- structure(new.env(hash = TRUE), class = 'Dictionary')
  # Insert pairs one by one via the put() generic.
  for (idx in seq_along(keys)) {
    put(d, keys[[idx]], vals[[idx]])
  }
  invisible(d)
}
#' Check if a Dictionary is empty
#' @param dict a Dictionary object
#' @return TRUE when the dictionary holds no entries, FALSE otherwise
#' @export
isEmpty.Dictionary <- function(dict){
  # A Dictionary is a hashed environment; count its bindings.
  n_entries <- length(names(dict))
  n_entries == 0
}
#' Check if a Dictionary contains a specific key
#' @param dict A Dictionary object
#' @param key query key
#' @return TRUE if an entry with this key exists, FALSE otherwise
#' @details Fixes the roxygen parameter name (was documented as \code{x}) and
#'   renames the local variable, which previously shadowed the
#'   \code{key_hash()} function it was assigned from.
#' @export
contains.Dictionary <- function(dict, key){
  h <- key_hash(key)
  return(h %in% names(dict))
}
#' Return the value associated with a specific key
#' @param dict A Dictionary object
#' @param key query key
#' @return the value stored under \code{key}
#' @details Errors when the key is absent. The local variable is renamed so it
#'   no longer shadows the \code{key_hash()} function.
#' @export
get.Dictionary <- function(dict, key){
  h <- key_hash(key)
  if (!(h %in% names(dict))) {
    stop('No such key contained in dictionary')
  }
  dict[[h]]$val
}
#' Put a key/value pair into the dictionary
#' @param dict A Dictionary object
#' @param key key to be added
#' @param val associated value
#' @param overwrite whether to overwrite if key already exists in dict
#' @return a pointer to the updated Dictionary object, invisibly
#' @details The dictionary is updated in place. Fixes the use of the
#'   elementwise operator \code{|} on a scalar condition: \code{||}
#'   short-circuits, so the (linear-scan) \code{names()} lookup is skipped
#'   entirely when \code{overwrite = TRUE}. Also renames the local variable
#'   that shadowed \code{key_hash()}.
#' @export
put.Dictionary <- function(dict, key, val, overwrite=TRUE){
  h <- key_hash(key)
  if (overwrite || !(h %in% names(dict))) {
    # Each entry stores the original key alongside the value so keys() can
    # recover non-string keys.
    dict[[h]] <- list(key=key, val=val)
  }
  invisible(dict)
}
#' Copy a Dictionary
#' @param dict Dictionary object to be copied
#' @return an independent clone of \code{dict}; later changes to either
#'   object do not affect the other
#' @export
copy.Dictionary <- function(dict){
  clone <- structure(new.env(hash=TRUE), class='Dictionary')
  # Environments have reference semantics, so rebind every entry explicitly.
  for (nm in ls(dict)) {
    assign(nm, dict[[nm]], envir = clone)
  }
  clone
}
#' Remove all entries in a Dictionary object
#' @param dict A Dictionary object
#' @return the same dictionary with all entries removed, invisibly
#' NOTE(review): no `clear` S3 generic (UseMethod) is defined in this file,
#' so `clear(dict)` will not dispatch here unless a generic exists elsewhere;
#' callers would have to invoke clear.Dictionary() directly — confirm.
#' @export
clear.Dictionary <- function(dict){
  # ls() lists the environment's bindings; removing them empties it in place.
  rm(list=ls(envir=dict), envir=dict)
  return(invisible(dict))
}
#' Return the keys stored in a dictionary
#' @param dict A Dictionary object
#' @return the original (unhashed) keys; sapply() simplifies to a vector when
#'   all keys share a type, otherwise a list. NOTE(review): for an empty
#'   dictionary sapply() over character(0) returns list() — confirm callers
#'   handle that.
#' @export
keys.Dictionary <- function(dict){
  sapply(ls(dict), function(x) dict[[x]]$key, USE.NAMES=F)
}
#' Return the values stored in a dictionary
#' @param dict A Dictionary object
#' @return the stored values, simplified by sapply() as for keys.Dictionary()
#' @export
vals.Dictionary <- function(dict){
  sapply(ls(dict), function(x) dict[[x]]$val, USE.NAMES=F)
}
#' Remove an entry from the dictionary and return its value
#' @param dict A Dictionary object
#' @param key query key; when NULL, an arbitrary entry (the first by hash
#'   order) is popped
#' @return value associated with the removed entry
#' @details Entry removal happens in place as a side effect. The local
#'   variables are renamed so they no longer shadow the \code{key_hash()}
#'   function.
#' @export
pop.Dictionary <- function(dict, key=NULL){
  if (is.null(key)){
    hashes <- names(dict)
    if (length(hashes) == 0) stop('Dictionary is empty.')
    h <- hashes[1]
  } else {
    h <- key_hash(key)
  }
  if (!(h %in% names(dict))) stop('No such key contained in dictionary')
  val <- dict[[h]]$val
  rm(list = h, envir=dict)
  return(val)
}
#' Printing method for the 'Dictionary' class
#' @param x object of the 'Dictionary' class
#' @param ... other arguments used in \code{print} (ignored)
#' @details Shows at most the first 10 entries, with "..." marking any
#'   omission. Uses \code{<-} for assignment inside the loop (the original
#'   used \code{=}, which the tidyverse style guide reserves for arguments).
#' @export
print.Dictionary <- function(x, ...){
  hashed_keys <- names(x)
  omission <- FALSE
  if (length(hashed_keys) > 10) {
    hashed_keys <- head(hashed_keys, 10)
    omission <- TRUE
  }
  cat('[')
  for (h in hashed_keys){
    key <- x[[h]]$key
    val <- x[[h]]$val
    cat('\n  key = ', toStr(key), '; val = ', toStr(val), sep='')
  }
  if (omission) cat('\n  ...\n]\n') else cat('\n]\n')
}
########### Set ##############
#' Construct a Set object
#' @param elements a list/vector of elements; members may be of any class and
#'   need not share a class.
#' @return (invisibly) a new Set, i.e. a hashed environment of class "Set"
#'   populated with the given elements (duplicates collapse to one entry)
#' @export
Set <- function(elements=NULL){
  s <- structure(new.env(hash=TRUE), class='Set')
  # Insert elements one by one via the put() generic.
  for (idx in seq_along(elements)) {
    put(s, elements[[idx]])
  }
  invisible(s)
}
#' Check if a Set is empty
#' @param set a Set object
#' @return TRUE when the set holds no elements, FALSE otherwise
#' @export
isEmpty.Set <- function(set){
  # A Set is a hashed environment; count its bindings.
  n_members <- length(names(set))
  n_members == 0
}
#' Check if a Set contains a specific element
#' @param set A Set object
#' @param element query element
#' @return TRUE if the element is present, FALSE otherwise
#' @details Renames the local variable, which previously shadowed the
#'   \code{key_hash()} function it was assigned from.
#' @export
contains.Set <- function(set, element){
  h <- key_hash(element)
  return(h %in% names(set))
}
#' Put an element into the set
#' @param set A Set object
#' @param element element to be added; adding an element that is already
#'   present is a no-op
#' @return a pointer to the updated Set object, invisibly
#' @details The set is updated in place. Renames the local variable that
#'   shadowed the \code{key_hash()} function.
#' @export
put.Set <- function(set, element){
  h <- key_hash(element)
  if (!(h %in% names(set))) set[[h]] <- element
  invisible(set)
}
#' Copy a Set
#' @param set Set object to be copied
#' @return an independent clone of \code{set}; later changes to either
#'   object do not affect the other
#' @export
copy.Set <- function(set){
  clone <- structure(new.env(hash=TRUE), class='Set')
  # Environments have reference semantics, so rebind every member explicitly.
  for (nm in ls(set)) {
    assign(nm, set[[nm]], envir = clone)
  }
  clone
}
#' Remove all entries in a Set object
#' @param set A Set object
#' @return the same set with all entries removed, invisibly
#' NOTE(review): no `clear` S3 generic (UseMethod) is defined in this file,
#' so `clear(set)` will not dispatch here unless a generic exists elsewhere;
#' callers would have to invoke clear.Set() directly — confirm.
#' @export
clear.Set <- function(set){
  # ls() lists the environment's bindings; removing them empties it in place.
  rm(list=ls(envir=set), envir=set)
  return(invisible(set))
}
#' Return the elements stored in a set
#' @param set A Set object
#' @return the stored elements; sapply() simplifies to a vector when all
#'   elements share a type, otherwise a list. NOTE(review): for an empty set
#'   sapply() over character(0) returns list() — confirm callers handle that.
#' @export
vals.Set <- function(set){
  sapply(ls(set), function(x) set[[x]], USE.NAMES=F)
}
#' Remove an arbitrary element from the set and return it
#' @param set A Set object
#' @return the removed element (the first by hash order)
#' @details The set is modified in place. The local variable is renamed so it
#'   no longer shadows the \code{key_hash()} function.
#' @export
pop.Set <- function(set){
  hashes <- names(set)
  if (length(hashes) == 0) stop('Set is empty.')
  h <- hashes[1]
  element <- set[[h]]
  rm(list = h, envir = set)
  return(element)
}
#' Printing method for the 'Set' class
#' @param x object of the 'Set' class
#' @param ... other arguments used in \code{print} (ignored)
#' @details Shows at most the first 10 elements, with "..." marking any
#'   omission. Uses \code{<-} for assignment inside the loop (the original
#'   used \code{=}, which the tidyverse style guide reserves for arguments).
#' @export
print.Set <- function(x, ...){
  hashed_keys <- names(x)
  omission <- FALSE
  if (length(hashed_keys) > 10) {
    hashed_keys <- head(hashed_keys, 10)
    omission <- TRUE
  }
  cat('{')
  for (h in hashed_keys){
    key <- x[[h]]
    cat('\n  ', toStr(key), sep='')
  }
  if (omission) cat('\n  ...\n}\n') else cat('\n}\n')
}
|
#############################################################
##Title: pce_synthesis_absorption_data
##Purpose: Prepping absorption data for synthesis report for all IHME/PATH PCE countries
##Author: Matthew Schneider
##Date: 10/30/2020, last updated and reran - 1/28/21
##Input Files:
## - C:\Users\mts24\Box Sync\Global Fund Files\tableau_data
## \budgetRevisions_with_frBudgets_activityLevel.csv
##Output Files:
## 1. draft_synthesis_absorption_quant.xlsx - absorption for NFM2 and NFM3 total, rssh, hrg-equity across all IHME/PATH countries
#############################################################
rm(list = ls()) #clear memory
# NOTE(review): rm(list = ls()) wipes the caller's workspace when sourced —
# fine for a standalone script, dangerous if sourced interactively.
# Platform-specific drive/share roots used to build file paths below.
if (Sys.info()[1] == "Linux"){
  j <- "/home/j"
  h <- paste0("/homes/",Sys.info()[7])
  s <- paste0("/share/resource_tracking/phc/data/nha/int/")
  f <- paste0("/share/resource_tracking/phc/data/nha/fin/")
  k <- paste0("/ihme/cc_resources/libraries/")
}else if (Sys.info()[1] == "Windows"){
  # NOTE(review): `c <- "C:"` masks base::c as a variable; function calls
  # c(...) still resolve to the function, but this is easy to trip over.
  c <- "C:"
  j <- "J:"
  h <- "H:"
  k <- "K:"
}
# Load packages; on the Linux cluster a user library path is prepended,
# on Windows pacman::p_load installs-and-attaches in one call.
if (Sys.info()[1] == "Linux"){
  #load libraries
  .libPaths(c("/share/code/mts24",.libPaths()))
  #install.packages(c("brms", "bayesplot","rstanarm","fastDummies","mipfp"),lib = "/share/code/mts24", INSTALL_opts = c('--no-lock'))
  library(fastDummies, lib.loc = "/share/code/mts24")
  library(readstata13)
  library(data.table)
  library(dplyr)
  library(parallel)
  library(doParallel)
  library(feather)
  library(reshape2)
  library(foreach)
  library(readxl)
  library(ggplot2)
}else if (Sys.info()[1] == "Windows"){
  pacman::p_load(readstata13, magrittr,
                 ISwR,data.table, devtools, ggplot2, ggpubr,
                 plyr, dplyr, parallel,
                 fastDummies, reshape2, readxl,xlsx,
                 dependencies = TRUE)
}
#path to save files
user = as.character(Sys.info()[7])
path <- paste0("C:/Users/",user,"/Box Sync/Global Fund Files/synthesis/data")
##reading in latest budget data - includes NFM2 funding requests, approved for grant making budgets, all revisions, and
##  NFM3 funding requests and grant making budgets
##this dataset of budgets down to activities and cost categories
all_abs_data <- fread(input = paste0(c,"/Users/", user, "/Box Sync/Global Fund Files/tableau_data/all_absorption.csv")) #absorption by intervention and grant for each year (semester)
##need to update RSSH interventions we are considering also part of HRG-Equity
rssh_equity_int <- c("Supportive policy and programmatic environment")
# Flag community-systems modules/interventions as HRG-Equity (equity column
# stores the string "TRUE" to match the file's existing convention).
all_abs_data[gf_module=="Community responses and systems" |
               gf_module=="Community systems strengthening", equity:="TRUE"]
all_abs_data[gf_intervention=="Supportive policy and programmatic environment", equity:="TRUE"]
all_abs_data <- all_abs_data[grant_period!="2016-2019"]
##creating an indicator variable for grants that are Government PRs or Non-Government PRs
# NOTE(review): the := modifies all_abs_data in place; wrapping it in
# unique() only affects what gets auto-printed — the unique() result is
# discarded. Likely leftover from interactive checking.
unique(all_abs_data[,c("iso3","grant_disease","pr"):=tstrsplit(grant,"-")])
all_abs_data[, pr_type:="NGO"]
all_abs_data[pr %in% c("MOH", "MoFPED", "MSPAS","PNLP","CNLS"), pr_type:="Government"]
##reported cumulative budget data
##notice that Uganda has an updated PUDR and is getting double counted (either need to drop newest data or older data as to not double count)
cum_abs_data <- fread(input = paste0(c,"/Users/", user, "/Box Sync/Global Fund Files/tableau_data/cumulative_absorption.csv")) #cumulative (first 2 years for most grants) absorption by intervention and grant
cum_abs_data <- cum_abs_data[end_date=="2019-12-31"]
#########################################################################
##creating average absorption across countries and grants for end of year 1 and end of year 2
##this created object doesn't include Uganda MoFPED grant for the first year due to reporting
# Uganda's MoFPED grants report Semester 1 and 2 separately; combine them
# into a synthetic "Semester 1-2" row before aggregating with the rest.
mofped <- all_abs_data[(grant=="UGA-H-MoFPED" | grant=="UGA-M-MoFPED" | grant=="UGA-T-MoFPED") & (semester=="Semester 1" | semester=="Semester 2")]
mofped_1 <- mofped[,.(budget=sum(budget),expenditure=sum(expenditure)),by=c("loc_name")] #created the semester 1-2 expenditure and budget for Mofped grants
mofped_1[,semester:="Semester 1-2"]
mofped_1_module <- mofped[,.(budget=sum(budget),expenditure=sum(expenditure)),by=c("loc_name","gf_module")] #created the semester 1-2 expenditure and budget for Mofped grants
mofped_1_module[,semester:="Semester 1-2"]
abs_yr12 <- all_abs_data[(semester=="Semester 1-2" | semester=="Semester 3-4" | semester=="Semester 5" | semester=="Semester 5-6"),.(budget=sum(budget),expenditure=sum(expenditure)),by=c("loc_name","semester")]
abs_yr12 <- rbind(abs_yr12,mofped_1) #appending Uganda's MoFPED semester 1-2 budget and expenditures to other funds to then sum
abs_yr12 <- abs_yr12[,.(budget=sum(budget),expenditure=sum(expenditure)),by=c("loc_name","semester")]
abs_yr12[,absorption:=expenditure/budget]
abs_yr12 <- abs_yr12[order(loc_name,semester),c("loc_name","semester","budget","expenditure","absorption")]
##saving absolute expenditure, budget, and absorption at end of each year (different from PUDR reported cumulative absorption)
write.xlsx2(abs_yr12, file = paste0(path,"/draft_synthesis_absorption_quant.xlsx"), sheetName = "Absorption by cntry year",
            col.names = TRUE, row.names = TRUE, append = FALSE)
##The same calculation but including a breakdown by PR Types - Governmental or NGO
mofped <- all_abs_data[(grant=="UGA-H-MoFPED" | grant=="UGA-M-MoFPED" | grant=="UGA-T-MoFPED") & (semester=="Semester 1" | semester=="Semester 2")]
mofped_pr <- mofped[,.(budget=sum(budget),expenditure=sum(expenditure)),by=c("loc_name","pr_type")] #created the semester 1-2 expenditure and budget for Mofped grants
mofped_pr[,semester:="Semester 1-2"]
abs_yr12_pr <- all_abs_data[(semester=="Semester 1-2" | semester=="Semester 3-4" | semester=="Semester 5" | semester=="Semester 5-6"),.(budget=sum(budget),expenditure=sum(expenditure)),by=c("loc_name","semester","pr_type")]
abs_yr12_pr <- rbind(abs_yr12_pr,mofped_pr) #appending Uganda's MoFPED semester 1-2 budget and expenditures to other funds to then sum
abs_yr12_pr <- abs_yr12_pr[,.(budget=sum(budget),expenditure=sum(expenditure)),by=c("loc_name","semester","pr_type")]
abs_yr12_pr[,absorption:=expenditure/budget]
abs_yr12_pr <- abs_yr12_pr[order(loc_name,semester),c("loc_name","pr_type","semester","budget","expenditure","absorption")]
##saving absolute expenditure, budget, and absorption at end of each year (different from PUDR reported cumulative absorption)
write.xlsx2(abs_yr12_pr, file = paste0(path,"/draft_synthesis_absorption_quant.xlsx"), sheetName = "Absorption by cntry year pr",
            col.names = TRUE, row.names = TRUE, append = TRUE)
# Same again, broken down by Global Fund module.
abs_yr12_module <- all_abs_data[(semester=="Semester 1-2" | semester=="Semester 3-4" | semester=="Semester 5" | semester=="Semester 5-6"),.(budget=sum(budget),expenditure=sum(expenditure)),by=c("loc_name","semester","gf_module")]
abs_yr12_module <- rbind(abs_yr12_module,mofped_1_module) #appending Uganda's MoFPED semester 1-2 budget and expenditures to other funds to then sum
abs_yr12_module <- abs_yr12_module[,.(budget=sum(budget),expenditure=sum(expenditure)),by=c("loc_name","semester","gf_module")]
abs_yr12_module[,absorption:=expenditure/budget] #Guatemala seems a little strange - different grant periods - consider excluding certain grants - speak with Francisco
abs_yr12_module <- abs_yr12_module[order(loc_name,gf_module,semester),c("loc_name","gf_module","semester","budget","expenditure","absorption")]
write.xlsx2(abs_yr12_module, file = paste0(path,"/draft_synthesis_absorption_quant.xlsx"), sheetName = "Absorption by cntry year module",
            col.names = TRUE, row.names = TRUE, append = TRUE)
###################################################################################################################
##calculating cumulative absorption from the reported cumulative expenditure and budgeted data reported within PUDRs
cum_abs_module <- cum_abs_data[,.(cumulative_budget=sum(cumulative_budget),cumulative_expenditure=sum(cumulative_expenditure)),by=c("loc_name","gf_module")]
cum_abs_module[,cumulative_absorption:=cumulative_expenditure/cumulative_budget] #Guatemala seems a little strange - different grant periods - consider excluding certain grants - speak with Francisco
cum_abs_country_grant <- cum_abs_data[,.(cumulative_budget=sum(cumulative_budget),cumulative_expenditure=sum(cumulative_expenditure)),by=c("loc_name","grant")]
cum_abs_country_grant[,cumulative_absorption:=cumulative_expenditure/cumulative_budget] #Guatemala seems a little strange - different grant periods - consider excluding certain grants - speak with Francisco
##
##need to calculate cumulative absorption for DRC's M-MOH grant as it is not provided in the PUDRs
abs_yr12_grant <- all_abs_data[(semester=="Semester 1-2" | semester=="Semester 3-4" | semester=="Semester 5" | semester=="Semester 5-6"),.(budget=sum(budget),expenditure=sum(expenditure)),by=c("loc_name","semester","grant")]
# NOTE(review): column name "absoprtion" is misspelled; left as-is because
# it is a data column name (renaming would change downstream references).
abs_yr12_grant[,absoprtion:=expenditure/budget] #Guatemala seems a little strange - different grant periods - consider excluding certain grants - speak with Francisco
cod_m_moh_12_abs <- abs_yr12_grant[grant=="COD-M-MOH"]
cod_m_moh_cum_abs <- cod_m_moh_12_abs[,.(cumulative_budget=sum(budget),cumulative_expenditure=sum(expenditure)),by=c("loc_name","grant")]
cod_m_moh_cum_abs[,cumulative_absorption:=cumulative_expenditure/cumulative_budget]
##appending the calculated cumulative absorption for COD-M-MOH
cum_abs_country_grant_codfix <- rbind(cum_abs_country_grant[grant!="COD-M-MOH"],cod_m_moh_cum_abs)
##Cumulative absorption by country - with the cumulative absorption for the COD-M-MOH grant calculated based on end of year 1 and 2
cum_abs_country_codfix <- cum_abs_country_grant_codfix[,.(cumulative_budget=sum(cumulative_budget),cumulative_expenditure=sum(cumulative_expenditure)),by=c("loc_name")]
cum_abs_country_codfix[,cumulative_absorption:=cumulative_expenditure/cumulative_budget]
#########################################################################
##RSSH
##creating average absorption across countries and grants for end of year 1 and end of year 2
##this created object doesn't include Uganda MoFPED grant for the first year due to reporting
# Same MoFPED Semester 1+2 consolidation as above, restricted to RSSH rows.
mofped_rssh <- all_abs_data[(grant=="UGA-H-MoFPED" | grant=="UGA-M-MoFPED" | grant=="UGA-T-MoFPED") & (semester=="Semester 1" | semester=="Semester 2") & rssh=="TRUE"]
mofped_1_rssh <- mofped_rssh[,.(budget=sum(budget),expenditure=sum(expenditure)),by=c("loc_name")] #created the semester 1-2 expenditure and budget for Mofped grants
mofped_1_rssh[,semester:="Semester 1-2"]
abs_yr12_rssh <- all_abs_data[(semester=="Semester 1-2" | semester=="Semester 3-4" | semester=="Semester 5" | semester=="Semester 5-6") & rssh=="TRUE",.(budget=sum(budget),expenditure=sum(expenditure)),by=c("loc_name","semester")] #this drops the MoFPED PUDR data that is seperated 1 and 2, which is then appended below
abs_yr12_rssh <- rbind(abs_yr12_rssh,mofped_1_rssh) #appending Uganda's MoFPED semester 1-2 budget and expenditures to other funds to then sum
abs_yr12_rssh <- abs_yr12_rssh[,.(budget=sum(budget),expenditure=sum(expenditure)),by=c("loc_name","semester")]
abs_yr12_rssh[,absorption:=expenditure/budget] #Guatemala seems a little strange - different grant periods - consider excluding certain grants - speak with Francisco
abs_yr12_rssh <- abs_yr12_rssh[order(loc_name,semester),c("loc_name","semester","budget","expenditure","absorption")]
###################
write.xlsx2(abs_yr12_rssh, file = paste0(path,"/draft_synthesis_absorption_quant.xlsx"), sheetName = "RSSH Absorption by cntry year",
            col.names = TRUE, row.names = TRUE, append = TRUE)
################################################
##RSSH doing the same calculations by modules
###############################################
mofped_rssh_module <- all_abs_data[(grant=="UGA-H-MoFPED" | grant=="UGA-M-MoFPED" | grant=="UGA-T-MoFPED") & (semester=="Semester 1" | semester=="Semester 2") & rssh=="TRUE"]
mofped_1_rssh_module <- mofped_rssh_module[,.(budget=sum(budget),expenditure=sum(expenditure)),by=c("loc_name","gf_module")] #created the semester 1-2 expenditure and budget for Mofped grants
mofped_1_rssh_module[,semester:="Semester 1-2"]
abs_yr12_rssh_module <- all_abs_data[(semester=="Semester 1-2" | semester=="Semester 3-4" | semester=="Semester 5" | semester=="Semester 5-6") & rssh=="TRUE",.(budget=sum(budget),expenditure=sum(expenditure)),by=c("loc_name","semester","gf_module")]
abs_yr12_rssh_module <- rbind(abs_yr12_rssh_module,mofped_1_rssh_module) #appending Uganda's MoFPED semester 1-2 budget and expenditures to other funds to then sum
abs_yr12_rssh_module <- abs_yr12_rssh_module[,.(budget=sum(budget),expenditure=sum(expenditure)),by=c("loc_name","semester","gf_module")]
abs_yr12_rssh_module[,absorption:=expenditure/budget] #Guatemala seems a little strange - different grant periods - consider excluding certain grants - speak with Francisco
abs_yr12_rssh_module <- abs_yr12_rssh_module[order(loc_name,gf_module,semester),c("loc_name","gf_module","semester","budget","expenditure","absorption")]
##################################################
write.xlsx2(abs_yr12_rssh_module, file = paste0(path,"/draft_synthesis_absorption_quant.xlsx"), sheetName = "RSSH Absorption by cntry year module",
col.names = TRUE, row.names = TRUE, append = TRUE)
abs_yr12_rssh_module_all <- abs_yr12_rssh_module[,.(budget=sum(budget),expenditure=sum(expenditure)),by=c("semester","gf_module")]
abs_yr12_rssh_module_all[,absoprtion:=expenditure/budget] #Guatemala seems a little strange - different grant periods - consider excluding certain grants - speak with Francisco
#creating a unique list of RSSH interventions to capture these fromt the cumulative absorption reported
rssh_modules <- data.table(unique(all_abs_data[rssh=="TRUE",gf_module]))
colnames(rssh_modules) <- "gf_module"
rssh_interventions <- data.table(unique(all_abs_data[rssh=="TRUE",gf_intervention]))
colnames(rssh_interventions) <- "gf_intervention"
###################################################################################################################
##caculating cumulative absorption from the reported cumulative expenditure and budgeted data reported within PURDs
cum_abs_module_rssh <- cum_abs_data[,.(cumulative_budget=sum(cumulative_budget),cumulative_expenditure=sum(cumulative_expenditure)),by=c("loc_name","gf_module")]
cum_abs_module_rssh <- merge(cum_abs_module_rssh,rssh_modules, by = "gf_module")
cum_abs_module_rssh[,cumulative_absoprtion:=cumulative_expenditure/cumulative_budget] #Guatemala seems a little strange - different grant periods - consider excluding certain grants - speak with Francisco
cum_abs_country_grant_rssh <- cum_abs_data[,.(cumulative_budget=sum(cumulative_budget),cumulative_expenditure=sum(cumulative_expenditure)),by=c("loc_name","grant","gf_module")]
cum_abs_country_grant_rssh <- merge(cum_abs_country_grant_rssh,rssh_modules, by = "gf_module")
cum_abs_country_grant_rssh <- cum_abs_country_grant_rssh[,.(cumulative_budget=sum(cumulative_budget),cumulative_expenditure=sum(cumulative_expenditure)),by=c("loc_name","grant")]
cum_abs_country_grant_rssh[,cumulative_absoprtion:=cumulative_expenditure/cumulative_budget] #Guatemala seems a little strange - different grant periods - consider excluding certain grants - speak with Francisco
##
##need to calcualte RSSH cumulative absorption for DRC's M-MOH grant as it isn't not provided in the PUDRs
abs_yr12_grant_rssh <- all_abs_data[(semester=="Semester 1-2" | semester=="Semester 3-4" | semester=="Semester 5" | semester=="Semester 5-6") & rssh=="TRUE",.(budget=sum(budget),expenditure=sum(expenditure)),by=c("loc_name","semester","grant")]
abs_yr12_grant_rssh[,absoprtion:=expenditure/budget] #Guatemala seems a little strange - different grant periods - consider excluding certain grants - speak with Francisco
cod_m_moh_12_abs_rssh <- abs_yr12_grant_rssh[grant=="COD-M-MOH"]
cod_m_moh_cum_abs_rssh <- cod_m_moh_12_abs_rssh[,.(cumulative_budget=sum(budget),cumulative_expenditure=sum(expenditure)),by=c("loc_name","grant")]
cod_m_moh_cum_abs_rssh[,cumulative_absoprtion:=cumulative_expenditure/cumulative_budget]
##appending the calcualted cumulative absorption for COD-M-MOH
cum_abs_country_grant_codfix_rssh <- rbind(cum_abs_country_grant_rssh[grant!="COD-M-MOH"],cod_m_moh_cum_abs_rssh)
##Cumlative absorption by country - with the cumulative absorption for the COD-M-MOH grant calculated based on end of year 1 and 2
cum_abs_country_codfix_rssh <- cum_abs_data[,.(cumulative_budget=sum(cumulative_budget),cumulative_expenditure=sum(cumulative_expenditure)),by=c("loc_name")]
#########################################################################
##HRG-Equity
##creating average absorption across countries and grants for end of year 1 and end of year 2
##this created object doesn't include Uganda MoFPED grant for the first year due to reporting
mofped_equity <- all_abs_data[(grant=="UGA-H-MoFPED" | grant=="UGA-M-MoFPED" | grant=="UGA-T-MoFPED") & (semester=="Semester 1" | semester=="Semester 2") & equity=="TRUE"]
mofped_1_equity <- mofped_equity[,.(budget=sum(budget),expenditure=sum(expenditure)),by=c("loc_name")] #created the semester 1-2 expenditure and budget for Mofped grants
mofped_1_equity[,semester:="Semester 1-2"]
abs_yr12_equity <- all_abs_data[(semester=="Semester 1-2" | semester=="Semester 3-4" | semester=="Semester 5" | semester=="Semester 5-6") & equity=="TRUE",.(budget=sum(budget),expenditure=sum(expenditure)),by=c("loc_name","semester")] #this drops the MoFPED PUDR data that is seperated 1 and 2, which is then appended below
abs_yr12_equity <- rbind(abs_yr12_equity,mofped_1_equity) #appending Uganda's MoFPED semester 1-2 budget and expenditures to other funds to then sum
abs_yr12_equity <- abs_yr12_equity[,.(budget=sum(budget),expenditure=sum(expenditure)),by=c("loc_name","semester")]
abs_yr12_equity[,absoprtion:=expenditure/budget] #Guatemala seems a little strange - different grant periods - consider excluding certain grants - speak with Francisco
abs_yr12_equity <- abs_yr12_equity[order(loc_name,semester),c("loc_name","semester","budget","expenditure","absorption")]
###################
write.xlsx2(abs_yr12_equity, file = paste0(path,"/draft_synthesis_absorption_quant.xlsx"), sheetName = "HRG-E Absorption by cntry year",
col.names = TRUE, row.names = TRUE, append = TRUE)
################################################
##equity doing the same calculations by modules
###############################################
mofped_equity_module <- all_abs_data[(grant=="UGA-H-MoFPED" | grant=="UGA-M-MoFPED" | grant=="UGA-T-MoFPED") & (semester=="Semester 1" | semester=="Semester 2") & equity=="TRUE"]
mofped_1_equity_module <- mofped_equity_module[,.(budget=sum(budget),expenditure=sum(expenditure)),by=c("loc_name","gf_module")] #created the semester 1-2 expenditure and budget for Mofped grants
mofped_1_equity_module[,semester:="Semester 1-2"]
abs_yr12_equity_module <- all_abs_data[(semester=="Semester 1-2" | semester=="Semester 3-4" | semester=="Semester 5" | semester=="Semester 5-6") & equity=="TRUE",.(budget=sum(budget),expenditure=sum(expenditure)),by=c("loc_name","semester","gf_module")]
abs_yr12_equity_module <- rbind(abs_yr12_equity_module,mofped_1_equity_module) #appending Uganda's MoFPED semester 1-2 budget and expenditures to other funds to then sum
abs_yr12_equity_module <- abs_yr12_equity_module[,.(budget=sum(budget),expenditure=sum(expenditure)),by=c("loc_name","semester","gf_module")]
abs_yr12_equity_module[,absorption:=expenditure/budget] #Guatemala seems a little strange - different grant periods - consider excluding certain grants - speak with Francisco
abs_yr12_equity_module <- abs_yr12_equity_module[order(loc_name,gf_module,semester),c("loc_name","gf_module","semester","budget","expenditure","absorption")]
##################################################
write.xlsx2(abs_yr12_equity_module, file = paste0(path,"/draft_synthesis_absorption_quant.xlsx"), sheetName = "HRG-E Absorption by cntry year module",
col.names = TRUE, row.names = TRUE, append = TRUE)
################################################
##equity doing the same calculations by categories HR, KP, other
###############################################
#making categories for HRG-Equity
##generating indicator variable for modules and interventions that the GF CRG count as "Opt-In" activities for Human Rights
hr_modules <- c("Programs to reduce human rights-related barriers to HIV services","Reducing human rights-related barriers to HIV/TB services",
"Removing human rights and gender related barriers to TB services")
hr_interventions <- c("Addressing stigma","Removing human rights") #this will catch all interventions with the phrase "Addressing stigma"
all_abs_data[gf_module %in% hr_modules,crg_hr:= "TRUE"]
all_abs_data[gf_intervention %like% hr_interventions[1],crg_hr:= "TRUE"]
all_abs_data[gf_intervention %like% hr_interventions[2],crg_hr:= "TRUE"]
all_abs_data[is.na(crg_hr),crg_hr:= "FALSE"]
table(all_abs_data$crg_hr) #this identified 60 interventions across our 4 countries
mofped_equity_int <- all_abs_data[(grant=="UGA-H-MoFPED" | grant=="UGA-M-MoFPED" | grant=="UGA-T-MoFPED") & (semester=="Semester 1" | semester=="Semester 2") & equity=="TRUE"]
mofped_1_equity_int <- mofped_equity_int[,.(budget=sum(budget),expenditure=sum(expenditure)),by=c("loc_name","crg_hr","kp")] #created the semester 1-2 expenditure and budget for Mofped grants
mofped_1_equity_int[,semester:="Semester 1-2"]
abs_yr12_equity_int <- all_abs_data[(semester=="Semester 1-2" | semester=="Semester 3-4" | semester=="Semester 5" | semester=="Semester 5-6") & equity=="TRUE",.(budget=sum(budget),expenditure=sum(expenditure)),by=c("loc_name","semester","crg_hr","kp")]
abs_yr12_equity_int <- rbind(abs_yr12_equity_int,mofped_1_equity_int) #appending Uganda's MoFPED semester 1-2 budget and expenditures to other funds to then sum
abs_yr12_equity_int <- abs_yr12_equity_int[,.(budget=sum(budget),expenditure=sum(expenditure)),by=c("loc_name","semester","crg_hr","kp")]
abs_yr12_equity_int[,absorption:=expenditure/budget] #Guatemala seems a little strange - different grant periods - consider excluding certain grants - speak with Francisco
abs_yr12_equity_int[crg_hr=="TRUE",label:="HRG Funds"]
abs_yr12_equity_int[crg_hr=="FALSE" & kp=="TRUE",label:="KP Funds"]
abs_yr12_equity_int[crg_hr=="FALSE" & kp=="FALSE",label:="Other Vulnerable Populations & \n HRG-Equity Realted Investments"]
abs_yr12_equity_int <- abs_yr12_equity_int[order(loc_name,label,semester),c("loc_name","label","semester","budget","expenditure","absorption")]
##collapsing across semesters
abs_yr12_equity_int_collapsed <- abs_yr12_equity_int[,.(budget=sum(budget),expenditure=sum(expenditure)),by=c("loc_name","label")]
abs_yr12_equity_int_collapsed[,absorption:=expenditure/budget] #Guatemala seems a little strange - different grant periods - consider excluding certain grants - speak with Francisco
##################################################
write.xlsx2(abs_yr12_equity_int, file = paste0(path,"/draft_synthesis_absorption_quant.xlsx"), sheetName = "HRG-E Absorption by cntry yr cats",
col.names = TRUE, row.names = TRUE, append = TRUE)
write.xlsx2(abs_yr12_equity_int_collapsed, file = paste0(path,"/draft_synthesis_absorption_quant.xlsx"), sheetName = "HRG-E Absorption by cntry cats",
col.names = TRUE, row.names = TRUE, append = TRUE)
|
76df78a3d4353f314f9d850d66c64f4054f67aef | 782f25f7a5651fd9300469f2a56d5be78648ff35 | /man/occQuery.Rd | 5fd15dc6f9cda0086f7e5dd7d91326862b12ed2f | [] | no_license | hannahlowens/BridgeTree | f026ba4affff1fd9e4b6328a06ba81d4caed24f3 | 891f973360f35261aca1a966cdfd5b2489e7c83a | refs/heads/master | 2021-06-04T16:58:46.418470 | 2018-10-05T18:08:37 | 2018-10-05T18:08:37 | 95,147,814 | 3 | 1 | null | 2018-10-04T13:49:24 | 2017-06-22T19:05:57 | R | UTF-8 | R | false | true | 1,233 | rd | occQuery.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/occQuery.R
\name{occQuery}
\alias{occQuery}
\title{Query from Taxon List}
\usage{
occQuery(x = NULL, datasources = "gbif", GBIFLogin = NULL,
options = NULL)
}
\arguments{
\item{x}{An object of class \code{\link{bridgeTreeData}} (the results of a \code{\link{studyTaxonList}} search).}
\item{datasources}{A vector of occurrence datasources to search. This is currently limited to GBIF, but may expand in the future.}
\item{GBIFLogin}{An object of class \code{\link{GBIFLogin}} to log in to GBIF to begin the download.}
\item{options}{A vector of options to pass to \code{\link{occ_download}}.}
}
\value{
The object of class \code{\link{bridgeTreeData}} supplied by the user as an argument, with occurrence data search results, as well as metadata on the occurrence sources queried.
}
\description{
Takes rectified list of specimens from \code{\link{studyTaxonList}} and returns point data from \code{\link{rgbif}} with metadata.
}
\examples{
## PLACEHOLDER
studyTaxonList(x = phylogeny, datasources = c('NCBI', 'EOL'));
## PLACEHOLDER
studyTaxonList(x = c("Buteo buteo", "Buteo buteo hartedi", "Buteo japonicus"), datasources = c('NCBI', 'EOL'));
}
|
b206e21cda8ebeec21aca07d99e8ae4e232c30c8 | 31d977aa21cec52b6e0a158e504a0ff887a6e6cb | /main.r | 6bc3b87458dac1c00d2bf8f9028b1a48c1f08cda | [] | no_license | DataSystemsGroupUT/auto_feature_engineering | f0e4c15daaca5608ed7fbdd0aaee49946fff1fe2 | 35a1f3760f6edef2fb13bdd90514144555331e1b | refs/heads/master | 2023-01-02T17:14:08.377143 | 2020-10-20T12:19:02 | 2020-10-20T12:19:02 | 279,294,250 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 726 | r | main.r | library(autofeat)
#dataf <- read.csv("data/gina.csv")
#dataf <- read.csv("data/lymphoma_2classes.csv")
#dataf <- read.csv("data/banknote.csv")
#dataf <- read.csv("data/micro-mass.csv")
#dataf <- read.csv("data/dbworld-bodies.csv")
#dataf <- read.csv("data/nomao.csv")
dataf <- read.csv("input.csv")
y <- factor(dataf$Class)
X <- data.matrix(dataf[ , !(names(dataf) %in% c("Class"))])
i <- sample(1:nrow(X), round(0.3 * nrow(X)))
X_train <- X[i,]
y_train <- y[i]
X_valid <- X[-i,]
y_valid <- y[-i]
#SAFE(x,y,x,y,alpha=0.00001,theta=1)
res <- SAFE(X_train, y_train, X_valid, y_valid)
#res <- SAFE(X, y, X, y)
new_X <- cbind(res$X_train, class = y_train)
write.csv(new_X, "test.csv" ,row.names=FALSE)
print("Done!")
|
791903609fbafa5341efae8b801d6993a440c265 | ad5617166a62559da24178872de79b0347cffafe | /R/tt_load.R | f5d7154c6cb5f91ea297cdfdda7de4eb9a5e968b | [
"MIT"
] | permissive | thecodemasterk/tidytuesdayR | 5f1e80a1c583e4bb5074825549cf0c295ce456ac | 2949ea8763acae374b83998376bafa953b236c69 | refs/heads/master | 2022-11-11T20:28:35.138065 | 2020-06-29T15:44:54 | 2020-06-29T15:44:54 | 276,115,944 | 1 | 0 | null | 2020-06-30T14:03:11 | 2020-06-30T14:03:10 | null | UTF-8 | R | false | false | 2,001 | r | tt_load.R | #' @title Load TidyTuesday data from Github
#'
#' @param x string representation of the date of data to pull, in YYYY-MM-dd
#' format, or just numeric entry for year
#' @param week left empty unless x is a numeric year entry, in which case the
#' week of interest should be entered
#' @param download_files which files to download from repo. defaults and
#' assumes "All" for the week.
#' @param ... pass methods to the parsing functions. These will be passed to
#' ALL files, so be careful.
#' @param auth github Personal Access Token. See PAT section for more
#' information
#'
#' @section PAT:
#' A Github PAT is a personal Access Token. This allows for signed queries to
#' the github api, and increases the limit on the number of requests allowed
#' from 60 to 5000. Follow instructions from
#' <https://happygitwithr.com/github-pat.html> to set the PAT.
#'
#' @return tt_data object, which contains data that can be accessed via `$`,
#' and the readme for the weeks tidytuesday through printing the object or
#' calling `readme()`
#'
#' @importFrom purrr map
#'
#' @examples
#'
#' # check to make sure there are requests still available
#' if(rate_limit_check(quiet = TRUE) > 10){
#'
#' tt_output <- tt_load("2019-01-15")
#' tt_output
#' agencies <- tt_output$agencies
#'
#' }
#'
#' @export
tt_load <-
function(x,
week,
download_files = "All",
...,
auth = github_pat()) {
## check internet connectivity and rate limit
if (!get_connectivity()) {
check_connectivity(rerun = TRUE)
if (!get_connectivity()) {
message("Warning - No Internet Connectivity")
return(NULL)
}
}
## Check Rate Limit
if (rate_limit_check() == 0) {
return(NULL)
}
# download readme and identify files
tt <- tt_load_gh(x, week, auth = auth)
#download files
tt_data <- tt_download(tt, files = download_files, ... , auth = auth)
## return tt_data object
structure(
tt_data,
".tt" = tt,
class = "tt_data"
)
}
|
a4910e45d398ea5de34f9b6ad213d843bb8d2860 | 409490d9da29446f5fb1672eab7e774731554785 | /man/list.insert.Rd | 7704f45cd8cebadf511f1968fe425276fbb9d71a | [
"MIT"
] | permissive | timelyportfolio/rlist | 8004c472fb6835182773d4458c9d604cb03795a3 | d3299cec59c36f9295493feea3d53d21278a8a2a | refs/heads/master | 2020-11-30T23:33:33.408653 | 2014-08-07T16:28:24 | 2014-08-07T16:28:24 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 607 | rd | list.insert.Rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\name{list.insert}
\alias{list.insert}
\title{Insert a series of lists at the given index}
\usage{
list.insert(.data, index, ...)
}
\arguments{
\item{.data}{\code{list}}
\item{index}{The index at which the lists are inserted}
\item{...}{A group of lists}
}
\description{
Insert a series of lists at the given index
}
\examples{
\dontrun{
x <- list(p1 = list(type="A",score=list(c1=10,c2=8)),
p2 = list(type="B",score=list(c1=9,c2=9)),
p3 = list(type="B",score=list(c1=9,c2=7)))
list.if(x,2,p2.1=list(type="B",score=list(c1=8,c2=9)))
}
}
|
d7c967ff86d56d36680a15f2bdcaa30d8e3029cb | b34be5c99b32277e1e56168e780e674f1b4edd85 | /man/getPredRate-methods.Rd | bcb823d55f987d931499215e950c124d3b86e427 | [] | no_license | szuwalski/mizer | a0f812eb4beac161c1cd4792f12c5f15e7a1f438 | 7893216b74c946eada267ff8b1c5b67315528a9b | refs/heads/master | 2021-01-17T04:59:59.925275 | 2014-07-29T06:47:59 | 2014-07-29T06:47:59 | 35,045,198 | 0 | 1 | null | 2015-05-04T16:12:53 | 2015-05-04T16:12:53 | null | UTF-8 | R | false | false | 1,551 | rd | getPredRate-methods.Rd | \docType{methods}
\name{getPredRate}
\alias{getPredRate}
\alias{getPredRate,MizerParams,matrix,numeric,matrix-method}
\alias{getPredRate,MizerParams,matrix,numeric,missing-method}
\alias{getPredRate-method}
\title{getPredRate method for the size based model}
\arguments{
\item{object}{A \code{MizerParams} object.}
\item{n}{A matrix of species abundance (species x size).}
\item{n_pp}{A vector of the background abundance by
size.}
\item{feeding_level}{The current feeding level
(optional). A matrix of size no. species x no. size bins.
If not supplied, is calculated internally using the
\code{getFeedingLevel()} method.}
}
\value{
A three dimensional array (predator species x predator
size x prey size)
}
\description{
Calculates the predation rate of each predator species at
size on prey size. This method is used by the
\code{\link{project}} method for performing simulations.
In the simulations, it is combined with the interaction
matrix (see \code{\link{MizerParams}}) to calculate the
realised predation mortality (see \code{\link{getM2}}).
}
\examples{
\dontrun{
data(NS_species_params_gears)
data(inter)
params <- MizerParams(NS_species_params_gears, inter)
# With constant fishing effort for all gears for 20 time steps
sim <- project(params, t_max = 20, effort = 0.5)
# Get the feeding level at one time step
n <- sim@n[21,,]
n_pp <- sim@n_pp[21,]
getPredRate(params,n,n_pp)
}
}
\seealso{
\code{\link{project}}, \code{\link{getM2}},
\code{\link{getFeedingLevel}} and
\code{\link{MizerParams}}
}
|
8d39aa9025a863e7100a42aac250f0423c270cac | c15e4a1fdb0e59f56c3b3ef82a20cf0cfc7f2c39 | /man/check_load_packages.Rd | ce2ecc91386bc0684ebf5cff488029041c74ce68 | [] | no_license | sheejamk/IPDFileCheck | 05f93b5faebb6a839edb0da230420efb44072cac | e66e3c7b895ce9455348b9d86ef31e12e9942823 | refs/heads/master | 2022-08-28T04:25:38.462930 | 2022-08-13T19:37:57 | 2022-08-13T19:37:57 | 235,594,136 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 478 | rd | check_load_packages.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/IPDFilecheck.R
\name{check_load_packages}
\alias{check_load_packages}
\title{Function to check the package is installed, if not install}
\usage{
check_load_packages(pkg)
}
\arguments{
\item{pkg}{name of package(s)}
}
\value{
0, if packages cant be installed and loaded, else error
}
\description{
Function to check the package is installed, if not install
}
\examples{
check_load_packages("dplyr")
}
|
5cb5923a68657a261a45691a9c4a209647b29d45 | b46bf09a29b075814ff4a0bec715dae885642e35 | /Assignment1.R | aed95d1a594f6a99552e87f6c06ab4b9054b3ac1 | [] | no_license | mt42190n/Mary-Data-Acq | 89f6c4d9cd05d427686f8fe7fc42a4d2f470e237 | 7e2d859c1fc1062174c958343daeb6c7a4e700df | refs/heads/master | 2016-09-06T18:07:56.161025 | 2014-10-20T03:43:07 | 2014-10-20T03:43:07 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 125 | r | Assignment1.R | # R version 3.1.1 and R studio version 0.98.1049
# PostgreSQL version 9.3.5
# Number of observations in data(sales) is 401146 |
2172d00d1c0649e856629287138ff3832ad8cc69 | 3b7b46ff89c6e09564ee59bffdf506d6c235ec50 | /man/white.Rd | 93a859291be77dee53fbb9fed80be8e06096c74b | [] | no_license | JoFrhwld/phoneticChange | 788598dd6ebaabc709bb08e1e006532ddabcc520 | cb649882698511cce2d8850be8748c2dfb8a2304 | refs/heads/master | 2016-09-05T11:55:13.100817 | 2015-06-11T16:41:59 | 2015-06-11T16:41:59 | 24,945,476 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 693 | rd | white.Rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\docType{data}
\name{white}
\alias{white}
\title{Basic Demographic Info from the PNC}
\format{A data frame with 326 rows
\describe{
\item{sex}{speaker sex}
\item{year}{year of interview}
\item{ethnicity}{single character code for reported ethnicity}
\item{schooling}{highest educational attainment}
\item{transcribed}{how many seconds of transcribed speech}
\item{total}{total duration of recording}
\item{nvowels}{number of vowels measured}
\item{idstring}{unique idstring for a speaker}
}}
\source{
Philadelphia Neighborhood Corpus
}
\usage{
white
}
\description{
Basic Demographic Info from the PNC
}
\keyword{datasets}
|
75ade8a55b499ef612bfc25e1a0ef7ea78efe021 | f14e3a8823d00a12c25493ff12a7303c8f8fe305 | /man/gof.Rd | 739583b264ec614ef9a1cb38ef7c97c5468eef0a | [] | no_license | cran/rtrim | 6565c068369efec0e9867b5fe397a641eb859638 | 80239e3f7cbeb66b9540284eed9fa1bd946d4666 | refs/heads/master | 2020-06-17T16:12:35.734973 | 2020-04-21T11:20:02 | 2020-04-21T11:20:02 | 74,989,195 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,348 | rd | gof.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/trim_gof.R
\name{gof}
\alias{gof}
\alias{gof.trim}
\title{Extract TRIM goodness-of-fit information.}
\usage{
gof(x)
\method{gof}{trim}(x)
}
\arguments{
\item{x}{an object of class \code{\link{trim}} (as returned by \code{\link{trim}})}
}
\value{
a list of type "trim.gof", containing elements \code{chi2}, \code{LR}
and \code{AIC}, for Chi-squared, Likelihoof Ratio and Akaike informatiuon content,
respectively.
}
\description{
\code{\link{trim}} computes three goodness-of-fit measures:
\itemize{
\item Chi-squared
\item Likelihood ratio
\item Akaike Information content
}
}
\examples{
data(skylark)
z <- trim(count ~ site + time, data=skylark, model=2)
# prettyprint GOF information
gof(z)
# get individual elements, e.g. p-value
L <- gof(z)
LR_p <- L$LR$p # get p-value for likelihood ratio
}
\seealso{
Other analyses:
\code{\link{coef.trim}()},
\code{\link{confint.trim}()},
\code{\link{index}()},
\code{\link{now_what}()},
\code{\link{overall}()},
\code{\link{overdispersion}()},
\code{\link{plot.trim.index}()},
\code{\link{plot.trim.overall}()},
\code{\link{results}()},
\code{\link{serial_correlation}()},
\code{\link{summary.trim}()},
\code{\link{totals}()},
\code{\link{trim}()},
\code{\link{vcov.trim}()},
\code{\link{wald}()}
}
\concept{analyses}
|
1491697d77d1380e3b0565ba090e1aec09da09ce | cb2ce134ea2aa8b913b4aea2edc7b545e5066528 | /clean_ames.R | 81d19b3ab13f258b48fd5d0c45befacb8c933b24 | [] | no_license | ehcastroh/Housing-Price_Prediction | 290d273b3e26d04c0cfe938c8cd44596943c8f9b | 45bc4fadf253c5f1c16464ca792c866f1eb957f1 | refs/heads/master | 2021-09-05T13:03:44.946205 | 2018-01-27T22:04:47 | 2018-01-27T22:04:47 | 110,748,865 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,540 | r | clean_ames.R | ###----------------------------###
# title: "IEOR 142 Group Project"
# author: "Elias Casto Hernandez"
# date: "November 2017"
# purpose: Perform clean up and convert monthly
# Ames response variables to Quarterly response(s)
###----------------------------###
### Load Libraries and Packages ###
#install.packages("tidyverse")
library(tidyverse)
# read data set (note: previously cleaned)
ames <- read.csv("qtr_ames.csv")
#convert time sold to factor and data frame to tibble
ames$YrSold <- as.factor(ames$YrSold)
ames$MoSold <- as.factor(ames$MoSold)
qtr_ames <- as_tibble(ames)
# convert monthly prices to quarterly averages
mean_qa <- function(x, y, m){
qa_y <- x %>% filter(YrSold %in% c(y))
qb_y <- qa_y %>% filter(MoSold %in% c(m))
qc_y <- mean(qb_y$SalePrice)
}
# function calls
q01_y06 <- mean_qa(qtr_ames, y = "2006",m = c("1","2","3"))
q02_y06 <- mean_qa(qtr_ames, y = "2006",m = c("4","5","6"))
q03_y06 <- mean_qa(qtr_ames, y = "2006",m = c("7","8","9"))
q04_y06 <- mean_qa(qtr_ames, y = "2006",m = c("10","11","12"))
q01_y07 <- mean_qa(qtr_ames, y = "2007",m = c("1","2","3"))
q02_y07 <- mean_qa(qtr_ames, y = "2007",m = c("4","5","6"))
q03_y07 <- mean_qa(qtr_ames, y = "2007",m = c("7","8","9"))
q04_y07 <- mean_qa(qtr_ames, y = "2007",m = c("10","11","12"))
q01_y08 <- mean_qa(qtr_ames, y = "2008",m = c("1","2","3"))
q02_y08 <- mean_qa(qtr_ames, y = "2008",m = c("4","5","6"))
q03_y08 <- mean_qa(qtr_ames, y = "2008",m = c("7","8","9"))
q04_y08 <- mean_qa(qtr_ames, y = "2008",m = c("10","11","12"))
q01_y09 <- mean_qa(qtr_ames, y = "2009",m = c("1","2","3"))
q02_y09 <- mean_qa(qtr_ames, y = "2009",m = c("4","5","6"))
q03_y09 <- mean_qa(qtr_ames, y = "2009",m = c("7","8","9"))
q04_y09 <- mean_qa(qtr_ames, y = "2009",m = c("10","11","12"))
q01_y10 <- mean_qa(qtr_ames, y = "2010",m = c("1","2","3"))
q02_y10 <- mean_qa(qtr_ames, y = "2010",m = c("4","5","6"))
q03_y10 <- mean_qa(qtr_ames, y = "2010",m = c("7","8","9"))
q04_y10 <- mean_qa(qtr_ames, y = "2010",m = c("10","11","12"))
# row bind quartely averages
qtr_resp_ames <- rbind(q01_y06, q02_y06, q03_y06, q04_y06,
q01_y07, q02_y07, q03_y07, q04_y07,
q01_y08, q02_y08, q03_y08, q04_y08,
q01_y09, q02_y09, q03_y09, q04_y09,
q01_y10, q02_y10, q03_y10, q04_y10)
# write out to disk, include row names, and ommit NaN's
write.csv(qtr_resp_ames, file = "Ames Quarterly Responses.csv", row.names=TRUE, na="")
|
2f78eec8bee3bffb29daf322e47b99347b8c2765 | a5be19737a57491c0dfbe41d068558542b2d5e10 | /R/type_2.R | 6b066bfcad29230d49ae0525c24308e7863093bf | [] | no_license | cran/jordan | 02ed2dfc77ae1c2d23b7b37016a24d019ce6ee87 | 576f96a2d484e0f60d1a451a465ea6a7984c4380 | refs/heads/master | 2023-03-29T00:43:24.948503 | 2021-04-08T10:00:02 | 2021-04-08T10:00:02 | 355,965,712 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,941 | r | type_2.R | ## complex Hermitian matrices ("chm"); setClass("complex_herm_matrix") in 'aaa_allclasses.R'
`complex_herm_matrix` <- function(M){new("complex_herm_matrix",x=cbind(M))} # this is the only place new("real_symmetric_matrix",...) is called
`is.complex_herm_matrix` <- function(x){inherits(x,"complex_herm_matrix")}
`r_to_n_chm` <- function(r){sqrt(r)}
`n_to_r_chm` <- function(n){n^2}
`is_ok_chm` <- function(r){ # 'r' = number of rows in [rowwise] matrix
jj <- sqrt(r)
if(jj == round(jj)){
return(jj) # size of nxn complex hermitian matrix
} else {
stop("not correct")
}
}
`valid_chm` <- function(object){
x <- object@x
if(!is.numeric(x)){
return("not numeric")
} else if(!is.matrix(x)){
return("not a matrix")
} else if(is_ok_chm(nrow(x)) < 0){
return("must have appropriate size")
} else {
return(TRUE)
}
}
setValidity("complex_herm_matrix", valid_chm)
`as.complex_herm_matrix` <- function(x,d,single=FALSE){ # single modelled on as.onion()
if(is.complex_herm_matrix(x)){
return(x)
} else if(is.matrix(x)){
return(complex_herm_matrix(x))
} else if(is.vector(x)){
if(single){
return(complex_herm_matrix(x))
} else {
return(numeric_to_complex_herm_matrix(x,d)) # problem! we do not know how big it is
}
} else {
stop("not recognised")
}
}
`numeric_to_complex_herm_matrix` <- function(x,d){stop("no unique coercion")}
`rchm` <- function(n=3,d=5){complex_herm_matrix(matrix(round(rnorm(n*(d*d)),2),ncol=n))}
`chm_id` <- function(n,d){as.complex_herm_matrix(kronecker(chm1_to_vec(diag(nrow=d)),t(rep(1,n))))}
`vec_to_chm1` <- function(x){
r <- length(x)
n <- sqrt(r)
stopifnot(n==round(n))
out <- matrix(0i,n,n)
out[upper.tri(out,FALSE)] <- x[(n+1):(n*(n+1)/2)] + 1i*x[(n*(n+1)/2+1):(n^2)]
out <- out + ht(out)
diag(out) <- x[seq_len(n)]
return(out) # complex hermitian matrix
}
`chm1_to_vec` <- function(M){
c(
Re(diag(M)),
Re(M[upper.tri(M,FALSE)]),
Im(M[upper.tri(M,FALSE)])
)
}
`vec_chmprod_vec` <- function(x,y){
x <- vec_to_chm1(x)
y <- vec_to_chm1(y)
chm1_to_vec((cprod(x,y)+cprod(y,x))/2)
}
setMethod("as.1matrix","complex_herm_matrix",function(x,drop=TRUE){
out <- lapply(seq_along(x), function(i){x[i,drop=TRUE]})
if((length(x)==1) & drop){out <- out[[1]]}
return(out)
} )
`chm_prod_chm` <- function(e1,e2){
jj <- harmonize_oo(e1,e2)
out <- jj[[1]]*0
for(i in seq_len(ncol(out))){
out[,i] <- vec_chmprod_vec(jj[[1]][,i],jj[[2]][,i])
}
return(as.jordan(out,e1))
}
`chm_inverse` <- function(e1){
out <- as.matrix(e1)
for(i in seq_len(ncol(out))){
out[,i] <- chm1_to_vec(solve(e1[i,drop=TRUE])) # the meat
}
return(as.jordan(out,e1))
}
`chm_power_numeric` <- function(e1,e2){
jj <- harmonize_oo(e1,e2)
out <- jj[[1]]*0
for(i in seq_len(ncol(out))){
out[,i] <- chm1_to_vec(mymatrixpower(vec_to_chm1(jj[[1]][,i]),jj[[2]][i])) # the meat
}
return(as.jordan(out,e1))
}
`chm_arith_chm` <- function(e1,e2){
switch(.Generic,
"+" = jordan_plus_jordan(e1, e2),
"-" = jordan_plus_jordan(e1,jordan_negative(e2)),
"*" = chm_prod_chm(e1, e2),
"/" = chm_prod_chm(e1, chm_inverse(e2)),
"^" = stop("chm^chm not defined"),
stop(paste("binary operator \"", .Generic, "\" not defined for chm"))
)
}
`chm_arith_numeric` <- function(e1,e2){
switch(.Generic,
"+" = jordan_plus_numeric(e1, e2),
"-" = jordan_plus_numeric(e1,-e2),
"*" = jordan_prod_numeric(e1, e2),
"/" = jordan_prod_numeric(e1, 1/e2),
"^" = chm_power_numeric(e1, e2),
stop(paste("binary operator \"", .Generic, "\" not defined for chm"))
)
}
`numeric_arith_chm` <- function(e1,e2){
switch(.Generic,
"+" = jordan_plus_numeric(e2, e1),
"-" = jordan_plus_numeric(-e2,e1),
"*" = jordan_prod_numeric(e2, e1),
"/" = jordan_prod_numeric(chm_inverse(e2),e1),
"^" = jordan_power_jordan(e2, e1),
stop(paste("binary operator \"", .Generic, "\" not defined for chm"))
)
}
setMethod("Arith",signature(e1 = "complex_herm_matrix", e2="missing"),
function(e1,e2){
switch(.Generic,
"+" = e1,
"-" = jordan_negative(e1),
stop(paste("Unary operator", .Generic,
"not allowed on chm objects"))
)
} )
setMethod("Arith",signature(e1="complex_herm_matrix",e2="complex_herm_matrix"), chm_arith_chm )
setMethod("Arith",signature(e1="complex_herm_matrix",e2="numeric" ), chm_arith_numeric)
setMethod("Arith",signature(e1="numeric" ,e2="complex_herm_matrix"),numeric_arith_chm )
setMethod("[",signature(x="complex_herm_matrix",i="index",j="missing",drop="logical"),
function(x,i,j,drop){
out <- as.matrix(x)[,i,drop=FALSE]
if(drop){
if(ncol(out)==1){
return(vec_to_chm1(c(out)))
} else {
stop("for >1 element, use as.list()")
}
} else {
return(as.jordan(out,x))
}
} )
setReplaceMethod("[",signature(x="complex_herm_matrix",i="index",j="missing",value="complex_herm_matrix"), # matches rsm equivalent
function(x,i,j,value){
out <- as.matrix(x)
out[,i] <- as.matrix(value) # the meat
return(as.jordan(out,x))
} )
setReplaceMethod("[",signature(x="complex_herm_matrix",i="index",j="ANY",value="ANY"),function(x,i,j,value){stop("second argument redundant")}) # matches rsm equivalent
|
2da4350c380bce755c98fb9b80c783b5c115972c | 5abd83c240c3dbaa25790cc3b8d7ec5bea840282 | /limma_diffexp.R | 79e994fc0d423843f6f8b15328e9fa7edd8903b7 | [] | no_license | gnawhnehpets/R_scripts_baylin | 85a0e96e081e6b6a9594a15ac253c8fdf71fee81 | 67ffde3f5286852568a07c94afb200220055075e | refs/heads/master | 2021-05-29T02:21:31.124724 | 2015-04-22T18:23:42 | 2015-04-22T18:23:42 | 32,940,161 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 25,576 | r | limma_diffexp.R | #PRIMARY CODE FOR PRELIM2 ANALYSIS
#FINAL
# Load the two-color Agilent arrays for cohorts one and two, then build a
# probe-weight matrix that masks control probes (weight 0) and keeps regular
# probes (weight 1) in all downstream normalisation steps.
library(limma)
# NOTE(review): hard-coded absolute path; the script is not portable as-is.
setwd("/home/steve/Desktop/analysis/cohortoneandtwo//")
# Target file maps each array to its Cy3/Cy5 condition and sample labels.
target <- readTargets("/home/steve/Desktop/analysis/cohortoneandtwo//targets_full.txt")
RG <- read.maimages(target, source="agilent", path="/home/steve/Desktop/analysis/cohortoneandtwo/")
# All sample names (Cy3 then Cy5), echoed for a visual sanity check.
cohorttwo<- c(target$Cy3_Sample, target$Cy5_sample)
cohorttwo
# One weight column per array, seeded from ControlType and then binarised:
# control probes (ControlType != 0) -> 0, regular probes -> 1.
RG$weights <- matrix(rep(RG$genes$ControlType,ncol(RG$R)),ncol=ncol(RG$R),byrow=F)
RG$weights[RG$genes$ControlType!=0,] <- 0
RG$weights[RG$genes$ControlType==0,] <- 1
# x2 1 post-PH1876 pre-PH1876
# x2 2 pre-PH1900 post-PH1900
# x1 3 pre-PH1811 pre-PH1816
# x1 4 pre-PH1636 post-PH1640
# x1 5 pre-PH1892 pre-PH1902
# x1 6 pre-PH1622 pre-PH1631
# x2 7 pre-JHH005 post-JHH005
# 8 post-PH1910 pre-PH1910
# x9 pre-PH1604 pre-PH1606
# x1 10 post-JHH004 pre-JHH004
# x2 11 post-PH1612 pre-PH1612
# x1 12 pre-PH1913 pre-PH1886
# x1 13 pre-PH1635 pre-PH1644
# 14 pre-PH1861 post-PH1861
# x2 15 pre-PH1616 post-PH1616
# x3 16 post-PH1844 pre-PH1844 #NO DRY ICE
# x1 17 pre-PH1623 pre-PH1632
# x2 18 pre-PH1827 post-PH1827
# x3 19 post-PH1815 pre-PH1815
# 20 post-PH1544 pre-PH1544
# x2 21 post-PH1843 pre-PH1843
# x3 22 pre-PH1871 post-PH1871
# x1 23 pre-PH1868 pre-PH1887
# 24 pre-PH1545 post-PH1545
# 25 post-PH1869 pre-PH1869
# x1 26 pre-PH1550 pre-PH1600
# Display each array's Cy3/Cy5 condition-sample pairing and the array file
# names (visual sanity check before filtering).
as.data.frame(cbind(as.matrix(paste(target$Cy3, target$Cy3_Sample, sep="-")),as.matrix(paste(target$Cy5, target$Cy5_sample, sep="-"))))
as.data.frame(sub(".*_1_", "", RG$targets$FileName))
# Drop arrays 18 and 6 from both the data and the target table.  Order
# matters: removing index 18 first leaves index 6 still valid.
dat <- RG[,-18]
dat <- dat[,-6]
targets <- target[-18,]
targets <- targets[-6,]
as.data.frame(sub(".*_1_", "", dat$targets$FileName))
#1 - no changes
#2
#pos <- c(1,2,7,8,11,14:16,18:22,24,25)
#3
#pos <- c(8,14,16,19,20,22,24,25)
#4
#pos <- c(8,14,20,24,25)
#5
# Analysis variant #5: keep only arrays 8 and 14 (per the pairing table in
# the comments above, the PH1910 and PH1861 dye-swap pairs); the commented
# pos vectors above correspond to variants #2-#4.
pos <- c(8,14)
dat <- dat[,pos]
colnames(dat)
dim(dat)
targets <- targets[pos,]
dim(targets)
targets
as.data.frame(cbind(as.matrix(paste(targets$Cy3, targets$Cy3_Sample, sep="-")),as.matrix(paste(targets$Cy5, targets$Cy5_sample, sep="-"))))
#1,2,3,4
# Within-array loess normalisation with normexp background correction
# (offset 50), followed by Aquantile normalisation between arrays.
normwithin <-normalizeWithinArrays(dat,method='loess',bc.method='normexp', offset=50)
normbetween <-normalizeBetweenArrays(normwithin,method='Aquantile')
#Remove controls from normwithin/between
# normbetween was derived from normwithin without reordering probes, so
# normbetween's ControlType flags index normwithin's rows as well.
normwithin <- normwithin[normbetween$genes$ControlType==0,]
normbetween <- normbetween[normbetween$genes$ControlType==0,]
#1, 2
dat <- normwithin
tar2 <- targets
#Convert MA back to RG
# RG.MA reconstructs single-channel R/G intensities from the normalised M/A values.
RGb <- RG.MA(normbetween)
#
# plotDensities(RGb)
# names(RGb)
# names(dat)
# # pre-normalization
# boxplot(data.frame(log2(dat$Gb)),main="Green background - pre-normalization", names=paste(targets$Cy3, targets$Cy3_Sample, sep="-"), las=2)
# boxplot(data.frame(log2(dat$Rb)),main="Red background - pre-normalization", names=paste(targets$Cy5, targets$Cy5_sample, sep="-"), las=2)
#
# # post-normalization
# boxplot(data.frame(log2(RGb$G)),main="Green background - normalized", names=paste(targets$Cy3, targets$Cy3_Sample, sep="-"), las=2)
# boxplot(data.frame(log2(RGb$R)),main="Red background - normalized", names=paste(targets$Cy5, targets$Cy5_sample, sep="-"), las=2)
#>>>> SKIP >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#3, 4
### [1] PH1876 post-Cy3_PH1876 pre-Cy5 - filtered
### [2] PH1900 pre-Cy3_PH1900 post-Cy5 - degraded, filtered
### [3] PH1811 pre-Cy3_PH1816 pre-Cy5 - non-pair
### [4] PH1636 pre-Cy3_PH1640 post-Cy5 - non-pair
### [5] PH1892 pre-Cy3_PH1902 pre-Cy5 - non-pair
### [6] PH1622 pre-Cy3_PH1631 pre-Cy5 - non-pair
### [7] JHH005 pre-Cy3_JHH005 post-Cy5 - degraded
# [8] PH1910 post-Cy3_PH1910 pre-Cy5
### [9] PH1604 pre-Cy3_PH1606 pre-Cy5 - non-pair
### [10] JHH004 post-Cy3_JHH004 pre-Cy5 - degraded sample
# [11] PH1612 post-Cy3_PH1612 pre-Cy5
### [12] PH1913 pre-Cy3_PH1886 pre-Cy5 - non-pair
### [13] PH1635 pre-Cy3_PH1644 pre-Cy5 - non-pair
# [14] PH1861 pre-Cy3_PH1861 post-Cy5
#4# [15] PH1616 pre-Cy3_PH1616 post-Cy5 - degraded
#4# [16] PH1844 post-Cy3_PH1844 pre-Cy5
### [17] PH1623 pre-Cy3_PH1632 pre-Cy5 - non-pair
# [18] PH1827 pre-Cy3_PH1827 post-Cy5
# [19] PH1815 post-Cy3_PH1815 pre-Cy5
### [20] PH1544 post-Cy3_PH1544 pre-Cy5 - filtered
#4# [21] PH1843 post-Cy3_PH1843 pre-Cy5
#4# [22] PH1871 pre-Cy3_PH1871 post-Cy5
### [23] PH1868 pre-Cy3_PH1887 pre-Cy5 - non-pair
# [24] PH1545 pre-Cy3_PH1545 post-Cy5
### [25] PH1869 post-Cy3_PH1869 pre-Cy5 - pre sample is too degraded
### [26] PH1550 pre-Cy3_PH1600 pre-Cy5 - non-pair
#3
# filter1 <- c(1,2,3,4,5,6,9,10,12,13,17,20,23,25,26)
# dat <- normwithin[,-filter1]
# tar2 <- targets[-filter1,]
#
# #4
# filter2 <- c(1,2,3,4,5,6,9,10,12,13,15,16,17,20,21,22,23,25,26)
# dat <- normwithin[,-filter2]
# tar2 <- targets[-filter2,]
# #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#
#
# normbetween <-normalizeBetweenArrays(dat,method='Aquantile')
# #Remove controls from normwithin/between
# dat <- dat[normbetween$genes$ControlType==0,]
# normbetween <- normbetween[normbetween$genes$ControlType==0,]
#
# #Convert MA back to RG
# RGb <- RG.MA(normbetween)
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#1,2,3,4
# Build per-channel intensity matrices labelled "<condition>-<sample>",
# collapse replicate probes to one median value per gene name, and split the
# combined matrix into pre- and post-treatment groups by the label prefix.
# cy3
# NOTE(review): the matrix named cy3 is taken from RGb$R (red channel) and
# cy5 from RGb$G (green) -- confirm this channel/label pairing is intended.
cy3 = RGb$R
rownames(cy3) <- RGb$genes$GeneName
colnames(cy3) <- paste(tar2$Cy3, tar2$Cy3_Sample, sep="-")
colnames(cy3)
# cy5
cy5 = RGb$G
rownames(cy5) <- RGb$genes$GeneName
colnames(cy5) <- paste(tar2$Cy5, tar2$Cy5_sample, sep="-")
colnames(cy5)
library(genefilter)
#rsd <- rowSds(dat.m)
#rsd <- rowSds(dat)
# Combine both channels, then collapse duplicate probes: for each column,
# take the median intensity across all rows sharing the same gene name.
dat <- cbind(cy3, cy5)
dat <- apply(dat,2,function(v){tapply(v,names(v),function(x){median(x,na.rm=TRUE)})})
dim(dat)
fullname <- colnames(dat)
fullname
# Group label is everything before the first "-" ("pre" or "post").
groupname <- sub("-.*", "", colnames(dat))
groupname
colnames(dat) <- fullname
colnames(dat)
pre <- as.matrix(dat[,sub("-.*","",colnames(dat))=="pre"])
dim(pre)
colnames(pre)
post <- as.matrix(dat[,sub("-.*","",colnames(dat))=="post"])
dim(post)
colnames(post)
# Rebuild the full matrix with pre columns first, then log2-transform.
prepost <- cbind(pre,post)
dim(prepost)
colnames(prepost)
dat.log <- log2(prepost)
dim(dat.log)
prename <- colnames(pre)
prename
postname <- colnames(post)
postname
# Two-sample, two-sided Student t-test (pooled variance) for one gene's
# expression row.
#
#   x  - named numeric vector of (log2) expression values for one gene;
#        names are sample labels
#   d1 - sample labels of the first group (e.g. the "pre" samples)
#   d2 - sample labels of the second group (e.g. the "post" samples)
#
# Returns the t-test p-value as a single numeric.  Designed to be used with
# apply() over the rows of the expression matrix.
t.test.all.genes <- function(x, d1, d2){
	x1 <- as.numeric(x[d1])
	x2 <- as.numeric(x[d2])
	# var.equal = TRUE gives the classical Student t-test (pooled variance),
	# matching the original analysis; TRUE is used instead of the fragile T.
	t.out <- t.test(x1, x2, alternative="two.sided", var.equal=TRUE)
	as.numeric(t.out$p.value)
}
prename
# One p-value per gene: row-wise pre-vs-post Student t-test on log2 data.
p.dat <- apply(dat.log, 1, t.test.all.genes, d1=prename, d2=postname)
length(p.dat)
# A histogram of the p-values and report how many probesets have a p<.05 and p<.01.
# I divided alpha of 0.05 by the total number of probesets and report how many
# probesets have a p-value less than this value. This is the Bonferroni correction
# step which is a very conservative p-value thresholding method to account for
# multiple testing
#947 probesets have p < .05
length(p.dat[p.dat<.05])
#1 - 508
#2 - 142
#3 - 181
#4 - 531
#5 - 832
length(p.dat[p.dat<.01]) #94 probesets have p < .01
#1 - 52
#2 - 12
#3 - 17
#4 - 85
#5 - 158
# 7 genes in group #4filter2
length(p.dat)
# Bonferroni threshold: alpha = 0.05 divided by the number of tests.
b <- .05/length(p.dat)
b
length(p.dat[p.dat<b])
#1 - 0
#2 - 0
#3 - 0
#4 - 0
#5 - 0
par(mfrow=c(1,2))
hist(p.dat,col="lightblue",xlab="p-values",main="P-value dist’n between\npre and post groups",cex.main=0.9)
abline(v=.05,col=2,lwd=2)
hist(-log10(p.dat),col="lightblue",xlab="log10(p-values)", main="-log10(p.dat) dist’n between\npre and groups",cex.main=0.9)
abline(v= -log10(.05),col=2,lwd=2)
# Calculate mean for each gene, fold change between groups
# fold is the log2 difference (pre - post); fold.lin is the linear ratio.
pre.m <- apply(dat.log[,prename], 1, mean, na.rm=T)
post.m <- apply(dat.log[,postname], 1, mean, na.rm=T)
fold <- pre.m-post.m
fold
fold.lin <- 2^fold
# NOTE(review): fold.lin = 2^fold is always positive, so abs() is a no-op
# here: this filter only selects genes higher in pre (ratio > 2) and never
# down-regulated genes (ratio < 0.5) -- confirm this is intended.
names(p.dat[p.dat<.05 & abs(fold.lin)>2])
sum(fold.lin>2)
#1, #2
# HBB
# HBD
#3 - 0
#4
# HBB
# PAIP2 - poly(A) binding protein interacting protein 2
#5 - 0
names(p.dat[p.dat<.05 & abs(fold.lin)>1.5])
#1, #2
# HBB
# HBD
#3 - 0
#4
# A_24_P169843 - NR
# A_24_P67408 - NR
# A_33_P3351615 - NR
# ATG3 - autophagy related 3
# AY927536 - NR
# BTG3 - BTG family, member 3
# C11orf73 - chromosome 11 open reading frame 73
# C16orf80 - chromosome 16 open reading frame 80
# C3orf26 - cms1 ribosomal small subunit homolog (yeast)
# CAPZA2 - capping protein (actin filament) muscle Z-line, alpha 2
# CIRH1A - cirrhosis, autosomal recessive 1A (cirhin)
# CKS1B - CDC28 protein kinase regulatory subunit 1B
# DDX21 - DEAD (Asp-Glu-Ala-Asp) box helicase 21
# DNAJB9 - DnaJ (Hsp40) homolog, subfamily B, member 9
# EI24 - etoposide induced 2.4
# FAM3C - family with sequence similarity 3, member C
# GLRX3 - glutaredoxin 3
# GPN3 - GPN-loop GTPase 3
# HBB - hemoglobin, beta
# KPNA4 - karyopherin alpha 4 (importin alpha 3)
# METAP2 - methionyl aminopeptidase 2
# NAA50 - N(alpha)-acetyltransferase 50, NatE catalytic subunit
# PAIP2 - poly(A) binding protein interacting protein 2
# PCNP - PEST proteolytic signal containing nuclear protein
# PIGH - phosphatidylinositol glycan anchor biosynthesis, class H
# PRDX3 - peroxiredoxin 3
# PTS - 6-pyruvoyltetrahydropterin synthase
# RPF2 - ribosome production factor 2 homolog (S. cerevisiae)
# SDCBP - syndecan binding protein (syntenin)
# TMEM126B - transmembrane protein 126B
# TSPAN13 - tetraspanin 13
#5
# C1QBP - complement component 1, q subcomponent binding protein
# CHCHD3 - coiled-coil-helix-coiled-coil-helix domain containing 3
# CNIH - cornichon family AMPA receptor auxiliary protein 1
# CYB5B - cytochrome b5 type B (outer mitochondrial membrane)
# EED - embryonic ectoderm development
# EIF1 - eukaryotic translation initiation factor 1
# IER3IP1 - immediate early response 3 interacting protein 1
# LPL - lipoprotein lipase
# MTDH - metadherin
# MTPN - myotrophin
# NAE1 - NEDD8 activating enzyme E1 subunit 1
# PJA1 - praja ring finger 1, E3 ubiquitin protein ligase
# PPP1CC - protein phosphatase 1, catalytic subunit, gamma isozyme
# PRDX3 - peroxiredoxin 3
# STXBP3 - syntaxin binding protein 3
# TCEB2 - transcription elongation factor B (SIII), polypeptide 2 (18kDa, elongin B)
# USP1 - ubiquitin specific peptidase 1
#1
# ATF1 - activating transcription factor 1
# FAM3C - family with sequence similarity 3, member C
# GPN3 - GPN-loop GTPase 3
# HBB - hemoglobin, beta
# HBD - hemoglobin, delta
# KLF3 - Kruppel-like factor 3 (basic)
# NETO2 - neuropilin (NRP) and tolloid (TLL)-like 2
# PLS1 - plastin 1
# STEAP1 - six transmembrane epithelial antigen of the prostate 1
# TIPIN - TIMELESS interacting protein
# Genes with p < .05 and linear fold change > 1.4, and the count thereof.
names(p.dat[p.dat<.05 & abs(fold.lin)>1.4])
length(names(p.dat[p.dat<.05 & abs(fold.lin)>1.4]))
#1
# A_24_P273245 - NR
# ACN9 - ACN9 homolog (S. cerevisiae)
# ATF1 - activating transcription factor 1
# C14orf142 - chromosome 14 open reading frame 142
# C16orf87 - chromosome 16 open reading frame 87
# CCDC91 - coiled-coil domain containing 91
# FAM3C - family with sequence similarity 3, member C
# GPN3 - GPN-loop GTPase 3
# HBB
# HBD
# HSPA13 - heat shock protein 70kDa family, member 13
# INTS12 - integrator complex subunit 12
# KLF3 - Kruppel-like factor 3 (basic)
# LYRM1 - LYR motif containing 1
# MRPL39 - mitochondrial ribosomal protein L39
# MRPS30 - mitochondrial ribosomal protein S30
# NETO2 - neuropilin (NRP) and tolloid (TLL)-like 2
# PIGH - phosphatidylinositol glycan anchor biosynthesis, class H
# PLS1 - plastin 1
# STEAP1 - six transmembrane epithelial antigen of the prostate 1
# TCTN3 - tectonic family member 3
# TIPIN - TIMELESS interacting protein
# TSGA14 - centrosomal protein 41kDa
# UBE2B - ubiquitin-conjugating enzyme E2B
# XRCC4 - X-ray repair complementing defective repair in Chinese hamster cells 4
#2 - HBB, HBD
#3
# BTG3 - BTG family, member 3
# FAM3C - family with sequence similarity 3, member C
# PIGH - phosphatidylinositol glycan anchor biosynthesis, class H
#4
# A_24_P169843 - NR
# A_24_P67408 - NR
# A_33_P3351615 - NR
# AA627135 - NR
# ABCF2 - ATP-binding cassette, sub-family F (GCN20), member 2
# ATG3 - autophagy related 3
# AY927536 - ribosomal protein L10
# BRD7 - bromodomain containing 7
# BTG3 - BTG family, member 3
# C10orf88 - chromosome 10 open reading frame 88
# C11orf73 - chromosome 11 open reading frame 73
# C11orf74 - chromosome 11 open reading frame 74
# C16orf80 - chromosome 16 open reading frame 80
# C16orf87 - chromosome 16 open reading frame 87
# C2orf76 - chromosome 2 open reading frame 76
# C3orf26 - cms1 ribosomal small subunit homolog (yeast)
# CACYBP - calcyclin binding protein
# CAPZA2 - capping protein (actin filament) muscle Z-line, alpha 2
# CFDP1 - craniofacial development protein 1
# CIRH1A - cirrhosis, autosomal recessive 1A (cirhin)
# CKS1B - CDC28 protein kinase regulatory subunit 1B
# COMMD10 - COMM domain containing 10
# CSTF2T - cleavage stimulation factor, 3' pre-RNA, subunit 2, 64kDa, tau variant
# DCTN5 - dynactin 5 (p25)
# DDX21 - DEAD (Asp-Glu-Ala-Asp) box helicase 21
# DNAJB9 - DnaJ (Hsp40) homolog, subfamily B, member 9
# E2F5 - E2F transcription factor 5, p130-binding
# ECT2 - epithelial cell transforming sequence 2 oncogene
# EED - embryonic ectoderm development
# EI24 - etoposide induced 2.4
# FAM3C - family with sequence similarity 3, member C
# FNBP1L - formin binding protein 1-like
# GLRX3 - glutaredoxin 3
# GPN1 - GPN-loop GTPase 1
# GPN3 - GPN-loop GTPase 3
# HBB
# KPNA4 - karyopherin alpha 4 (importin alpha 3)
# MAGOHB - mago-nashi homolog B (Drosophila)
# MCM2 - minichromosome maintenance complex component 2
# METAP2 - methionyl aminopeptidase 2
# MMGT1 - membrane magnesium transporter 1
# MRPL19 - mitochondrial ribosomal protein L19
# NAA50 - N(alpha)-acetyltransferase 50, NatE catalytic subunit
# NUP93 - nucleoporin 93kDa
# PAIP2 - poly(A) binding protein interacting protein 2
# PCNP - PEST proteolytic signal containing nuclear protein
# PIGH - phosphatidylinositol glycan anchor biosynthesis, class H
# PRDX3 - peroxiredoxin 3
# PSMC3 - proteasome (prosome, macropain) 26S subunit, ATPase, 3
# PTS - 6-pyruvoyltetrahydropterin synthase
# RBM7 - RNA binding motif protein 7
# RPF2 - ibosome production factor 2 homolog (S. cerevisiae)
# SDCBP - syndecan binding protein (syntenin)
# SKP1 - S-phase kinase-associated protein 1
# SNRPE - small nuclear ribonucleoprotein polypeptide E
# TAGLN2 - transgelin 2
# TGS1 - trimethylguanosine synthase 1
# TMEM126B - transmembrane protein 126B
# TSPAN13 - tetraspanin 13
# UAP1 - UDP-N-acteylglucosamine pyrophosphorylase 1
# XM_002342506 - NR
# YWHAQ - tyrosine 3-monooxygenase/tryptophan 5-monooxygenase activation protein, theta polypeptide
#5
# A_24_P273245 - NR
# C14orf142 - chromosome 14 open reading frame 142
# C1QBP, CHCHD3, CNIH, CYB5B, EED, EIF1, IER3IP1, LPL, MTDH, MTPN, NAE1, PJA1, PPP1CC, PRDX3, USP1
# COPB1 - coatomer protein complex, subunit beta 1
# DEK - DEK oncogene
# DPH2 - DPH2 homolog (S. cerevisiae)
# GRN - granulin
# HBXIP - late endosomal/lysosomal adaptor, MAPK and MTOR activator 5
# HMMR - hyaluronan-mediated motility receptor (RHAMM)
# MCM2 - minichromosome maintenance complex component 2
# MPG - N-methylpurine-DNA glycosylase
# MRPL39 - mitochondrial ribosomal protein L39
# PLEKHG4 - pleckstrin homology domain containing, family G (with RhoGef domain) member 4
# POLR2K - polymerase (RNA) II (DNA directed) polypeptide K, 7.0kDa
# PRDX2 - peroxiredoxin 2
# SNAPIN - SNAP-associated protein
# STXBP3, TCEB2
# TCTN3 - tectonic family member 3
# TSGA14 - centrosomal protein 41kDa
# UBE2Q2 - ubiquitin-conjugating enzyme E2Q family member 2
names(p.dat[p.dat<.01 & abs(fold.lin)>1.4])
#1
# A_24_P273245
#2 HBB
#2
# CHCHD3 - above
# COPB1 - coatomer protein complex, subunit beta 1
# DEK - DEK oncogene
# DPH2 - DPH2 homolog (S. cerevisiae)
# LPL - above
# MTPN - above
# PPP1CC - above
# PRDX2 - above
# TCEB2 - above
#3 - 0
#4
# ABCF2 - ATP-binding cassette, sub-family F (GCN20), member 2
# C11orf74 - chromosome 11 open reading frame 74
# C2orf76 - chromosome 2 open reading frame 76
# DCTN5 - dynactin 5 (p25)
# EI24 - etoposide induced 2.4
# GPN1 - GPN-loop GTPase 1
# GPN3 - GPN-loop GTPase 3
# MAGOHB - mago-nashi homolog B (Drosophila)
# PIGH - phosphatidylinositol glycan anchor biosynthesis, class H
# PSMC3 - proteasome (prosome, macropain) 26S subunit, ATPase, 3
# PTS - 6-pyruvoyltetrahydropterin synthase
# SNRPE - small nuclear ribonucleoprotein polypeptide E
#5
# CHCHD3 - coiled-coil-helix-coiled-coil-helix domain containing 3
# COPB1 - coatomer protein complex, subunit beta 1
# DEK - DEK oncogene
# DPH2 - DPH2 homolog (S. cerevisiae)
# LPL - lipoprotein lipase
# MTPN - myotrophin
# PPP1CC - protein phosphatase 1, catalytic subunit, gamma isozyme
# PRDX2 - peroxiredoxin 2
# TCEB2 - transcription elongation factor B (SIII), polypeptide 2 (18kDa, elongin B) the protein is not connected to PTS
names(p.dat[p.dat<.01])
#2
# A_33_P3298830 - NR
# AP4S1 - adaptor-related protein complex 4, sigma 1 subunit
# C11orf46 - ADP-ribosylation factor-like 14 effector protein
# C15orf37 - chromosome 15 open reading frame 37
# C4orf33 - chromosome 4 open reading frame 33
# ENST00000439198 - NR
# ENST00000512519 - NR
# GLP1R - glucagon-like peptide 1 receptor
# HBB
# MMP27 - matrix metallopeptidase 27
# VGLL1 - vestigial like 1 (Drosophila)
# XM_002342506 - NR
#3
# A_33_P3230369 - NR
# C13orf31 - laccase (multicopper oxidoreductase) domain containing 1
# DAXX - death-domain associated protein
# ENST00000340284 - NR
# ENST00000409517 - NR
# FAM170B - family with sequence similarity 170, member B
# FBXL21 - F-box and leucine-rich repeat protein 21 (gene/pseudogene)
# KCNJ5 - potassium inwardly-rectifying channel, subfamily J, member 5
# KRT82 - keratin 82
# LOC100133224 - NR
# MED27 - mediator complex subunit 27
# POFUT1 - mediator complex subunit 27
# PRSS45 - protease, serine, 45
# RGS13 - regulator of G-protein signaling 13
# RN28S1 - RNA, 28S ribosomal 5
# SOHLH1 - spermatogenesis and oogenesis specific basic helix-loop-helix 1
# VGLL1 - vestigial like 1 (Drosophila)
#4
# A_33_P3354574 - NR
# A_33_P3370612 - NR
# A_33_P3377714 - NR
# ABCF2 - ATP-binding cassette, sub-family F (GCN20), member 2
# ANKS3 - ankyrin repeat and sterile alpha motif domain containing 3
# ATP6V0E2 - ATPase, H+ transporting V0 subunit e2
# ATPBD4 - diphthamine biosynthesis 6
# C10orf84 - family with sequence similarity 204, member A
# C11orf74 - chromosome 11 open reading frame 74
# C13orf34 - bora, aurora kinase A activator
# C2orf29 - CCR4-NOT transcription complex, subunit 11
# C2orf60 - tRNA-yW synthesizing protein 5
# C2orf69 - chromosome 2 open reading frame 69
# C2orf76 - chromosome 2 open reading frame 76
# C9orf80 - INTS3 and NABP interacting protein
# CHAC2 - ChaC, cation transport regulator homolog 2 (E. coli)
# DCTN5 - dynactin 5 (p25)
# EI24 - etoposide induced 2.4
# ELAC1 - elaC ribonuclease Z 1
# ENST00000340284 - NR
# ENST00000356822 - NR
# ENST00000391369 - NR
# ENST00000414544 - GSN antisense RNA 1
# ENST00000434635 - NR
# FAM161A - family with sequence similarity 161, member A
# FAM170B - family with sequence similarity 170, member B
# FAM91A1 - family with sequence similarity 91, member A1
# FCHSD2 - FCH and double SH3 domains 2
# GNAQ - guanine nucleotide binding protein (G protein), q polypeptide
# GPN1 - GPN-loop GTPase 1
# GPN3 - GPN-loop GTPase 3
# GRIPAP1 - GRIP1 associated protein 1
# HACE1 - HECT domain and ankyrin repeat containing E3 ubiquitin protein ligase 1
# HARBI1 - harbinger transposase derived 1
# HIPK3 - homeodomain interacting protein kinase 3
# HPSE - heparanase
# INTS3 - integrator complex subunit 3
# IQSEC3 - IQ motif and Sec7 domain 3
# KLF11 - Kruppel-like factor 11
# L2HGDH - L-2-hydroxyglutarate dehydrogenase
# LARP4 - La ribonucleoprotein domain family, member 4
# LOC100129195 - NR
# LOC100131101 - NR
# LOC100288842 - UDP-GlcNAc:betaGal beta-1,3-N-acetylglucosaminyltransferase 5 pseudogene
# LOC390595 - ubiquitin associated protein 1-like
# LOC643802 - u3 small nucleolar ribonucleoprotein protein MPP10-like
# LOC729291 - uncharacterized LOC729291
# LRRC8B - leucine rich repeat containing 8 family, member B
# MAGOHB - mago-nashi homolog B (Drosophila)
# MED27 - mediator complex subunit 27
# MFN1 - mitofusin 1
# NKX3-2 - NK3 homeobox 2
# OGFOD1 - 2-oxoglutarate and iron-dependent oxygenase domain containing 1
# OR10H5 - olfactory receptor, family 10, subfamily H, member 5
# PGM3 - phosphoglucomutase 3
# PIGH - phosphatidylinositol glycan anchor biosynthesis, class H
# PIGM - phosphatidylinositol glycan anchor biosynthesis, class M
# POFUT1 - protein O-fucosyltransferase 1
# POGZ - pogo transposable element with ZNF domain
# POLE3 - polymerase (DNA directed), epsilon 3, accessory subunit
# POLR2A - polymerase (RNA) II (DNA directed) polypeptide A, 220kDa
# PSMC3 - proteasome (prosome, macropain) 26S subunit, ATPase, 3
# PTS - 6-pyruvoyltetrahydropterin synthase
# RAD1 - RAD1 homolog (S. pombe)
# RBM18 - RNA binding motif protein 18
# RCAN1 - regulator of calcineurin 1
# RNF40 - ring finger protein 40, E3 ubiquitin protein ligase
# RPRM - reprimo, TP53 dependent G2 arrest mediator candidate
# RPS4XP21 - ribosomal protein S4X pseudogene 21
# SHKBP1 - SH3KBP1 binding protein 1
# SNRNP27 - small nuclear ribonucleoprotein 27kDa (U4/U6.U5)
# SNRPE - small nuclear ribonucleoprotein polypeptide E
# SOHLH1 - spermatogenesis and oogenesis specific basic helix-loop-helix 1
# SRFBP1 - serum response factor binding protein 1
# SYF2 - SYF2 pre-mRNA-splicing factor
# TAF1A - TATA box binding protein (TBP)-associated factor, RNA polymerase I, A, 48kDa
# VGLL1 - vestigial like 1 (Drosophila)
# VSIG8 - V-set and immunoglobulin domain containing 8
# VSTM1 - V-set and transmembrane domain containing 1
# WDR92 - WD repeat domain 92
# WDSUB1 - WD repeat, sterile alpha motif and U-box domain containing 1
# ZBTB40 - zinc finger and BTB domain containing 40
# ZMAT5 - zinc finger, matrin-type 5
# ZNF230 - zinc finger protein 230
# ZNF828 - chromosome alignment maintaining phosphoprotein 1
names(p.dat[p.dat<.01 & abs(fold.lin)>2])
#1 - 0
#2 - HBB
#3 - 0
#4 - 0
#5 - 0
# Transform the p-value (-1*log10(p-value)) and create a volcano plot with the
# p-value and fold change vectors (see the lecture notes). Make sure to use a
# log10 transformation for the p-value and a log2 (R function log2()) transformation
# for the fold change. Draw the horizontal lines at fold values of 2 and -2 (log2 value=1)
# and the vertical p-value threshold line at p=.05 (remember that it is transformed in the plot).
#template
# The four volcano panels below differed only in their p-value / fold-change
# cut-offs and title, so the shared plotting logic is factored into a single
# helper.  p.trans is still assigned at top level, as before, for any later
# (currently commented-out) code that reads it.
dev.off()
op <- par(mfrow=c(2,2))
p.trans <- -1 * log10(p.dat)

# Draw one volcano panel: all genes in black, genes passing both the p-value
# cut-off and the up/down fold-change cut-off highlighted in red/green.
#   pval_cut - raw p-value threshold (vertical line at -log10(pval_cut))
#   fc_cut   - linear fold-change threshold (horizontal lines at +/- log2)
#   main     - panel title
plot_volcano <- function(pval_cut, fc_cut, main) {
    sig  <- p.trans > -log10(pval_cut)
    up   <- sig & fold >  log2(fc_cut)
    down <- sig & fold < -log2(fc_cut)
    plot(range(p.trans), range(fold), type='n',
         xlab='-1*log10(p-value)', ylab='fold change', main=main)
    points(p.trans, fold, col='black', pch=21, bg=1)
    points(p.trans[up],   fold[up],   col=1, bg=2, pch=21)
    points(p.trans[down], fold[down], col=1, bg=3, pch=21)
    abline(v= -log10(pval_cut))
    abline(h= -log2(fc_cut))
    abline(h=log2(fc_cut))
}

plot_volcano(.05, 2,   'Volcano Plot\npre and post group (p<.05, fc>abs(2))')
#2
plot_volcano(.01, 2,   'Volcano Plot\npre and post group (p<.01, fc>abs(2))')
#3
plot_volcano(.05, 1.4, 'Volcano Plot\npre and post group (p<.05, fc>abs(1.4))')
#4
plot_volcano(.01, 1.4, 'Volcano Plot\npre and post group (p<.01, fc>abs(1.4))')
par(op)
# #5
# dev.off()
# pval_cut = .1
# fc_cut = 2
# p.trans <- -1 * log10(p.dat)
# plot(range(p.trans),range(fold),type='n',xlab='-1*log10(p-value)',ylab='fold change',main='Volcano Plot\npre and post group differences')
# points(p.trans,fold,col='black',pch=21,bg=1)
# points(p.trans[(p.trans> -log10(pval_cut)&fold>log2(fc_cut))],fold[(p.trans> -log10(pval_cut)&fold>log2(fc_cut))],col=1,bg=2,pch=21)
# points(p.trans[(p.trans> -log10(pval_cut)&fold< -log2(fc_cut))],fold[(p.trans> -log10(pval_cut)&fold< -log2(fc_cut))],col=1,bg=3,pch=21)
# abline(v= -log10(pval_cut))
# abline(h= -log2(fc_cut))
# abline(h=log2(fc_cut))
|
c0828e1084985436cbcab5a8630c4f443e9a36da | 6051977d07e57b97c6c664e9a03732f4a6d2bdb3 | /scripts/RSnakeChunks/obsolete/gene_lists_comparison_JvH.R | 683d56cf68c940592bf18a6586141c744dc10f3d | [] | no_license | SnakeChunks/SnakeChunks | 0ce2fa1b1cc1c54bb836ed00682faea0c1fb4ea8 | c3cc20e39833724ce6feee1248599bf5a2a121fc | refs/heads/master | 2021-06-02T12:17:43.374666 | 2019-09-07T00:09:13 | 2019-09-07T00:09:13 | 95,784,510 | 21 | 9 | null | 2017-07-21T18:05:49 | 2017-06-29T14:10:38 | R | UTF-8 | R | false | false | 8,002 | r | gene_lists_comparison_JvH.R | ## compare gene list returned by different analyses
## - DEG genes detected by RNA-seq
## - TF target genes detected by ChIp-seq
## - reference target genes annotated in RegulonDB
library(VennDiagram)
# NOTE(review): setwd() on a hard-coded home-relative path; not portable.
dir.main <- "~/FNR_analysis"
setwd(dir.main)
# Input locations and analysis settings.  All file paths are relative to
# dir.main; "venn.format" is a space-separated list of image formats.
parameters <- list(
"chip_genes" = "ChIP-seq/results/peaks/FNR_vs_input/homer/FNR_vs_input_cutadapt_bowtie2_homer_gene_list.txt",
"rna_genes" = "RNA-seq/results/diffexpr/FNR_vs_WT/DESeq2/FNR_vs_WT_cutadapt_bwa_featureCounts_DESeq2_gene_list.txt",
"gene.descriptions" = "data/regulonDB/GeneProductSet.txt",
"TFBS" = "data/regulonDB/BindingSiteSet.txt",
"TUs" = "data/regulonDB/TUSet.txt",
"TF" = "FNR",
"venn.format" = "png svg"
)
# Output directory and file-name prefixes for the Venn diagram and table.
output <- list(
"dir" = "integration",
"venn" = "ChIP-RNA-regulons_Venn",
"annotated_genes" = "ChIP-RNA-regulons_table"
)
#### Load gene description table ####
# Columns:
# (1) Gene identifier assigned by RegulonDB
# (2) Gene name
# (3) Blattner number (bnumber) of the gene
# (4) Gene left end position in the genome
# (5) Gene right end position in the genome
# (6) DNA strand where the gene is coded
# (7) Product name of the gene
# (8) Evidence that supports the existence of the gene
# (9) PMIDs list
# (10) Evidence confidence level (Confirmed, Strong, Weak)
# RegulonDB flat files are tab-delimited with '#' header/comment lines.
gene.table <- read.delim(file = parameters[["gene.descriptions"]],
comment.char = "#", as.is=TRUE,
quote = NULL)
# NOTE(review): "PIMDs" below is likely a typo for "PMIDs" (see column 9 above).
names(gene.table) <- c("gene_id", "gene_name", "bnumber", "gene_left", "gene_right", "strand",
"product", "evidence", "PIMDs", "evidence_level")
# View(gene.table)
#### Load TFBS ####
# Columns:
# (1) Transcription Factor (TF) identifier assigned by RegulonDB
# (2) TF name
# (3) TF binding site (TF-bs) identifier assigned by RegulonDB
# (4) TF-bs left end position in the genome
# (5) TF-bs right end position in the genome
# (6) DNA strand where the TF-bs is located
# (7) TF-Gene interaction identifier assigned by RegulonDB (related to the "TF gene interactions" file)
# (8) Transcription unit regulated by the TF
# (9) Gene expression effect caused by the TF bound to the TF-bs (+ activation, - repression, +- dual, ? unknown)
# (10) Promoter name
# (11) Center position of TF-bs, relative to Transcription Start Site
# (12) TF-bs sequence (upper case)
# (13) Evidence that supports the existence of the TF-bs
# (14) Evidence confidence level (Confirmed, Strong, Weak)
TFBS <- read.delim(file = parameters[["TFBS"]],
comment.char = "#",
quote = NULL)
names(TFBS) <- c("TF_id", "TF_name", "TFBS_id", "TFBS_left", "TFBS_right", "strand",
"interaction_id", "TU_name", "effect", "promoter_name", "TFBS_center", "TFBS_sequence",
"evidence", "conf_level")
# View(TFBS)
#### Load transcription units ####
# Columns:
# (1) Transcription Unit identifier assigned by RegulonDB
# (2) Transcription unit name
# (3) Operon name containing the transcription unit
# (4) Name of the gene(s) contained in the transcription unit
# (5) Promoter Name
# (6) Evidence that supports the existence of the transcription unit
# (7) Evidence confidence level (Confirmed, Strong, Weak)
TUs <- read.delim(file = parameters[["TUs"]],
comment.char = "#",
quote = NULL)
names(TUs) <- c("TU_id", "TU_name", "operon_name", "gene_names", "promoter_name", "evidence", "conf_level")
# View(TUs)
message("Getting RegulonDB data for factor ", parameters[["TF"]])
#### Select reference sites ####
# All annotated binding sites for the chosen transcription factor.
ref.sites <- subset(TFBS, TF_name == parameters[["TF"]])
if (nrow(ref.sites) == 0) {
stop("RegulonDB does not contain any binding site for transcription factor ", parameters[["TF"]])
}
message("\t", nrow(ref.sites), " TFBS")
#### Identify target transcription units ####
target.TUs <- unique(sort(as.vector(ref.sites$TU_name)))
message("\t", length(target.TUs), " TUs")
#### Get target genes from the TFBS ####
# TU gene_names is a comma-separated list; split it into individual genes.
target.genes <- unique(sort(unlist(strsplit(x = as.vector(unlist(subset(TUs, TU_name %in% target.TUs, select = "gene_names"))), split = ",", fixed=TRUE))))
message("\t", length(target.genes), " target genes")
# Map target gene names to their bnumbers (the IDs used by ChIP/RNA lists).
target.gene.ids <- unlist(subset(gene.table, gene_name %in% target.genes, select=bnumber))
#### Load gene lists ####
# ChIP-seq and RNA-seq gene lists are read from files (one ID per line);
# the regulon list is the RegulonDB-derived bnumber vector computed above.
genes <- list(
"ChIPseq" = scan(parameters[["chip_genes"]], what = "character"),
"RNAseq" = scan(parameters[["rna_genes"]], what = "character"),
"regulon" = target.gene.ids
)
## Create output directory
dir.create(output[["dir"]], showWarnings = FALSE, recursive = TRUE)
#venn.plot <- venn.diagram(list(ChIP=chip, RNA=rna, Regulon=regulon), filename="{output}", imagetype="png", fill=rainbow(3))
# Export one three-set Venn diagram per requested image format.
for (venn.format in unlist(strsplit(parameters[["venn.format"]], split = " "))) {
venn.file <- file.path(output[["dir"]], paste(sep=".", output[["venn"]], venn.format))
message("Exporting Venn diagram", venn.file)
venn.plot <- venn.diagram(genes,
filename = venn.file,
imagetype = venn.format,
fill=rainbow(length(genes)))
}
#### Export summary table with the different criteria ####
row.names(gene.table) <- gene.table$gene_id
regulon.name <- paste(parameters[["TF"]], "regulon", sep="_")
# One 0/1 membership column per gene set: initialise to 0, then flag members.
gene.table[, "ChIPseq"] <- 0
gene.table[, "RNAseq"] <- 0
gene.table[, regulon.name] <- 0
gene.table[gene.table$bnumber %in% genes$ChIPseq, "ChIPseq"] <- 1
gene.table[gene.table$bnumber %in% genes$RNAseq, "RNAseq"] <- 1
gene.table[gene.table$bnumber %in% genes$regulon, regulon.name] <- 1
out.gene.table <- file.path(
output[["dir"]],
paste(sep=".", output[["annotated_genes"]], "tsv"))
message("Exporting annotated gene table: ", out.gene.table)
write.table(x = gene.table, sep="\t", quote=FALSE,
row.names = FALSE,
file = out.gene.table)
##### Export gff #####
##
## Format specifications: https://genome.ucsc.edu/FAQ/FAQformat.html#format3
## seqname - The name of the sequence. Must be a chromosome or scaffold.
## source - The program that generated this feature.
## feature - The name of this type of feature (e.g. "CDS", "exon", "gene").
## start - The starting position of the feature in the sequence. The first base is numbered 1.
## end - The ending position of the feature (inclusive).
## score - A score between 0 and 1000, or "." when absent.
## strand - "+", "-", or "." (for don't know/don't care).
## frame - 0-2 for coding exons, otherwise ".".
## group/attribute - all lines with the same group are linked together into a single item.
## Build one GFF row per gene in gene.table.
gff <- data.frame(
"seqname" = "Chromsome",
## NOTE(review): "Chromsome" above looks like a typo for "Chromosome"; it is
## kept as-is so the exported files do not change -- confirm with downstream
## consumers before fixing.
"source" = "SnakeChunks",
"feature" = "gene",
"start" = gene.table$gene_left,
"end" = gene.table$gene_right,
"score" = ".",
"strand" = sub(pattern="reverse", replacement = "-", sub(pattern = "forward", replacement = "+", x = gene.table$strand)),
"frame" = ".",
"attribute" = paste(sep="", "gene_id: ", gene.table$bnumber)
)
## Export the GFF rows for genes flagged by each assay.
chipseq.gff <- file.path(output[["dir"]], paste(sep="", output[["annotated_genes"]], "_ChIP-seq.tsv"))
message('Exporting GFF file for ChIP-seq results: ', chipseq.gff)
write.table(x = subset(gff, gene.table$ChIPseq == 1), file = chipseq.gff, row.names = FALSE, col.names = FALSE, sep="\t", quote=FALSE)
## Bug fix: this path variable was previously named "rnapseq.gff" while the
## message() and write.table() calls below referenced the undefined name
## "rnaseq.gff", aborting the script with "object 'rnaseq.gff' not found".
## The name is now consistent throughout.
rnaseq.gff <- file.path(output[["dir"]], paste(sep="", output[["annotated_genes"]], "_RNA-seq.tsv"))
message('Exporting GFF file for RNA-seq results: ', rnaseq.gff)
write.table(x = subset(gff, gene.table$RNAseq == 1), file = rnaseq.gff, row.names = FALSE, col.names = FALSE, sep="\t", quote=FALSE)
|
f58348297f7b595ae062adc9bce4e2490b8c2612 | d6040231bfaf07a658d0822e9b7e6a9785ec6ba9 | /global.R | 97caac7ab9f173cabd7b446c4a789580b9ab993f | [] | no_license | markclements/pop_model | 320e72b83c3fac667c09e7a2c507ad781665eb75 | abb024b7f876a492d30d9c3fb38c8877ca4962ea | refs/heads/master | 2020-07-24T23:46:00.661661 | 2020-04-27T20:10:12 | 2020-04-27T20:10:12 | 208,087,926 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 69 | r | global.R | library(shiny)
library(tidyverse)
source("functions.R",local = TRUE) |
d1ef8ae7794cfbe38e5afccc5145a4e992cd7905 | c2e28f45847f8f5170d7ed90d406d9d5c3594625 | /man/laplace.samps.Rd | 1ae7457ce8ab99e9ae5ce3c2068e43e1c1e78011 | [] | no_license | mdedge/stfspack | 102d4ef512f21073dc2593db2265630040214357 | 3e7027d677c9017a0e3abaed7d99ef2ac7cf5d5d | refs/heads/master | 2020-03-29T07:03:16.471914 | 2018-09-21T22:33:01 | 2018-09-21T22:33:01 | 149,651,412 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 895 | rd | laplace.samps.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/R_functions.R
\name{laplace.samps}
\alias{laplace.samps}
\title{Generate a matrix of samples from a Laplace distribution.}
\usage{
laplace.samps(mu = 0, sigma = 1, n = 25, nsamps = 10000)
}
\arguments{
\item{mu}{The expectation of the Laplace distribution from which to draw samples.}
\item{sigma}{The standard deviation of the Laplace distribution from which to draw samples.}
\item{n}{The number of independent observations to include in each sample.}
\item{nsamps}{The number of samples to generate.}
}
\value{
A matrix of independent Laplace-distributed random numbers with nsamps rows and n columns.
}
\description{
Draws Laplace samples and formats them into a matrix, where each row contains a sample.
}
\examples{
laplace.samps(10, 1, 5, 8)
}
\keyword{Laplace}
\keyword{distribution}
\keyword{simulation}
|
ed4889c5abde157c474d1b685686aa873f6a92f3 | c9aefc772840dcc1121dcfb04a08fdc04a4ddbe5 | /R/suf_mcs_fun.R | 8d3fde7883bf235e628e7619f933090ab1b784a3 | [
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | SCCWRP/SQObioaccumulation | 1182463c71029a30fa145be686799d5ce046f301 | 6887a83c233a1108ec147e68e8c099728f7fa3ac | refs/heads/master | 2021-12-22T16:28:40.726991 | 2021-12-16T18:06:27 | 2021-12-16T18:06:27 | 203,891,093 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,918 | r | suf_mcs_fun.R | #' site use mcs function
#'
#' Monte Carlo simulation of the site use factor (SUF) for each indicator
#' species, computed from simulated home-range sizes scaled against the
#' site area (SA) or site length (SL) constants.
#'
#' @param nsim number of simulations
#' @param constants constants inputs
#' @param mcsparms MCS parameter inputs
#'
#' @return long tibble of simulated site use factors, one row per species
#'   and simulation, with the SUF in column \code{suf} (capped at 1)
#'
#' @export
#'
suf_mcs_fun <- function(nsim, constants, mcsparms){
# site area and length
SA <- constants %>%
filter(Constant %in% 'SA') %>%
pull(Value)
SL <- constants %>%
filter(Constant %in% 'SL') %>%
pull(Value)
# home range mean and sd for guild species
# Rows named HR<n>X / HR<n>SD are reshaped to one row per species with
# columns X (mean) and SD, and the "HR" prefix renamed to "indic".
hrvals <- mcsparms %>%
filter(grepl('^HR[0-9]', MCSvar)) %>%
rename(species = MCSvar) %>%
mutate(
var = case_when(
grepl('X$', species) ~ 'X',
grepl('SD$', species) ~ 'SD'
),
species = gsub('^HR', 'indic', species),
species = gsub('X$|SD$', '', species)
) %>%
pivot_wider(names_from = var, values_from = Value)
# home range sims
# Because of group_by(species), the map() body runs once per group and
# reads X, SD and species for that group from the data mask (its own
# arguments are deliberately ignored via `...`).
sufsims <- hrvals %>%
group_by(species) %>%
mutate(
suf = purrr::map(list(species), function(...){
# indic1, indic8, indic9
# Linear home ranges: SUF = site length / simulated home range.
if(grepl('1$|8$|9$', species))
out <- genlognorm_fun(nsim, X, SD) %>%
mutate(
sims = SL / sims,
sims = ifelse(is.infinite(sims), 0, sims)
)
# indic2, indic3, indic4, indic5, indic7
# Areal home ranges: SUF = site area / simulated home range.
if(grepl('2$|3$|4$|5$|7$', species))
out <- genlognorm_fun(nsim, X, SD) %>%
mutate(
sims = SA / sims,
sims = ifelse(is.infinite(sims), 0, sims)
)
# indic6
# NOTE(review): pgamma() looks suspicious here -- inverse-CDF sampling of
# a gamma-distributed home range would normally use qgamma(runif(...));
# confirm this is intentional.
if(grepl('6$', species)){
out <- (SL * 1000) / pgamma(runif(nsim, 0, 1), shape = X, scale = SD)
simi <- seq(1:nsim)
out <- tibble(i = simi, sims = out)
}
return(out)
})
) %>%
dplyr::select(-SD, -X) %>%
unnest(suf) %>%
mutate(
sims = pmin(1, sims)  # cap the site use factor at 1 (100%)
) %>%
rename(suf = sims)
return(sufsims)
} |
bc45237e61a00c4cefcd12e974058dd77f697561 | 66969d0f963a13fe475cf341c29b661676af1f39 | /man/parse_pairlist.Rd | 3f235bb7f8f7f76e014204a73c26aed6347ff4db | [] | no_license | dmurdoch/Rdpack | 5f9a73bf6f83d56ab3895c66d883f4d6b148cfea | 140b906e6fdbc832f292a72d30aa07cc3e5794a7 | refs/heads/master | 2021-05-08T11:16:11.135869 | 2018-02-03T06:31:43 | 2018-02-03T06:31:43 | 119,888,762 | 0 | 0 | null | 2018-02-01T20:20:51 | 2018-02-01T20:20:50 | null | UTF-8 | R | false | false | 3,089 | rd | parse_pairlist.Rd | \name{parse_pairlist}
\alias{parse_pairlist}
\alias{pairlist2f_usage}
\alias{pairlist2f_usage1}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Parse formal arguments of functions}
\description{Parse formal arguments of functions and convert them to
f_usage objects.}
\usage{
parse_pairlist(x)
pairlist2f_usage1(x, name, S3class = "", S4sig = "", infix = FALSE,
fu = TRUE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{a pairlist or a list of pairlists, see `Details'.}
\item{name}{function name.}
\item{S3class}{S3 class, see `Details'}
\item{S4sig}{S4 signature, see Details.}
\item{infix}{if \code{TRUE} the function usage is in infix form, see
Details.}
\item{fu}{if TRUE the object is a function, otherwise it is something
else (e.g. a variable or a constant like \code{pi} and \code{Inf}).
}
}
\details{
These functions are mostly internal.
\code{x} is a single pairlist object for \code{parse_pairlist} and
\code{pairlist2f_usage1}.
% For \code{pairlist2f_usage} it may be a list of pairlist objects.
The pairlist object is parsed into a list whose first component
contains the names of the arguments. The second component is a named
list containing the default values, converted to strings. Only
arguments with default values have entries in the second component
(so, it may be of length zero).
\code{pairlist2f_usage1} adds components \code{name} (function name),
\code{S3class}, \code{S4sig} and \code{infix}. \code{S3class} is set
for S3 methods, \code{S4sig} is the signature of an S4 method (as used
in Rd macro \verb{\S4method}). \code{infix} is \code{TRUE} for the
rare occasions of usages of infix operators. The result is given
class "f_usage". This class has a method for \code{as.character} which
generates a text suitable for inclusion in Rd documentation.
}
\value{
For \code{parse_pairlist}, a list with the following components:
\item{argnames}{names of arguments, a character vector}
\item{defaults}{a named character vector containing the default
values, converted to character strings.}
For \code{pairlist2f_usage1}, an object with S3 class
\code{"f_usage"}. This is a list as for \code{parse_pairlist} and
the following additional components:
\item{name}{function name, a character string.}
\item{S3class}{S3 class, a character string.}
\item{S4sig}{S4 signature.}
\item{infix}{a logical value, \code{TRUE} for infix operators.}
% For \code{pairlist2f_usage}, a list of \code{"f_usage"} objects.
}
\author{Georgi N. Boshnakov}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{promptUsage}}
}
\examples{
parse_pairlist(formals(lm))
}
%\keyword{RdoProgramming}
\keyword{RdoBuild}
% pairlist2f_usage(x, nams, S3class = "", S4sig = "", infix = FALSE,
% fu = TRUE, verbose = TRUE)
% \item{nams}{function names, a character vector}
% \item{verbose}{if TRUE and function names are not supplied, issue a
% message.}
|
f3ca94689163d89a077c4e55fd9c9a86d618310a | ec0b68cec1561ae345f70e5a9ad5cf3257169b90 | /R programming/day01/r4.R | 074c8fdb347c5673926fa78eefb952ca5a901cd7 | [] | no_license | baeksangwoo/Connected_Project | 1c60769493b59312259869979fd2f319a9be5b33 | 50baf113e604b63c2acff96ddf46c91574f2ae5d | refs/heads/master | 2020-03-07T09:05:32.680596 | 2018-06-11T08:43:34 | 2018-06-11T08:43:34 | 127,380,826 | 0 | 0 | null | 2018-03-30T07:39:39 | 2018-03-30T04:18:32 | Java | UTF-8 | R | false | false | 243 | r | r4.R | v1<-c(70,80,90,100);
# v1 (created above) is a numeric score vector; attach subject-name labels.
names(v1) <-c('ko','em','si','ma');
# Positional subsetting: elements 2..4.
vv<-v1[2:4]
# Negative indices drop elements 2..4, leaving only element 1.
result<-mean(v1[-2:-4]);
print(length(v1))
# NROW() behaves like length() for a plain vector.
print(NROW(v1))
# Four equivalent ways to select the 1st and 4th elements:
vv2<-v1[c('ko','ma')]
vv2<-v1[c(1,4)]
vv2<-v1[-2:-3]
vv2<-v1[c(-2,-3)]
length(v1)
NROW(v1)
# nrow() returns NULL for a vector -- it only applies to matrix-like objects.
nrow(v1)
names(v1)[2]
d1300ea44b5df0f92370f8e2241363336cb39964 | 184180d341d2928ab7c5a626d94f2a9863726c65 | /issuestests/RJafroc/man/UtilVarComponentsOR.Rd | 7e9f56774a1aff6a4acd68b5c311e374c604e6f6 | [] | no_license | akhikolla/RcppDeepStateTest | f102ddf03a22b0fc05e02239d53405c8977cbc2b | 97e73fe4f8cb0f8e5415f52a2474c8bc322bbbe5 | refs/heads/master | 2023-03-03T12:19:31.725234 | 2021-02-12T21:50:12 | 2021-02-12T21:50:12 | 254,214,504 | 2 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,052 | rd | UtilVarComponentsOR.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/UtilVarComponentsOR.R
\name{UtilVarComponentsOR}
\alias{UtilVarComponentsOR}
\title{Utility for Obuchowski-Rockette variance components}
\usage{
UtilVarComponentsOR(
dataset,
FOM,
FPFValue = 0.2,
covEstMethod = "Jackknife",
nBoots = 200
)
}
\arguments{
\item{dataset}{The dataset object}
\item{FOM}{The figure of merit}
\item{FPFValue}{Only needed for \code{LROC} data \strong{and} FOM = "PCL" or "ALROC";
where to evaluate a partial curve based figure of merit. The default is 0.2.}
\item{covEstMethod}{The covariance estimation method, "jackknife"
(the default) or "bootstrap".}
\item{nBoots}{The number of bootstraps, defaults to 200}
}
\value{
A list object containing the variance components.
}
\description{
Utility for Obuchowski-Rockette variance components
}
\details{
The variance components are obtained using \link{StSignificanceTesting}
with \code{method = "ORH"}.
}
\examples{
UtilVarComponentsOR(dataset02, FOM = "Wilcoxon")$varComp
}
|
d83a6bac74e38702e80833ee99c9590ac7d5638e | da966bef39c11a644dacda9a76731b16ad680cdf | /Au_Spike.R | 4548b9a75b166fe77adecb1593cfb7c3615bf346 | [] | no_license | andrewthomasjones/PtSpike | a822b5198d0201d1e8d3eb4b261cc3213d91eec8 | fa9018cf947bb52421bb86010cdd57e3fc5847bd | refs/heads/master | 2021-01-17T07:36:09.972008 | 2017-03-29T11:39:01 | 2017-03-29T11:55:23 | 83,767,119 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,752 | r | Au_Spike.R | #install.packages(c('EMMIXcontrasts', 'ggplot2', 'reshape')) #only needed if not already installed
#load packages
#source("https://bioconductor.org/biocLite.R")
#biocLite("limma")
#biocLite("impute")
#biocLite("samr")
#biocLite("edge")
#required
library(EMMIXcontrasts)
library(lattice)
library(edge)
library(samr)
library(limma)
#optional - for nicer plots
library(ggplot2)
library(reshape)
#read data, needs to be in same folder
load("./goldenspike.Rdata")
## Flag the known null genes up front: a fold change of exactly +/-1 means
## the gene was not differentially spiked.
## Bug fix: this flag was originally created only *after* odpSortTscore was
## copied from goldenspike below; unless the .Rdata already carried an
## isNull column, the odp section then computed numNull/propTrue from NULL.
goldenspike$isNull<-abs(goldenspike$fold)==1
## limma moderated linear-model fit: columns 1:3 vs 4:6.
fit <- lmFit(goldenspike[,1:6],design=c(0,0,0,1,1,1))
fit <- eBayes(fit)
## The optimal discovery procedure (edge) needs a bare numeric matrix.
gold2<-as.matrix(goldenspike[,1:6])
colnames(gold2)<-NULL
stud<-build_study(gold2,grp=as.factor(c(1,1,1,2,2,2)), sampling = "static")
t1<-odp(stud)
goldenspike$odpscore<-qvalueObj(t1)$stat
## Rank genes by ODP score and track how many known nulls accumulate.
odpOrder<-order(goldenspike$odpscore,decreasing = TRUE)
odpSortTscore<- goldenspike[odpOrder,]
n_genes<-length(odpSortTscore$isNull)
#cumulative sum
odpSortTscore$numNull<- cumsum(odpSortTscore$isNull)
odpSortTscore$propTrue<- (cumsum(odpSortTscore$isNull))/(1:n_genes)
# Absolute pooled-variance two-sample t statistic for one expression row;
# A_cols and B_cols index the two sample groups within the row.
row_t.test<-function(row,A_cols,B_cols){
# var.equal = TRUE pools the two group variances (classic Student t).
res <- t.test(x = row[A_cols], y = row[B_cols], var.equal = TRUE)
abs(res$statistic)
}
# Rank genes by limma's moderated F statistic and track null accumulation.
goldenspike$limmatscore<-abs(fit$F)
gslimmaOrder<-order(goldenspike$limmatscore,decreasing = TRUE)
gslimmaSortTscore<- goldenspike[gslimmaOrder,]
n_genes<-length(gslimmaSortTscore$isNull)
#cumulative sum
gslimmaSortTscore$numNull<- cumsum(gslimmaSortTscore$isNull)
gslimmaSortTscore$propTrue<- (cumsum(gslimmaSortTscore$isNull))/(1:n_genes)
# Plain pooled two-sample t statistic per gene (row_t.test defined above).
# NOTE(review): apply() coerces the data frame to a matrix, so this is only
# valid while every column of goldenspike is numeric/logical -- confirm.
goldenspike$tscore<-apply(goldenspike, 1, row_t.test, 1:3, 4:6)
gstOrder<-order(goldenspike$tscore,decreasing = TRUE)
gsSortTscore<- goldenspike[gstOrder,]
head(gsSortTscore,50)
n_genes<-length(gsSortTscore$isNull)
#cumulative sum
gsSortTscore$numNull<- cumsum(gsSortTscore$isNull)
gsSortTscore$propTrue<- (cumsum(gsSortTscore$isNull))/(1:n_genes)
# Proportion of known nulls among the top-N genes ranked by t score.
qplot(x=1:1000, y=gsSortTscore$propTrue[1:1000], geom=c( 'line'), xlab="N", ylab="Number of null genes", main="t-score method")
#need as matrix
goldMat<-as.matrix(goldenspike[,1:6])
#g is number of clusters
#debug=0 turns off verbose output
#itmax=1000,epsilon=1e-4 are stop conditions
#n1=3,n2=3 sample sizes
#ncov=3,nvcov=1 covariance structure
# NOTE(review): the call below actually uses ncov=4 and debug=1, not the
# values quoted in the comments above -- confirm which is intended.
goldEmmix<-emmixwire(goldMat,g=4,ncov=4,nvcov=1,n1=3,n2=3, debug=1,itmax=1000,epsilon=1e-4)
# Absolute EMMIX-WIRE contrast score per gene from the fitted model.
goldenspike$contrast<-abs(scores.wire(goldEmmix))
#sort
contrastOrder<-order(goldenspike$contrast,decreasing = TRUE)
gsSortContrast<- goldenspike[contrastOrder,]
#get number of nulls,#cumulative sum
gsSortContrast$numNull<- cumsum(gsSortContrast$isNull)
gsSortContrast$propTrue<- (cumsum(gsSortContrast$isNull))/(1:n_genes)
gsSortContrast$numNull[1000]
#xyplot(gsSortContrast$numNull[1:1000] ~ 1:1000)
qplot( x=1:1000, y=gsSortContrast$propTrue[1:1000], geom='line', xlab="N", ylab="Number of null genes", main="EMMIX-Contrast method")
########################################################3
# SAM run with a custom exchangeability-factor setting (s0.perc=-1);
# see samr::SAM documentation for the s0.perc semantics.
SAM_s0<-SAM(x=as.matrix(goldenspike[,1:6]), y=c(1,1,1,2,2,2), resp.type="Two class unpaired", s0.perc=-1,nperms=100)
# Combine the up- and down-regulated significant gene tables.
all_genes<-rbind(SAM_s0$siggenes.table$genes.up, SAM_s0$siggenes.table$genes.lo)
#sort
# Column 3 is used as the ranking score; column 2 as the row index into goldenspike.
SAM0Order<-order(as.numeric(all_genes[,3]),decreasing = TRUE)
SAM0Sort<- goldenspike[as.numeric(all_genes[SAM0Order,2]),]
#get number of nulls,#cumulative sum
SAM0Sort$numNull<- cumsum(SAM0Sort$isNull)
SAM0Sort$propTrue<- (cumsum(SAM0Sort$isNull))/(1:(nrow(SAM0Sort)))
# SAM only reports significant genes, so pad propTrue with zeros out to
# n_genes so it lines up with the other methods in the combined plot below.
temp<-array(0,n_genes)
temp[1:nrow(SAM0Sort)] <-SAM0Sort$propTrue
SAM0Sort_padded<-temp
#################################################################################
# Standard SAM (default s0 fudge factor).
SAM_std<-SAM(x=as.matrix(goldenspike[,1:6]), y=c(1,1,1,2,2,2), resp.type="Two class unpaired",nperms=100)
all_genes_2<-rbind(SAM_std$siggenes.table$genes.up, SAM_std$siggenes.table$genes.lo)
#sort
SAMOrder<-order(as.numeric(all_genes_2[,3]),decreasing = TRUE)
SAMSort<- goldenspike[as.numeric(all_genes_2[SAMOrder,2]),]
#get number of nulls,#cumulative sum
SAMSort$numNull<- cumsum(SAMSort$isNull)
SAMSort$propTrue<- (cumsum(SAMSort$isNull))/(1:(nrow(SAMSort)))
temp<-array(0,n_genes)
temp[1:nrow(SAMSort)] <-SAMSort$propTrue
SAMSort_padded<-temp
#plot together for niceness
# One column per method: proportion of known nulls among the top-n genes.
combined <- data.frame(n = 1:nrow(goldenspike), contrast = gsSortContrast$propTrue, tscore = gsSortTscore$propTrue, SAM_s0=SAM0Sort_padded, SAM=SAMSort_padded, limma=gslimmaSortTscore$propTrue, odp=odpSortTscore$propTrue)
combined2<-melt(combined, id='n')
combined3<-subset(combined2, n<=1000)
p<-ggplot(data=combined3, aes(x=n, y=value, colour=variable)) + geom_line()
p<-p+scale_colour_discrete(name = "Method")+ scale_x_continuous(name = "N")+ scale_y_continuous(name = "Prop True Nulls")
p
|
419ef44a7f5e61258b05a8592e9c34ae2d2aa0ac | de0c103492d5c14cb74c32a60cd9642ec8a5e358 | /kpmg/kpmg.r | c3f27eadbe7ccee01f1fd866bff924e883ce0def | [] | no_license | jpiscionere/jpiscionere.github.io | bca0701b1702938ab474fa4ab1b960c1379adee9 | f258ade7ae36ed2fffa0f538109988037d3ab051 | refs/heads/master | 2021-07-12T09:46:33.970444 | 2021-06-27T23:54:04 | 2021-06-27T23:54:04 | 45,433,187 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 5,881 | r | kpmg.r |
#loading necessary packages
library('ggplot2')
library('ggmap')
qmap(location='melbourne')
library(corrplot)
# TAC hospitalisation stats: rows are LGA/demographic aggregations.
data=read.csv("~/Aggregated-TAC-Hospitalisation-Stats.csv")
summary(data)
#making table of unique LGA
data_table=data.frame(table(data$LGA))
# Geocoding query string, e.g. "Alpine Australia".
data_table$City=paste(data_table$Var1,"Australia")
#now to get total number of crashes per LGA, assuming that summing the three genders gives total number
which(data$LGA=="Alpine")
sum(data$male[which(data$LGA=="Alpine")])
#which(data$LGA==data_table$Var1[1])
# Overwrite Freq (originally the table() row count) with the summed crash
# counts per LGA across the three gender columns.
for(i in seq(1:length(data_table$Var1))){
data_table$Freq[i]=
sum(data$male[which(data$LGA==data_table$Var1[i])]) +
sum(data$female[which(data$LGA==data_table$Var1[i])]) +
sum(data$unknownGender[which(data$LGA==data_table$Var1[i])])
}
data_table$Freq
#the google api timed out on a few, so go back and get the ones it missed
length(latlon$lon)
for(i in seq(1:length(data_table$City))){
if(is.na(latlon$lon[i]) == 'TRUE')
{
latlon2=geocode(data_table$City[i])
latlon$lon[i]=latlon2$lon
latlon$lat[i]=latlon2$lat
}
}
is.na(latlon$lon)
#check to make sure the I got the correct lat/lon for one of the LGAs that I misssed initially.
latlon$lon[which(data_table$City=='Wangaratta Australia')]
geocode('Wangaratta Australia')
data_table$lon=latlon$lon
data_table$lat=latlon$lat
map <- get_map(location = c(lon = mean(data_table$lon), lat = mean(data_table$lat)), zoom = 7
, source = "google")
ggmap(map) + geom_point(data=data_table,size=data_table$Freq/50,color="red",alpha=0.3) +
#annotate("text", x = data_table$lon[which(data_table$Freq!=1)], y = data_table$lat[which(data_table$Freq!=1)],
# label = data_table$Freq[which(data_table$Freq!=1)]) +
xlab("") + ylab("")
#alright, Inner Melbourne seems to be the worse off here. The rural locations have far fewer crashes
#Let's find out what correlates with what.
#It would be helpful to sum up values for individual locations.
# Drop the first three columns (presumably identifiers -- confirm) so cor() can run.
newdata <- data[c(-1:-3)]
newdata$City=0
M <- cor(newdata)
corrplot(M, method = "circle")
#this is unhelpful on many levels. It has too much data, everything seems to be correlated.
cor(newdata) #this gets the linear correlation coefficients between all the variables. Its erasing the geo data. We can see basic trends this way.
#Women are younger then men and are more likely to be Motorcyclists.
#Crashes in Rural areas appear to be fairly random and infrequent
library("Hmisc")
# rcorr() returns correlations plus p-values (matrices $r and $P).
res2 <- rcorr(as.matrix(newdata))
res2
# Flatten the upper triangle of a correlation matrix (and its matching
# p-value matrix) into a long data frame with one row per variable pair.
flattenCorrMatrix <- function(cormat, pmat) {
keep <- upper.tri(cormat)
row.idx <- row(cormat)[keep]
col.idx <- col(cormat)[keep]
data.frame(
row = rownames(cormat)[row.idx],
column = rownames(cormat)[col.idx],
cor = cormat[keep],
p = pmat[keep]
)
}
#This is going to be more easy to digest. Sorted this way, we see the lowest linear correlations.
#Accidents are much less likely to happen to women in rural locations.
#The p value tells us how much we should actually care about the correlations.
#I'm trying to find the most correlated variables here.
a=flattenCorrMatrix(res2$r, res2$P)
# as.matrix() turns every cell into a character string, hence the
# as.numeric(as.character(...)) conversions further down.
a=as.matrix(a)
#a[order(a[,3]),]
which(a[,4] > 0.9)
a[7,]
b=data.frame(a)
max(which(is.na(as.numeric(b$p))!=TRUE))
a[903,]
# Restore numeric types lost in the matrix round-trip.
b$p=as.numeric(as.character(b$p))
b$cor=as.numeric(as.character(b$cor))
summary(b)
b[order(-b$cor),]
# NOTE(review): p > 0.5 selects the LEAST significant pairs -- confirm this
# threshold direction is intended.
sig_data=b[which(b$p > 0.5),]
high_cor=b[which(b$cor > 0.95),]
sig_data[order(sig_data$cor),]
high_cor[order(high_cor$cor),]
## Focus on road-user type vs time-of-day variables.
newdata=data[c("userBicyclist","userDriver","userMotorcyclist","userPedestrian","hr0000to0559","hr0600to1159","hr1200to1759","hr1800to2359")]
M <- cor(newdata)
corrplot.mixed(M, lower.col = "black", number.cex = .7)
## Correlations with significance (Hmisc::rcorr); the original computed this
## twice back-to-back, so the redundant duplicate call was removed.
res2 <- rcorr(as.matrix(newdata))
res2
#Ok, time to get serious. Let's answer this question:
#What variables correlate most strongly with crashes in melbourne?
#cleaning the data, making dates number and assigning a numeric value to each LGA in case we need it later.
#Also finding the variables that correlate most strongly with the number of accidents in Melbourne so we can narrow
#down the variables for the fit.
melbourne_data=data[which(data$locMelbourne > 0),]
melbourne_data$total_accidents=melbourne_data$locMelbourne
melbourne_data$dateFrom=as.numeric(melbourne_data$dateFrom)
melbourne_data$dateTo=as.numeric(melbourne_data$dateTo)
lga_code=c(1:length(unique(melbourne_data$LGA)))
lga_unique=unique(melbourne_data$LGA)
melbourne_data$LGA=as.numeric(melbourne_data$LGA)
# Correlate everything against total_accidents and rank descending.
res2 <- rcorr(as.matrix(melbourne_data))
a=flattenCorrMatrix(res2$r, res2$P)
b=data.frame(a)
b=b[which(b$column=='total_accidents'),]
b[order(-b$cor),]
#We want something useful. Let's use the hospital stay for a proxy of severity of accident. Let's see what factors contribut
#to the most severe accidents in Melbourne
# Same preparation as above, now ranking correlations with stayGreater14
# (long hospital stays) as the severity proxy.
melbourne_data=data[which(data$locMelbourne > 0),]
melbourne_data$total_accidents=melbourne_data$locMelbourne
melbourne_data$dateFrom=as.numeric(melbourne_data$dateFrom)
melbourne_data$dateTo=as.numeric(melbourne_data$dateTo)
lga_code=c(1:length(unique(melbourne_data$LGA)))
lga_unique=unique(melbourne_data$LGA)
melbourne_data$LGA=as.numeric(melbourne_data$LGA)
res2 <- rcorr(as.matrix(melbourne_data))
a=flattenCorrMatrix(res2$r, res2$P)
b=data.frame(a)
b=b[which(b$row=='stayGreater14'),]
b[order(-b$cor),]
M<-cor(melbourne_data)
corrplot.mixed(M, lower.col = "black", number.cex = .7)
#Definitely during rush hour going the wrong way. This is a definitive course of action: Melbourne needs to improve
#traffic information so that people don't go down the wrong road.
# Bar chart of the strongest correlates (cor > 0.65) of severe accidents.
ggplot(data=subset(b,cor>0.65),aes(x=reorder(column,cor),cor ,fill=cor)) +
geom_col() + coord_flip() + ylab("Linear Correlation Coefficient") +
xlab("")
|
4336cf46ecb6154b20933ddea382b887728ae087 | f06723e2580bd4fad5446007a8be228978022413 | /man/getCOSMICSignatures.Rd | 2ca2ed24e65bfb090cc3c9e0bfa420d818834881 | [
"BSD-2-Clause"
] | permissive | Nik-Zainal-Group/signature.tools.lib | 6c4fce24c190ab4bb6c4b764741369d7760d851d | 3bd2daec7d77f0eb48be09ac384606d2102c68fd | refs/heads/master | 2023-09-04T12:13:14.923240 | 2023-08-21T16:27:45 | 2023-08-21T16:27:45 | 173,100,574 | 79 | 24 | NOASSERTION | 2022-10-28T10:42:25 | 2019-02-28T11:33:00 | R | UTF-8 | R | false | true | 739 | rd | getCOSMICSignatures.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/signatures_utils.R
\name{getCOSMICSignatures}
\alias{getCOSMICSignatures}
\title{getCOSMICSignatures}
\usage{
getCOSMICSignatures(version = "latest", typemut = "subs", verbose = TRUE)
}
\arguments{
\item{version}{this is either "latest", which is v3.2 for SBS and DBS, or it is possible to specify "2" in combination with typemut="subs" to obtain the old COSMIC 30 signatures. For rearrangements, this function only returns the 6 breast cancer rearr signatures from Nik-Zainal et al. 2016.}
\item{typemut}{either subs, DNV or rearr}
}
\value{
reference signatures matrix
}
\description{
This function returns the COSMIC signatures for a given mutation type.
}
|
fdc183d727462ea20de8bbf4de0bda2b8c17f295 | 3f1aa8be40d971b34f417f5b8c420f9725a8772f | /man/removepoints.Rd | 2f41f2dcfda9d58f8e2fa515255111e3b7388c31 | [] | no_license | cran/MetaLandSim | 16f3a78b7478ef2eedf4fca307caa8d0858ba384 | 7903126e4e905cff603ecc97695a927bc8483064 | refs/heads/master | 2023-04-04T13:29:32.818818 | 2023-01-12T21:30:02 | 2023-01-12T21:30:02 | 26,391,540 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 802 | rd | removepoints.Rd | \name{removepoints}
\alias{removepoints}
\title{
Remove a given number of patches from the landscape
}
\description{
Randomly removes a given number of patches from the landscape.
}
\usage{
removepoints(rl, nr)
}
\arguments{
\item{rl}{
Object of class 'landscape'.
}
\item{nr}{
Number of patches to remove.
}
}
\value{
Returns an object of class 'landscape'.
}
\author{
Frederico Mestre and Fernando Canovas
}
\seealso{
\code{\link{rland.graph}}, \code{\link{addpoints}}
}
\examples{
data(rland)
#Checking the number of patches in the starting landscape:
rland$number.patches
#60
#Removing 10 patches from the landscape:
rl1 <- removepoints(rl=rland, nr=10)
#Checking the number of patches in the output landscape:
rl1$number.patches
#50
} |
f22f41c779bccd17fc1a273bf1f564157dadd93f | b2e2a2d40e9344035e0c50c07d8e05fe6b4e6a16 | /7.prediction-by-exponential-smoothing/7.2.5.HoltWinters.R | 56c7834bb027b4d2925f9c032ef97d93be69b051 | [] | no_license | imanojkumar/ts-analysis-by-R | 6f0527599f477be957babc09947d31705101ccb6 | 33718d4c6609ccdead55db72fc412e28adc78c73 | refs/heads/master | 2021-05-27T16:43:41.776384 | 2012-02-13T04:53:37 | 2012-02-13T04:53:37 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 262 | r | 7.2.5.HoltWinters.R | SNA <- read.csv("../data/GDPO1980TABLE.csv", skip=2, header=TRUE)
# Quarterly GDP time series starting 1980Q1 (SNA read from CSV above).
GDP <- ts(SNA$GDP, start=c(1980,1), frequency=4)
# Holt-Winters smoothing with multiplicative seasonality; the outer
# parentheses print the estimated model parameters.
(GDP.HW1 <- HoltWinters(GDP,seasonal="mult"))
(GDP.HW1$fitted)
# Column 1 of $fitted ("xhat") is the one-step-ahead fitted series.
FITTED1 <- GDP.HW1$fitted[,1]
# Overlay observed (lty=1) vs fitted (lty=2) series.
ts.plot(GDP,FITTED1,type="l",lty=c(1:2),col=c(1:2))
|
945bad5011b012f2267ec86205f713afb095fa58 | 006ce7aeb4a7b999a24474aa45bdeaf0586e67fc | /day3/data/fog20180423ver2.R | b3ba22115e90e8ce27c7c836873764e3b9c6baac | [] | no_license | hspark90/vis | 22935f3adaf32a76f2398af8a09a052b57e49658 | efb39bca7ad9c853181921e1f92b5dffa924ab98 | refs/heads/master | 2020-06-10T23:55:44.017851 | 2019-06-25T22:27:22 | 2019-06-25T22:27:22 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,720 | r | fog20180423ver2.R | require(maptools)
require(rgdal)
require(ggmap)
require("plotrix")
require("plyr")
require("colorRamps")
library(XML)
library(plyr)
library(sp)
library(stringr)
library(RCurl)
library(spTimer)
library(maps)
library(gstat)
library("RColorBrewer")
#setwd("S:/Users/TaeYong/안개")
#####안개
# Expressway centrelines (WGS84 shapefile) and the fortified link table data2.
express<-readShapeLines("S:/Users/TaeYong/안개/express/express_WGS84.shp")
load(file="S:/Users/TaeYong/안개/data2(forti_exp).rda")
road.link<-(unique(data2$group))
id.n<-unique(data2$id)
# One row per road link: LINK_ID plus a representative midpoint coordinate.
# NOTE(review): the row count 9392 is hard-coded to match the shapefile.
link.gis<- matrix(rep(NA,9392*7),ncol=7)
for (i in 1:9392){
id.length <-length(data2[data2$id==id.n[i],1])
temp<- data2[data2$id==id.n[i],]
# Pick the (roughly) middle vertex of the link as its coordinate;
# a fractional index is truncated by R when id.length is even.
link.gis[i,2]<-temp[(id.length+1)/2,1]
link.gis[i,3]<-temp[(id.length+1)/2,2]
link.gis[i,1]<-paste(express$LINK_ID[i])
}
# Drop the unused 7th column; the remaining six are named below
# ("경도" = longitude, "위도" = latitude).
link.gis <- as.data.frame(link.gis)[,-7]
names(link.gis)<-c("LINK_ID","경도","위도","vis","speed","si")
link.gis$경도 <- as.numeric(paste(link.gis$경도))
link.gis$위도 <- as.numeric(paste(link.gis$위도))
# Scrape the current-weather observation table from the KMA (Korea
# Meteorological Administration) website; the page is EUC-KR, convert to UTF-8.
url2<-iconv(getURL("http://www.weather.go.kr/weather/observation/currentweather.jsp?type=t99&mode=0&stn=0&auto_man=a",
.encoding="euc-kr"),from="euc-kr",to='UTF-8')
tables<-as.data.frame(readHTMLTable(url2,encoding='UTF-8'))
names(tables)<-
c("id","current","vis","cloud","l.cloud","Tcurrent","dew","sensible","prec","rh","dir","ws","hpa")
# Drop repeated header rows ("지점" = station).
tables<-tables[tables$id!="지점",]
# Keep station id and visibility only.
tables1<-tables[,c(1,3)]
# Station coordinate lookup ("위도.csv" = latitude table); join on station name ("지점명").
stations<-read.csv("S:/Users/TaeYong/안개/위도.csv")
stations1 <- stations[,c(4,6,7)]
test1<-merge(tables1,stations1,by.x='id',by.y='지점명')
# Visibility arrives as text; "20 이상" means "20 or more", so cap it at 20.
test1$vis<-paste(test1$vis)
test1$vis[test1$vis=="20 이상"]="20"
test1$vis<-as.numeric(test1$vis)
test2<-test1[complete.cases(test1),]
# Distance from each station to the nearest road link, to visualise coverage.
vec.dist <-rep(NA,dim(test2)[1])
for (i in 1:dim(test2)[1]){
vec.dist[i]<-min(spT.geo.dist(as.numeric(test2[i,c("경도","위도")]),as.data.frame(link.gis)))
}
map('world', 'South Korea', fill=TRUE, col="lightgrey", xlim=c(125,130.4), ylim=c(34,39))
plot(express, col='white', xlim=c(125,130.4), ylim=c(34,39), add=TRUE, lwd=2)
# NOTE(review): points are drawn from test1 but vec.dist was computed over
# test2 (complete cases only) -- the row alignment may be off; confirm.
points(test1[vec.dist<2,c("경도","위도")], pch=16, col="red")
points(test1[vec.dist>2,c("경도","위도")], pch=16, col="blue")
map.axes()
# Promote stations and road links to spatial objects, then inverse-distance-
# weight (power 2) station visibility onto every road link.
coordinates(test2) <-c("경도","위도")
link.gis2<-link.gis
coordinates(link.gis2) <-c("경도","위도")
gis2.idw<-gstat::idw(vis~ 1, test2, newdata=link.gis2, idp=2.0)
link.gis$vis<-gis2.idw$var1.pred
# Colour-code interpolated visibility into 11 bins (RdYlGn: red = low).
cuts <-seq(0,20,length.out=12)
pred.level<-cut(link.gis$vis,cuts,brewer.pal(n = 11, name = "RdYlGn"))
map('world', 'South Korea', fill=TRUE, col="lightgrey", xlim=c(125,130.4), ylim=c(34,39))
points(link.gis$경도, link.gis$위도, col=paste(pred.level), cex=0.3, pch=15)
map.axes()
# Attach visibility to the shapefile's attribute table.
express2<-express
express2@data$vis<-link.gis$vis
# NOTE(review): link.gis has no pred.level column (pred.level is a free
# variable), so this extracts NULL -- confirm intent.
express2@data$viscol<-link.gis$pred.level
#####속도
# Real-time traffic feed from the Korea Expressway Corporation open API.
# NOTE(review): the API key is hard-coded in the URL -- consider moving it
# out of the source before sharing this script.
url = "http://data.ex.co.kr/openapi/odtraffic/trafficAmountByRealtime?key=3314135116&type=xml"
raw.data <- xmlParse(url)
real.data<-ldply(xmlToList(raw.data), function(x) { data.frame(x[!names(x)=="author"]) } )
##################################################################################
# VDS (vehicle detection station) locations prepared earlier.
load(file="S:/Users/TaeYong/안개/final_vds.rda")#vds4
#######################################################################################
new.vds<-vds4[,3:5]
names(new.vds)<-c("vdsId","경도","위도")
# Keep positive speed readings and join them to station coordinates.
v.data<-real.data[,c("speed","vdsId")]
new.v.data<-v.data[which(as.numeric(paste(v.data$speed))>0),]
v.vds<-merge(new.v.data, new.vds, by="vdsId")
# IDW-interpolate speed onto the road links, mirroring the visibility step.
coordinates(v.vds) <-c("경도","위도")
gis2.idw2<-gstat::idw(speed~ 1, v.vds, newdata=link.gis2, idp=2.0)
link.gis$speed<-gis2.idw2$var1.pred
# summary(link.gis$speed)
# rainbow(n = 24, start=0, end=4/6)
cuts <-seq(0,110,length.out=25)
pred.level<-cut(link.gis$speed,cuts,rainbow(n = 24, start=0, end=2/6))
table(pred.level)
cuts
map('world', 'South Korea', fill=TRUE, col="lightgrey", xlim=c(125,130.4), ylim=c(34,39))
points(link.gis$경도, link.gis$위도, col=paste(pred.level), cex=0.3, pch=15)
map.axes()
express2@data$speed<-link.gis$speed
# Appears to be the standard stopping-distance formula (reaction term plus
# v^2/(254*f) braking term), then a risk index of stopping distance over
# visibility -- confirm the 0.63 friction coefficient with the author.
express2@data$sd <- 0.694*link.gis$speed+link.gis$speed^2/(254*0.63)
express2@data$rwi <- express2@data$sd/express2@data$vis
###############################################################################
library(RgoogleMaps)
library(plotGoogleMaps)
# Interactive Google-maps overlay of one attribute column.
express3<-express2
express3@proj4string =CRS('+proj=longlat +datum=WGS84')
# zcol=31 selects an attribute column by position -- presumably the risk
# index added above; confirm against ncol(express3@data).
# NOTE(review): a Google Maps API key is embedded below -- rotate/remove it
# before publishing this script.
m1=plotGoogleMaps(express3,zcol=31,colPalette=rainbow(n = 24, start=0, end=2/6),control.width='50%',control.height='50%',
api="https://maps.googleapis.com/maps/api/js?key=AIzaSyCkuAgeN7WipKLaNSUAJTeoTRceEFYKOKc&callback=initMap")
|
83adc60974fb04cd29f2e0d6e4e0bbed48307b77 | b30eb6eb8aa9071af0a892aa9eeedf8b8eab99c4 | /plot2.R | 8e75f315b6be48dddf97f70a075bd376cefc8caa | [] | no_license | votadlos/ExData_Plotting1 | 62b362a55f5a2cf3e595ce468396f35a294af9f4 | 2e4e65980b59542881ded0ba6642d9a9edea8389 | refs/heads/master | 2020-12-25T00:06:01.481456 | 2015-02-08T09:19:05 | 2015-02-08T09:19:05 | 30,287,713 | 0 | 0 | null | 2015-02-04T07:52:01 | 2015-02-04T07:52:01 | null | UTF-8 | R | false | false | 563 | r | plot2.R | require(sqldf)
# Plot global active power for 2007-02-01..02 (dates are d/m/Y in the file).
file <- c("../household_power_consumption.txt")
#data subset
# sqldf filters the two target days while reading, avoiding loading the full file.
data_subset <- read.csv.sql(file, header = T, sep=";", sql = "select * from file where Date IN ('1/2/2007', '2/2/2007')" )
#NA change
# "?" is the file's missing-value marker.
# NOTE(review): if any "?" is present, the affected column stays character
# after this replacement and would need as.numeric() before plotting --
# confirm the selected days contain no missing values.
data_subset[data_subset =="?"] <- NA
#date convert
data_subset$Date<-as.POSIXct(paste(data_subset$Date, data_subset$Time), format="%d/%m/%Y %H:%M:%S")
#plot image
png(filename="figure/plot2.png", width = 480, height = 480)
plot(data_subset$Date, data_subset$Global_active_power, type="l",
xlab="", ylab="Global Active Power (kilowatts)")
dev.off()
|
82a3811c7cce5caab0b377b419bfc27456374d76 | c5de5d072f5099e7f13b94bf2c81975582788459 | /R Extension/RMG/Projects/CollateralAdHoc/Legacy/DownstreamGas.R | 7879696e74cab9c3de806c2636555a703fb80328 | [] | no_license | uhasan1/QLExtension-backup | e125ad6e3f20451dfa593284507c493a6fd66bb8 | 2bea9262841b07c2fb3c3495395e66e66a092035 | refs/heads/master | 2020-05-31T06:08:40.523979 | 2015-03-16T03:09:28 | 2015-03-16T03:09:28 | 190,136,053 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,496 | r | DownstreamGas.R | ###############################################################################
# DownstreamGas.R
#
# Ad-hoc collateral analysis: flags downstream-gas positions by counterparty
# group (CNE, Baltimore, Houston storage/transport, external) from a SecDb
# extract and, in a second pass, directly from the RMSYS database.
#
# Author: e14600
#
memory.limit( 4095 )
library(reshape)
source("H:/user/R/RMG/Utilities/RLogger.R")
source("H:/user/R/RMG/Utilities/load.R")
source("H:/user/R/RMG/Utilities/dollar.R")
source("H:/user/R/RMG/Utilities/Database/SAS/VPort.R")
################################################################################
# File Namespace
#
# All helper functions below are attached to this environment rather than the
# global workspace.
DownstreamGas = new.env(hash=TRUE)
################################################################################
# Hard-coded list of CNE book names, used later to flag counterparties as
# "CNE". Returns a sorted, de-duplicated character vector of book codes.
DownstreamGas$getCNEBooks <- function()
{
# One book code per line inside the string literal; the lines are left
# unindented on purpose so the strsplit() on "\n" below yields clean codes.
cneBooks =
"CEIDEMHG
CEISPMHG
CEISPOHG
CEIVENHG
CEIBSMTM
CEIOTMTM
OGDENDEM
OGDENPHYS
CESAED
CESMED
CETMED
OGDENMOD
OGDENHGP
OGDENHGS
CCGSUPACC
FINLEGACC
SUPLEGACC
SUPLEGNPNS
SUPNONFRMACC
FINAECOHDG
FINANRHDG
FINCHIHDG
FINDAWNHDG
FINDEMHDG
FINLEGCG
FINMICHHDG
FINNORHDG
FINOGTHDG
FINPGEHDG
FINSOCHDG
FINTCOHDG
CCGFINMTM
FINLEGMTM
EOGDEM1
NFG1DEM1
NFG2DEM1
NFG3DEM1
NFGESSDEM
TCOISSDEM1
TGTFSSDEM1
TGTFSSDEM2
TGTISSDEM1
TGTNNSDEM1
EOGPHYS
NFG1PHYS
NFG2PHYS
NFG3PHYS
NFGESSPHY
TCOISSPHY
TGTFSSPHYS
TGTFSSPHYS2
TGTNNSPHYS
CNESAED
CNESMED
EOGMOD
NFG1MOD
NFG2MOD
NFG3MOD
NFGESSMOD
TGTFSSMOD
TGTFSSMOD2
TGTISSMOD
TGTNNSMOD
EOGHGP1
EOGHGS1
NFG1HGP1
NFG1HGS1
NFG2HGP1
NFG2HGS1
NFG3HGP1
NFG3HGS1
NFGESSHGP
NFGESSHGS
TCOISSHGP
TCOISSHGS
TGTFSSHGP1
TGTFSSHGP2
TGTFSSHGS1
TGTFSSHGS2
TGTISSHGP1
TGTISSHGS1
TGTNNSHGP1
TGTNNSHGS1
CNESUPED
CETAED
CNETAED
CNETRNED
CORNERTPHDG
NOVOL
SUPTPHDG
CORNERPRICAP
CORNERRECCAP
CORNERSECCAP
SUPTPCAP
SUPTPMOD
"
# split on newlines -> one element per book code
cneBooks = strsplit( cneBooks, "\n" )[[1]]
# drop duplicates and alphabetize
cneBooks = sort( unique( cneBooks ) )
return( cneBooks )
}
################################################################################
# Returns the sorted, de-duplicated set of Baltimore book names: the union of
# the books in the CPS accrual and CPS mark-to-market portfolios per VPort.
#
# asOfDate - portfolio as-of date. Previously hard-coded inside the body; now
#            a parameter whose default ("2008-10-10") preserves the original
#            behavior for existing zero-argument callers.
DownstreamGas$getBaltimoreBooks <- function( asOfDate = "2008-10-10" )
{
accrualBooks = VPort$booksForPortfolio( "CPS Accrual Portfolio", asOfDate )
mtmBooks = VPort$booksForPortfolio( "CPS Mark to Market Portfolio", asOfDate )
books = sort( unique( c( accrualBooks, mtmBooks ) ) )
return( books )
}
################################################################################
# Hard-coded list of gas-storage book names. Returns a sorted, de-duplicated
# character vector of book codes.
DownstreamGas$getStorageBooks <- function()
{
# One book code per line; lines intentionally unindented so the strsplit()
# on "\n" below yields clean codes.
storageBooks = "CEHUSTED
ENCAIBT
NATLIBT
RMROMACC
ANRDEM
BAYSTDEM
CENHDDEM
ENBDEM
ENCADEM
GOOSDEM
LIBSTDEM
LODI2DEM
LODIDEM
MICHBDEM
MICHDEM
NATLDEMB
NATLDEMC
NATLDEMD
NATSTDEM
STGBDEM
STGCHDEM
UNICDEM
UNIDDEM
UNIDEM
UNIDEMB
UNOFDEM
MTMSTED
MTMTPED
ANRPHYS
BAYSTPHY
CENHDPHY
ENBPHY
ENCAPHY
GOOSPHYS
LIBSTPHY
LODI2PHYS
LODIPHYS
MICHBPHY
MICHPHYS
NATLPHYB
NATLPHYS
NIMOPHYS
STGBPHY
STGCHPHY
STORPHYS
UNICPHY
UNIDPHY
UNIPHY
UNIPHYB
UNOFPHY
STORAED
STORAED2
STORMED
STORMED2
ACCNGED
ANRMOD
BAYSTMOD
CANSTOR
CENHDMOD
CENHDPH2
CENHDPH3
DVACCRUS
ECANSTOR
ENBMOD
ENBRDACC
ENCAMOD
ENCMOD2
ENCPHY2
GOOSMOD
LIBSTMOD
LODI2MOD
LODIMOD
MICHBMOD
MICHED
MICHMOD
NATLMODB
NATLMODC
NATLMODD
NATLSTOR
NATSTRED
NIMOMOD
NIMOPYMT
RNSPTGAS
STCSTRED
STGBMOD
STGCHMOD
STOR1ED
STORFIN
SWSTRGAS
UNICMOD
UNIDMOD
UNIMOD
UNIMODB
UNISTED
UNOFMOD
VIRSTRED
WESTSTOR
ANRHDG
BAYSTHGP
BAYSTHGS
CENHDHDG
ECANSTHG
ENBHDG
ENCAFX
ENCAHDG
ENCAHDGP
GOOSHGP
GOOSHGS
LIBSTHGP
LIBSTHGS
LODI2HGP
LODI2HGS
LODIHGP
LODIHGS
MCPHYSNG
MICHBHGP
MICHBHGS
MICHHDG
MICHHDGS
MTMHGSTR
MTMHGTPT
MTMSTVOL
NATLHDGB
NATLHDGC
NATLHGCP
NATSTRHG
RMROMMTM
STGBHGP
STGBHGS
STGCHHDG
STORFNHG
SWSTRHDG
UNIBHGS
UNICHDGP
UNICHDGS
UNIDHDGP
UNIDHDGS
UNIHDG
UNIHDGB
UNOFHGP
UNOFHGS
WESSTRHG
"
# split on newlines -> one element per book code
storageBooks = strsplit( storageBooks, "\n" )[[1]]
# drop duplicates and alphabetize
storageBooks = sort( unique( storageBooks ) )
return( storageBooks )
}
################################################################################
# Hard-coded list of gas-transport book names. Returns a sorted, de-duplicated
# character vector of book codes.
DownstreamGas$getTransportBooks <- function()
{
# One book code per line; lines intentionally unindented so the strsplit()
# on "\n" below yields clean codes.
transportBooks = "ALLTPHGA
ALLTPIBT
CALACCED
FPACHDG
GASFXPED
NASTR5ED
NEACCED
NETPIBT
NGPLHDGA
NGPLIBT
NGPSTRED
NSTARNPS
RMBAACC
RMDMACC
TRPNPSED
UNIBIBT
WIRACCED
FORMERED
TRANAED2
TRNAED
TRNAED2
TRNMED
TRNMED2
ALGTHDG2
ALGTPHDG
ALLTPHDG
ANRTHDG2
ANRTPHDG
BAYSTHDG
BROTRHED
CANTRHDG
CGTTPHDG
COLTPHDG
DKTPED
DKTPHDG
DOMTPHDG
DUKTPHDG
DVACCRUT
EMPTPHDG
ENOITHDG
GLGTHDG
IRQTHDG2
IRQTPHDG
MELTRAED
MGTTPHDG
MID2HGED
NBTPHDG
NE2TPED
NECAPHDG
NGPTHDG2
NGPTPHDG
NNGTPHDG
NOVTPHDG
NRGTRHDG
NWPPHDG
ONEITHDG
OZKTPHDG
PGETPHDG
PNGTPHDG
PPLTPHDG
RMBAMTM
RMDMMTM
SNTTPHDG
SWGASHDG
SWTPHGED
TCPLHDG
TCPLHED
TENCTHDG
TENTHDG2
TENTPHDG
TETTPHDG
TRNKPHDG
TRZTHDG2
TRZTPHDG
VIRTRAED
WESTRNHG
ALGTPCAP
ALGTPDEM
ALLTPAM2
ALLTPAMT
ALLTPCAP
ANRTPCAP
BAYSTCAP
BORTRAED
CALTRAED
CANTRANS
CEGTTMOD
CGTTPCAP
CGTTPDEM
COLTPCAP
DKTPCAP
DOMTPCAP
DUKTPCAP
DVACCRUB
ECANCAPC
EMPTPCAP
ENBSTRED
ENOGEMOD
ENOITCAP
GLGTCAP
IRQTPCAP
IRQTPDEM
KAPTRAED
NBTPCAP
NECAPGAS
NGPTPCAP
NGPTPDEM
NNGTPCAP
NNGTPDEM
NOVTPCAP
NRGTRCAP
NWPTPCAP
ONEITCAP
OZKTPCAP
PGETPCAP
PMCAPGAS
PMCAPHDG
PNGTPCAP
PPLTPCAP
PPLTPDEM
RKCAPGAS
SNTTPCAP
SWCAPGAS
SWTPCPED
TCPLCAP
TCPLCED
TENCTCAP
TENTPCAP
TENTPDEM
TETTPCAP
TETTPDEM
TRNKPCAP
TRZTPCAP
TRZTPDEM
WESTTRAN
WIRTRAED
ALGTCAP2
ANRTCAP2
GASACHDG
IRQTCAP2
MGTTPCAP
MID2TPED
NE2HGED
NGPTCAP2
TENTCAP2
TRZTCAP2
"
# split on newlines -> one element per book code
transportBooks = strsplit( transportBooks, "\n" )[[1]]
# drop duplicates and alphabetize
transportBooks = sort( unique( transportBooks ) )
return( transportBooks )
}
################################################################################
# Reads the master list of downstream book names from a CSV on disk and
# returns the sorted, de-duplicated SUB_BOOK column.
# NOTE(review): the cneBooks and baltimoreBooks arguments are currently
# unused -- the setdiff() calls that would exclude them are commented out.
DownstreamGas$getDownstreamBooks <- function(cneBooks, baltimoreBooks )
{
BOOK_FILE = "H:/user/R/RMG/Projects/CollateralAdHoc/AllDownstreamBooksPlusCNEAndBaltimore.csv"
allBooks = read.csv(BOOK_FILE)
dsBooks = sort( unique( allBooks$SUB_BOOK ) )
# dsBooks = setdiff( dsBooks, cneBooks )
# dsBooks = setdiff( dsBooks, baltimoreBooks )
return( dsBooks )
}
################################################################################
# Script body, pass 1: load the SecDb position extract, flag each downstream
# position's counterparty group, and write exposure summaries to CSV.
data = read.csv( "S:/Risk/Temporary/CollateralAllocation/20081010/SourceData/AllPos_AGMTH_preibt_10OCT08.csv" )
cneBooks = DownstreamGas$getCNEBooks()
baltimoreBooks = DownstreamGas$getBaltimoreBooks()
storageBooks = DownstreamGas$getStorageBooks()
transportBooks = DownstreamGas$getTransportBooks()
dsBooks = DownstreamGas$getDownstreamBooks( cneBooks, baltimoreBooks )
# keep only positions booked in a downstream book
downstreamData = subset( data, book_name %in% dsBooks )
#downstreamData = subset( downstreamData, Tenor != "PRE" )
dim( downstreamData )
dollar( sum( downstreamData$Exposure, na.rm = TRUE ) )
# classify each position's counterparty; later assignments overwrite earlier
# ones, so the "^XG"/"^XM" prefix matches take final precedence
downstreamData$EXPOSURE_FLAG = "External"
downstreamData$EXPOSURE_FLAG[ which( downstreamData$counterparty %in% dsBooks )] = "Houston - Downstream"
downstreamData$EXPOSURE_FLAG[ which( downstreamData$counterparty %in% baltimoreBooks )] = "Baltimore"
downstreamData$EXPOSURE_FLAG[ which( downstreamData$counterparty %in% cneBooks )] = "CNE"
downstreamData$EXPOSURE_FLAG[ grep("^XG", downstreamData$counterparty ) ] = "Houston - Storage"
downstreamData$EXPOSURE_FLAG[ grep("^XM", downstreamData$counterparty ) ] = "Houston - Transport"
names(downstreamData) = toupper( names( downstreamData ) )
downstreamData = downstreamData[, sort(names(downstreamData)) ]
# reshape::cast() expects the value column to be named "value"
names(downstreamData)[which(names(downstreamData) == "EXPOSURE")] = "value"
finalData = cast( downstreamData,
COUNTERPARTY + CREDIT_NETTINGAGREEMENT ~ EXPOSURE_FLAG,
sum, na.rm = TRUE, fill = 0, margins = c("grand_col") )
write.csv( downstreamData, row.names=FALSE, file="C:/Documents and Settings/e14600/Desktop/downstreamData.csv" )
write.csv( finalData, row.names=FALSE, file="C:/Documents and Settings/e14600/Desktop/exposureByFlagData.csv" )
# sanity check: inter-book transfers between downstream books should net to
# zero in both directions; log every book/counterparty pair that does not
ibts = subset(downstreamData, downstreamData$BOOK_NAME %in% dsBooks & downstreamData$COUNTERPARTY %in% dsBooks)
ibtBooks = sort( unique( ibts$BOOK_NAME ) )
for( book in ibtBooks )
{
cpList = sort( unique( subset( ibts, BOOK_NAME == book )$COUNTERPARTY ) )
for( cp in cpList )
{
val1 = sum( ibts$value[ which(ibts$COUNTERPARTY==cp & ibts$BOOK_NAME==book)], na.rm=TRUE )
val2 = sum( ibts$value[ which(ibts$COUNTERPARTY==book & ibts$BOOK_NAME==cp)], na.rm=TRUE )
if( val1 != (-1*val2) )
{
rLog("cp1:", book, "cp2:", cp, "val1=", val1, "val2=", val2)
}
}
}
################################################################################
# Pass 2: pull the same positions directly from RMSYS via ODBC and
# delete the expired positions per the Raft query.
library(RODBC)
# NOTE(review): plaintext (read-only) database credentials embedded below.
dsnPath = "FileDSN=//NAS-OMF-01/cpsshare/All/Risk/Software/R/prod/Utilities/DSN/RMSYSP.dsn"
conString = paste( dsnPath, ";UID=rmsys_read;PWD=rmsys_read;", sep="")
chan = odbcDriverConnect(conString)
asOfDate = as.Date( "2008-10-10" )
rmsysQuery = paste( "SELECT
deal_number,
contract_number,
deal_type,
source_system,
dealer,
counterparty,
trading_entity,
dealt_date,
value_date,
maturity_date,
settlement_date,
buy_sell_flag,
commodity,
volume,
price,
notional_amount,
notional_amount_currency,
market_price,
mtm,
mtm_currency,
undelivered_amount,
book,
call_put,
strike_price,
premium,
delta,
unit,
hub,
region,
broker,
accounting,
instrument,
exp_date
FROM rmsys.raft_deal
WHERE cob_date = '", format( asOfDate, "%d%b%y" ),
"' and trading_entity = 'CPS'", sep="" )
rmsysData = sqlQuery(chan, rmsysQuery)
# latest expiry per (contract, value date, maturity date), used to identify
# positions already expired as of the cob date
rmsysFilterData = sqlQuery(chan, "SELECT MAX(exp_date), contract_number, value_date, maturity_date
FROM rmsys.raft_deal
WHERE cob_date = '10Oct08'
AND trading_entity = 'CPS'
GROUP BY contract_number, value_date, maturity_date")
odbcClose(chan)
names(rmsysFilterData)[1] = "EXP_DATE"
rmsysFilterData = subset(rmsysFilterData, EXP_DATE <= as.POSIXct('2008-10-10') )
rmsysFilterData$DELETE = TRUE
rmsysClean = merge( rmsysData, rmsysFilterData, all.x=TRUE )
# drop expired financial-swap rows of the listed deal types
rowsToRemove = which(
rmsysClean$DELETE == TRUE &
rmsysClean$INSTRUMENT == 'FINSWAP' &
rmsysClean$DEAL_TYPE %in% c("FFSW", "BSW", "SWINGSWAP") )
if( length(rowsToRemove) != 0 ) rmsysClean = rmsysClean[ -rowsToRemove,]
# put the rmsys data in the same order as the secdb data
rmsysClean = rmsysClean[ ,c("DEAL_NUMBER", "DEAL_TYPE", "SOURCE_SYSTEM",
"DEALER", "COUNTERPARTY", "TRADING_ENTITY", "DEALT_DATE", "VALUE_DATE",
"MATURITY_DATE", "SETTLEMENT_DATE", "BUY_SELL_FLAG", "COMMODITY", "VOLUME",
"PRICE", "NOTIONAL_AMOUNT", "NOTIONAL_AMOUNT_CURRENCY", "MARKET_PRICE",
"MTM", "MTM_CURRENCY", "BOOK", "CONTRACT_NUMBER")]
# normalize column classes: POSIXt -> Date, factor -> character
classes = sapply( rmsysClean, data.class )
rmsysClean[ , which( classes == "POSIXt" ) ] =
lapply( rmsysClean[ , which( classes == "POSIXt" ) ],
function(x) { as.Date(x) } )
rmsysClean[ , which( classes == "factor" ) ] =
lapply( rmsysClean[ , which( classes == "factor" ) ],
function(x) { as.character(x) } )
downstreamData = subset( rmsysClean, BOOK %in% dsBooks )
#downstreamData = subset( downstreamData, VALUE_DATE >= as.Date("2008-10-10") )
dim( downstreamData )
sum( downstreamData$MTM, na.rm = TRUE )
# same counterparty-group classification as pass 1, on RMSYS column names
downstreamData$EXPOSURE_FLAG = "External"
downstreamData$EXPOSURE_FLAG[ which( downstreamData$COUNTERPARTY %in% dsBooks )] = "Houston - Downstream"
downstreamData$EXPOSURE_FLAG[ which( downstreamData$COUNTERPARTY %in% baltimoreBooks )] = "Baltimore"
downstreamData$EXPOSURE_FLAG[ which( downstreamData$COUNTERPARTY %in% cneBooks )] = "CNE"
downstreamData$EXPOSURE_FLAG[ grep("^XG", downstreamData$COUNTERPARTY ) ] = "Houston - Storage"
downstreamData$EXPOSURE_FLAG[ grep("^XM", downstreamData$COUNTERPARTY ) ] = "Houston - Transport"
downstreamData = downstreamData[, sort(names(downstreamData)) ]
write.csv( downstreamData, row.names=FALSE, file="C:/Documents and Settings/e14600/Desktop/downstreamData.csv" )
names(downstreamData)[which(names(downstreamData) == "MTM")] = "value"
finalData = cast( downstreamData,
COUNTERPARTY ~ EXPOSURE_FLAG,
sum, na.rm = TRUE, fill = 0, margins = c("grand_col") )
write.csv( finalData, row.names=FALSE, file="C:/Documents and Settings/e14600/Desktop/exposureByFlagData.csv" )
|
4e2ea3893fb5685ab5e05fb84070650d4d9d3067 | bed1302cf910433a9490d063876927eaaf19b374 | /Bysykkel.R | 77cba9d1b686aff5e58aaa30bfb43ca10df1199a | [] | no_license | gunvorskj/work-organization-gunvor | 87eec5e783c5eacc7bdb709a270efdac8803902e | 8ba08e1040ef26ba497b021e2fde5ca5fbfeeea2 | refs/heads/master | 2020-05-23T13:21:48.011711 | 2019-05-22T13:51:25 | 2019-05-22T13:51:25 | 186,774,873 | 0 | 1 | null | 2019-05-15T08:06:42 | 2019-05-15T07:41:14 | null | UTF-8 | R | false | false | 1,579 | r | Bysykkel.R | library(tidyverse)
# City-bike trip analysis: popular stations, hire durations, and a simple
# map of median duration per start station.
bysykkel <- read.csv('Sykkel.csv',sep = ";")
bysykkeltibble <- as_tibble(bysykkel)
##most popular start station####
bysykkeltibble %>%
count(start_station_name) %>%
arrange(desc (n)) %>%
view
###most popular end station####
bysykkeltibble %>%
count(end_station_name) %>%
arrange(desc (n)) %>%
view
###longest/shortest duration###
bysykkeltibble %>%
count(duration) %>%
arrange(desc (duration)) %>%
view
bysykkeltibble %>%
count(duration) %>%
arrange( (duration)) %>%
view
###most popular pair of start and end station###
bysykkeltibble %>%
count(start_station_name, end_station_name) %>%
arrange(desc(n)) %>%
view()
###plot the number of hires and returns###
# NOTE(review): gather() is superseded by pivot_longer() in current tidyr
bysykkeltibble %>%
gather(key = key, value = value, start_station_name, end_station_name) %>%
count(key, value) %>%
ggplot (aes(x = value, y = n, fill = key)) + geom_col(position=position_dodge())
###plot the distrubution of hire duration##
# histogram truncated to durations between 10 and 3000 via xlim
bysykkeltibble %>%
ggplot(aes(x = duration)) + geom_histogram() + xlim(10, 3000)
###median duration of each station###
bysykkeltibble %>%
group_by(start_station_name) %>%
summarise(median_duration=median(duration))
###map this information###
# longitude < 10 filter drops outlying stations; point size = median duration
bysykkeltibble %>%
filter(start_station_longitude<10) %>%
group_by(start_station_name, start_station_latitude, start_station_longitude) %>%
summarise(meddur = median(duration)) %>%
ggplot(aes(x = start_station_longitude, y = start_station_latitude, size = meddur)) + geom_point()
|
33e5fa5ea8fd91daa9d078daaef447f705a887e6 | bf0d3ee072cc1de3e310a09f26c29b62df827051 | /man/Peptides-class.Rd | d1c4e87ff166314b6d70ee51a78d32d05bc050d2 | [] | no_license | xrobin/MCMS | eaf5b2111f5dbe959a79d061f1a0e998764be23f | 84acb5e3072ee7d5b7a311121905ef7feb6060a0 | refs/heads/master | 2021-04-30T14:37:47.405270 | 2021-04-06T11:08:15 | 2021-04-06T11:08:15 | 121,221,364 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 768 | rd | Peptides-class.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PeptidesClass.R
\docType{class}
\name{Peptides-class}
\alias{Peptides-class}
\title{Set of peptides}
\description{
A class that describes a set of peptides. Use the \code{\link{Peptides}} function for easy object creation.
}
\section{Slots}{
\describe{
\item{\code{c}}{the concentration ratios (per sample pair) as a named numeric (name is sampleX_sampleY, ...)}
\item{\code{o}}{the occupancy ratios (per sample) as a named numeric (name is sampleX, ...)}
\item{\code{num.c,num.o}}{number of o and c parameters}
\item{\code{names.c,names.o}}{names of the pair to which each c, and of the sample to which each o applies.}
\item{\code{protein}}{the \code{\link{Protein-class}} object}
}}
|
6b2dad4b325ac8d6702c5374c658969e83e4eeb3 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/pgirmess/examples/polycirc.rd.R | cc864d271d91c34bdfd68444b365ff83cf75427e | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 261 | r | polycirc.rd.R | library(pgirmess)
### Name: polycirc
### Title: Computes the polygon coordinates of a circle
### Aliases: polycirc
### Keywords: manip
### ** Examples
# empty 10x10 plotting window with a 1:1 aspect ratio so circles look round
plot(1:10,1:10,type="n",asp=1)
# circle of radius 5 centred at the default origin
polygon(polycirc(5),col="blue")
# circle of radius 2 centred at (5, 5)
polygon(polycirc(2,c(5,5)), col="red")
|
f898c7c6ab642771c0bfa4e302503ec751a6835f | f72414dd06ff27eaff2b087379820f25d45907ae | /man/ngsLCA_rank.Rd | 16beead2101ca281b9ee2e024708776d91f300a0 | [] | no_license | wyc661217/ngsLCA | 4513005e4d8c97c5b1088a83dd72d719377001d3 | 21b5e5987bc5040d8456f04eb39c0edeee8cad04 | refs/heads/master | 2023-04-18T07:00:15.874512 | 2022-04-07T16:27:22 | 2022-04-07T16:27:22 | 401,632,712 | 3 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,281 | rd | ngsLCA_rank.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ngsLCA_rank.R
\name{ngsLCA_rank}
\alias{ngsLCA_rank}
\title{Classify Taxa to Taxonomic Ranks}
\usage{
ngsLCA_rank(path, run = "run01", rank.name = "species,genus,family")
}
\arguments{
\item{path}{working directory, same to \code{\link{ngsLCA_profile}}.}
\item{run}{name of the run, default is "run01".}
\item{rank.name}{a comma separated vector listing the taxonomic ranks that will be used for classifying taxa profiles; default is "species,genus,family"}
}
\value{
Taxa profiles clustered into taxa ranks.
}
\description{
Classify the combined taxa profile (and grouped taxa profiles generated by \code{\link{ngsLCA_group}} if available) into user-defined taxonomic ranks. Results will be in "path/run/taxonomic_profiles/taxa_ranks/".
}
\examples{
ngsLCA_rank(path=system.file("extdata","lca_files",package="ngsLCA"),
run="run01",
rank.name="species,genus")
## This will classify the combined taxa profile (and
## grouped taxa profiles if available) of "run01" into
## species and genus, by merging all taxa below a species
## into that species, and all taxa below a genus into
## that genus. Generated files will be in
## "path/run01/taxonomic_profiles/taxa_ranks/".
}
|
41261188ba4eced202f19ffbb890df2602c1053a | a5ced02be5ef57cfc093b9a77fbb71cdb18d9d76 | /R/get_soilseries_from_NASIS.R | 94731378d4fd5fc2f6f142ff22162a894fa8628e | [] | no_license | Emory-ENVS-SihiLab/soilDB | e882de8337a3f3bd9943046c781f42a473723669 | fca026cc1039f3f8936b70d0efe8c092950db4ee | refs/heads/master | 2023-08-13T16:25:03.372504 | 2021-09-18T00:48:57 | 2021-09-18T00:48:57 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,523 | r | get_soilseries_from_NASIS.R | #' Get records from the Soil Classification (SC) database
#'
#' These functions return records from the Soil Classification database, either
#' from the local NASIS database (all series) or via web report (named series
#' only).
#'
#' @aliases get_soilseries_from_NASIS get_soilseries_from_NASISWebReport
#'
#' @param stringsAsFactors logical: should character vectors be converted to
#' factors? This argument is passed to the `uncode()` function. It does not
#' convert those vectors that have set outside of `uncode()` (i.e. hard coded).
#'
#' @param dsn Optional: path to local SQLite database containing NASIS
#' table structure; default: `NULL`
#'
#' @param delimiter _character_. Used to collapse `taxminalogy` records where multiple values are used to describe strongly contrasting control sections. Default `" over "` creates combination mineralogy classes as they would be used in the family name.
#'
#' @return A \code{data.frame}
#'
#' @author Stephen Roecker
#'
#' @keywords manip
#'
#' @export get_soilseries_from_NASIS
# NOTE(review): default.stringsAsFactors() is defunct in R >= 4.3; callers on
# modern R must pass stringsAsFactors explicitly -- consider a FALSE default.
get_soilseries_from_NASIS <- function(stringsAsFactors = default.stringsAsFactors(),
                                      dsn = NULL, delimiter = " over ") {
  # main query: one row per soil series, joined to its type-location area
  q.soilseries <- "
  SELECT soilseriesname, soilseriesstatus, benchmarksoilflag, soiltaxclasslastupdated, mlraoffice, taxclname, taxorder, taxsuborder, taxgrtgroup, taxsubgrp, taxpartsize, taxpartsizemod, taxceactcl, taxreaction, taxtempcl, taxfamhahatmatcl, originyear, establishedyear, descriptiondateinitial, descriptiondateupdated, statsgoflag, soilseriesiid, areasymbol, areaname, areaacres, obterm, areatypename, soilseriesedithistory
  FROM soilseries ss
  INNER JOIN area a ON a.areaiid = ss.typelocstareaiidref
  INNER JOIN areatype at ON at.areatypeiid = ss.typelocstareatypeiidref
  ORDER BY soilseriesname;"
  # mineralogy query: possibly several ordered rows per series
  q.min <- "SELECT soilseriesiidref, minorder, taxminalogy FROM soilseriestaxmineralogy
  ORDER BY soilseriesiidref, minorder;"
  channel <- dbConnectNASIS(dsn)
  # no connection available -> return an empty frame rather than erroring
  if (inherits(channel, 'try-error'))
    return(data.frame())
  # exec query
  d.soilseries <- dbQueryNASIS(channel, q.soilseries, close = FALSE)
  d.soilseriesmin <- dbQueryNASIS(channel, q.min)
  # recode metadata domains
  d.soilseries <- uncode(d.soilseries, stringsAsFactors = stringsAsFactors, dsn = dsn)
  d.soilseriesmin <- uncode(d.soilseriesmin, stringsAsFactors = stringsAsFactors, dsn = dsn)
  # prep: keep only the year portion of the last-updated timestamp
  d.soilseries$soiltaxclasslastupdated <- format(as.Date.POSIXct(d.soilseries$soiltaxclasslastupdated), "%Y")
  # aggregate mineralogy data (ordered by minorder, combined with "over")
  d.minagg <- aggregate(d.soilseriesmin$taxminalogy,
                        list(soilseriesiid = d.soilseriesmin$soilseriesiidref),
                        paste0, collapse = delimiter)
  colnames(d.minagg) <- c("soilseriesiid", "taxminalogy")
  # left join so series without mineralogy rows are kept (taxminalogy = NA)
  res <- merge(
    d.soilseries,
    d.minagg,
    by = "soilseriesiid",
    all.x = TRUE,
    incomparables = NA,
    sort = FALSE
  )
  # reorder column names
  return(res[,c("soilseriesiid", "soilseriesname", "soilseriesstatus", "benchmarksoilflag",
                "soiltaxclasslastupdated", "mlraoffice", "taxclname", "taxorder",
                "taxsuborder", "taxgrtgroup", "taxsubgrp", "taxpartsize", "taxpartsizemod",
                "taxceactcl", "taxreaction", "taxtempcl", "taxminalogy", "taxfamhahatmatcl",
                "originyear", "establishedyear", "descriptiondateinitial", "descriptiondateupdated",
                "statsgoflag", "soilseriesedithistory", "areasymbol", "areaname",
                "areaacres", "obterm", "areatypename")])
}
# Fetch Soil Classification records for the named series via the NASIS web
# report (one HTTP request per element of `soils`), then recode domains.
# NOTE(review): default.stringsAsFactors() is defunct in R >= 4.3.
get_soilseries_from_NASISWebReport <- function(soils, stringsAsFactors = default.stringsAsFactors()) {
  url <- "https://nasis.sc.egov.usda.gov/NasisReportsWebSite/limsreport.aspx?report_name=get_soilseries_from_NASISWebReport"
  # one web-report call per requested series name
  d.ss <- lapply(soils, function(x) {
    args = list(p_soilseriesname = x)
    d = parseWebReport(url, args)
  })
  d.ss <- do.call("rbind", d.ss)
  # set factor levels according to metadata domains
  # (mlraoffice uses the LIMS domain source; everything else uses SDA)
  d.ss[!names(d.ss) %in% c("mlraoffice", "taxminalogy")] <- uncode(d.ss[!names(d.ss) %in% c("mlraoffice", "taxminalogy")],
                                                                   db = "SDA", stringsAsFactors = stringsAsFactors)
  d.ss[names(d.ss) %in% c("mlraoffice")] <- uncode(d.ss[names(d.ss) %in% c("mlraoffice")],
                                                   db = "LIMS", stringsAsFactors = stringsAsFactors)
  # return data.frame
  return(d.ss)
}
|
8f343fce5d0e0196b39d80a3fb42ce4f65292b01 | 12bd3d5a0377fb2c709b99b71f0f43bfedace4c2 | /R/IsMissingSpecies.R | 9f80fc8f619d9c8f8e0098a2f5a52cf8e9430df9 | [] | no_license | bbanbury/phrynomics | 58666270c415c46cdf1e2b7faab34f865fbf3a35 | 42c393473d0627d5c2b95989f0f6036dc5038c70 | refs/heads/master | 2023-05-25T05:13:17.346748 | 2023-05-16T00:15:38 | 2023-05-16T00:15:38 | 14,935,999 | 25 | 7 | null | 2023-05-16T00:15:39 | 2013-12-04T21:13:46 | R | UTF-8 | R | false | false | 1,488 | r | IsMissingSpecies.R | #' Missing Species Vector
#'
#' This function will determine if entire species are missing data (across all sites of a locus). Species names are important for this function, so if they are read in incorrectly, if could affect the results. They should be in the format where each species shares a unique flag and are then numbered (for example, species1, species2, species3 would be three individuals of the same species). If you want to check species see the function \code{GetSpecies}.
#' @param locus A single locus (can have multiple sites)
#' @param SpeciesNames Vector of species names that will cluster individuals. This will likely be rownames(SNPdataset)
#' @param chatty Option to print details to screen
#' @export
#' @return Returns a single TRUE/FALSE value: FALSE if any species has only missing data ("N") across the locus, TRUE otherwise
#' @seealso \link{ReadSNP} \link{WriteSNP} \link{GetSpecies} \link{RemoveMissingSpeciesLoci}
#' @examples
#' data(fakeData)
#' Spnames <- rownames(fakeData)
#' IsMissingSpecies(fakeData[,1], Spnames)
#' IsMissingSpecies(fakeData[,2], Spnames)
#' IsMissingSpecies(fakeData[,3], Spnames)
IsMissingSpecies <- function(locus, SpeciesNames, chatty=FALSE){
  # Check, species by species, whether every site of this locus is "N"
  # (missing) across all individuals of that species.
  for (sp in GetSpecies(SpeciesNames)) {
    # concatenate all sites of every individual belonging to this species
    merged <- paste(locus[grep(sp, SpeciesNames)], collapse="")
    siteChars <- strsplit(merged, "")[[1]]
    if (all(siteChars == "N")) {
      if (chatty)
        print(paste("Species", sp, "has all missing"))
      # at least one species is entirely missing for this locus
      return(FALSE)
    }
  }
  # every species has at least one non-missing site
  return(TRUE)
}
|
825bdc55bf914d196ff15bae80077ec970f6a13c | d71f89f90448ca3dae2efe3cfd2212e59d41eec4 | /man/GenerateMulticolinearityMeasures.Rd | c82325f051c799982d837170d24a9066dbce9eaf | [] | no_license | warnbergg/regone | ce2b5f9db882b40c025fc8a669e9d65f5404e3a8 | 851be8e37259b1b0f599038e08ae14c27365b454 | refs/heads/master | 2023-08-22T02:18:39.488804 | 2021-10-13T14:11:07 | 2021-10-13T14:11:07 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 957 | rd | GenerateMulticolinearityMeasures.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GenerateMulticolinearityMeasures.R
\name{GenerateMulticolinearityMeasures}
\alias{GenerateMulticolinearityMeasures}
\title{GenerateMulticolinearityMeasures}
\usage{
GenerateMulticolinearityMeasures(data, dv, fit, dir = "./", save.plots = TRUE)
}
\arguments{
\item{data}{data.frame Data used to fit the model. Used for pair-wise correlation analysis and eigenvalue system analysis. No default.}
\item{dv}{Character vector of length 1. Dependent variable. No default.}
\item{fit}{lm object. Fitted model. No default}
\item{dir}{Character vector of length 1. Directory in which to store the plot. Ignored if save.plots is FALSE. Defaults to "./"}
\item{save.plots}{Logical vector of length 1. If TRUE the VIF plot and correlation heatmap are saved to disk. Defaults to TRUE.}
}
\description{
Produces variance inflation factors, eigenvalue system analysis for the fitted model.
}
|
0afa0c21abbda135c40949bd0bd4b7b21bcc1eb3 | b09d92bc7db5b477250e0f6444682ba9c350d404 | /Bagging and RF from Book without Glucose.R | 81cdcc323a60699c68e2301c3914fa155124a2fe | [] | no_license | allie-touchstone/Diabetes-Analysis | 6887030932ce5f2ff831d6116d5ac71c5536e70e | 3df59529bf1f9b3640ae2ab1df86f22e2104a650 | refs/heads/main | 2023-07-14T15:29:46.240964 | 2021-08-27T13:56:15 | 2021-08-27T13:56:15 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 974 | r | Bagging and RF from Book without Glucose.R | # Bagging
library(randomForest)
# NOTE(review): dec=',' reads commas as decimal separators in diabetes.csv
diabetes = read.csv("diabetes.csv", dec =',')
# two-level outcome factor: "Diabetes" vs "No Diabetes"
diabetes1 = factor(ifelse(diabetes$diabetes == "No diabetes", "No Diabetes", "Diabetes"))
diabetes = data.frame(diabetes, diabetes1)
# NOTE(review): this 50/50 split is drawn BEFORE set.seed(1), so the
# train/test partition (and the rates quoted below) are not reproducible
train = sample(1:nrow(diabetes), nrow(diabetes)/2)
diabetes.train = diabetes[train,]
diabetes.test = diabetes[-train,]
diabetes1.test = diabetes1[-train]
set.seed(1)
# bagging = random forest with mtry equal to the full predictor count (13);
# glucose excluded along with the id and raw outcome columns
bag.diabetes = randomForest(diabetes1~.-patient_number-diabetes-glucose,data=diabetes,subset=train,mtry=13,importance=TRUE)
bag.diabetes # Error Rate is 18.46% (increase of 5.64% from with glucose)
(2+157)/195 # 81.54% Success Rate (decrease of 5.63% from with glucose)
# Random Forest
set.seed(1)
# proper random forest: mtry = 7 of the 13 predictors per split
rf.diabetes = randomForest(diabetes1~.-patient_number-diabetes-glucose,data=diabetes,subset=train,mtry=7,importance=TRUE)
rf.diabetes
(3+157)/195 # 82.05% Success Rate (decrease of 7.18% from with glucose)
importance(rf.diabetes)
# BMI, Hip, Age are the most important variables without glucose
|
f148519e6abce069cf8af9cbc8791d2964be8372 | 484204c37e1d7c0ac9b0dc13a797021017eeab40 | /man/summarise.dtplyr_step.Rd | bad0c0c642e05affc24e89e6b788b872b9d8722d | [
"MIT"
] | permissive | romainfrancois/dtplyr | 6b492d1a29b4ae42ecdd4e0640531a062d777789 | 9200cecf9c5297a350284401edbe99fa35c15515 | refs/heads/main | 2023-08-29T15:02:30.991425 | 2021-10-15T18:37:46 | 2021-10-15T18:37:56 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,718 | rd | summarise.dtplyr_step.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/step-subset-summarise.R
\name{summarise.dtplyr_step}
\alias{summarise.dtplyr_step}
\title{Summarise each group to one row}
\usage{
\method{summarise}{dtplyr_step}(.data, ..., .groups = NULL)
}
\arguments{
\item{.data}{A \code{\link[=lazy_dt]{lazy_dt()}}.}
\item{...}{<\code{\link[dplyr:dplyr_data_masking]{data-masking}}> Name-value pairs of summary
functions. The name will be the name of the variable in the result.
The value can be:
\itemize{
\item A vector of length 1, e.g. \code{min(x)}, \code{n()}, or \code{sum(is.na(y))}.
\item A vector of length \code{n}, e.g. \code{quantile()}.
\item A data frame, to add multiple columns from a single expression.
}}
\item{.groups}{\Sexpr[results=rd]{lifecycle::badge("experimental")} Grouping structure of the result.
\itemize{
\item "drop_last": dropping the last level of grouping. This was the
only supported option before version 1.0.0.
\item "drop": All levels of grouping are dropped.
\item "keep": Same grouping structure as \code{.data}.
}
When \code{.groups} is not specified, it defaults to "drop_last".
In addition, a message informs you of that choice, unless the result is ungrouped,
the option "dplyr.summarise.inform" is set to \code{FALSE},
or when \code{summarise()} is called from a function in a package.}
}
\description{
This is a method for the dplyr \code{\link[=summarise]{summarise()}} generic. It is translated to
the \code{j} argument of \verb{[.data.table}.
}
\examples{
library(dplyr, warn.conflicts = FALSE)
dt <- lazy_dt(mtcars)
dt \%>\%
group_by(cyl) \%>\%
summarise(vs = mean(vs))
dt \%>\%
group_by(cyl) \%>\%
summarise(across(disp:wt, mean))
}
|
02ec8558252ef756944d4f29fa88cd1b76c310db | d906c58a00ec380f472307e73bb3feecf38eb142 | /main.r | 4ead6a5644fb4089efc3c45890181e66c93c38a9 | [] | no_license | baldrech/MizerEvo | e443302b6dc4613c870c00c6c4c210c7a42677b7 | fa9d6258b84748ad8cf95b2690083459e3007f9e | refs/heads/master | 2022-12-10T01:10:45.253482 | 2020-09-03T01:07:11 | 2020-09-03T01:07:11 | 105,944,053 | 1 | 3 | null | 2020-09-03T01:07:14 | 2017-10-05T21:51:27 | R | UTF-8 | R | false | false | 56,692 | r | main.r | # This is the main script where you run the simulation (or experiment new things)
#setting things up -----------------------
rm(list = ls())
require(scales)
require(ggplot2)#because always need these two
require(reshape2)
require(plyr)# for aaply
require(grid)# for grid.newpage (plotSummary)
require(abind) # to use abind (bind of arrays)
require(rmarkdown)
require(RColorBrewer)
require(tictoc)
require(limSolve)
#require(tidyverse)
source("MizerParams-class.r") #to get the Constructor
source("selectivity_funcs.r") #to get the knife_edge function
source("methods.r") #I'm doing my own methods then!
source("summaryFunction.r") #to have all the GetSomething functions
source("plotFunction.r") #to draw the plots
source("TBM1.r") # the model from mizer (more like a set up)
source("model.r") # my model
source("utility.r") # helpful functions
# # little script to check sim content ----------------
# a<- get(load("eta5/fisheries/run1/run.Rdata"))
# a@params@species_params$knife_edge_size
# a@params@interaction
# a@params@species_params$r_max
# multi species simulations ----------
# Driver for the initialisation phase: sets physiological, fisheries and
# run-length parameters, then runs myModel() (sourced from model.r) for each
# replicate id in simulationVec, saving results under <file_name>/init/run<i>.
file_name = "/Sim9"
noInter = F  # TRUE = switch species interactions off (see override block below); NOTE(review): prefer TRUE/FALSE over T/F
# PARAMETERS
# physio
no_sp = 9
min_w_inf <- 10
max_w_inf <- 1e5
RMAX = T  # cap recruitment with r_max (density-dependent stock-recruitment)
# log-spaced asymptotic sizes, one per species
w_inf <- 10^seq(from=log10(min_w_inf), to = log10(max_w_inf), length=no_sp) # for fisheries gear
# varying param
# parameters worth checking: h, ks, z0pre, sigma, beta, f0, erepro, w_pp_cutoff
# defaults
h = 85
ks = 4
z0pre = 2
sigma = 1
beta = 100
f0 = 0.5
erepro = 1
w_pp_cutoff = 1
interaction = 0.5
overlap = 0.5
eta = 0.25       # maturation size as a fraction of asymptotic size
mAmplitude = 0.2 # mutation amplitude on the evolving trait
mu=1             # mutation rate
kappa = 0.05     # background (plankton) spectrum carrying capacity
# no-interaction variant: interactions off, plankton spectrum extended and enriched
if(noInter)
{
  w_pp_cutoff = 1e5
  interaction = 0
  overlap = 0
  kappa = 0.5
}
# fisheries
gear_names <- rep("FishingStuff", no_sp)
knife_edges <- w_inf * eta  # knife-edge selectivity set at maturation size
# other
t_max = 50
no_run = 60
no_sim = 10
i_start = 1
# or: run only these replicate ids instead of i_start:no_sim
simulationVec <- c(10)
# initialisation phase (4000 yr)
#for (i in i_start:no_sim)
for (i in simulationVec)
{
  # Template kept for experiments that vary mu per replicate:
  # switch(i,
  #        "1" = {mu = 0.01},
  #        "2" = {mu = 0.1},
  #        "3" = {mu = 0.5},
  #        "4" = {mu = 1},
  #        "5" = {mu = 1.5},
  #        "6" = {mu = 3},
  #        "7" = {mu = 5},
  #        {})
  tic()
  cat(sprintf("Simulation number %g\n",i))
  path_to_save = paste(getwd(),file_name,"/init/run", i, sep = "")
  # myModel() is sourced from model.r; fishing is off (effort = 0) during init
  sim <- myModel(no_sp = no_sp, eta = eta, t_max = t_max, no_run = no_run, min_w_inf = min_w_inf,extinct = T,
                 max_w_inf = max_w_inf, RMAX = RMAX,
                 ken = F, initTime = 1, initPool = 9, ks = ks, z0pre = z0pre, f0 = f0, overlap = overlap, sigma = sigma, beta = beta, w_pp_cutoff = w_pp_cutoff,
                 kappa = kappa,
                 OptMutant = "M5", mAmplitude = mAmplitude, mu= mu,
                 effort = 0, #knife_edge_size = knife_edges, gear_names = gear_names,
                 save_it = T, path_to_save = path_to_save,
                 print_it = T, normalFeeding = F, Traits = "eta")
  #rm(sim)
  # repeated gc() to claw memory back between long runs
  for (j in 1:20) gc()
  toc()
}
# simulation after initialisation
# Continuation phase WITHOUT fishing: each saved init run is reloaded and used
# as the initial condition for a further no_run-generation simulation, saved
# under <file_name>/normal/run<i>.
folder <- paste(getwd(),file_name,sep="")
initFolder <- paste(folder,"/init",sep="")
dirContent <- dir(initFolder)[1:11]  # first 11 entries of the init folder (run directories)
#dirContent <- "run4"
no_run = 60
i_start = 2
# NO fisheries
for (i in i_start:length(dirContent))
{
  # Template kept for experiments that vary mu per replicate:
  # switch(i,
  #        "1" = {mu = 0.01},
  #        "2" = {mu = 0.1},
  #        "3" = {mu = 0.5},
  #        "4" = {mu = 1},
  #        "5" = {mu = 1.5},
  #        "6" = {mu = 3},
  #        "7" = {mu = 5},
  #        {})
  # skip init runs that did not save a result
  if (file.exists(paste(initFolder,"/",dirContent[i],"/run.Rdata",sep = "")))
  {
    sim <- get(load(paste(initFolder,"/",dirContent[i],"/run.Rdata",sep = "")))
    path_to_save <- paste(folder,"/normal/",dirContent[i],sep = "")
    cat(sprintf("Using %s\n",i))
    # initCondition = sim makes myModel continue from the saved state
    output <- myModel(no_sp = no_sp, eta = eta, t_max = t_max, no_run = no_run, min_w_inf = min_w_inf,extinct = T,
                      max_w_inf = max_w_inf, RMAX = RMAX,
                      ken = F, initTime = 1, initPool = 9, ks = ks, z0pre = z0pre, f0 = f0, overlap = overlap, sigma = sigma, beta = beta, w_pp_cutoff = w_pp_cutoff,
                      kappa = kappa,
                      OptMutant = "M5", mAmplitude = mAmplitude, mu = mu, initCondition = sim,
                      effort = 0, #knife_edge_size = knife_edges, gear_names = gear_names,
                      save_it = T, path_to_save = path_to_save,
                      print_it = T, normalFeeding = F, Traits = "eta")
    rm(output)
    for (j in 1:20) gc()
  }
}
# Fisheries
# Same continuation as above but WITH fishing (effort = 0.8, knife-edge gears),
# saved under <file_name>/fisheries/run<i>.
dirContent <- "run6"  # restrict to a single init run
# NOTE(review): with length(dirContent) == 1 and i_start == 2 (set above),
# i_start:length(dirContent) is 2:1, so the loop runs i = 2 (NA path, skipped
# by file.exists) then i = 1. Guard with seq_len()/if() if i_start must be honoured.
for (i in i_start:length(dirContent))
{
  # Template kept for experiments that vary mu per replicate:
  # switch(i,
  #        "1" = {mu = 0.01},
  #        "2" = {mu = 0.1},
  #        "3" = {mu = 0.5},
  #        "4" = {mu = 1},
  #        "5" = {mu = 1.5},
  #        "6" = {mu = 3},
  #        "7" = {mu = 5},
  #        {})
  if (file.exists(paste(initFolder,"/",dirContent[i],"/run.Rdata",sep = "")))
  {
    sim <- get(load(paste(initFolder,"/",dirContent[i],"/run.Rdata",sep = "")))
    path_to_save <- paste(folder,"/fisheries/",dirContent[i],sep = "")
    cat(sprintf("Using %s\n",i))
    output <- myModel(no_sp = no_sp, eta = eta, t_max = t_max, no_run = no_run, min_w_inf = min_w_inf,extinct = T,
                      max_w_inf = max_w_inf, RMAX = RMAX,
                      ken = F, initTime = 1, initPool = 9, ks = ks, z0pre = z0pre, f0 = f0, overlap = overlap, sigma = sigma, beta = beta, w_pp_cutoff = w_pp_cutoff,
                      kappa = kappa,
                      OptMutant = "M5", mAmplitude = mAmplitude, mu= mu, initCondition = sim,
                      effort = 0.8, knife_edge_size = knife_edges, gear_names = gear_names,
                      save_it = T, path_to_save = path_to_save,
                      print_it = T, normalFeeding = F, Traits = "eta")
    rm(output)
    for (j in 1:20) gc()
  }
}
# Varying effort
# Sweeps fishing effort (0.1-0.7, 0.9, 1) on saved init runs; results go to
# <file_name>/fisheries/effort<e>/run<i>.
# NOTE(review): the file check and load below use dirContent[1] while the save
# path uses dirContent[i] — presumably one init run is reused for every effort,
# but the mismatch with the outer loop over i looks like a copy-paste slip.
# Confirm whether dirContent[i] was intended before trusting these outputs.
for (i in i_start:length(dirContent))
{
  for (effort in c(seq(0.1,0.7,0.1),0.9,1))
  {
    if (file.exists(paste(initFolder,"/",dirContent[1],"/run.Rdata",sep = "")))
    {
      sim <- get(load(paste(initFolder,"/",dirContent[1],"/run.Rdata",sep = "")))
      path_to_save <- paste(folder,"/fisheries/effort",effort,"/",dirContent[i],sep = "")
      cat(sprintf("Using %s\n",i))
      output <- myModel(no_sp = no_sp, eta = eta, t_max = t_max, no_run = no_run, min_w_inf = min_w_inf,extinct = T,
                        max_w_inf = max_w_inf, RMAX = RMAX,
                        ken = F, initTime = 1, initPool = 9, ks = ks, z0pre = z0pre, f0 = f0, overlap = overlap, sigma = sigma, beta = beta, w_pp_cutoff = w_pp_cutoff, kappa = kappa,
                        OptMutant = "M5", mAmplitude = mAmplitude, mu= mu, initCondition = sim,
                        effort = effort, knife_edge_size = knife_edges, gear_names = gear_names,
                        save_it = T, path_to_save = path_to_save,
                        print_it = T, normalFeeding = F, Traits = "eta")
      rm(output)
      for (j in 1:20) gc()
    }
  }
}
#with parallel / need to update the function---------------------------
# Parallel variant: runs multiRun() on a fork cluster, one worker per core,
# then saves each worker's result under ./parallel/run<i>.
# NOTE(review): rm(list = ls()) wipes everything defined above — this section
# is meant to be run standalone, not after the sections above.
rm(list = ls())
library(parallel)
library(ggplot2)#because always need these two
library(reshape2)
library(plyr)# for aaply
library(grid)# for grid.newpage (plotSummary)
library(abind) # to use abind (bind of arrays)
library(rmarkdown)
library(RColorBrewer)
library(tictoc)
source("MizerParams-class.r") #to get the Constructor
source("selectivity_funcs.r") #to get the knife_edge function
source("methods.r") #I'm doing my own methods then!
source("summaryFunction.r") #to have all the GetSomething functions
source("plotFunction.r") #to draw the plots
source("TBM1.r") # the model from mizer (more like a set up)
source("model.r") # my model
source("utility.r")
#(optional) record start time, for timing
ptm=proc.time()
tic()
#unsure what this setting does
options(warn=-1) # suppresses warnings globally — consider restoring afterwards
#Adjust this for num of targeted cpu/cores
# e.g. Numcores = detectCores()-1
where = paste(getwd(),"/parallel",sep="")
numcores=4
# fork cluster (Unix only); outfile = "" streams worker output to the console
cl <- makeForkCluster(getOption("cl.cores", numcores), outfile = "")
# load-balanced apply: each x in 1:numcores becomes one multiRun() call
sim <- clusterApplyLB(cl
                      ,x=1:numcores
                      ,fun=multiRun
                      ,no_sp = 9
                      ,t_max = 50
                      ,mu = 5
                      ,no_run = 80
                      ,min_w_inf = 10
                      ,max_w_inf = 10e5
                      ,effort = 0
)
stopCluster(cl)
## Option 1: future package (and safely)
# NOTE(review): sketch only — future_lapply() actually lives in the
# future.apply package, and the trailing ", )" below passes a missing argument.
library(future)
plan(multiprocess)
## optionally, safely
safe_multiRun <- purrr::safely(multiRun)
sim <- future::future_lapply(1:numcores, safe_multiRun, no_sp , )
library(purrr)
## Option 2: purrr package
# NOTE(review): sketch only — a literal `...` is not valid outside a function body.
safe_multiRun <- purrr::safely(multiRun)
sim <- purrr::map(1:numcores, safe_multiRun, no_sp = 9, ...)
#(optional) compare end with start time, for timing
# saving: one directory per worker result
for (i in 1:length(sim))
{
  path_to_save = paste(where,"/run",i,sep="")
  ifelse(!dir.exists(file.path(path_to_save)), dir.create(file.path(path_to_save),recursive = T), FALSE)
  saveRDS(file = paste(path_to_save,"/run.RDS",sep=""),object = sim[[i]])
}
print((proc.time()-ptm)/60.0)  # elapsed time in minutes
toc()
# working on that right now -------------------
# Scratch pad: merge two interaction matrices that share species, keeping the
# rows/columns of interactionAlpha that are missing from interactionBeta.
# NOTE(review): interactionBeta / interactionAlpha are assumed to exist in the
# workspace — they are not created in this file.
rownames(interactionBeta) <- c("1","2","3","4")
colnames(interactionBeta) <- c("1","2","3","4")
rownames(interactionAlpha) <- c("1","2","3","4","5")
colnames(interactionAlpha) <- c("1","2","3","4","5")
interactionAlpha<-interactionAlpha[-3,-3]  # drop species 3 from the alpha matrix
which(rownames(interactionAlpha) != rownames(interactionBeta))
a <- rownames(interactionAlpha)
b <- rownames(interactionBeta)
c <- which(!(a %in% b))  # indices of alpha species absent from beta; NOTE(review): `c` shadows base::c
interactionSave <- rbind(interactionBeta,interactionAlpha[c,])
interactionSave <- cbind(interactionSave,interactionAlpha[,c])
# investigate this fucking growth
object <- get(load("ParamChap1/init/run4/run.Rdata"))
# Plot realised intake versus maximum intake of small and large individuals to see what is causing decrease in growth at large sizes.
# Is it different among large and small species?
# Is it food limitation or is metabolism too high?
# If this is food limitation that affects growth, would changing PPMR or feeding kernel improve growth?
# And how much do you need to change it for some substantial effect to happen?
#look at mortality
plotScythe(object)
# trait value picking: sanity-check the distribution of mutation steps
# Trait = eta
mAmplitude = 0.05
eta = 0.5
sd = as.numeric(mAmplitude * eta) # standard deviation
#x <- eta + rnorm(1, 0, sd) # change a bit eta
df = NULL
# draw 10000 mutation steps to eyeball their spread
for (i in 1:10000) df <- c(df,( rnorm(1, 0, sd)))
summary(df)
plot(df)
plot(density(df))
# analytical normal density for comparison
x <- seq(-0.5,0.5, length=500)
y<-dnorm(x,mean=0, sd=0.025)
plot(x,y, type="l")
# Plots of every kind of output + for loop to see the variation of one parameter ------------------
# Settings for the diagnostic-plot loop below.
res = 1000 # figure resolution
subdir = "/weighted" # where to store the plots
parameter = "none" # name of parameter varying for plot title
t_max = 100
no_sp = 4
mu = 5
# For each value of the swept parameter (here powers of ten, 1e-5..1), run
# myModel() with data = TRUE so it returns its internal rates, then dump a
# large set of diagnostic PNGs under <dir><subdir>.
# NOTE(review): `dir` is used as a path variable throughout but is never
# assigned in this file (base::dir is a function) — it must be set beforehand.
for (i in c(1 %o% 10^(-5:0)))
{
  output <- myModel(no_sp = no_sp, t_max = t_max, mu = mu, OptMutant = "yo", no_run = 1,
                    min_w_inf = 10, ks=2, max_w_inf = 10000,
                    #param = sim@params, # option to give param of another sim to have mutant relations
                    effort = 0, data = TRUE)
  # when data = true, mutation do not work but I get the values of lots of function at each time step of the simulation
  # do that for short runs
  # sort the output: unpack the list returned by myModel(data = TRUE)
  energy = output[[1]]
  rd = output[[2]]
  eggs = output[[3]]
  sim = output[[4]]
  food = output[[5]]
  m2 = output[[6]]
  z = output[[7]]
  m2_background = output[[8]]
  phi_fish = output[[9]]
  phi_pltk = output[[10]]
  end = dim(energy)[1]  # index of the last saved time step
  # thing to fix: if I give parameters to the sim, it won't have the right name (sp name instead of ecotype)
  # if there are no mutants I guess its fine
  dimnames(sim@n)$sp = sim@params@species_params$ecotype
  # NOTE(review): ifelse() on a scalar works but plain if () would be clearer
  ifelse(!dir.exists(file.path(dir, subdir)), dir.create(file.path(dir, subdir)), FALSE) #create the file if it does not exists
  dir.create(file.path(dir,subdir,"/reproduction")) # create tree files to ease comparison
  dir.create(file.path(dir,subdir,"/growth"))
  dir.create(file.path(dir,subdir,"/mortality"))
  dir.create(file.path(dir,subdir,"/spawn"))
  dir.create(file.path(dir,subdir,"/RDD"))
  # NOTE(review): "/RDI" is never created here, yet setwd(.../RDI) is used below
  dir.create(file.path(dir,subdir,"/feeding"))
  # plots ----------------
  plotDynamics(sim)
  setwd(paste(dir,subdir, sep = "")) #to have the figures in the right directory
  mytitle = paste("biomass_", parameter, "_",i,".png", sep = "")
  dev.print(png, mytitle, width = res, height = 0.6*res)
  plotSS(sim)
  setwd(paste(dir,subdir, sep = "")) #to have the figures in the right directory
  mytitle = paste("sizespectrum_", parameter, "_",i,".png", sep = "")
  dev.print(png, mytitle, width = res, height = 0.6*res)
  # RDI: density-independent recruitment through time
  rdi <- rd[,,1]
  RDI <- melt(rdi)
  print(ggplot(RDI) +
          geom_line(aes(x=Time,y=value ,colour = as.factor(Species))) +
          scale_x_continuous(name = "Time") +
          scale_y_log10(name = "Energy") +
          scale_colour_discrete(name = "Species") +
          ggtitle("Reproduction Density Independent"))
  setwd(paste(dir,subdir, sep = ""))
  mytitle = paste("rdi_", parameter, "_",i,".png", sep = "")
  dev.print(png, mytitle, width = res, height = 0.6*res)
  # RDD: density-dependent recruitment through time
  rdd <- rd[,,2]
  RDD <- melt(rdd)
  print(ggplot(RDD) +
          geom_line(aes(x=Time,y=value ,colour = as.factor(Species))) +
          scale_x_continuous(name = "Time") +
          scale_y_log10(name = "Energy") +
          scale_colour_discrete(name = "Species") +
          ggtitle("Reproduction Density Dependent"))
  setwd(paste(dir,subdir, sep = ""))
  mytitle = paste("rdd_", parameter, "_",i,".png", sep = "")
  dev.print(png, mytitle, width = res, height = 0.6*res)
  # ratio RDD/RDI: strength of the density dependence
  ratio <- rdd/rdi
  RAT <- melt(ratio)
  print(ggplot(RAT) +
          geom_line(aes(x=Time,y=value ,colour = as.factor(Species))) +
          scale_x_continuous(name = "Time") +
          scale_y_log10(name = "Ratio", breaks = c(1 %o% 10^(-5:-2)) ) +
          scale_colour_discrete(name = "Species") +
          ggtitle("RDD/RDI"))
  setwd(paste(dir,subdir, sep = ""))
  mytitle = paste("RddRdi_", parameter, "_",i,".png", sep = "")
  dev.print(png, mytitle, width = res, height = 0.6*res)
  # e
  # energy after metabolism, for the moment equal between every species
  e <- energy[,,,1]
  etot = apply(e, c(1,2), sum)  # sum over sizes -> time x species
  E <- melt(etot)
  ggplot(E) +
    geom_line(aes(x=Time,y=value, colour = as.factor(Species))) + # the as.factor convert to discrete as linetype doesnt work with continuous value
    scale_x_continuous(name = "Time") +
    scale_y_continuous(name = "Energy")+
    scale_colour_discrete(name = "Species") +
    ggtitle("Total energy available after metabolism")
  setwd(paste(dir,subdir, sep = ""))
  mytitle = paste("energy_", parameter, "_",i,".png", sep = "")
  dev.print(png, mytitle, width = res, height = 0.6*res)
  # energy by weight by sp at simulation end
  eSP = e[end,,]
  ESP <- melt(eSP)
  ggplot(ESP) +
    geom_line(aes(x=Size,y=value,colour = as.factor(Species))) +
    scale_x_log10(name = "Weight") +
    scale_y_continuous(name = "Energy")+
    scale_colour_discrete(name = "Species") +
    ggtitle("Energy available after metabolism by weight")
  setwd(paste(dir,subdir, sep = ""))
  mytitle = paste("energy_size_", parameter, "_",i,".png", sep = "")
  dev.print(png, mytitle, width = res, height = 0.6*res)
  #growth
  # energy for through time
  g <- energy[,,,3]
  gtot = apply(g, c(1,2), sum)
  G <- melt(gtot)
  ggplot(G) +
    geom_line(aes(x=Time,y=value, colour = as.factor(Species)))+
    scale_x_continuous(name = "Time") +
    scale_y_continuous(name = "Energy") +
    scale_colour_discrete(name = "Species") +
    ggtitle("Energy available for growth")
  setwd(paste(dir,subdir, sep = ""))
  mytitle = paste("growth_", parameter, "_",i,".png", sep = "")
  dev.print(png, mytitle, width = res, height = 0.6*res)
  # plot of energy by weight by sp at simulation end
  gSP = g[end,,]
  GSP <- melt(gSP)
  ggplot(GSP) +
    geom_line(aes(x=Size,y=value,colour = as.factor(Species)))+
    scale_x_log10(name = "Weight") +
    scale_y_continuous(name = "Energy") +
    scale_colour_discrete(name = "Species") +
    ggtitle("Energy available for growth by weight")
  setwd(paste(dir,subdir, sep = ""))
  mytitle = paste("growth_size_", parameter, "_",i,".png", sep = "")
  dev.print(png, mytitle, width = res, height = 0.6*res)
  # reproduction
  # energy through time
  s <- energy[,,,2]
  stot = apply(s, c(1,2), sum)
  S <- melt(stot)
  print(ggplot(S) +
          geom_line(aes(x=Time,y=value, colour = as.factor(Species)))+
          scale_x_continuous(name = "Time") +
          scale_y_continuous(name = "Energy") +
          scale_colour_discrete(name = "Species") +
          ggtitle("Energy available for reproduction"))
  setwd(paste(dir,subdir, sep = ""))
  mytitle = paste("reproduction_", parameter, "_",i,".png", sep = "")
  dev.print(png, mytitle, width = res, height = 0.6*res)
  # energy by weight by sp at simulation end
  sSP = s[end,,]
  stot = apply(s, c(1,2), sum)
  SSP <- melt(sSP)
  print(ggplot(SSP) +
          geom_line(aes(x=Size,y=value,colour = as.factor(Species))) +
          scale_x_log10(name = "Weight") +
          scale_y_continuous(name = "Energy") +
          scale_colour_discrete(name = "Species") +
          ggtitle("Energy available for reproduction by weight"))
  setwd(paste(dir,subdir, sep = ""))
  mytitle = paste("reproduction_size_", parameter, "_",i,".png", sep = "")
  dev.print(png, mytitle, width = res, height = 0.6*res)
  # energy by weight by sp at simulation end and weighted by n (abundance)
  sSP = s[end,,]
  sSPN = sSP * sim@n[dim(sim@n)[1],,]
  SSPN <- melt(sSPN)
  print(ggplot(SSPN) +
          geom_line(aes(x=Size,y=value,colour = as.factor(Species))) +
          scale_x_log10(name = "Weight") +
          scale_y_log10(name = "Eggs in g/m3") +
          scale_colour_discrete(name = "Species") +
          ggtitle("Real reproduction"))
  setwd(paste(dir,subdir, sep = ""))
  mytitle = paste("weighted_reproduction_", parameter, "_",i,".png", sep = "")
  dev.print(png, mytitle, width = res, height = 0.6*res)
  # plot of number of eggs by sp by size at end sim
  EGG = melt(eggs)
  print(ggplot(EGG) +
          geom_line(aes(x=Time,y=value,colour = as.factor(Species))) +
          scale_x_continuous(name = "TIme") +
          scale_y_log10(name = "Eggs in g/m3") +
          scale_colour_discrete(name = "Species") +
          ggtitle("Boudarie condition"))
  setwd(paste(dir,subdir, sep = ""))
  mytitle = paste("spawn_", parameter, "_",i,".png", sep = "")
  dev.print(png, mytitle, width = res, height = 0.6*res)
  # feeding
  # throught time
  feeding <- energy [,,,4]
  ftot = apply(feeding, c(1,2), sum)
  FEED <- melt(ftot)
  ggplot(FEED) +
    geom_line(aes(x=Time,y=value, colour = as.factor(Species)))+
    scale_x_continuous(name = "Time") +
    scale_y_continuous(name = "Energy") +
    scale_colour_discrete(name = "Species") +
    ggtitle("Energy issue from feeding")
  setwd(paste(dir,subdir, sep = ""))
  mytitle = paste("feeding_", parameter, "_",i,".png", sep = "")
  dev.print(png, mytitle, width = res, height = 0.6*res)
  # energy by weight by sp at simulation end
  fSP = feeding[end,,]
  FSP <- melt(fSP)
  ggplot(FSP) +
    geom_line(aes(x=Size,y=value,colour = as.factor(Species)))+
    scale_x_log10(name = "Weight") +
    scale_y_continuous(name = "Energy") +
    scale_colour_discrete(name = "Species") +
    ggtitle("Energy issue from feeding by weight")
  setwd(paste(dir,subdir, sep = ""))
  mytitle = paste("feeding_size_", parameter, "_",i,".png", sep = "")
  dev.print(png, mytitle, width = res, height = 0.6*res)
  # Phi: relative availability of fish vs plankton prey (species 1 only,
  # since all species share the same feeding profile here)
  a = phi_fish[end,1,]
  A = melt(a)
  b= phi_pltk[end,1,]
  B = melt(b)
  feeding = energy[end,1,,4] # feeding level of one sp as they have the same profile
  Fe = melt(feeding)
  S = melt(sim@params@search_vol[1,]) # search volume
  # plot of phi and others
  ggplot()+
    geom_line(data = A,aes(x = as.numeric(rownames(A)), y = value, color = "Phi fish")) +
    geom_line(data = B, aes(x = as.numeric(rownames(B)), y = value, color = "Phi plankton")) +
    # geom_line(data = Fe, aes(x = as.numeric(rownames(Fe)), y = value, color = "Feeding level")) +
    #geom_line(data = S, aes(x = as.numeric(rownames(S)), y = value, color = "Search Volume")) +
    scale_x_log10(name = "Predator size",breaks = c(1 %o% 10^(-10:5)))+
    scale_y_continuous(name = "value of phy prey")+
    ggtitle("Relative proportion of food eaten between plankton and fish")
  setwd(paste(dir,subdir, sep = ""))
  mytitle = paste("phi_", parameter, "_",i,".png", sep = "")
  dev.print(png, mytitle, width = res, height = 0.6*res)
  # Mortality
  # predation mortality
  a = m2[end,,]
  A = melt(a)
  ggplot(A) +
    geom_line(aes(x = PreySize, y = value, color = as.factor(PreySp)))+
    scale_x_log10()+
    scale_y_continuous( limits = c(0,30))
  setwd(paste(dir,subdir, sep = ""))
  mytitle = paste("PredMort_", parameter, "_",i,".png", sep = "")
  dev.print(png, mytitle, width = res, height = 0.6*res)
  # total mortality
  a = z[end,,]
  A = melt(a)
  ggplot(A) +
    geom_line(aes(x = PreySize, y = value, color = as.factor(PreySp)))+
    scale_x_log10()+
    scale_y_continuous( limits = c(0,30))
  setwd(paste(dir,subdir, sep = ""))
  mytitle = paste("TotMort_", parameter, "_",i,".png", sep = "")
  dev.print(png, mytitle, width = res, height = 0.6*res)
  #mortality on plankton
  a = m2_background[end,]
  A = melt(a)
  A = cbind(A,rownames(A))
  colnames(A) = c("value", "size")
  ggplot(A) +
    geom_line(aes(x = as.numeric(size), y = value, group = 1))+
    scale_y_log10() +
    scale_x_log10()
  setwd(paste(dir,subdir, sep = ""))
  mytitle = paste("PlktMort_", parameter, "_",i,".png", sep = "")
  dev.print(png, mytitle, width = res, height = 0.6*res)
  #weighted plots -------------------
  # Same diagnostics but at several time snapshots and weighted by abundance.
  for (j in seq(t_max,t_max*10,t_max))
  {
    time = j
    if (time == 1000) time = 992 # I know my sim is weird (last step is 992)
    # reproduction by weight by sp at simulation end and weighted by n
    s <- energy[,,,2]
    sSP = s[time,,]
    sSPN = sSP * sim@n[time,,]
    SSPN <- melt(sSPN)
    name = paste("Real reproduction at time ",time, sep ="")
    print(ggplot(SSPN) +
            geom_line(aes(x=Size,y=value,colour = as.factor(Species))) +
            scale_x_log10(name = "Size") +
            scale_y_log10(name = "Eggs in g/m3") +
            scale_colour_discrete(name = "Species") +
            ggtitle(name))
    setwd(paste(dir,subdir,"/reproduction", sep = ""))
    mytitle = paste("weighted_reproduction_", parameter, "_",i,".png", sep = "")
    dev.print(png, mytitle, width = res, height = 0.6*res)
    # growth by weight by sp at simulation end and weighted by n
    g <- energy[,,,3]
    gSP = g[time,,]
    gSPN = gSP * sim@n[time,,]
    GSPN <- melt(gSPN)
    name = paste("Real growth at time ",time, sep ="")
    ggplot(GSPN) +
      geom_line(aes(x=Size,y=value,colour = as.factor(Species)))+
      scale_x_log10(name = "Size") +
      scale_y_log10(name = "Energy") +
      scale_colour_discrete(name = "Species") +
      ggtitle(name)
    setwd(paste(dir,subdir,"/growth", sep = ""))
    mytitle = paste("growth_size_", parameter, "_",i,".png", sep = "")
    dev.print(png, mytitle, width = res, height = 0.6*res)
    # energy by weight by sp at simulation end
    feeding <- energy[,,,4]
    fSP = feeding[time,,]
    fSPN = fSP * sim@n[time,,]
    FSPN <- melt(fSPN)
    name = paste("Energy issue from feeding weighted by abundance of species at time ",time, sep ="")
    ggplot(FSPN) +
      geom_line(aes(x=Size,y=value,colour = as.factor(Species)))+
      scale_x_log10(name = "Size") +
      scale_y_log10(name = "Feeding level") +
      scale_colour_discrete(name = "Species") +
      ggtitle(name)
    setwd(paste(dir,subdir,"/feeding", sep = ""))
    mytitle = paste("feeding_size_", parameter, "_",i,".png", sep = "")
    dev.print(png, mytitle, width = res, height = 0.6*res)
    # # predation rate (to set up)
    # pred <- food[,,,4]
    # fSP = feeding[time,,]
    # fSPN = fSP * sim@n[time,,]
    # FSPN <- melt(fSPN)
    # name = paste("Energy issue from feeding weighted by abundance of species at time ",time, sep ="")
    # ggplot(FSPN) +
    #   geom_line(aes(x=Size,y=value,colour = as.factor(Species)))+
    #   scale_x_log10(name = "Size") +
    #   scale_y_log10(name = "Feeding level") +
    #   scale_colour_discrete(name = "Species") +
    #   ggtitle(name)
    #
    # setwd(paste(dir,subdir, sep = ""))
    # mytitle = paste("feeding_size_", parameter, "_",i,".png", sep = "")
    # dev.print(png, mytitle, width = res, height = 0.6*res)
    # total mortality
    mortality = z[time,,]
    mN = mortality * sim@n[time,,]
    MN = melt(mN)
    name = paste("Total mortality at time ",time, sep ="")
    ggplot(MN) +
      geom_line(aes(x = PreySize, y = value, color = as.factor(PreySp)))+
      scale_x_log10()+
      scale_y_log10()+
      scale_colour_discrete(name = "Species") +
      ggtitle(name)
    setwd(paste(dir,subdir,"/mortality", sep = ""))
    mytitle = paste("TotMort_", parameter, "_",i,".png", sep = "")
    dev.print(png, mytitle, width = res, height = 0.6*res)
    # egg number
    EGG = melt(eggs)
    print(ggplot(EGG) +
            geom_line(aes(x=Time,y=value,colour = as.factor(Species))) +
            scale_x_continuous(name = "TIme") +
            scale_y_log10(name = "Eggs in g/m3") +
            scale_colour_discrete(name = "Species") +
            ggtitle("Boudarie condition"))
    setwd(paste(dir,subdir,"/spawn", sep = ""))
    mytitle = paste("spawn_", parameter, "_",i,".png", sep = "")
    dev.print(png, mytitle, width = res, height = 0.6*res)
    # RDD
    rdd <- rd[,,2]
    RDD <- melt(rdd)
    print(ggplot(RDD) +
            geom_line(aes(x=Time,y=value ,colour = as.factor(Species))) +
            scale_x_continuous(name = "Time") +
            scale_y_log10(name = "Energy") +
            scale_colour_discrete(name = "Species") +
            ggtitle("Reproduction Density Dependent"))
    # NOTE(review): missing "/" before "RDD" — this resolves to ".../weightedRDD",
    # not the "/RDD" directory created above (compare "/RDI" below)
    setwd(paste(dir,subdir,"RDD", sep = ""))
    mytitle = paste("rdd_", parameter, "_",i,".png", sep = "")
    dev.print(png, mytitle, width = res, height = 0.6*res)
    # RDI
    rdi <- rd[,,1]
    RDI <- melt(rdi)
    # NOTE(review): min_value is not defined anywhere in this file — this line
    # will error unless it was set in the workspace beforehand
    RDI <- RDI[RDI$value >= min_value,]
    print(ggplot(RDI) +
            geom_line(aes(x=Time,y=value ,colour = as.factor(Species))) +
            scale_x_continuous(name = "Time") +
            scale_y_log10(name = "Energy") +
            scale_colour_discrete(name = "Species") +
            ggtitle("Reproduction Density Independent"))
    # NOTE(review): the "/RDI" directory is never created by the dir.create
    # block above, so this setwd will fail on a fresh run
    setwd(paste(dir,subdir,"/RDI", sep = ""))
    mytitle = paste("rdi_",time,"_", parameter, "_",i,".png", sep = "")
    dev.print(png, mytitle, width = res, height = 0.6*res)
  }
}
# predation traits analyses / detail of the predation equation here (not updated though)--------------
# NOTE(review): this section is pasted function INTERNALS kept as reference —
# the bare return() calls make it non-runnable at top level. Do not source it.
# phi prey
# n_eff_prey is the total prey abundance by size exposed to each predator
# (prey not broken into species - here we are just working out how much a predator eats - not which species are being eaten - that is in the mortality calculation
n_eff_prey <- sweep(object@interaction %*% n, 2, object@w * object@dw, "*")
# Quick reference to just the fish part of the size spectrum
idx_sp <- (length(object@w_full) - length(object@w) + 1):length(object@w_full)
# predKernal is predator x predator size x prey size
# So multiply 3rd dimension of predKernal by the prey abundance
# Then sum over 3rd dimension to get total eaten by each predator by predator size
phi_prey_species <- rowSums(sweep(object@pred_kernel[,,idx_sp,drop=FALSE],c(1,3),n_eff_prey,"*"),dims=2)
# Eating the background
phi_prey_background <- rowSums(sweep(object@pred_kernel,3,object@dw_full*object@w_full*n_pp,"*"),dims=2)
return(phi_prey_species+phi_prey_background)
#feeding level: Holling type II on the encounter rate
encount <- object@search_vol * phi_prey
# calculate feeding level
f <- encount/(encount + object@intake_max)
return(f)
#pred rate: consumption not satisfied by the feeding level, applied per prey size
n_total_in_size_bins <- sweep(n, 2, object@dw, '*')
pred_rate <- sweep(object@pred_kernel,c(1,2),(1-feeding_level)*object@search_vol*n_total_in_size_bins,"*")
return(pred_rate)
#pred kernel: lognormal selection around beta, truncated so prey < predator
res@pred_kernel[] <- object$beta
res@pred_kernel <- exp(-0.5*sweep(log(sweep(sweep(res@pred_kernel,3,res@w_full,"*")^-1,2,res@w,"*")),1,object$sigma,"/")^2)
res@pred_kernel <- sweep(res@pred_kernel,c(2,3),combn(res@w_full,1,function(x,w)x<w,w=res@w),"*") # find out the untrues and then multiply
# trait study --------------
# draw plots that show the growth rate with different trait varying
# need some n values to get the rest
sim <- myModel(no_sp = 9, t_max = 50, mu = 5, OptMutant = "yo", RMAX = TRUE, hartvig = TRUE)
endList <- length(sim) # shortcut to have ref to the last simulation which has the right dim, names, ...
PSim <- sim[[endList]] # if I want to look at params and such I'm taking the last sim
PSim@params@species_params
plotDynamics(PSim)
end = dim(PSim@n)[1]  # last time step of the final simulation
# and some parameters (Hartvig-style defaults)
eta = 0.25
z0pre = 0.84
n = 0.75 # exponent of maximum intake (scaling of intake)
q = 0.8 # exponent of search volume
kappa = 0.005 # ressource spectrum carrying capacity
lambda = 2+q-n # exponent of the background spectrum.
h = 85 # factor of maximum intake
f0 = 0.6 # average feeding level of the community/feeding level of small individuals feeding on background
# Asymptotic size
min_w_inf = 10
# NOTE(review): 10e5 is 1e6 — inconsistent with max_w_inf = 1e5 used earlier; confirm intended bound
max_w_inf = 10e5
w_inf <- 10^seq(from=log10(min_w_inf), to = log10(max_w_inf), length=1000) # asymptotic mass of the species
w_mat <- w_inf * eta
z0 <- z0pre * w_inf^(n-1)  # background mortality scaling with asymptotic size
size = data.frame(w_inf,w_mat,z0)
ggplot(size) +
  geom_line(aes(x = w_inf, y = w_mat, color = "Maturation size")) +
  geom_line(aes(x = w_inf, y = z0, color = "Background mortality")) +
  scale_x_log10(name = "Asymptotic size") +
  scale_y_log10(name = "Size") +
  ggtitle("Effect of varition of asymptotic size")
# w_mat is only used in psi (allocation reproduction)
# w_inf is used for h and I dont know what that is
# PPMR: gamma (search-volume factor) as a function of beta
beta = 100 # preferred predator-prey weight ratio
sigma = 1.3 # width of selection function
beta = seq (10,200,10)
alpha_e <- sqrt(2*pi) * sigma * beta^(lambda-2) * exp((lambda-2)^2 * sigma^2 / 2)
gamma <- h * f0 / (alpha_e * kappa * (1-f0))
PPMR <- data.frame(beta,gamma)
ggplot(PPMR)+
  geom_line(aes(x = beta, y = gamma))+
  ggtitle("Gamma function of beta")
# impact of beta variation
# Run the model once per beta value and store (phi prey, feeding level) of
# species 2 at the last time step, keyed as "beta<value>" in `results`.
beta_min = 10
beta_max = 200
dBeta = 10
results = list()
for (i in seq (beta_min,beta_max,dBeta))
{
  sim <- myModel(no_sp = 9, t_max = 50, OptMutant = "yo", RMAX = TRUE, min_w_inf = 10, max_w_inf = 10000, beta = i, extinct = FALSE, hartvig = TRUE)
  sim <- sim[[endList]]
  # phi prey then feeding level at the final state (getters sourced from summaryFunction.r / methods.r)
  a = getPhiPrey(object = sim@params, n=sim@n[end,,], n_pp = sim@n_pp[end,])
  b = getFeedingLevel(object = sim@params, n=sim@n[end,,], n_pp = sim@n_pp[end,], phi_prey = a)
  betaPred = cbind(a[2,],b[2,])
  name <- paste('beta',i,sep='')
  results[[name]] = betaPred
}
#plots
# beta by size: one PNG per beta value, phi prey and feeding level vs body size
res = 600
for (i in seq (beta_min,beta_max,dBeta))
{
  name <- paste('beta',i,sep='')
  pred = as.data.frame(results[[name]])  # V1 = phi prey, V2 = feeding level
  print(
    ggplot(pred) +
      geom_line(aes(x = as.numeric(rownames(pred)), y=V2, colour = "Feeding level"), group = 1)+
      geom_line(aes(x = as.numeric(rownames(pred)), y=V1,colour = "Phi prey"), group = 1)+
      scale_x_log10(name = "Size")+
      scale_y_continuous(name = "Function output",limits = c(0,0.7))+
      ggtitle(name)
  )
  setwd(paste(dir,"/Traits/Beta", sep = ""))
  mytitle = paste(name,".png", sep = "")
  dev.print(png, mytitle, width = res, height = res)
}
# global impact of beta on feeding and phi when summing weights
# One row per beta value: Phi and Feed summed over all sizes.
bigBeta = matrix(data = NA, nrow = length(seq (beta_min,beta_max,dBeta)), ncol = 2, dimnames = list(c(seq (beta_min,beta_max,dBeta)), c("Phi","Feed")))
for (i in seq (beta_min,beta_max,dBeta))
{
  name <- paste('beta',i,sep='')
  # i/dBeta maps beta 10..200 (step 10) onto rows 1..20 — exact here because
  # the steps are whole numbers (unlike the sigma version below)
  bigBeta[i/dBeta,] = colSums(results[[name]])
}
bigBeta = as.data.frame(bigBeta)
ggplot(bigBeta) +
  geom_line(aes(x = as.numeric(rownames(bigBeta)), y=Feed, colour = "Feeding level"), group = 1)+
  geom_line(aes(x = as.numeric(rownames(bigBeta)), y=Phi,colour = "Phi prey"), group = 1)+
  scale_x_continuous(name = "Beta value")+
  scale_y_continuous(name = "Function output")+
  ggtitle("Impact of beta")
setwd(paste(dir,"/Traits/Beta", sep = ""))
mytitle = paste("betaVar",".png", sep = "")
dev.print(png, mytitle, width = res, height = res)
# sigma
# Same sweep as for beta, but varying the feeding-kernel width sigma; results
# keyed as "sigma<value>" in `results`.
sigma_min = 0.1
sigma_max = 2
dSigma = 0.1
results = list()
for (i in seq (sigma_min,sigma_max,dSigma))
{
  sim <- myModel(no_sp = 9, t_max = 50, OptMutant = "yo", RMAX = TRUE, min_w_inf = 10, max_w_inf = 10000, sigma = i, extinct = FALSE, hartvig = TRUE)
  sim <- sim[[endList]] # if I want to look at params and such I'm taking the last sim
  a = getPhiPrey(object = sim@params, n=sim@n[end,,], n_pp = sim@n_pp[end,])
  b = getFeedingLevel(object = sim@params, n=sim@n[end,,], n_pp = sim@n_pp[end,], phi_prey = a)
  sigmaPred = cbind(a[2,],b[2,])
  name <- paste('sigma',i,sep='')
  results[[name]] = sigmaPred
}
#plots
# sigma by size: one PNG per sigma value, phi prey and feeding level vs body size
res = 600
for (i in seq (sigma_min,sigma_max,dSigma))
{
  name <- paste('sigma',i,sep='')
  pred = as.data.frame(results[[name]])  # V1 = phi prey, V2 = feeding level
  print(
    ggplot(pred) +
      geom_line(aes(x = as.numeric(rownames(pred)), y=V2, colour = "Feeding level"), group = 1)+
      geom_line(aes(x = as.numeric(rownames(pred)), y=V1,colour = "Phi prey"), group = 1)+
      scale_x_log10(name = "Size")+
      scale_y_continuous(name = "Function output",limits = c(0,0.8))+
      ggtitle(name)
  )
  setwd(paste(dir,"/Traits/Sigma", sep = "")) #to have the figures in the right directory
  mytitle = paste(name,".png", sep = "")
  dev.print(png, mytitle, width = res, height = res)
}
# energy by sigma
# One row per sigma value: Phi and Feed summed over all sizes, then plotted
# against sigma and saved to <dir>/Traits/Sigma/sigmaVar.png.
# FIX: the original computed the row index as idx = i/dSigma with i taken from
# seq(0.1, 2, 0.1). Those values carry floating-point error (e.g. 0.3/0.1 ==
# 2.9999999999999996), and R TRUNCATES non-integer indices toward zero, so some
# rows were silently overwritten while others stayed NA. Iterating over integer
# positions of the sigma sequence removes the division entirely.
sigmaSeq <- seq(sigma_min, sigma_max, dSigma)
bigSigma <- matrix(data = NA, nrow = length(sigmaSeq), ncol = 2,
                   dimnames = list(c(sigmaSeq), c("Phi", "Feed")))
for (idx in seq_along(sigmaSeq))
{
  # key must match the names used when `results` was filled in the sigma loop above
  name <- paste('sigma', sigmaSeq[idx], sep = '')
  bigSigma[idx, ] <- colSums(results[[name]])
}
bigSigma <- as.data.frame(bigSigma)
ggplot(bigSigma) +
  geom_line(aes(x = as.numeric(rownames(bigSigma)), y = Feed, colour = "Feeding level"), group = 1) +
  geom_line(aes(x = as.numeric(rownames(bigSigma)), y = Phi, colour = "Phi prey"), group = 1) +
  scale_x_continuous(name = "Sigma value") +
  scale_y_continuous(name = "Function output") +
  ggtitle("Impact of sigma")
setwd(paste(dir, "/Traits/Sigma", sep = "")) #to have the figures in the right directory
mytitle = paste("sigmaVar", ".png", sep = "")
dev.print(png, mytitle, width = res, height = res)
# Other parameters variation I dont remember what that is------------------------------
#psi: maturation/allocation-to-reproduction ogive stored in the params object
psi = PSim@params@psi
#psi = as.data.frame(psi)
PSI = melt(psi)
ggplot(data = PSI, aes(x = w, y = value), group = sp) +
  geom_point() +
  scale_x_continuous(breaks = c(1 %o% 10^(-3:5)))
# Reference: how psi is built inside the constructor (sigmoid around w_mat
# times the (w/w_inf)^(1-n) allocation term). Needs `res`/`object` in scope.
res@psi[] <- unlist(tapply(res@w,1:length(res@w),function(wx,w_inf,w_mat,n)
{
  ((1 + (wx/(w_mat))^-10)^-1) * (wx/w_inf)^(1-n)
}
,w_inf=object$w_inf,w_mat=object$w_mat,n=n))
# metabolsim maintenance: ks * w^p
# NOTE(review): `es@std_metab` looks like a truncated paste of `res@std_metab` — confirm
es@std_metab[] <- unlist(tapply(res@w,1:length(res@w),function(wx,ks,p)
  ks * wx^p
  , ks=object$ks,p=p))
ks = 4
p = 0.75
size = as.numeric(dimnames(PSim@n)$w)
metabolism = ks*size^p
ratio = metabolism/size  # metabolic cost per unit body mass
truc = cbind(size,metabolism,ratio)
truc = as.data.frame(truc)
ggplot(truc)+
  geom_line(aes(x=size,y=metabolism)) +
  geom_line(aes(x=size, y=ratio))+
  scale_x_log10() +
  scale_y_log10()
# plot biomass sum by family, to see if the relative biomass difference between the species change when I introduce new ecotypes
# I could add stars when a new ecotype appear on the graph to do that need to do a geom_point (data = , aes ...)
truc = getBiomass(sim)
dimnames(truc)$sp <- sim@params@species_params$species  # ecotypes share their parent species name
truc <- as.data.frame(truc)
Struc <-  sapply(unique(names(truc)[duplicated(names(truc))]),
                 function(x) Reduce("+", truc[ , grep(x, names(truc))]) ) # magic thing that sum col with same names
names(dimnames(Struc)) <- list("Time","Species")
TRUC = melt(Struc)
ggplot(TRUC)+
  geom_line(aes(x = Time, y = value, colour = as.factor(Species)))
# egg interference
# Notes on the intended egg-interference term:
# I = exp(-(log(mi/mj)^2)/2*sigma^2)
# f= I *sum(vol search rate * n * dw ) of w
n_total_in_size_bins <- sweep(n, 2, object@dw, '*')
object@search_vol*n_total_in_size_bins
# plot function of egg reduction: how r_max caps density-independent recruitment
no_sp = 10
sim <- myModel(no_sp = no_sp, t_max = 20, mu = 0, OptMutant = "yo", RMAX = TRUE,  cannibalism = 1, r_mult = 1e0, erepro = 0.001, p =0.75, ks=4, extinct = FALSE, k0 = 25)
endList <- length(sim) # shortcut to have ref to the last simulation which has the right dim, names, ...
PSim <- sim[[endList]] # if I want to look at params and such I'm taking the last sim
r_max = PSim@params@species_params$r_max
# plotFeedingLevel(PSim)
# plotM2(PSim)
# plotDynamics(PSim)
# plotSS(PSim)
rdi = seq(0,1,0.001) #fake rdi to get values
# mizer egg production: Beverton-Holt form rdd = r_max * rdi / (r_max + rdi)
a = matrix(nrow = length(rdi), ncol = no_sp, dimnames = list(as.character(rdi),as.character(c(1:no_sp))))
names(dimnames(a)) = list("RDI","Species")
for(i in 1:dim(a)[1])
{
  for (j in 1:dim(a)[2])
  {
    a[i,j] = r_max[j] * rdi[i] / (r_max[j]+rdi[i])
  }
}
# a is the matrix showing the recruitment (RDI processed by rmax) in function of the rdi
MEgg = melt(a)
ggplot(MEgg) +
  geom_line(aes(x = RDI, y = value, colour = as.factor(Species))) +
  scale_y_continuous(name = "Recruitement", limits = c(0,0.08)) +
  geom_abline(intercept = 0, slope = 1)
b = sweep(a,2,r_max,"/") # a divided by rmax for the graph
MEggR = melt(b)
ggplot(MEggR) +
  geom_line(aes(x = RDI, y = value, colour = as.factor(Species))) +
  scale_y_continuous(name = "Recruitement/Rmax", limits = c(0,2.5)) +
  scale_x_continuous(name = "RDI") +
  geom_abline(intercept = 0, slope = 1)
# what changes when I poke parameters ? nothing
#test starvation mortality
#test starvation mortality
# feeding plots----------------
# How does gamma (the coefficient of the search volume) respond to the
# feeding traits?  Constants below follow the usual trait-based setup.
# gamma study
n = 0.75 # exponent of maximum intake (scaling of intake)
p = 0.75 # exponent of standard metabolism
q = 0.8 # exponent of search volume
lambda = 2+q-n # exponent of the background spectrum.
h = 85 # factor of maximum intake
beta = 100 # preferred predator-prey weight ratio
sigma = 1.3 # width of selection function
f0 = 0.6 # average feeding level of the community/feeding level of small individuals feeding on background
kappa = 0.008 # ressource spectrum carrying capacity
#plots
# Sweep beta over 50..150 with sigma held fixed and plot the resulting gamma.
beta = seq(50,150,1)
sigma = 1.3
gamma <- h * f0 / ((sqrt(2*pi) * sigma * beta^(lambda-2) * exp((lambda-2)^2 * sigma^2 / 2)) * kappa * (1-f0))
# Assemble the sweep directly into a data frame for ggplot.
betaDF = data.frame(beta = beta, gamma = gamma)
ggplot(betaDF) +
  geom_line(aes(x = beta, y = gamma)) +
  scale_x_continuous(name = "beta (PPMR)") +
  scale_y_continuous(name = "gamma (factor for search volume)")
# Save the current device to disk.
setwd(paste(dir, subdir, sep = ""))
mytitle = "beta_gamma.png"
dev.print(png, mytitle, width = res, height = 0.6*res)
#sigma
# Sweep sigma with beta held fixed and plot gamma on a log scale.
sigma = seq(0.1,2.5,0.025)
beta = 100
gamma <- h * f0 / ((sqrt(2*pi) * sigma * beta^(lambda-2) * exp((lambda-2)^2 * sigma^2 / 2)) * kappa * (1-f0))
# Columns relabeled: the original named the first column "beta" although it
# holds sigma values; aes(x = sigma) only worked by falling back to the
# global `sigma` vector instead of the data frame column.
sigmaM = matrix(data = cbind(sigma,gamma), nrow = length(sigma), ncol = 2, dimnames = list(NULL,c("sigma","gamma")))
sigmaDF = as.data.frame(sigmaM)
# The original added scale_y_log10() and then scale_y_continuous(), and the
# second silently replaced the first, discarding the intended log scale.
# Both are merged into a single log10 y scale here.
ggplot(sigmaDF)+
  geom_line(aes(x = sigma, y = gamma))+
  scale_x_continuous(name = "sigma (diet breadth)")+
  scale_y_log10(name = "gamma (factor for search volume)", breaks = c(1000,5000,10000,50000))
setwd(paste(dir,subdir, sep = ""))
mytitle = "sigma_gamma.png"
dev.print(png, mytitle, width = res, height = 0.6*res)
# building a matrix to plot a surface of gamma over the (sigma, beta) grid
beta = seq(10,200,1)
sigma = seq(0.5,2.5,0.025)
# outer() evaluates the gamma formula on the full sigma x beta grid in one
# vectorized call, replacing the original nested for loops (same values).
mat = outer(sigma, beta,
            function(s, b) h * f0 / ((sqrt(2*pi) * s * b^(lambda-2) * exp((lambda-2)^2 * s^2 / 2)) * kappa * (1-f0)))
dimnames(mat) = list(as.character(sigma),as.character(beta))
data = melt(mat)
colnames(data) = c("sigma","beta", "gamma")
data$logG = log10(data$gamma) # check log values
# ggplot(data)+
#   geom_raster(aes(x = sigma, y = beta, fill = logG))+
#   scale_fill_gradient(low = "white",high = "black")
ggplot(data)+
  geom_raster(aes(x = sigma, y = beta, fill = gamma))+
  scale_fill_gradient(low = "white",high = "black")+
  scale_x_continuous(name = "sigma (diet breadth)")+
  scale_y_continuous(name = "beta (PPMR)")
setwd(paste(dir,subdir, sep = ""))
mytitle = "sigma_beta.png"
dev.print(png, mytitle, width = res, height = 0.6*res)
# The raster on a raw gamma scale is not very informative; the commented
# log-scale version above may be clearer.
# relationship between traits ------------------
# beta/sigma ratio
# For each surviving species, reconstruct which ecotypes are alive at each
# time step, compute the abundance-unweighted mean PPMR (beta) and diet
# breadth (sigma) through time, and plot the mean trait trajectory.
# Requires SpIdx, SumPar, TT, dir, subdir and res to exist in the workspace.
for (i in SpIdx)
{
# empty matrix of ecotype of species i by time
A = matrix(0, ncol = SumPar$timeMax[1], nrow = dim(TT[TT$Lineage == i,])[1], dimnames = list(as.numeric(TT$Ecotype[TT$Lineage == i]), c(1:SumPar$timeMax[1])))
# fill the matrix with ones when the ecotype exists
# NOTE(review): x indexes the rows of A (i.e. of the subset
# TT[TT$Lineage == i,]) but TT$Apparition[x] / TT$Extinction[x] index the
# FULL TT table.  These only line up for the species whose rows come first
# in TT -- confirm whether this should read TTi$Apparition[x] etc.
for (x in 1:nrow(A)) # I'm sure I can do an apply but don't know how
{
for (j in 1:ncol(A))
{
if (TT$Apparition[x] <= j & TT$Extinction[x] >= j) A[x,j] = 1
}}
# a is a matrix of 0 and 1 showing if the ecotype is present or not at time t
# change the ones by the trait value of the ecotype
BetaA = A * TT[TT$Lineage == i,]$PPMR
SigmaA = A * TT[TT$Lineage == i,]$Diet_breadth
# calculate mean trait value at each time step
no_trait = apply(A,2,sum) # this vector is the number of traits present at each time step
BetaSum = apply(BetaA,2,sum) # this vector is the sum of the traits value at each time step
SigmaSum = apply(SigmaA,2,sum) # this vector is the sum of the traits value at each time step
BetaMean = BetaSum/no_trait # this is the mean trait value at each time step
SigmaMean = SigmaSum/no_trait # this is the mean trait value at each time step
# Matrix with all traits combination and at what time they go extinct
TTi = TT[TT$Lineage == i,]
comb=data.frame(TTi$Ecotype,TTi$Apparition,TTi$Extinction,TTi$PPMR,TTi$Diet_breadth)
# plot of extinction of combinations
# title = paste("Combination of PPMR and diet breath value of species ",i, sep = "")
# print(
#   ggplot(comb) +
#     geom_point(aes(x=TTi.PPMR,y=TTi.Diet_breadth, color = TTi.Extinction)) +
#     scale_x_continuous(name = "PPMR") +
#     scale_y_continuous(name = "Diet breath") +
#     scale_color_continuous(name = "Extinction time in year / dt") +
#     ggtitle(title)
# )
# name = paste("Extinction BetaSigma of species",i, sep = "")
# setwd(paste(dir,subdir, sep = ""))
# mytitle = paste(name,".png", sep = "")
# dev.print(png, mytitle, width = res, height = 2/3* res)
#
# #plot of apparition of combinations
# print(
#   ggplot(comb) +
#     geom_point(aes(x=TTi.PPMR,y=TTi.Diet_breadth, color = TTi.Apparition)) +
#     scale_x_continuous(name = "PPMR") +
#     scale_y_continuous(name = "Diet breath") +
#     scale_color_continuous(name = "Apparition time in year / dt") +
#     ggtitle(title)
# )
# name = paste("Apparition BetaSigma of species",i, sep = "")
# setwd(paste(dir,subdir, sep = ""))
# mytitle = paste(name,".png", sep = "")
# dev.print(png, mytitle, width = res, height = 2/3* res)
# Mean combination values throughout sim
stat = data.frame(BetaMean,SigmaMean)
#get rid of duplicates
stat = stat[!duplicated(stat),]
# Recover the time step from the row names (lost by the duplicated filter)
# so the colour legend maps to time correctly.
stat = cbind(stat,rownames(stat))
dimnames(stat)[[2]] = list("BetaMean", "SigmaMean", "Time")
stat$Time <- as.numeric(substr(stat$Time,1,5)) # read the time and delete all conditions on it (like factor) # the 5 means handling number up to 10^5
# print() is required for ggplot output inside a for loop.
print(
ggplot(stat) +
geom_point(data = stat, aes(x=BetaMean,y=SigmaMean, color = Time)) +
scale_x_continuous(name = "PPMR") +
scale_y_continuous(name = "Diet breath") +
scale_color_continuous(name = "Time in year / dt", low = "blue", high = "red")
)
name = paste("MeanBS_SP",i, sep = "")
setwd(paste(dir,subdir, sep = ""))
mytitle = paste(name,".png", sep = "")
dev.print(png, mytitle, width = res, height = 2/3* res)
}
#rmax------------
# some rmax plots
#Plot rmax per species and rdd per ecotypes
#rdd is when rmax is applied
# NOTE(review): `rd` is assumed to be a 3-d array (time x ecotype x size)
# with slice 1 = rdi (density-independent) and slice 2 = rdd (after rmax);
# it is created outside this section -- confirm its origin.
rdd = rd[,,2] #rdd per ecotype per size
rdi = rd[,,1]
rmax = sim@params@species_params$r_max # rmax per species
RDD = apply(rdd,c(1,2),sum)# sum through sizes
RDI = apply(rdi,c(1,2),sum)
# I need to decide for a specific time and which species I want to look at
# And I need to run a small simulation to get some ecotypes and then run it with the data function
dimnames(RDD)$Species = sim@params@species_params$species # ecotypes have the same name
dimnames(RDI)$Species = sim@params@species_params$species
# choose a species
i = 5
ecoName = sim@params@species_params[sim@params@species_params$species == i,]$ecotype # name of the ecotypes in the species
# NOTE(review): if the species has a single ecotype, RDD[, which(...)]
# drops to a vector and the colnames<- assignments below will fail; a
# drop = FALSE would make this robust.
RDDSp = RDD[,which(colnames(RDD)==i)]
colnames(RDDSp) = ecoName #values are isolated from other species and have the right names
RDISp = RDI[,which(colnames(RDI)==i)]
colnames(RDISp) = ecoName
egg = apply(RDISp,2, function (x) x/rmax[i]) # x axis (rdi/rmax)
reproduction = apply(RDDSp,2, function (x) x/rmax[i]) # y axis ( rdd/rmax)
EGG = melt(egg)
REPRO = melt(reproduction)
graphdata = EGG
graphdata$repro = REPRO$value # make only one dataframe
#add the total spawn from the species
RDItot = apply(RDISp,1,sum)
RDDtot = apply(RDDSp,1,sum)
# rmax with real data
# One line per ecotype; the horizontal line marks rdd = rmax.
ggplot(graphdata)+
geom_line(aes(x = value, y = repro, color = as.factor(Species)))+
geom_hline(yintercept = 1)# rmax
#example with fake values
SumPar = sim@params@species_params
truc = sim@n
# Put 0 in the abundance array wherever w < w_mat, keeping only mature
# individuals.  The maturation bin of an ecotype does not depend on time,
# so it is computed once per ecotype instead of once per (time, ecotype)
# pair as in the original nested loop.
w_grid <- sim@params@w
for (j in seq_len(dim(truc)[2])) # for each ecotype
{
  # index of the w bin closest to the ecotype's maturation size
  NoW_mat <- which.min(abs(SumPar$w_mat[j] - w_grid))
  # seq_len() makes the "no bins below maturation" case (NoW_mat == 1)
  # explicit: seq_len(0) selects nothing.  The original's 1:NoW_mat-1
  # relied on operator precedence ((1:NoW_mat)-1) plus a silently ignored
  # 0 index to get the same effect.
  truc[, j, seq_len(NoW_mat - 1)] <- 0
}
abundanceM = apply(truc, c(1,2),sum) # sum the abundance left
#2 normalisation per species
colnames(abundanceM) = sim@params@species_params$species
abundanceNormal = matrix(0,nrow = dim(abundanceM)[1], ncol = dim(abundanceM)[2])
# I am getting rid of the species which went extinct at the beginning
SpIdx = NULL
# NOTE(review): i is a species number used as a NUMERIC index, so
# abundanceM[,i] selects column POSITION i, not all columns named i.  With
# several ecotypes per species the position and the species name diverge --
# confirm whether which(colnames(abundanceM) == i) was intended here too.
for (i in unique(sim@params@species_params$species))
if (sum(abundanceM[,i]) != 0 & dim(SumPar[SumPar$species == i,])[1] != 1)
SpIdx = c(SpIdx,i)
# I also need to get rid of the species that went extinct without having mutants (no trait variation)
# Normalise within each species: at every time step the ecotype abundances
# of one species sum to 1; summing the per-species results reassembles the
# full matrix because each species occupies disjoint columns.
for (i in SpIdx)
{
abundanceSp = abundanceM # save to manip
abundanceSp[,which(colnames(abundanceM) != i)] = 0 # make everything but the targeted species to go 0 to have correct normalisation
abundanceSp = sweep(abundanceSp,1,apply(abundanceSp,1,sum),"/") # normalise
abundanceSp[is.nan(abundanceSp)] <-0
abundanceNormal = abundanceNormal + abundanceSp # I just need to add them up to get the final matrix
}
colnames(abundanceNormal) = SumPar$ecotype
LastAb = abundanceNormal[dim(abundanceNormal)[1],]# normalised abundance of the ecotypes at the last simulation step
rmaxN = sim@params@species_params$r_max
rmaxN = rmaxN * LastAb # normalised rmax following the ecotype abundances
# rmaxN now holds an abundance-weighted rmax per ecotype at the last step.
# fake rdi values, identical for every ecotype (one column per ecotype)
rdi = seq(0,0.0001,0.00000001)
RDI = matrix(rdi , length(rdi) , length(rmaxN) )
colnames(RDI) = SumPar$ecotype
# Beverton-Holt per column: rdd = rmax * rdi / (rmax + rdi).
# The "+" is written out explicitly; the original used sweep(RDI, 2, -rmaxN)
# with sweep's default FUN = "-", i.e. RDI - (-rmax), which is the same
# value but easy to misread.
RDD1 = sweep(RDI,2,rmaxN,"+") # rdi+rmax
RDD2 = sweep(RDI,2,rmaxN,"*") # rdi*rmax
RDD = RDD2/RDD1 # reproduction with rmax applied (rmax is different for each ecotype)
egg = sweep(RDI,2,rmaxN,"/") # x axis (rdi/rmax)
reproduction = sweep(RDD,2,rmaxN,"/") # y axis ( rdd/rmax)
#get rid of Nan / Inf
# Drop the columns of ecotypes whose rmaxN is 0: their egg/reproduction
# entries are NaN (0/0) in the first row.  The original while loop
# incremented i even after deleting column i, so the column that shifted
# into position i was never inspected and consecutive NaN columns
# survived.  A single logical mask removes them all at once, and
# drop = FALSE keeps both objects as matrices even if only one column
# remains (melt() below expects a matrix).
keep <- !is.nan(egg[1, ])
egg <- egg[, keep, drop = FALSE]
reproduction <- reproduction[, keep, drop = FALSE]
# Long-format data for plotting: one row per (rdi value, ecotype).
EGG = melt(egg)
REPRO = melt(reproduction)
graphdata = EGG
dimnames(graphdata)[[2]] = list("col","species","rdi")
graphdata$repro = REPRO$value # make only one dataframe
# Parent species ("bloodline") recovered from the first character of the
# ecotype name.
# NOTE(review): taking only the FIRST character breaks for species numbers
# >= 10 (ecotype "10..." is assigned bloodline 1) -- confirm the ecotype
# naming scheme before trusting these groupings.
graphdata$bloodline = sapply(graphdata[,2], function(x) as.numeric(unlist(strsplit(as.character(x), "")))[1])
#shows all ecotypes
ggplot(graphdata)+
geom_point(aes(x = rdi, y = repro, color = as.factor(species)))+
scale_x_log10()+
geom_hline(yintercept = 1)# rmax
#same color within species
ggplot(graphdata)+
geom_point(aes(x = rdi, y = repro, color = as.factor(bloodline), group = species))+
scale_x_log10()+
geom_hline(yintercept = 1)# rmax
#only one specific species
# select the species
i = 3
graphdataSp = graphdata[which(graphdata$bloodline==i),]
ggplot(graphdataSp)+
geom_point(aes(x = rdi, y = repro, color = as.factor(species)))+
scale_x_log10()+
geom_hline(yintercept = 1)
#fisheries scenarios---------------------
# Six fishing scenarios varying which species are fished (large / small /
# all) and where the knife-edge selectivity sits relative to maturation
# size (0.25 * w_inf = at maturation, 0.35 * w_inf = above maturation).
# Each scenario runs myModel() and post-processes with processing().
source("TBM1.r") # the model from mizer (more like a set up)
source("model.r") # my model
## scenario 1 biggest species fished, selectivity above maturation size
#asymptotic size
no_sp = 9
min_w_inf <- 10
max_w_inf <- 1e5
w_inf <- 10^seq(from=log10(min_w_inf), to = log10(max_w_inf), length=no_sp)
#dividing species between the gears (fished and non-fished)
# other_gears <- w_inf >= 10000
gear_names <- rep("None", no_sp)
#gear_names[other_gears] <- "FishingStuff"
#setting up knife edge
knife_edges <- w_inf * 0.35 # slightly above maturation size
# fisheries are primitive now, so I need to set the knife edge a lot above the size of the species that I do not want to fish
#knife_edges[1:6] <-1e6
# NOTE(review): the scalar below discards the per-species vector computed
# two lines up -- the whole knife_edges <- w_inf * 0.35 computation is dead
# code in this scenario.  Confirm which edge was intended.
knife_edges <- 1000
output <- myModel(no_sp = no_sp, t_max = 100, no_run = 20,
kappa = 1, min_w_inf = min_w_inf, max_w_inf = max_w_inf, h = 90,
effort = 0.2, knife_edge_size = knife_edges, gear_names = gear_names)
sim = processing(output, plot = T, where = paste(dir,"/scenario1",sep=""))
gc()
## scenario 2 biggest species fished, selectivity at maturation size
#asymptotic size
no_sp = 9
min_w_inf <- 10
max_w_inf <- 1e5
w_inf <- 10^seq(from=log10(min_w_inf), to = log10(max_w_inf), length=no_sp)
#dividing species between the gears (fished and non-fished)
other_gears <- w_inf >= 10000
gear_names <- rep("None", no_sp)
gear_names[other_gears] <- "FishingStuff"
#setting up knife edge
knife_edges <- w_inf * 0.25 # at maturation size
# fisheries are primitive now, so I need to set the knife edge a lot above the size of the species that I do not want to fish
knife_edges[1:6] <-1e6
output <- myModel(no_sp = no_sp, t_max = 100, no_run = 40,
kappa = 0.05, min_w_inf = min_w_inf, max_w_inf = max_w_inf, h = 95,
effort = 0.4, knife_edge_size = knife_edges, gear_names = gear_names)
sim = processing(output, plot = F, where = paste(dir,"/scenario2",sep=""))
gc()
## scenario 3 small species fished, selectivity above maturation size
#asymptotic size
no_sp = 9
min_w_inf <- 10
max_w_inf <- 1e5
w_inf <- 10^seq(from=log10(min_w_inf), to = log10(max_w_inf), length=no_sp)
#dividing species between the gears (fished and non-fished)
# species with asymptotic size 100 to 1000 are fished
other_gears <- w_inf <= 1000 & w_inf >=100
gear_names <- rep("None", no_sp)
gear_names[other_gears] <- "FishingStuff"
#setting up knife edge
knife_edges <- w_inf * 0.35 # above maturation size
# fisheries are primitive now, so I need to set the knife edge a lot above the size of the species that I do not want to fish
knife_edges[1:2] <-1e6
knife_edges[6:9] <-1e6
output <- myModel(no_sp = no_sp, t_max = 100, no_run = 20,
kappa = 1, min_w_inf = min_w_inf, max_w_inf = max_w_inf, h = 90,
effort = 0.4, knife_edge_size = knife_edges, gear_names = gear_names)
sim = processing(output, plot = T, where = paste(dir,"/scenario3",sep=""))
## scenario 4 small species fished, selectivity at maturation size
#asymptotic size
no_sp = 9
min_w_inf <- 10
max_w_inf <- 1e5
w_inf <- 10^seq(from=log10(min_w_inf), to = log10(max_w_inf), length=no_sp)
#dividing species between the gears (fished and non-fished)
# species with asymptotic size 100 to 1000 are fished
other_gears <- w_inf <= 1000 & w_inf >=100
gear_names <- rep("None", no_sp)
gear_names[other_gears] <- "FishingStuff"
#setting up knife edge
knife_edges <- w_inf * 0.25 # at maturation size
# fisheries are primitive now, so I need to set the knife edge a lot above the size of the species that I do not want to fish
knife_edges[1:2] <-1e6
knife_edges[6:9] <-1e6
output <- myModel(no_sp = no_sp, t_max = 100, no_run = 20,
kappa = 1, min_w_inf = min_w_inf, max_w_inf = max_w_inf, h = 90,
effort = 0.4, knife_edge_size = knife_edges, gear_names = gear_names)
sim = processing(output, plot = T, where = paste(dir,"/scenario4",sep=""))
## scenario 5 everyone fished, selectivity above maturation size
#asymptotic size
no_sp = 9
min_w_inf <- 10
max_w_inf <- 1e5
w_inf <- 10^seq(from=log10(min_w_inf), to = log10(max_w_inf), length=no_sp)
#dividing species between the gears (fished and non-fished)
other_gears <- w_inf >=50
gear_names <- rep("None", no_sp)
gear_names[other_gears] <- "FishingStuff"
#setting up knife edge
knife_edges <- w_inf * 0.35 # above maturation size (0.35, not 0.25)
# fisheries are primitive now, so I need to set the knife edge a lot above the size of the species that I do not want to fish
knife_edges[1:2] <-1e6
output <- myModel(no_sp = no_sp, t_max = 100, no_run = 20,
kappa = 1, min_w_inf = min_w_inf, max_w_inf = max_w_inf, h = 90,
effort = 0.4, knife_edge_size = knife_edges, gear_names = gear_names)
sim = processing(output, plot = T, where = paste(dir,"/scenario5",sep=""))
## scenario 6 everyone fished, selectivity at maturation size
#asymptotic size
no_sp = 9
min_w_inf <- 10
max_w_inf <- 1e5
w_inf <- 10^seq(from=log10(min_w_inf), to = log10(max_w_inf), length=no_sp)
#dividing species between the gears (fished and non-fished)
other_gears <- w_inf >=50
gear_names <- rep("None", no_sp)
gear_names[other_gears] <- "FishingStuff"
#setting up knife edge
knife_edges <- w_inf * 0.25 # at maturation size
# fisheries are primitive now, so I need to set the knife edge a lot above the size of the species that I do not want to fish
knife_edges[1:2] <-1e6
output <- myModel(no_sp = no_sp, t_max = 100, no_run = 20,
kappa = 1, min_w_inf = min_w_inf, max_w_inf = max_w_inf, h = 90,
effort = 0.4, knife_edge_size = knife_edges, gear_names = gear_names)
sim = processing(output, plot = T, where = paste(dir,"/scenario6",sep=""))
# does not happen anymore -----------------------------
#checking for errors
# Quick look at the bookkeeping columns of the second simulation's params.
param = output[[2]]@species_params
paramS = data.frame(param$species,param$ecotype,param$pop,param$extinct,param$run,param$error)
#in case of umbrella
# Recovery path for a crashed ("umbrella") run: drop the unfinished last
# sim, rebuild a deduplicated species table and a fresh MizerParams, then
# reprocess.
output[[length(output)]] = NULL # delete last half sim
param = NULL
# NOTE(review): rbind inside a loop grows the data frame quadratically;
# do.call(rbind, lapply(output, ...)) would build it in one pass.
for (i in 1:length(output)) param = rbind(param,output[[i]]@params@species_params) # create the dataframe for species
# Keep one row per ecotype, preferring the latest/extinct record.
param <- param[order(param$ecotype, param$extinct, decreasing=TRUE),]
param <- param[!duplicated(param$ecotype),]
SummaryParams = param[order(param$pop,param$ecotype),]
FinalParam <- MizerParams(SummaryParams, min_w =0.001, max_w=10000 * 1.1, no_w = 100, min_w_pp = 1e-10, w_pp_cutoff = 0.5, n = 0.75, p=0.75, q=0.8, r_pp=4, kappa=0.1, lambda = 2.05) #create the mizer param from the dataframe
result=list(output,FinalParam) # put it in the right disposition
rm(output)
sim = processing(result,plot = T, where = "/umbrella")
|
66cb4c5e09db7c15c11a9a254dc2aaf8d78369ab | 59af957f8cfcfc4d5e7d7b92593d69bbe55118b3 | /exercises/1.1/Exercise_1.1_DavidHuangWM.R | 3dbb5750bb4988a17c3c91e681238bc7ce99641e | [] | no_license | ds-wm/atsa-2021 | 3da1841019b33bb6641b80c41c0d263925bc7fad | dd6c475c88fd4673116f221d55d4561a4adcf2ac | refs/heads/main | 2023-04-26T09:24:59.832836 | 2021-05-18T22:20:16 | 2021-05-18T22:20:16 | 332,797,907 | 1 | 3 | null | 2021-02-19T18:42:31 | 2021-01-25T15:48:05 | Jupyter Notebook | UTF-8 | R | false | false | 2,376 | r | Exercise_1.1_DavidHuangWM.R | # Exercise 1.1
## Calculate the missing values in the following table.
# Sample points for one period-relevant stretch of the realizations.
t <- c(0, pi/4, pi/2, 3*pi/4, pi)
# Rounding to 4 decimals hides floating-point noise such as sin(pi) ~ 1.2e-16.
Y1t <- round(sin(t), digits = 4)
Y2t <- round(sin(t + pi/2), digits = 4)
# Pointwise average of the two series.
Mt <- (Y1t + Y2t) / 2
# Collect everything into a data frame and display the completed table.
my.df <- data.frame(t, Y1t, Y2t, Mt)
print(my.df)
## Calculate the mean for the realization $Y(t) = \sin(t + \pi / 2)$ for $t \in (0, 100)$.
# Variables carry a .2 suffix so they do not clash with part 1.
# First sample the realization at the integers 0, 1, ..., 100.
t.2 <- seq(0, 100)
Yt.2 <- sin(t.2 + pi/2)
mean.2.1 <- mean(Yt.2)
print(mean.2.1)
# Now refine the grid to 1001 equally spaced points over the same interval.
t.2 <- seq(0, 100, length.out = 1001)
Yt.2 <- sin(t.2 + pi/2)
mean.2.2 <- mean(Yt.2)
print(mean.2.2)
# Either way the sample mean is close to, but not exactly, zero.
cat("So over a sequence of (0, 1, 2, ..., 99, 100), the mean would be", mean.2.1)
cat("Though, the realization oscillates between -1 and 1.", "\nSo the more realistic solution would actually 0.")
## What is the difference between the ensemble mean and the mean of a given realization?
cat("The ensemble mean provides us with the average over all of the outcome values, which covers the entire range, whereas the mean of a given realization provides us with the average over a specific given time.")
## Add the missing time series to the plot given below. Make the line dashed blue to match the legend.
# Time axis: 101 evenly spaced points rescaled from (0, 100) onto (0, 25).
t <- (0:100) * 25 / 100
# The two realizations, a quarter period apart.
Yt1 <- sin(t)
Yt2 <- sin(t + pi/2)
# Solid red curve for sin(t); the extra headroom in ylim leaves space for
# the legend.
plot(t, Yt1,
     type = "l", col = "red", lwd = 1, lty = 1,
     ylim = c(-1.1, 1.25), xlab = "Time", ylab = NA)
# Dashed blue curve for sin(t + pi/2), drawn on the same axes with lines()
# so the y-axis is not redrawn (as par(new = TRUE) + plot() would).
lines(t, Yt2, type = "l", col = "blue", lwd = 1, lty = 2)
# Horizontal legend at the top; transparent background so it does not hide
# the curves.
legend("top",
       inset = 0.01,
       col = c("red", "blue"),
       lty = c(1, 2),
       lwd = c(1, 1),
       legend = c(
         expression(sin(t)),
         expression(sin(t+pi/2))),
       bg = "transparent",
       box.col = "white",
       horiz = TRUE)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.