content
large_stringlengths
0
6.46M
path
large_stringlengths
3
331
license_type
large_stringclasses
2 values
repo_name
large_stringlengths
5
125
language
large_stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
4
6.46M
extension
large_stringclasses
75 values
text
stringlengths
0
6.46M
# PMML: Predictive Modelling Markup Language
#
# Part of the Rattle package for Data Mining
#
# Handle lm and glm models.
#
# Time-stamp: <2011-01-13 19:48:25 Graham Williams>
#
# Copyright (c) 2009 Togaware Pty Ltd
#
# This file is part of the Rattle suite for Data Mining in R.
#
# Rattle is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Rattle is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Rattle. If not, see <http://www.gnu.org/licenses/>.

########################################################################
# Linear Model PMML exporter
#
# Implemented: 070528 rguha@indiana.edu based on Graham's template for
# handling rpart trees.
#
# Modified: 080201 by Zementis, Inc. (info@zementis.com) to add the
# capability to export binary logistic regression models using glm.
#
# Modified: 090103 by Graham Williams to add transforms framework.

# Export a fitted lm (or glm) model as a PMML 3.2 <RegressionModel>
# XML tree. glm families binomial (exported as a classification model
# with softmax normalization), poisson (exp normalization) and
# gaussian are supported in addition to plain lm least squares.
#
# Arguments:
#   model       - a fitted lm or glm object.
#   model.name  - value for the PMML modelName attribute.
#   app.name    - application name recorded in the PMML Header.
#   description - free-text model description for the Header.
#   copyright   - optional copyright string for the Header.
#   transforms  - optional Rattle transforms to export as
#                 LocalTransformations (see supportTransformExport).
#   dataset     - unused here; kept for interface compatibility.
#   weights     - forwarded to pmmlDataDictionary.
#
# Returns: an XMLNode tree (XML package) rooted at the PMML element.
#
# NOTE(review): relies on Rattle-internal helpers (pmmlRootNode,
# pmmlHeader, pmmlDataDictionary, pmmlMiningSchema, pmml.transforms,
# markupSpecials) defined elsewhere in the package.

pmml.lm <- function(model,
                    model.name="Linear_Regression_Model",
                    app.name="Rattle/PMML",
                    description="Linear Regression Model",
                    copyright=NULL,
                    transforms=NULL,
                    dataset=NULL,
                    weights=NULL,
                    ...)
{
  if (! inherits(model, "lm")) stop("Not a legitimate lm object")
  # NOTE(review): require() is discouraged for mandatory dependencies;
  # library(XML) or XML:: qualification would fail loudly instead.
  require(XML, quietly=TRUE)

  # Collect the required information.

  # For a regression, all variables will have been used except those
  # with a NA coefficient indicating singularities. We mark
  # singularities as inactive shortly.

  terms <- attributes(model$terms)

  field <- NULL
  field$name <- names(terms$dataClasses)

  # 101009 Check for a "(weights)" data class and remove it. This
  # arises in the glm models when a Weight variable is used. Not sure
  # why the glm model records this here.

  # The weights argument is deliberately reused here as the index of
  # the "(weights)" pseudo-field to drop from both name and class.
  weights <- which(field$name == "(weights)")
  if (length(weights)) field$name <- field$name[-weights]
  orig.names <- field$name

  field$class <- terms$dataClasses
  if (length(weights)) field$class <- field$class[-weights]
  orig.class <- field$class

  # 090103 Support transforms if available.

  if (supportTransformExport(transforms))
  {
    field <- unifyTransforms(field, transforms)
    transforms <- activateDependTransforms(transforms)
  }
  number.of.fields <- length(field$name)

  # By convention the first data class is the target variable.
  target <- field$name[1]

  # 090501 Identify those who are singularities. For numerics, this is
  # easy since the names are just the variable names. For categorics
  # this gets tricky because the names include the levels. So we need
  # to keep in inactive the actual variable name, if all coefficients
  # for that variable are NAs.

  inactive <- names(which(is.na(coef(model))))
  active <- names(which(!is.na(coef(model))))

  # 110113 For the following grep, with certain Japanese characters we
  # see the string including a "[" in some encoding and causes the
  # grep to fail. We can't do the usual Encoding<- "UTF-8" trick since
  # the characters already look like UTF-8. But using enc2utf8 works -
  # does it hurt doing it always, or just when we have Japanese? Needs
  # testing.

  field$name <- enc2utf8(field$name)

  # These are the actual variable names. 110113 Should be using grepl
  # rather than grep and then tmp>0!
  # NOTE(review): sapply here is fragile (type depends on matches);
  # vapply/grepl would be safer, but left as-is to preserve behavior.

  tmp <- sapply(sapply(field$name, grep, inactive), length)
  inactive.vars <- names(tmp[tmp>0])
  tmp <- sapply(sapply(field$name, grep, active), length)
  active.vars <- names(tmp[tmp>0])

  # Now remove any which have any non-NA levels. This final list is
  # passed on as the definitive list of nonactive variables

  inactive <- setdiff(inactive.vars, active.vars)

  # Record factor levels for each categoric field so the
  # DataDictionary can enumerate them.
  for (i in 1:number.of.fields)
  {
    # We don't need to bother with ylevels since lm doesn't do
    # factor predictions.
    if (field$class[[field$name[i]]] == "factor")
      # 081004 gjw Test if the data is available in the model, as it
      # would be for a glm (but not an lm), since if the target
      # variable is categoric then the levels are not recorded in
      # xlevels for the target variable, so we will need to get the
      # levels from the data itself.
      if (is.null(model$data))
        field$levels[[field$name[i]]] <- model$xlevels[[field$name[i]]]
      else
        field$levels[[field$name[i]]] <- levels(model$data[[field$name[i]]])
  }

  # PMML

  pmml <- pmmlRootNode("3.2")

  # PMML -> Header

  pmml <- append.XMLNode(pmml, pmmlHeader(description, copyright, app.name))

  # PMML -> DataDictionary

  pmml <- append.XMLNode(pmml, pmmlDataDictionary(field, weights=weights))

  # PMML -> RegressionModel

  # Added by Zementis so that code can also export binary logistic
  # regression glm models built with binomial(logit). 090303 gjw This
  # looks dangerous, assuming the third argument is the model
  # type. For now, go with it, but set a default model type in case
  # the call has less than two arguments. A general lm model has data
  # as the third part of the call, thus we need to accept that as a
  # genuine model and not an unknown model type! For now, default
  # to generating lm PMML.

  if (model$call[[1]] == "lm")
    model.type <- "lm"
  else if (model$call[[1]] == "glm")
    model.type <- model$family$family
  else
    model.type <- "unknown"

  if (model.type == "binomial")
  {
    the.model <- xmlNode("RegressionModel",
                         attrs=c(modelName=model.name,
                           # 100915 Wen-Ching Lin of Zementis noted
                           # this was regression but should be
                           # classification.
                           functionName="classification",
                           algorithmName="glm",
                           normalizationMethod="softmax",
                           targetFieldName=target))
  }
  else if (model.type == "poisson")
  {
    the.model <- xmlNode("RegressionModel",
                         attrs=c(modelName=model.name,
                           functionName="regression",
                           algorithmName="glm",
                           normalizationMethod="exp",
                           targetFieldName=target))
  }
  else if (model.type == "gaussian")
  {
    the.model <- xmlNode("RegressionModel",
                         attrs=c(modelName=model.name,
                           functionName="regression",
                           algorithmName="glm",
                           targetFieldName=target))
  }
  else if (model.type == "lm")
  {
    # The original code for linear regression models.
    the.model <- xmlNode("RegressionModel",
                         attrs=c(modelName=model.name,
                           functionName="regression",
                           algorithmName="least squares",
                           targetFieldName=target))
  }
  else
    stop("pmml.lm: Not a supported family object: ", model.type)

  # PMML -> RegressionModel -> MiningSchema

  the.model <- append.XMLNode(the.model, pmmlMiningSchema(field, target, inactive))

  # PMML -> TreeModel -> LocalTransforms

  if (supportTransformExport(transforms))
    the.model <- append.XMLNode(the.model, pmml.transforms(transforms))

  # PMML -> RegressionModel -> RegressionTable

  coeff <- coefficients(model)

  # 100519 From Wen of Zementis For singularities the coefficient is
  # NA. From DMG the specification says:
  #
  # <xs:attribute name="coefficient" type="REAL-NUMBER" use="required" />
  #
  # So replace NAs with 0. The effect should be the same.

  coeff[is.na(coeff)] <- 0

  coeffnames <- names(coeff)

  # 090306 Handle the case where the intercept is not in the
  # coefficients, and hence is 0?

  if (coeffnames[[1]] == "(Intercept)")
    intercept <- as.numeric(coeff[[1]])
  else
    intercept <- 0

  # Added by Graham Williams so that code identifies a targetCategory
  # for binary logistic regression glm models built with
  # binomial(logit) and 091009 adds an extra RegressionTable
  # (regTable2) to explicitly refer to the second target category, as
  # recommended in PMML 4.0 specs. and having an intercept of 0.0.

  regTable2 <- NULL
  if (model.type == "binomial")
  {
    # 090117 Identify the two possible values for the target variable,
    # and select the second as the target. Extend the PMML specs so I
    # can add the other value as well, since I need that when
    # generating C code to return a class rather than a probability.

    values <- sort(unique(model$data[[target]]))
    alternative.value <- as.character(values[1])
    target.value <- as.character(values[2])
    regTable <- xmlNode("RegressionTable",
                        attrs=c(targetCategory=target.value,
                          intercept=intercept))
    regTable2 <- xmlNode("RegressionTable",
                         attrs=c(targetCategory=alternative.value,
                           intercept="0.0"))
  }
  else
  {
    regTable <- xmlNode("RegressionTable",
                        attrs=c(intercept=intercept))
  }

  # 080620 gjw The PMML spec (at least the Zementis validator)
  # requires NumericPredictors first and then
  # CategoricalPredictors. Simplest approach is to loop twice!!
  # Hopefully, this is not a significant computational expense.

  # First pass: numeric predictors only.
  # NOTE(review): 1:length(orig.names) would misbehave on an empty
  # vector; seq_along would be safer, but left as-is to preserve
  # behavior (a model always has at least the target field).
  for (i in 1:length(orig.names))
  {
    name <- orig.names[[i]]
    if (name == target) next
    klass <- orig.class[[name]]
    if (klass == 'numeric')
    {
      predictorNode <- xmlNode("NumericPredictor",
                               attrs=c(name=name,
                                 exponent="1",
                                 coefficient=as.numeric(coeff[which(coeffnames==name)])))
      regTable <- append.XMLNode(regTable, predictorNode)
    }
  }

  # Second pass: categorical predictors, one node per modelled level.
  for (i in 1:length(orig.names))
  {
    name <- orig.names[[i]]
    if (name == target) next
    klass <- orig.class[[name]]
    if (klass == 'factor')
    {
      levs <- model$xlevels[[name]]

      # 081019 gjw Add in a zero coefficient for the base level. In
      # this way, we communicate through the PMML which level is the
      # base. Can be useful in then comparing with the full list of
      # levels available for this variable and determining levels that
      # are just missing from the training. Note that xlevels does not
      # include any levels that were not modelled (i.e., missing
      # levels from the training data). We do this by iterating over
      # all the modelled levels (levs, i.e., all values in xlevels)
      # instead of all but the first level (levs[-1], i.e., the base
      # level). When we have the first level, we simply note the
      # coefficient as 0. 090306 This was updated to remove the
      # assumption that the first level has a 0 coefficient. This is
      # not the case in simple lm models (e.g., example(lm);
      # pmml(lm.D90)).

      for (l in levs)
      {
        # Coefficient names for factors are variable name + level.
        tmp <- paste(name, l, sep='')
        # 090306 Change this test from one that assumes a 0
        # coefficient for the first level, to one that has a 0
        # coefficient for any missing level.
        coefficient <- ifelse(!length(which(coeffnames == tmp)),
                              0.00,
                              as.numeric(coeff[which(coeffnames == tmp)]))
        predictorNode <- xmlNode("CategoricalPredictor",
                                 attrs=c(name=name,
                                   value=markupSpecials(l),
                                   coefficient=coefficient))
        regTable <- append.XMLNode(regTable, predictorNode)
      }
    }
  }

  the.model <- append.XMLNode(the.model, regTable)
  if (! is.null(regTable2)) the.model <- append.XMLNode(the.model, regTable2)

  # Add to the top level structure.

  pmml <- append.XMLNode(pmml, the.model)

  return(pmml)
}
/src/pmml.lm.R
no_license
rafael158/rattle
R
false
false
12,107
r
# PMML: Predictive Modelling Markup Language # # Part of the Rattle package for Data Mining # # Handle lm and glm models. # # Time-stamp: <2011-01-13 19:48:25 Graham Williams> # # Copyright (c) 2009 Togaware Pty Ltd # # This files is part of the Rattle suite for Data Mining in R. # # Rattle is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # Rattle is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Rattle. If not, see <http://www.gnu.org/licenses/>. ######################################################################## # Linear Model PMML exporter # # Implemented: 070528 rguha@indiana.edu based on Graham's template for # handling rpart trees. # # Modified: 080201 by Zementis, Inc. (info@zementis.com) to add the # capability to export binary logistic regression models using glm. # # Modified: 090103 by Graham Williams to add transforms framework. pmml.lm <- function(model, model.name="Linear_Regression_Model", app.name="Rattle/PMML", description="Linear Regression Model", copyright=NULL, transforms=NULL, dataset=NULL, weights=NULL, ...) { if (! inherits(model, "lm")) stop("Not a legitimate lm object") require(XML, quietly=TRUE) # Collect the required information. # For a regression, all variables will have been used except those # with a NA coefficient indicating singularities. We mark # singularities as inactive shortly. terms <- attributes(model$terms) field <- NULL field$name <- names(terms$dataClasses) # 101009 Check for a "(weights)" data class and remove it. This # arises in the glm models when a Weight variable is used. 
Not sure # why the glm model records this here. weights <- which(field$name == "(weights)") if (length(weights)) field$name <- field$name[-weights] orig.names <- field$name field$class <- terms$dataClasses if (length(weights)) field$class <- field$class[-weights] orig.class <- field$class # 090103 Support transforms if available. if (supportTransformExport(transforms)) { field <- unifyTransforms(field, transforms) transforms <- activateDependTransforms(transforms) } number.of.fields <- length(field$name) target <- field$name[1] # 090501 Identify those who are singularities. For numerics, this is # easy since the names are just the variable names. For categorics # this gets tricky because the names include the levels. So we need # to keep in inactive the actual variable name, if all coefficients # for that variable are NAs. inactive <- names(which(is.na(coef(model)))) active <- names(which(!is.na(coef(model)))) # 110113 For the following grep, with certain Japanes characters we # see the string including a "[" in some encoding and causes the # grep to fail. We can't do the usual Encoding<- "UTF-8" trick since # the characters already look like UTF-8. But using enc2utf8 works - # does it hurt doing it always, or just when we have Japanese? Needs # testing. field$name <- enc2utf8(field$name) # These are the actual variable names. 110113 Should be using grepl # rather than grep and then tmp>0! tmp <- sapply(sapply(field$name, grep, inactive), length) inactive.vars <- names(tmp[tmp>0]) tmp <- sapply(sapply(field$name, grep, active), length) active.vars <- names(tmp[tmp>0]) # Now remove any which have any non-NA levels. This final list is # passed on as the definitive list of nonactive variables inactive <- setdiff(inactive.vars, active.vars) for (i in 1:number.of.fields) { # We don't need to bother with ylevels since lm doesn't do # factor predictions. 
if (field$class[[field$name[i]]] == "factor") # 081004 gjw Test if the data is available in the model, as it # would be for a glm (but not an lm), since if the target # variable is categoric then the levels are not recorded in # xlevels for the target variable, so we will need to get the # levels from the data itself. if (is.null(model$data)) field$levels[[field$name[i]]] <- model$xlevels[[field$name[i]]] else field$levels[[field$name[i]]] <- levels(model$data[[field$name[i]]]) } # PMML pmml <- pmmlRootNode("3.2") # PMML -> Header pmml <- append.XMLNode(pmml, pmmlHeader(description, copyright, app.name)) # PMML -> DataDictionary pmml <- append.XMLNode(pmml, pmmlDataDictionary(field, weights=weights)) # PMML -> RegressionModel # Added by Zementis so that code can also export binary logistic # regression glm models built with binomial(logit). 090303 gjw This # looks dangerous, assuming the third argument is the model # type. For now, go with it, but set a default model type in case # the call has less than two arguments. A general lm model has data # as the third part of the call, thus we need to accept that as a # genuine model and not an unknown model type! For now, default # to generating lm PMML. if (model$call[[1]] == "lm") model.type <- "lm" else if (model$call[[1]] == "glm") model.type <- model$family$family else model.type <- "unknown" if (model.type == "binomial") { the.model <- xmlNode("RegressionModel", attrs=c(modelName=model.name, # 100915 Wen-Ching Lin of Zementis noted # this was regression but should be # classification. 
functionName="classification", algorithmName="glm", normalizationMethod="softmax", targetFieldName=target)) } else if (model.type == "poisson") { the.model <- xmlNode("RegressionModel", attrs=c(modelName=model.name, functionName="regression", algorithmName="glm", normalizationMethod="exp", targetFieldName=target)) } else if (model.type == "gaussian") { the.model <- xmlNode("RegressionModel", attrs=c(modelName=model.name, functionName="regression", algorithmName="glm", targetFieldName=target)) } else if (model.type == "lm") { # The original code for linear regression models. the.model <- xmlNode("RegressionModel", attrs=c(modelName=model.name, functionName="regression", algorithmName="least squares", targetFieldName=target)) } else stop("pmml.lm: Not a supported family object: ", model.type) # PMML -> RegressionModel -> MiningSchema the.model <- append.XMLNode(the.model, pmmlMiningSchema(field, target, inactive)) # PMML -> TreeModel -> LocalTransforms if (supportTransformExport(transforms)) the.model <- append.XMLNode(the.model, pmml.transforms(transforms)) # PMML -> RegressionModel -> RegressionTable coeff <- coefficients(model) # 100519 From Wen of Zementis For singularities the coefficient is # NA. From DMG the specification says: # # <xs:attribute name="coefficient" type="REAL-NUMBER" use="required" /> # # So replace NAs with 0. The effect should be the same. coeff[is.na(coeff)] <- 0 coeffnames <- names(coeff) # 090306 Handle the case where the intercept is not in the # coefficients, and hence is 0? if (coeffnames[[1]] == "(Intercept)") intercept <- as.numeric(coeff[[1]]) else intercept <- 0 # Added by Graham Williams so that code identifies a targetCategory # for binary logistic regression glm models built with # binomial(logit) and 091009 adds an extra RegressionTable # (regTable2) to explicitly refer to the second target category, as # recommended in PMML 4.0 specs. and having an intercept of 0.0. 
regTable2 <- NULL if (model.type == "binomial") { # 090117 Identify the two possible values for the target variable, # and select the second as the target. Extend the PMML specs so I # can add the other value as well, since I need that when # generating C code to return a class rather than a probability. values <- sort(unique(model$data[[target]])) alternative.value <- as.character(values[1]) target.value <- as.character(values[2]) regTable <- xmlNode("RegressionTable", attrs=c(targetCategory=target.value, intercept=intercept)) regTable2 <- xmlNode("RegressionTable", attrs=c(targetCategory=alternative.value, intercept="0.0")) } else { regTable <- xmlNode("RegressionTable", attrs=c(intercept=intercept)) } # 080620 gjw The PMML spec (at least the Zementis validator) # requires NumericPredictors first and then # CategoricalPredictors. Simplest approach is to loop twice!! # Hopefully, this is not a significant computational expense. for (i in 1:length(orig.names)) { name <- orig.names[[i]] if (name == target) next klass <- orig.class[[name]] if (klass == 'numeric') { predictorNode <- xmlNode("NumericPredictor", attrs=c(name=name, exponent="1", coefficient=as.numeric(coeff[which(coeffnames==name)]))) regTable <- append.XMLNode(regTable, predictorNode) } } for (i in 1:length(orig.names)) { name <- orig.names[[i]] if (name == target) next klass <- orig.class[[name]] if (klass == 'factor') { levs <- model$xlevels[[name]] # 081019 gjw Add in a zero coefficient for the base level. In # this way, we communicate through the PMML which level is the # base. Can be useful in then comparing with the full list of # levels available for this variable and determining levels that # are just missing from the training. Note that xlevels does not # include any levels that were not modelled (i.e., missing # levels from the training data). 
We do this by iterating over # all the modelled levels (levs, i.e., all values in xlevels) # instead of all but the first level (levs[-1], i.e., the base # level). When we have the first level, we simply note the # coefficient as 0. 090306 This was updated to remove the # assumption that the first level has a 0 coefficient. This is # not the case in simple lm models (e.g., exampe(lm); # pmml(lm.D90)). for (l in levs) { tmp <- paste(name, l, sep='') # 090306 Change this test from one that assumes a 0 # coefficient for the first level, to one that has a 0 # coefficient for any missing level. coefficient <- ifelse(!length(which(coeffnames == tmp)), 0.00, as.numeric(coeff[which(coeffnames == tmp)])) predictorNode <- xmlNode("CategoricalPredictor", attrs=c(name=name, value=markupSpecials(l), coefficient=coefficient)) regTable <- append.XMLNode(regTable, predictorNode) } } } the.model <- append.XMLNode(the.model, regTable) if (! is.null(regTable2)) the.model <- append.XMLNode(the.model, regTable2) # Add to the top level structure. pmml <- append.XMLNode(pmml, the.model) return(pmml) }
#----------------------------------------------------------------------
# Tom's demonstration example.
#
# Purpose: Fast local data preparation for multi-model scoring option
# from H2O Web UI menu (aka steam).
#
# Builds five models (GBM, two random forests, GLM, deep learning) on
# an 80/20 split of the airlines data, then points the user at the
# steam UI for multi-model scoring.
#----------------------------------------------------------------------

# Source setup code to define myIP and myPort and helper functions.
# If you are having trouble running this, just set the condition to FALSE
# and hardcode myIP and myPort.
if (TRUE) {
  # Set working directory so that the source() below works.
  setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
  if (FALSE) {
    setwd("/Users/tomk/0xdata/ws/h2o/R/tests/testdir_demos")
  }
  # Supplies locate(), PASS_BANNER(), myIP, myPort for the test harness.
  source('../findNSourceUtils.R')
  options(echo=TRUE)
  filePath <- normalizePath(locate("smalldata/airlines/allyears2k_headers.zip"))
} else {
  stop("need to hardcode ip and port")
  # myIP = "127.0.0.1"
  # myPort = 54321
  library(h2o)
  PASS_BANNER <- function() { cat("\nPASS\n\n") }
  filePath <- "https://raw.github.com/0xdata/h2o/master/smalldata/airlines/allyears2k_headers.zip"
}

# Connect to an already-running H2O instance (startH2O=FALSE).
conn <- h2o.init(ip=myIP, port=myPort, startH2O=FALSE)

# Import the airlines data and split ~80/20 into train/validation
# using a uniform random column.
df = h2o.importFile(conn, filePath, "df")
s = h2o.runif(df)
air.train = df[s <= 0.8,]
h2o.assign(air.train, ("air.train"))
air.valid = df[s > 0.8,]
h2o.assign(air.valid, ("air.valid"))

# Data set column headers
# Year,Month,DayofMonth,DayOfWeek,DepTime,CRSDepTime,ArrTime,CRSArrTime,UniqueCarrier,FlightNum,TailNum,ActualElapsedTime,CRSElapsedTime,AirTime,ArrDelay,DepDelay,Origin,Dest,Distance,TaxiIn,TaxiOut,Cancelled,CancellationCode,Diverted,CarrierDelay,WeatherDelay,NASDelay,SecurityDelay,LateAircraftDelay,IsArrDelayed,IsDepDelayed

# Predictors known before departure; target is the delay indicator.
myX = c("Year", "Month", "DayofMonth", "DayOfWeek", "CRSDepTime", "CRSArrTime", "UniqueCarrier", "FlightNum", "CRSElapsedTime", "Origin", "Dest", "Distance")
myY = "IsDepDelayed"

# Fit the five demonstration models; vector-valued hyperparameters
# request small grids.
air.gbm = h2o.gbm(data = air.train, validation = air.valid, x = myX, y = myY, n.trees = c(5, 10), interaction.depth = c(3, 5), importance = TRUE)

air.drf = h2o.randomForest(data = air.train, validation = air.valid, x = myX, y = myY, ntree = c(5, 10), depth = c(5, 10), importance = TRUE, type = "BigData")

air.srf = h2o.randomForest(data = air.train, validation = air.valid, x = myX, y = myY, ntree = c(5, 10), depth = c(5, 10), importance = TRUE, type = "fast")

air.glm = h2o.glm(data = air.train, x = myX, y = myY, family = "binomial", alpha = c(0.1, 0.2, 0.5), use_all_factor_levels = TRUE, variable_importances = TRUE)

air.dl = h2o.deeplearning(data = air.train, validation = air.valid, x = myX, y = myY, classification = TRUE, activation = c("Tanh", "Rectifier"), hidden = list(c(5, 5), c(10,10)), use_all_factor_levels = TRUE, variable_importances = TRUE)

# Scrub out Last.value's so the Store View isn't cluttered.
h2o.rm(conn, grep(pattern = "Last.value", x = h2o.ls(conn)$Key, value = TRUE))

# Tell the user where to find the steam UI on the connected cluster.
message = sprintf("%sPoint your web browser to: http://%s:%s/steam/index.html\n%s",
                  "----------\n\n",
                  conn@ip,
                  conn@port,
                  "\n----------\n" )
cat(message)

PASS_BANNER()
/R/tests/testdir_demos/runit_demo_tk_steam.R
permissive
ivanliu1989/h2o
R
false
false
3,604
r
#---------------------------------------------------------------------- # Tom's demonstration example. # # Purpose: Fast local data prepapration for multi-model scoring option # from H2O Web UI menu (aka steam). #---------------------------------------------------------------------- # Source setup code to define myIP and myPort and helper functions. # If you are having trouble running this, just set the condition to FALSE # and hardcode myIP and myPort. if (TRUE) { # Set working directory so that the source() below works. setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f"))) if (FALSE) { setwd("/Users/tomk/0xdata/ws/h2o/R/tests/testdir_demos") } source('../findNSourceUtils.R') options(echo=TRUE) filePath <- normalizePath(locate("smalldata/airlines/allyears2k_headers.zip")) } else { stop("need to hardcode ip and port") # myIP = "127.0.0.1" # myPort = 54321 library(h2o) PASS_BANNER <- function() { cat("\nPASS\n\n") } filePath <- "https://raw.github.com/0xdata/h2o/master/smalldata/airlines/allyears2k_headers.zip" } conn <- h2o.init(ip=myIP, port=myPort, startH2O=FALSE) df = h2o.importFile(conn, filePath, "df") s = h2o.runif(df) air.train = df[s <= 0.8,] h2o.assign(air.train, ("air.train")) air.valid = df[s > 0.8,] h2o.assign(air.valid, ("air.valid")) # Data set column headers # Year,Month,DayofMonth,DayOfWeek,DepTime,CRSDepTime,ArrTime,CRSArrTime,UniqueCarrier,FlightNum,TailNum,ActualElapsedTime,CRSElapsedTime,AirTime,ArrDelay,DepDelay,Origin,Dest,Distance,TaxiIn,TaxiOut,Cancelled,CancellationCode,Diverted,CarrierDelay,WeatherDelay,NASDelay,SecurityDelay,LateAircraftDelay,IsArrDelayed,IsDepDelayed myX = c("Year", "Month", "DayofMonth", "DayOfWeek", "CRSDepTime", "CRSArrTime", "UniqueCarrier", "FlightNum", "CRSElapsedTime", "Origin", "Dest", "Distance") myY = "IsDepDelayed" air.gbm = h2o.gbm(data = air.train, validation = air.valid, x = myX, y = myY, n.trees = c(5, 10), interaction.depth = c(3, 5), importance = TRUE) air.drf = h2o.randomForest(data = 
air.train, validation = air.valid, x = myX, y = myY, ntree = c(5, 10), depth = c(5, 10), importance = TRUE, type = "BigData") air.srf = h2o.randomForest(data = air.train, validation = air.valid, x = myX, y = myY, ntree = c(5, 10), depth = c(5, 10), importance = TRUE, type = "fast") air.glm = h2o.glm(data = air.train, x = myX, y = myY, family = "binomial", alpha = c(0.1, 0.2, 0.5), use_all_factor_levels = TRUE, variable_importances = TRUE) air.dl = h2o.deeplearning(data = air.train, validation = air.valid, x = myX, y = myY, classification = TRUE, activation = c("Tanh", "Rectifier"), hidden = list(c(5, 5), c(10,10)), use_all_factor_levels = TRUE, variable_importances = TRUE) # Scrub out Last.value's so the Store View isn't cluttered. h2o.rm(conn, grep(pattern = "Last.value", x = h2o.ls(conn)$Key, value = TRUE)) message = sprintf("%sPoint your web browser to: http://%s:%s/steam/index.html\n%s", "----------\n\n", conn@ip, conn@port, "\n----------\n" ) cat(message) PASS_BANNER()
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/genepop_detective.R \name{genepop_detective} \alias{genepop_detective} \title{Explore Genepop data structure.} \usage{ genepop_detective(genepop, variable = "Pops") } \arguments{ \item{genepop}{the genepop data to be manipulated. This can be either a file path or a dataframe read in with tab separation, header=FALSE , quote="", and stringsAsFactors=FALSE. This will be the standard genepop format with the first n+1 rows corresponding to the n loci names, or a single comma delimited row of loci names followed by the locus data. Populations are separated by "Pop". Each individual ID is linked to the locus data by " ,  " (space, comma, space, space) and is read in as a single row (character).} \item{variable}{data to be returned Five options \code{default = "Pops"} "Pops" = vector of population names. "PopNum" = dataframe of population names and counts. "Inds" = vector of sample IDs. "Loci" = vector of Loci. "Allele" = vector of allele values.} } \description{ Function returns Genepop file meta-data. }
/man/genepop_detective.Rd
no_license
rystanley/genepopedit
R
false
true
1,088
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/genepop_detective.R \name{genepop_detective} \alias{genepop_detective} \title{Explore Genepop data structure.} \usage{ genepop_detective(genepop, variable = "Pops") } \arguments{ \item{genepop}{the genepop data to be manipulated. This can be either a file path or a dataframe read in with tab separation, header=FALSE , quote="", and stringsAsFactors=FALSE. This will be the standard genepop format with the first n+1 rows corresponding to the n loci names, or a single comma delimited row of loci names followed by the locus data. Populations are separated by "Pop". Each individual ID is linked to the locus data by " , " (space, space space) and is read in as as a single row (character).} \item{variable}{data to be returned Four options \code{default = "Pops"} "Pops" = vector of population names. "PopNum" = dataframe of population names and counts. "Inds" = vector of sample IDs. "Loci" = vector of Loci. "Allele" = vector of allele values.} } \description{ Function returns Genepop file meta-data. }
## File Name: mle.reliability.R
## File Version: 0.08

#########################################################
# MLE reliability
#
# Reliability of maximum-likelihood trait estimates: the share of the
# observed variance that is not measurement error,
# (var(meas) - mean(se^2)) / var(meas).
#
# meas    - vector of point estimates; values with absolute magnitude
#           above 1E10 are treated as missing.
# se.meas - corresponding standard errors.
mle.reliability <- function(meas, se.meas )
{
    # Screen out non-converged estimates coded as huge values.
    meas[ abs(meas) > 1E10 ] <- NA
    obs_var <- stats::var(meas, na.rm=TRUE)
    err_var <- mean( se.meas^2, na.rm=TRUE )
    ( obs_var - err_var ) / obs_var
}
#########################################################

#########################################################
# EAP reliability
#
# Reliability of EAP trait estimates: posterior variance relative to
# total variance, var(meas) / (var(meas) + mean(se^2)).
#
# meas    - vector of point estimates; values with absolute magnitude
#           above 1E10 are treated as missing.
# se.meas - corresponding standard errors.
eap.reliability <- function(meas, se.meas )
{
    # Screen out non-converged estimates coded as huge values.
    meas[ abs(meas) > 1E10 ] <- NA
    obs_var <- stats::var(meas, na.rm=TRUE)
    err_var <- mean( se.meas^2, na.rm=TRUE )
    obs_var / ( obs_var + err_var )
}
#########################################################
/R/mle.reliability.R
no_license
cran/sirt
R
false
false
825
r
## File Name: mle.reliability.R ## File Version: 0.08 ######################################################### # MLE reliability mle.reliability <- function(meas, se.meas ){ meas[ abs(meas) > 1E10 ] <- NA v1 <- stats::var(meas, na.rm=TRUE) v2 <- mean( se.meas^2, na.rm=TRUE ) rel <- ( v1 - v2 ) / v1 return(rel) } ######################################################### ######################################################### # EAP reliability eap.reliability <- function(meas, se.meas ){ meas[ abs(meas) > 1E10 ] <- NA v1 <- stats::var(meas, na.rm=TRUE) v2 <- mean( se.meas^2, na.rm=TRUE ) rel <- v1 / ( v1 + v2 ) return(rel) } #########################################################
################################################################################
# Script to extract variances values for Heritability analysis from Eurobats
# expression data Freezev2 January 2013.
# Modify from Kerrin Small.
# Author: Ana Viñuela (ana.vinuela@kcl.ac.uk)
# Date: 13/01/2013
#
# Bootstrap variant: resamples twin families with replacement, fits a
# variance-components mixed model (lme4) per expression trait and writes
# per-gene heritability (h2) and shared-environment (c2) estimates.
################################################################################

# Read in the arguments: a single START token used to tag the output file.
arguments <- commandArgs(trailingOnly = TRUE)
if (length(arguments) != 1) {
  print("Error, this script requires 1 arguments:")
  print("START")
  stop("Exiting script")
}

library(Matrix)
library(lme4)
library(GenABEL)

start <- arguments[1]

# Hard-coded input locations: expression matrix and twin pedigree.
data_file <- "/data/jbryois/projects/systemotwin/Solar_modeling/Heritability/prepare_exp_gen_files/t1.GC_mean.corrected.exp.csv"
pedigree <- "/data/jbryois/projects/systemotwin/Solar_modeling/Heritability/prepare_exp_gen_files/pedigree_file_t1.csv"

# Load expression data; first row holds sample IDs, so promote it to
# column names after transposing (genes become rows).
reads = read.csv(data_file, header=T)
reads <- t(reads)
colnames(reads) <- reads[1,]
reads <- reads[-1,]
reads <- as.data.frame(reads)

# Load the pedigree and derive household (family) and zygosity IDs.
# hhid drops the final character of the id (twin index) so co-twins share it.
covs = read.csv(pedigree,header=T)
covs$hhid <- paste("HH-",substr(covs$id,1,nchar(covs$id)-1),sep="")
# MZ twins share an mztwin ID (family-based); DZ twins get individual IDs.
covs$mztwin <- ifelse(substr(covs$mztwin,1,1)=="M",paste("MZ-",substr(covs$id,1,nchar(covs$id)-1),sep=""),paste("DZ-",substr(covs$id,1,nchar(covs$id)),sep=""))

# Resample complete twin pairs (families of size 2) with replacement;
# singletons are always kept.
unique_family <- names(which(table(covs$hhid)==2))
unique_family_of_single_individuals <- names(which(table(covs$hhid)==1))
bootstrap_unique_family <- sample(unique_family,length(unique_family),replace=T)
family_to_keep_for_bootstrap <- as.data.frame(c(unique_family_of_single_individuals,bootstrap_unique_family))
colnames(family_to_keep_for_bootstrap) <- "hhid"

#### BOOTSTRAPPED COVS and READS
# merge() duplicates rows for families drawn more than once.
covs <- merge(covs,family_to_keep_for_bootstrap,by="hhid")
reads <- reads[as.character(covs$id)]

# Rank-based inverse normal transform of each gene's expression
# (GenABEL::rntransform), applied per row.
normal_transformed_expression <- apply(reads,1, function(x) rntransform(as.numeric(x)))
normal_transformed_expression <- t(normal_transformed_expression)
#rntransform(as.numeric(reads[2312,]))==normal_transformed_expression[2312,]
reads <- normal_transformed_expression

#### CHANGE NAMES OF THE COVS THAT ARE DUPLICATED
# Suffix hhid/mztwin with the occurrence count so resampled copies of the
# same family are treated as distinct random-effect levels.
covs_temp <- covs
id_list <- NULL
for(i in 1:nrow(covs)){
  id_list <- c(id_list,covs$id[i])
  number_of_occurence_in_list <- table(id_list)[as.character(covs$id[i])]
  covs_temp$hhid[i] <- paste(covs$hhid[i],number_of_occurence_in_list,sep="_")
  covs_temp$mztwin[i] <- paste(covs$mztwin[i],number_of_occurence_in_list,sep="_")
}
covs <- covs_temp

DEL <- as.factor(as.matrix(covs['mztwin'])) # Same number for MZ twins (family ID), different number for DZ twins (twin ID + 00000)
DZ <- as.factor(as.matrix(covs['hhid']))    # This is coded with the family ID.

n_peaks <- nrow(reads)

gene_name <- character()
h2_vector <- numeric()
c2_vector <- numeric()

# Per gene: fit intercept-only model with nested random effects for
# family (DZ) and zygosity group (DEL), then decompose the variance.
for(j in 1:n_peaks){
  gene_name <- c(gene_name,rownames(reads[j,]))
  lmer_totv <- lmer(as.matrix(as.double(reads[j,])) ~ 1 + (1 | DZ) + (1 | DEL))
  # Residual, MZ-shared and family-shared variance components.
  variance_E <- attr(VarCorr(lmer_totv),"sc")^2
  variance_DEL <- attr(VarCorr(lmer_totv)$DEL,"stddev")^2
  variance_DZ <- attr(VarCorr(lmer_totv)$DZ,"stddev")^2
  # ACE-style estimates: h2 = 2 * MZ-extra share; c2 = remaining shared share.
  h2_fly <- 2*variance_DEL/(variance_DEL+variance_E+variance_DZ)
  c2_fly <- (variance_DZ-variance_DEL)/(variance_DEL+variance_E+variance_DZ)
  h2_vector <- c(h2_vector,h2_fly)
  c2_vector <- c(c2_vector,c2_fly)
  cat(j,"\t",rownames(reads[j,]),"\t",h2_fly,"\t",c2_fly,"\n")
}

# Write one row per gene, tagged with the START argument.
heritability <- as.data.frame(cbind(rownames(reads),as.numeric(h2_vector),as.numeric(c2_vector)))
colnames(heritability) <- c("name","h2","c2")

write.table(heritability,paste("/data/jbryois/projects/systemotwin/Solar_modeling/Heritability_Ana/Bootstrap_results/t1.GC_mean.corrected.h2r_",start,sep=""), sep="\t",quote=F, row.names=F,col.names=T)
/systemotwin/heritability_RNA_seq_modified_for_boot_strap.R
no_license
jbryois/Scripts
R
false
false
3,790
r
################################################################################ # Script to extract variances values for Heritability analysis from Eurobats # expression data Freezev2 January 2013. # Modify from Kerrin Small. # Author: Ana ViÒuela (ana.vinuela@kcl.ac.uk) # Date: 13/01/2013 ################################################################################ #read in the arguments arguments <- commandArgs(trailingOnly = TRUE) if (length(arguments) != 1) { print("Error, this script requires 1 arguments:") print("START") stop("Exiting script") } library(Matrix) library(lme4) library(GenABEL) start <- arguments[1] data_file <- "/data/jbryois/projects/systemotwin/Solar_modeling/Heritability/prepare_exp_gen_files/t1.GC_mean.corrected.exp.csv" pedigree <- "/data/jbryois/projects/systemotwin/Solar_modeling/Heritability/prepare_exp_gen_files/pedigree_file_t1.csv" reads = read.csv(data_file, header=T) reads <- t(reads) colnames(reads) <- reads[1,] reads <- reads[-1,] reads <- as.data.frame(reads) covs = read.csv(pedigree,header=T) covs$hhid <- paste("HH-",substr(covs$id,1,nchar(covs$id)-1),sep="") covs$mztwin <- ifelse(substr(covs$mztwin,1,1)=="M",paste("MZ-",substr(covs$id,1,nchar(covs$id)-1),sep=""),paste("DZ-",substr(covs$id,1,nchar(covs$id)),sep="")) unique_family <- names(which(table(covs$hhid)==2)) unique_family_of_single_individuals <- names(which(table(covs$hhid)==1)) bootstrap_unique_family <- sample(unique_family,length(unique_family),replace=T) family_to_keep_for_bootstrap <- as.data.frame(c(unique_family_of_single_individuals,bootstrap_unique_family)) colnames(family_to_keep_for_bootstrap) <- "hhid" ####BOOTSTRAPED COVS and READS covs <- merge(covs,family_to_keep_for_bootstrap,by="hhid") reads <- reads[as.character(covs$id)] normal_transformed_expression <- apply(reads,1, function(x) rntransform(as.numeric(x))) normal_transformed_expression <- t(normal_transformed_expression) 
#rntransform(as.numeric(reads[2312,]))==normal_transformed_expression[2312,] reads <- normal_transformed_expression #### CHANGE NAMES OF THE COVS THAT ARE DUPLICATED covs_temp <- covs id_list <- NULL for(i in 1:nrow(covs)){ id_list <- c(id_list,covs$id[i]) number_of_occurence_in_list <- table(id_list)[as.character(covs$id[i])] covs_temp$hhid[i] <- paste(covs$hhid[i],number_of_occurence_in_list,sep="_") covs_temp$mztwin[i] <- paste(covs$mztwin[i],number_of_occurence_in_list,sep="_") } covs <- covs_temp DEL <- as.factor(as.matrix(covs['mztwin'])) # Same number for MZ twins (family ID), different number for DZ twins (twin ID + 00000) DZ <- as.factor(as.matrix(covs['hhid'])) # This is coded with the family ID. n_peaks <- nrow(reads) gene_name <- character() h2_vector <- numeric() c2_vector <- numeric() for(j in 1:n_peaks){ gene_name <- c(gene_name,rownames(reads[j,])) lmer_totv <- lmer(as.matrix(as.double(reads[j,])) ~ 1 + (1 | DZ) + (1 | DEL)) variance_E <- attr(VarCorr(lmer_totv),"sc")^2 variance_DEL <- attr(VarCorr(lmer_totv)$DEL,"stddev")^2 variance_DZ <- attr(VarCorr(lmer_totv)$DZ,"stddev")^2 h2_fly <- 2*variance_DEL/(variance_DEL+variance_E+variance_DZ) c2_fly <- (variance_DZ-variance_DEL)/(variance_DEL+variance_E+variance_DZ) h2_vector <- c(h2_vector,h2_fly) c2_vector <- c(c2_vector,c2_fly) cat(j,"\t",rownames(reads[j,]),"\t",h2_fly,"\t",c2_fly,"\n") } heritability <- as.data.frame(cbind(rownames(reads),as.numeric(h2_vector),as.numeric(c2_vector))) colnames(heritability) <- c("name","h2","c2") write.table(heritability,paste("/data/jbryois/projects/systemotwin/Solar_modeling/Heritability_Ana/Bootstrap_results/t1.GC_mean.corrected.h2r_",start,sep=""), sep="\t",quote=F, row.names=F,col.names=T)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/prl_rp_multipleB.R \name{prl_rp_multipleB} \alias{prl_rp_multipleB} \title{Probabilistic Reversal Learning Task, multiple blocks per subject} \usage{ prl_rp_multipleB(data = "choice", niter = 3000, nwarmup = 1000, nchain = 1, ncore = 1, nthin = 1, inits = "random", indPars = "mean", saveDir = NULL, email = NULL, modelRegressor = FALSE, adapt_delta = 0.95, stepsize = 1, max_treedepth = 10) } \arguments{ \item{data}{A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "choice", "outcome", and "block". See \bold{Details} below for more information.} \item{niter}{Number of iterations, including warm-up.} \item{nwarmup}{Number of iterations used for warm-up only.} \item{nchain}{Number of chains to be run.} \item{ncore}{Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1.} \item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high.} \item{inits}{Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values.} \item{indPars}{Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode".} \item{saveDir}{Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested.} \item{email}{Character value containing email address to send notification of completion. Leave blank if not interested.} \item{modelRegressor}{Exporting model-based regressors? TRUE or FALSE. Currently not available for this model.} \item{adapt_delta}{Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. 
See \bold{Details} below.} \item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below.} \item{max_treedepth}{Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below.} } \value{ \code{modelData} A class \code{"hBayesDM"} object with the following components: \describe{ \item{\code{model}}{Character string with the name of the model (\code{"prl_rp_multipleB"}).} \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter values (as specified by \code{"indPars"}) for each subject.} \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples over different model parameters. } \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} } } \description{ Hierarchical Bayesian Modeling of the Probabilistic Reversal Learning (PRL) Task using the following parameters: "Apun" (punishment learning rate), "Arew" (reward learning rate), and "beta" (inverse temperature). \strong{MODEL:} Reward-Punishment Model (Ouden et al., 2013, Neuron) } \details{ This section describes some of the function arguments in greater detail. \strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension (e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns represent variables. For the Probabilistic Reversal Learning Task, there should be three columns of data with the labels "subjID", "choice", and "outcome". 
It is not necessary for the columns to be in this particular order, however it is necessary that they be labelled correctly and contain the information below: \describe{ \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} \item{\code{"choice"}}{An integer value representing the chosen choice option within the given trial (e.g., 1 or 2 in PRL).} \item{\code{"outcome"}}{A 1 or -1 for outcome within each given trial (1 = reward, -1 = loss).} \item{\code{"block"}}{An integer value representing the block number of the current trial (e.g., 1 1 1 2 2 2).} } \strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, there is no need to remove other miscellaneous data columns. \strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the effects that initial values have on the resulting posteriors. \strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} command. The chains should resemble a "furry caterpillar". 
\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to generate the posterior. \strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the \href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. } \examples{ \dontrun{ # Run the model and store results in "output" output <- prl_rp_multipleB(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) # Visually check convergence of the sampling chains (should like like 'hairy caterpillars') plot(output, type = 'trace') # Check Rhat values (all Rhat values should be less than or equal to 1.1) rhat(output) # Plot the posterior distributions of the hyper-parameters (distributions should be unimodal) plot(output) # Show the WAIC and LOOIC model fit estimates printFit(output) } } \references{ Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The Journal of Machine Learning Research, 15(1), 1593-1623. Ouden, den, H. E. M., Daw, N. D., Fernandez, G., Elshout, J. A., Rijpkema, M., Hoogman, M., et al. (2013). Dissociable Effects of Dopamine and Serotonin on Reversal Learning. Neuron, 80(4), 1090-1100. http://doi.org/10.1016/j.neuron.2013.08.030 } \seealso{ We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} }
/hBayesDM/man/prl_rp_multipleB.Rd
no_license
mimburgi/VTSdistract-ComputationalModeling
R
false
true
7,774
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/prl_rp_multipleB.R \name{prl_rp_multipleB} \alias{prl_rp_multipleB} \title{Probabilistic Reversal Learning Task, multiple blocks per subject} \usage{ prl_rp_multipleB(data = "choice", niter = 3000, nwarmup = 1000, nchain = 1, ncore = 1, nthin = 1, inits = "random", indPars = "mean", saveDir = NULL, email = NULL, modelRegressor = FALSE, adapt_delta = 0.95, stepsize = 1, max_treedepth = 10) } \arguments{ \item{data}{A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "choice", "outcome", and "block". See \bold{Details} below for more information.} \item{niter}{Number of iterations, including warm-up.} \item{nwarmup}{Number of iterations used for warm-up only.} \item{nchain}{Number of chains to be run.} \item{ncore}{Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1.} \item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high.} \item{inits}{Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values.} \item{indPars}{Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode".} \item{saveDir}{Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested.} \item{email}{Character value containing email address to send notification of completion. Leave blank if not interested.} \item{modelRegressor}{Exporting model-based regressors? TRUE or FALSE. Currently not available for this model.} \item{adapt_delta}{Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. 
See \bold{Details} below.} \item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below.} \item{max_treedepth}{Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below.} } \value{ \code{modelData} A class \code{"hBayesDM"} object with the following components: \describe{ \item{\code{model}}{Character string with the name of the model (\code{"prl_rp_multipleB"}).} \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter values (as specified by \code{"indPars"}) for each subject.} \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples over different model parameters. } \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} } } \description{ Hierarchical Bayesian Modeling of the Probabilistic Reversal Learning (PRL) Task using the following parameters: "Apun" (punishment learning rate), "Arew" (reward learning rate), and "beta" (inverse temperature). \strong{MODEL:} Reward-Punishment Model (Ouden et al., 2013, Neuron) } \details{ This section describes some of the function arguments in greater detail. \strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension (e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns represent variables. For the Probabilistic Reversal Learning Task, there should be three columns of data with the labels "subjID", "choice", and "outcome". 
It is not necessary for the columns to be in this particular order, however it is necessary that they be labelled correctly and contain the information below: \describe{ \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} \item{\code{"choice"}}{An integer value representing the chosen choice option within the given trial (e.g., 1 or 2 in PRL).} \item{\code{"outcome"}}{A 1 or -1 for outcome within each given trial (1 = reward, -1 = loss).} \item{\code{"block"}}{An integer value representing the block number of the current trial (e.g., 1 1 1 2 2 2).} } \strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, there is no need to remove other miscellaneous data columns. \strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the effects that initial values have on the resulting posteriors. \strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} command. The chains should resemble a "furry caterpillar". 
\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to generate the posterior. \strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the \href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. } \examples{ \dontrun{ # Run the model and store results in "output" output <- prl_rp_multipleB(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) # Visually check convergence of the sampling chains (should like like 'hairy caterpillars') plot(output, type = 'trace') # Check Rhat values (all Rhat values should be less than or equal to 1.1) rhat(output) # Plot the posterior distributions of the hyper-parameters (distributions should be unimodal) plot(output) # Show the WAIC and LOOIC model fit estimates printFit(output) } } \references{ Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The Journal of Machine Learning Research, 15(1), 1593-1623. Ouden, den, H. E. M., Daw, N. D., Fernandez, G., Elshout, J. A., Rijpkema, M., Hoogman, M., et al. (2013). Dissociable Effects of Dopamine and Serotonin on Reversal Learning. Neuron, 80(4), 1090-1100. http://doi.org/10.1016/j.neuron.2013.08.030 } \seealso{ We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} }
#' @title Short Straddle Strategy Function #' @description This function can be used to develop a Short Straddle Strategy. #' @param c Premium of Short call Option #' @param p Premium of Short Put Option #' @param k Excercise Price of Short call and Put Option #' @param llimit Lower limit of stock price at Expiration., Default: 20 #' @param ulimit Upper Limit of Stock Price at Expiration, Default: 20 #' @return OUTPUT_DESCRIPTION Returns the profit/loss generated from the strategy along with the profit/loss of individual contract and an interactive graph for the same. #' @details A straddle is a neutral options strategy that involves simultaneously selling both a put option and a call option for the underlying security with the same strike price and the same expiration date. #' @examples #' straddle.short(1.2, 3.2, 100) #' @rdname straddle.short #' @export #' @importFrom purrr map_dbl #' @import ggplot2 #' @importFrom plotly ggplotly #' @importFrom stats pnorm straddle.short = function(c, p, k, ulimit = 10, llimit = 10){ stock_price_at_expiration = round((k - llimit)):round((ulimit + k)) call_option = (-1* map_dbl(round((k - llimit)):round((ulimit + k)), .f = ~max(.x - k,0))) + c put_option = ( -1 * map_dbl(round((k - llimit)):round((ulimit + k)), .f = ~max(k - .x,0))) + p profit_loss = call_option + put_option df2 = data.frame(stock_price_at_expiration, call_option, put_option, profit_loss) p2 = ggplot(data = df2) + geom_line(aes(x = stock_price_at_expiration, y = call_option, colour = 'call_option')) + geom_line(aes(x = stock_price_at_expiration, y = put_option, colour = 'put_option')) + geom_line(aes(x = stock_price_at_expiration, y = profit_loss, colour = 'profit_loss')) + labs(x = 'stock price at expiration', y = 'profit/loss', title = 'Short Straddle Plot', color = 'Option contract') + scale_colour_manual('', breaks = c('call_option', 'put_option', 'profit_loss'), values = c('blue', 'red', 'black')) ggplotly(p2) print(df2) }
/R/straddle_short.R
no_license
cran/roptions
R
false
false
2,034
r
#' @title Short Straddle Strategy Function #' @description This function can be used to develop a Short Straddle Strategy. #' @param c Premium of Short call Option #' @param p Premium of Short Put Option #' @param k Excercise Price of Short call and Put Option #' @param llimit Lower limit of stock price at Expiration., Default: 20 #' @param ulimit Upper Limit of Stock Price at Expiration, Default: 20 #' @return OUTPUT_DESCRIPTION Returns the profit/loss generated from the strategy along with the profit/loss of individual contract and an interactive graph for the same. #' @details A straddle is a neutral options strategy that involves simultaneously selling both a put option and a call option for the underlying security with the same strike price and the same expiration date. #' @examples #' straddle.short(1.2, 3.2, 100) #' @rdname straddle.short #' @export #' @importFrom purrr map_dbl #' @import ggplot2 #' @importFrom plotly ggplotly #' @importFrom stats pnorm straddle.short = function(c, p, k, ulimit = 10, llimit = 10){ stock_price_at_expiration = round((k - llimit)):round((ulimit + k)) call_option = (-1* map_dbl(round((k - llimit)):round((ulimit + k)), .f = ~max(.x - k,0))) + c put_option = ( -1 * map_dbl(round((k - llimit)):round((ulimit + k)), .f = ~max(k - .x,0))) + p profit_loss = call_option + put_option df2 = data.frame(stock_price_at_expiration, call_option, put_option, profit_loss) p2 = ggplot(data = df2) + geom_line(aes(x = stock_price_at_expiration, y = call_option, colour = 'call_option')) + geom_line(aes(x = stock_price_at_expiration, y = put_option, colour = 'put_option')) + geom_line(aes(x = stock_price_at_expiration, y = profit_loss, colour = 'profit_loss')) + labs(x = 'stock price at expiration', y = 'profit/loss', title = 'Short Straddle Plot', color = 'Option contract') + scale_colour_manual('', breaks = c('call_option', 'put_option', 'profit_loss'), values = c('blue', 'red', 'black')) ggplotly(p2) print(df2) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/clusterluts.R \name{expand.colmat} \alias{expand.colmat} \title{expand color matrix} \usage{ expand.colmat(cm, length = 256, fill.col = "#000000", ...) } \arguments{ \item{cm}{color matrix (or valid input to \code{\link[grDevices]{col2rgb}})} \item{length}{desired number of color entries} \item{fill.col}{color to fill matrix with ('black')} \item{...}{ignored} } \value{ expanded color matrix } \description{ Add entries with \code{fill.col} to reach \code{length} colors in color matrix. } \details{ Should \code{cm} have more entries than specified by \code{length}, the input martix is returned unchanged } \author{ Benno Pütz \email{puetz@psych.mpg.de} }
/man/expand.colmat.Rd
no_license
bennop/clusterLUTs
R
false
true
743
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/clusterluts.R \name{expand.colmat} \alias{expand.colmat} \title{expand color matrix} \usage{ expand.colmat(cm, length = 256, fill.col = "#000000", ...) } \arguments{ \item{cm}{color matrix (or valid input to \code{\link[grDevices]{col2rgb}})} \item{length}{desired number of color entries} \item{fill.col}{color to fill matrix with ('black')} \item{...}{ignored} } \value{ expanded color matrix } \description{ Add entries with \code{fill.col} to reach \code{length} colors in color matrix. } \details{ Should \code{cm} have more entries than specified by \code{length}, the input martix is returned unchanged } \author{ Benno Pütz \email{puetz@psych.mpg.de} }
library(mongolite) library(ggplot2) library(tidyr) library(dplyr) library(arules) library(arulesViz) # Nombres de las colecciones que definimos en mongo c1 = "usuarios_nhashtgs_dia" c2 = "usuarios_hashtags" # Conecciones a mongo usuarios_nhashtgs_dia <- mongo(collection = c1, db = "DMUBA") usuarios_hashtags <- mongo(collection = c2, db = "DMUBA") # Lectura de las colecciones df_c1 = usuarios_nhashtgs_dia$find( query = '{}', fields = '{ "_id": false, "user_id" : true, "dia_semana" : true, "n_hashtags": true, "friends_count": true, "followers_count": true, "retweet_favorite_count": true, "retweet_retweet_count": true}' ) View( head(df_c1) ) df_c2 = usuarios_hashtags$find( query = '{}', fields = '{"_id": false, "user_id" : true, "cat_hashtags" : true}' ) View( head(df_c2) ) # Elimino los términos de búsqueda de los hashtags df_c2 = df_c2[-grep(x = df_c2$cat_hashtags, pattern = "^(covid|coronav|cuarentena)"),] # Creación de la variable categorica día dias = c( "lunes", "martes", "mi?rcoles", "jueves", "viernes", "s?bado", "domingo") df_c1$cat_nombre_dia = apply(df_c1[c(4)], 1, function(x){return(dias[x[1]])}) table(df_c1$cat_nombre_dia) barplot( table(df_c1$n_hashtags), main = "Uso de hashtags", xlab = "# hashtags", ylab="Cantidad" ) # ---------- Discretizaciones -------------------------- # 1) friends_count hist(df_c1$friends_count) hist( log10(df_c1$friends_count) ) df_c1$cat_friends = discretize( log10(df_c1$friends_count), method = "fixed", breaks = c(-Inf, 1, 3, 4, 5, Inf), labels=c("muy pocos","pocos", "medio", "muchos", "muchisimos") ) table(df_c1$cat_friends) # 2) followers_count hist( log10(df_c1$followers_count) ) df_c1$cat_followers = discretize( log10(df_c1$followers_count), method = "fixed", breaks = c(-Inf, 1, 3, 4, 5, Inf), labels=c("muy pocos","pocos", "medio", "muchos", "muchisimos") ) table(df_c1$cat_followers) # 3) retweet_favorite_count hist(df_c1$retweet_favorite_count) hist(log10(df_c1$retweet_favorite_count)) df_c1$cat_retweet_favorite_count = 
discretize(log10(df_c1$retweet_favorite_count),method = "fixed", breaks = c(-Inf, 1.5, 3, Inf), labels=c("bajo", "medio", "alto")) table(df_c1$cat_retweet_favorite_count) # 4) retweet_retweet_count hist( log10(df_c1$retweet_retweet_count) ) df_c1$cat_retweet_retweet_count = discretize( log10(df_c1$retweet_retweet_count), method = "fixed", breaks = c(-Inf, 1.5, 3, Inf), labels=c("bajo", "medio", "alto") ) table(df_c1$cat_retweet_retweet_count) View(head(df_c1)) ## Rotacion de Matrices # Rotación de las matriz de datos de usuario y las categóricas df_tuples_c1 = df_c1 %>% pivot_longer( cols =starts_with("cat"), names_to = "feat", values_to = "val", names_prefix = "cat_", values_drop_na = TRUE) %>% select("user_id", "feat", "val") head(df_tuples_c1) # Rotación de las matriz de datos de usuario y los hashtags df_tuples_c2 = df_c2 %>% pivot_longer( cols =starts_with("cat"), names_to = "feat", values_to = "val", names_prefix = "cat_", values_drop_na = TRUE) %>% select("user_id", "feat", "val") head(df_tuples_c2) # Concateno los dos data.frames df_tuples = rbind(df_tuples_c1,df_tuples_c2) # Se generan las tuplas TID-ITEM (el TID es el user_id) df_tuples = df_tuples %>% mutate("item" = paste0(feat,"=",val)) %>% select("user_id", "item") # Cantidad de transacciones (son los user_id únicos) length( unique(df_tuples$user_id) ) # Generamos las transacciones trans <- as(split(df_tuples$item, df_tuples$user_id), "transactions") inspect(trans[101]) # Buscamos reglas con min_sup=0.005 y min_conf=0.5 # Adem?s, se limitan la cantidad de ?tems (orden) entre 2 y 9 rules = apriori( trans, parameter=list( target="rule", support=0.005, confidence=0.5, maxlen=9, minlen=2 ) ) print(rules) inspect( sort(rules, by="lift", decreasing = TRUE)[1:20] ) # Scatter plot de support vs lift plot(rules, measure = c("support", "lift"), shading = "confidence") # Two-Key Plots: Grafica el Orden y las m?tricas plot(rules, method = "two-key plot") # Filtros de reglas # --------------------- # Utilizo el 
comando subset y su parámetro (también se llama subset :s ) # Ejemplo: # 7 < lift < 10 y 0.04 < support < 0.1 # quedar?a: (lift < 10 & lift > 7) & (support > 0.04 & support < 0.1) rules.filter = arules::subset( rules, subset = (lift < 10 & lift > 7) & (support > 0.04 & support < 0.1) ) print(rules.filter) inspect( head(rules.filter, 20) ) # Scatter plot de support vs lift plot(rules.filter, measure = c("support", "lift"), shading = "confidence") # Two-Key Plots: Grafica el Orden y las m?tricas plot(rules.filter, method = "two-key plot") # Referencias: # https://cran.r-project.org/web/packages/arulesViz/vignettes/arulesViz.pdf #
/examples/TP2-Clase_Santiago.R
no_license
francotestori/dmuba_02
R
false
false
4,866
r
library(mongolite) library(ggplot2) library(tidyr) library(dplyr) library(arules) library(arulesViz) # Nombres de las colecciones que definimos en mongo c1 = "usuarios_nhashtgs_dia" c2 = "usuarios_hashtags" # Conecciones a mongo usuarios_nhashtgs_dia <- mongo(collection = c1, db = "DMUBA") usuarios_hashtags <- mongo(collection = c2, db = "DMUBA") # Lectura de las colecciones df_c1 = usuarios_nhashtgs_dia$find( query = '{}', fields = '{ "_id": false, "user_id" : true, "dia_semana" : true, "n_hashtags": true, "friends_count": true, "followers_count": true, "retweet_favorite_count": true, "retweet_retweet_count": true}' ) View( head(df_c1) ) df_c2 = usuarios_hashtags$find( query = '{}', fields = '{"_id": false, "user_id" : true, "cat_hashtags" : true}' ) View( head(df_c2) ) # Elimino los términos de búsqueda de los hashtags df_c2 = df_c2[-grep(x = df_c2$cat_hashtags, pattern = "^(covid|coronav|cuarentena)"),] # Creación de la variable categorica día dias = c( "lunes", "martes", "mi?rcoles", "jueves", "viernes", "s?bado", "domingo") df_c1$cat_nombre_dia = apply(df_c1[c(4)], 1, function(x){return(dias[x[1]])}) table(df_c1$cat_nombre_dia) barplot( table(df_c1$n_hashtags), main = "Uso de hashtags", xlab = "# hashtags", ylab="Cantidad" ) # ---------- Discretizaciones -------------------------- # 1) friends_count hist(df_c1$friends_count) hist( log10(df_c1$friends_count) ) df_c1$cat_friends = discretize( log10(df_c1$friends_count), method = "fixed", breaks = c(-Inf, 1, 3, 4, 5, Inf), labels=c("muy pocos","pocos", "medio", "muchos", "muchisimos") ) table(df_c1$cat_friends) # 2) followers_count hist( log10(df_c1$followers_count) ) df_c1$cat_followers = discretize( log10(df_c1$followers_count), method = "fixed", breaks = c(-Inf, 1, 3, 4, 5, Inf), labels=c("muy pocos","pocos", "medio", "muchos", "muchisimos") ) table(df_c1$cat_followers) # 3) retweet_favorite_count hist(df_c1$retweet_favorite_count) hist(log10(df_c1$retweet_favorite_count)) df_c1$cat_retweet_favorite_count = 
discretize(log10(df_c1$retweet_favorite_count),method = "fixed", breaks = c(-Inf, 1.5, 3, Inf), labels=c("bajo", "medio", "alto")) table(df_c1$cat_retweet_favorite_count) # 4) retweet_retweet_count hist( log10(df_c1$retweet_retweet_count) ) df_c1$cat_retweet_retweet_count = discretize( log10(df_c1$retweet_retweet_count), method = "fixed", breaks = c(-Inf, 1.5, 3, Inf), labels=c("bajo", "medio", "alto") ) table(df_c1$cat_retweet_retweet_count) View(head(df_c1)) ## Rotacion de Matrices # Rotación de las matriz de datos de usuario y las categóricas df_tuples_c1 = df_c1 %>% pivot_longer( cols =starts_with("cat"), names_to = "feat", values_to = "val", names_prefix = "cat_", values_drop_na = TRUE) %>% select("user_id", "feat", "val") head(df_tuples_c1) # Rotación de las matriz de datos de usuario y los hashtags df_tuples_c2 = df_c2 %>% pivot_longer( cols =starts_with("cat"), names_to = "feat", values_to = "val", names_prefix = "cat_", values_drop_na = TRUE) %>% select("user_id", "feat", "val") head(df_tuples_c2) # Concateno los dos data.frames df_tuples = rbind(df_tuples_c1,df_tuples_c2) # Se generan las tuplas TID-ITEM (el TID es el user_id) df_tuples = df_tuples %>% mutate("item" = paste0(feat,"=",val)) %>% select("user_id", "item") # Cantidad de transacciones (son los user_id únicos) length( unique(df_tuples$user_id) ) # Generamos las transacciones trans <- as(split(df_tuples$item, df_tuples$user_id), "transactions") inspect(trans[101]) # Buscamos reglas con min_sup=0.005 y min_conf=0.5 # Adem?s, se limitan la cantidad de ?tems (orden) entre 2 y 9 rules = apriori( trans, parameter=list( target="rule", support=0.005, confidence=0.5, maxlen=9, minlen=2 ) ) print(rules) inspect( sort(rules, by="lift", decreasing = TRUE)[1:20] ) # Scatter plot de support vs lift plot(rules, measure = c("support", "lift"), shading = "confidence") # Two-Key Plots: Grafica el Orden y las m?tricas plot(rules, method = "two-key plot") # Filtros de reglas # --------------------- # Utilizo el 
comando subset y su parámetro (también se llama subset :s ) # Ejemplo: # 7 < lift < 10 y 0.04 < support < 0.1 # quedar?a: (lift < 10 & lift > 7) & (support > 0.04 & support < 0.1) rules.filter = arules::subset( rules, subset = (lift < 10 & lift > 7) & (support > 0.04 & support < 0.1) ) print(rules.filter) inspect( head(rules.filter, 20) ) # Scatter plot de support vs lift plot(rules.filter, measure = c("support", "lift"), shading = "confidence") # Two-Key Plots: Grafica el Orden y las m?tricas plot(rules.filter, method = "two-key plot") # Referencias: # https://cran.r-project.org/web/packages/arulesViz/vignettes/arulesViz.pdf #
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/hcrParam.R \docType{methods} \name{hcrParam} \alias{hcrParam} \title{hcrParam} \usage{ hcrParam(ftar, btrig, fmin, blim) } \arguments{ \item{ftar}{blah,blah,blah,...} \item{btrig}{blah,blah,blah,...} \item{fmin}{blah,blah,blah,...} \item{blim}{blah,blah,blah,...} } \description{ Creats an \code{FLPar} object for hockey stock HCR based on ftar,btrig,fmin, and blim. } \details{ hcrParam } \examples{ \dontrun{ data(pl4) } } \author{ Laurence Kell, Sea++ }
/man/hcrParam.Rd
no_license
flr/mydas
R
false
true
538
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/hcrParam.R \docType{methods} \name{hcrParam} \alias{hcrParam} \title{hcrParam} \usage{ hcrParam(ftar, btrig, fmin, blim) } \arguments{ \item{ftar}{blah,blah,blah,...} \item{btrig}{blah,blah,blah,...} \item{fmin}{blah,blah,blah,...} \item{blim}{blah,blah,blah,...} } \description{ Creats an \code{FLPar} object for hockey stock HCR based on ftar,btrig,fmin, and blim. } \details{ hcrParam } \examples{ \dontrun{ data(pl4) } } \author{ Laurence Kell, Sea++ }
########################################## #### GAM MODELS FOR T1 BIFACTOR STUDY #### ########################################## #Load data data.NMF <- readRDS("/data/jux/BBL/projects/pncT1AcrossDisorder/subjectData/n1396_T1_subjData.rds") #Load library library(mgcv) #Get NMF variable names nmfComponents <- names(data.NMF)[grep("Nmf26",names(data.NMF))] #Run gam models (GAM without TBV) NmfModels <- lapply(nmfComponents, function(x) { gam(substitute(i ~ s(age) + sex + averageManualRating + white + medu1 + mood_corrtraitsv2, list(i = as.name(x))), method="REML", data = data.NMF) }) #Look at model summaries models <- lapply(NmfModels, summary) ###################### #### MOOD RESULTS #### ###################### #Pull p-values p_mood <- sapply(NmfModels, function(v) summary(v)$p.table[6,4]) #Convert to data frame p_mood <- as.data.frame(p_mood) #Print original p-values to three decimal places p_mood_round <- round(p_mood,3) #FDR correct p-values p_mood_fdr <- p.adjust(p_mood[,1],method="fdr") #Convert to data frame p_mood_fdr <- as.data.frame(p_mood_fdr) #To print fdr-corrected p-values to three decimal places p_mood_fdr_round <- round(p_mood_fdr,3) #List the NMF components that survive FDR correction Nmf_mood_fdr <- row.names(p_mood_fdr)[p_mood_fdr<0.05] #Name of the NMF components that survive FDR correction Nmf_mood_fdr_names <- nmfComponents[as.numeric(Nmf_mood_fdr)] #To check direction of coefficient estimates mood_coeff <- models[as.numeric(Nmf_mood_fdr)] p.table_list <- lapply(mood_coeff,`[[`, 'p.table')
/ravensNMF/Sensitivity/Old/GamAnalyses_T1CorrTraits_ravensNMF_mood_noTBV_white_medu1.R
no_license
PennBBL/pncT1Bifactors
R
false
false
1,551
r
########################################## #### GAM MODELS FOR T1 BIFACTOR STUDY #### ########################################## #Load data data.NMF <- readRDS("/data/jux/BBL/projects/pncT1AcrossDisorder/subjectData/n1396_T1_subjData.rds") #Load library library(mgcv) #Get NMF variable names nmfComponents <- names(data.NMF)[grep("Nmf26",names(data.NMF))] #Run gam models (GAM without TBV) NmfModels <- lapply(nmfComponents, function(x) { gam(substitute(i ~ s(age) + sex + averageManualRating + white + medu1 + mood_corrtraitsv2, list(i = as.name(x))), method="REML", data = data.NMF) }) #Look at model summaries models <- lapply(NmfModels, summary) ###################### #### MOOD RESULTS #### ###################### #Pull p-values p_mood <- sapply(NmfModels, function(v) summary(v)$p.table[6,4]) #Convert to data frame p_mood <- as.data.frame(p_mood) #Print original p-values to three decimal places p_mood_round <- round(p_mood,3) #FDR correct p-values p_mood_fdr <- p.adjust(p_mood[,1],method="fdr") #Convert to data frame p_mood_fdr <- as.data.frame(p_mood_fdr) #To print fdr-corrected p-values to three decimal places p_mood_fdr_round <- round(p_mood_fdr,3) #List the NMF components that survive FDR correction Nmf_mood_fdr <- row.names(p_mood_fdr)[p_mood_fdr<0.05] #Name of the NMF components that survive FDR correction Nmf_mood_fdr_names <- nmfComponents[as.numeric(Nmf_mood_fdr)] #To check direction of coefficient estimates mood_coeff <- models[as.numeric(Nmf_mood_fdr)] p.table_list <- lapply(mood_coeff,`[[`, 'p.table')
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/hdma2.R \name{mEWAS} \alias{mEWAS} \title{Epigenome Wide Association Study with both exposure and outcome} \usage{ mEWAS(X, Y, M, k, conf = NULL) } \arguments{ \item{X}{an explanatory variable matrix with n rows and d columns. Each column corresponds to a distinct explanatory variable (Exposure). Explanatory variables must be encoded as numeric variables.} \item{Y}{an explanatory variable matrix with n rows and d columns. Each column corresponds to a distinct explanatory variable (Outcome). Explanatory variables must be encoded as numeric variables.} \item{M}{a response variable matrix with n rows and p columns. Each column corresponds to a beta-normalized methylation profile. Response variables must be encoded as numeric. No NAs allowed.} \item{K}{an integer for the number of latent factors in the regression model.} \item{covar}{set of covariable, must be numeric.} } \value{ an object with the following attributes: - U the latent variable score matrix with dimensions n x K. - B the effect size matrix for the exposure X and the outcome Y. - score matrix for the exposure X and the outcome Y. - pValue matrix for the exposure X and the outcome Y. - calibrated.score2, the calibrated score matrix for the exposure X and the outcome Y. - calibrated.pvalue, the calibrated pValue matrix for the exposure X and the outcome Y. - GIF : Genomic Inflation Factor for exposure and outcome - lfmm : the result of the 2 regressions of lfmm, mod1 for the regression of X on M and mod2 for the regression of Y on M given X. } \description{ This function uses lfmm (latent factor mixed models) to estimate the effects of exposures and outcomes on a response matrix. } \details{ The response variable matrix Y and the explanatory variable are centered. Missing values must be imputed. The number of latent factors can be estimated by looking at the screeplot of eigenvalues of a PCA. 
Possibility of calibrating the scores and pValues by the GIF (Genomic Inflation Factor). See lfmm package for more information. } \examples{ library(hdma2) # Run mEWAS res <- mEWAS(X = example$X, Y = example$Y, M = example$M, K = 5) } \author{ Basile Jumentier }
/man/mEWAS.Rd
no_license
jumentib/hdma2
R
false
true
2,250
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/hdma2.R \name{mEWAS} \alias{mEWAS} \title{Epigenome Wide Association Study with both exposure and outcome} \usage{ mEWAS(X, Y, M, k, conf = NULL) } \arguments{ \item{X}{an explanatory variable matrix with n rows and d columns. Each column corresponds to a distinct explanatory variable (Exposure). Explanatory variables must be encoded as numeric variables.} \item{Y}{an explanatory variable matrix with n rows and d columns. Each column corresponds to a distinct explanatory variable (Outcome). Explanatory variables must be encoded as numeric variables.} \item{M}{a response variable matrix with n rows and p columns. Each column corresponds to a beta-normalized methylation profile. Response variables must be encoded as numeric. No NAs allowed.} \item{K}{an integer for the number of latent factors in the regression model.} \item{covar}{set of covariable, must be numeric.} } \value{ an object with the following attributes: - U the latent variable score matrix with dimensions n x K. - B the effect size matrix for the exposure X and the outcome Y. - score matrix for the exposure X and the outcome Y. - pValue matrix for the exposure X and the outcome Y. - calibrated.score2, the calibrated score matrix for the exposure X and the outcome Y. - calibrated.pvalue, the calibrated pValue matrix for the exposure X and the outcome Y. - GIF : Genomic Inflation Factor for exposure and outcome - lfmm : the result of the 2 regressions of lfmm, mod1 for the regression of X on M and mod2 for the regression of Y on M given X. } \description{ This function uses lfmm (latent factor mixed models) to estimate the effects of exposures and outcomes on a response matrix. } \details{ The response variable matrix Y and the explanatory variable are centered. Missing values must be imputed. The number of latent factors can be estimated by looking at the screeplot of eigenvalues of a PCA. 
Possibility of calibrating the scores and pValues by the GIF (Genomic Inflation Factor). See lfmm package for more information. } \examples{ library(hdma2) # Run mEWAS res <- mEWAS(X = example$X, Y = example$Y, M = example$M, K = 5) } \author{ Basile Jumentier }
## app.R ## #chargement des packages necessaires pour l'applicatif #Interface du tableau de bord library(shinydashboard) #connection a la base de donnee library(RPostgreSQL) #Interface avec la base de donnee library(DBI) #graphiques library(highcharter) #connection a la base de donnees drv<-dbDriver("PostgreSQL") #parametres de la connection : db name est le nom de la base de donnees #host est l IP de connection #port est le port de connection de la base de donnees #user est l identifiant de connection a la base de donnee #password est le mot de passe de l identifiant de connection a la base de donnee con <- dbConnect(drv, dbname = "DB", host = "localhost", port = 5432, user = "XXX", password = "XXX") #chargement de la liste des etablissements (etb) et des differents services (serv) etb <- dbGetQuery(con,'Select distinct("LIB_ETB") as "ETABLISEMENT" from table_analyse order by "LIB_ETB"') serv <- dbGetQuery(con,'Select distinct("LAFFECTATION") as "AFFECTATION" from table_analyse order by "LAFFECTATION"') #chargement de la masse salariale par etablissement total <- dbGetQuery(con,'Select "LIB_ETB" as "Etablissement", SUM("MONTANT") as "Masse salariale" from table_analyse Group By "LIB_ETB" order by "LIB_ETB"') #chargement de la masse salariale par CDI, CDD et autres types de contrats CDI <- dbGetQuery(con,'Select "LIB_ETB" as "Etablissement", SUM("MONTANT") as "Masse salariale" from table_analyse where "CNATURECONTRAT" = \'CDI\' Group By "LIB_ETB" order by "LIB_ETB"') CDD <- dbGetQuery(con,'Select "LIB_ETB" as "Etablissement", SUM("MONTANT") as "Masse salariale" from table_analyse where "CNATURECONTRAT" = \'CDD\' Group By "LIB_ETB" order by "LIB_ETB"') reste <- dbGetQuery(con,'Select "LIB_ETB" as "Etablissement", SUM("MONTANT") as "Masse salariale" from table_analyse where "CNATURECONTRAT" != \'CDI\' and "CNATURECONTRAT" != \'CDD\' Group By "LIB_ETB" order by "LIB_ETB"') #creation de l interface utilisateur. 
#On cree une page ui <- dashboardPage( #L en tete de la page dashboardHeader(title = "Tableau de bord"), #La barre de selection de la page dashboardSidebar( sidebarMenu( menuItem("MASSE SALARIALE", tabName = "total", icon = icon("th")), menuItem("TYPES DE CONTRATS", tabName = "contrats", icon = icon("th")), menuItem("SERVICES", tabName = "services", icon = icon("th")), menuItem("QUALIFICATIONS DES METIERS", tabName = "qualifications", icon = icon("th")), menuItem("EMPLOYES", tabName = "employes", icon = icon("th")) ) ), #Le corp de la page dashboardBody( #Les onglets de la page tabItems( #Contenu du premier onglet tabItem(tabName = "total", #On cree une ligne fluidRow( #on rajoute une boite dans laquelle on place un titre et une autre boite contenant un tableau box(h4("Masse salariale totale par etablissement"),tableOutput("total") ) , #Toujours dans la ligne on cree un graphique dans une boite box(highchartOutput("totalplot", height = 300) ) ) ), #Contenu du deuxieme onglet tabItem(tabName = "contrats", #Ici on cree une ligne contenant deux box avec un tableau et un titre fluidRow( box(h4("Masse salariale des CDI par etablissement"),(tableOutput("CDI")) ) , box(h4("Masse salariale des CDD par etablissement"),(tableOutput("CDD")) ) ) , #Puis une autre ligne avec un tableau avec un titre fluidRow( box(h4("Masse salariale des autres types de contrats par etablissement"),tableOutput("reste") ) ) , #On fini la page avec une nouvelle ligne contenant deux box avec un graphique fluidRow( box(highchartOutput("CDIplot", height = 300)), box(highchartOutput("CDDplot", height = 300) ) ) ) , #Contenu du troisieme onglet tabItem(tabName = "services", #Une ligne avec un tableau, un selecteur en liste deroulante et un graphique fluidRow( #Le tableau box(h4("Masse salariale par service"),tableOutput("services") ), #Le selecteur box(selectizeInput("services", "Etablissement", choices = etb) ), #Le graphique box(highchartOutput("servicesplot", height = 500) ) ) ), #Contenu du 
quatrieme onglet tabItem(tabName = "qualifications", #Meme chose que pour le troisieme onglet fluidRow( box(h4("Masse salariale par qualification"),tableOutput("qualifications") ), box(selectizeInput("qualifications", "Etablissement", choices = etb) ), box(highchartOutput("qualificationsplot", height = 500) ) ) ), #Contenu du cinquieme onglet tabItem(tabName = "employes", #On cree une ligne fluidRow( #Une box avec titre avec un tableau box(h4("Masse salariale par employe"), tableOutput("employes") ), #Une box contenant 4 box avec un selecteur box(box(title = "Etablissements", #Un premier selecteur en liste deroulante selectizeInput("employes01", label = "SELECTIONNEZ UN ETABLISSEMENT", choices = (etb) ) ), box(title = "Services", #Un deuxieme selecteur en liste deroulante selectizeInput("employes02", label = "SELECTIONNEZ UN SERVICE", choices = (serv) ) ), box(title = "Nom", #Un troisieme selecteur avec un champ de texte textInput("employes03",label = "ENTREZ UN NOM") ), box(title = "Prenom", #Un quatrieme selecteur avec un champ de texte textInput("employes04",label = "ENTREZ UN PRENOM") ) ) ) ) ) ) ) #On cree la partie serveur de l applicatif #Les variables input et outpout permettent le dialogue entre le serveur et l interface utilisateur server <- function(input, output) { #On transforme le tableau de donnees contenant la masse salariale par etablissement en table output$total <- renderTable(total) #On transforme le tableau de donnees contenant la masse salariale par etablissement en graphique circulaire output$totalplot <- renderHighchart({ highchart() %>% hc_add_series_labels_values(type = "pie",labels = paste(total$Etablissement,': ',total$'Masse salariale',' EUROS <br/>',round(total$`Masse salariale`/sum(total$`Masse salariale`)*100),'%'), values = total$'Masse salariale') %>% hc_title(text = "Masse salariale par etablissement en euros") }) #On transforme les tableaux de donnees contenant la masse salariale par type de contrats et par etablissement en 
tables output$CDI <- renderTable(CDI) output$CDD <- renderTable(CDD) output$reste <- renderTable(reste) #On transforme les tableaux de donnees contenant la masse salariale pour par etablissement pour les CDI et les CDD en graphiques circulaires output$CDIplot <- renderHighchart({ highchart() %>% hc_add_series_labels_values(type = "pie",labels = paste(CDI$Etablissement,': ',CDI$'Masse salariale',' EUROS <br/>',round(CDI$`Masse salariale`/sum(CDI$`Masse salariale`)*100),'%'), values = CDI$'Masse salariale') %>% hc_title(text = "Masse salariale des CDI par etablissement en euros") }) output$CDDplot <- renderHighchart({ highchart() %>% #type correspond au type de graphique #labels correspond a ce que l on souhaite afficher comme descriptif #values correspond aux donnees numeriques #Ici, labels est long car on cherche a afficher le nom de l etablissement avec sa masse salariale et la proportion que cela represente #L operation effectuee pour y arriver est de souder avec paste() le nom du service avec sa masse salariale et a la ligne le pourcentage que cela represente #La balise HTML <br/> est comprise par le package shiny et permet de revenir a la ligne #la fonction round() permet d arrondir un nombre hc_add_series_labels_values(type = "pie",labels = paste(CDD$Etablissement,': ',CDD$'Masse salariale',' EUROS <br/>',round(CDD$`Masse salariale`/sum(CDD$`Masse salariale`)*100),'%'), values = CDD$'Masse salariale') %>% #On donne un titre au graphique hc_title(text = "Masse salariale des CDD par etablissement en euros") }) #On transforme le tableau de donnees contenant la masse salariale par service et selon un etablissement en table #La variable input$services correcpond a l etablissement choisi par le selecteur se nommant services output$services <- renderTable(dbGetQuery(con,paste('Select "LAFFECTATION" as "Service",sum("MONTANT") as "Masse salariale" from table_analyse where "LIB_ETB" = ',input$qualifications,' group by "LAFFECTATION" order by "LAFFECTATION"',sep ="\'", 
collapse = NULL))) #On transforme le tableau de donnees contenant la masse salariale par service et selon un etablissement en fonction #Cela est necessaire pour avoir des graphiques interactifs mais ne fonctionne pas avec les tables comme output$services servicesR <- reactive(dbGetQuery(con,paste('Select "LAFFECTATION" as "Service",sum("MONTANT") as "Masse salariale" from table_analyse where "LIB_ETB" = ',input$qualifications,' group by "LAFFECTATION" order by "LAFFECTATION"',sep ="\'", collapse = NULL))) #On transforme la fonction contenant la masse salariale selon un etablissement en graphique a barres verticales output$servicesplot <- renderHighchart({ highchart() %>% #type correspond au type de graphique #name correspond a ce que l on souhaite afficher comme nom de serie, ici le nom du service #data correspond aux donnees numeriques hc_add_series(type = "column",name = input$services, data = servicesR()$'Masse salariale') %>% #categories correspond a ce que l on souhaite afficher comme descriptif #labels = list(enabled = FALSE) permet de desactiver le nom des colonnes a la base du graphique car cela deviendrait illisible #Ici, categories est long car on cherche a afficher le nom de l etablissement avec sa masse salariale et la proportion que cela represente #L operation effectuee pour y arriver est de souder avec paste() le nom du service avec sa masse salariale et a la ligne le pourcentage que cela represente #La balise HTML <br/> est comprise par le package shiny et permet de revenir a la ligne #la fonction round() permet d arrondir un nombre hc_xAxis(categories = paste(servicesR()$Service,': ',servicesR()$'Masse salariale',' EUROS <br/>',round(servicesR()$`Masse salariale`/sum(servicesR()$`Masse salariale`)*100),'%'), labels = list(enabled = FALSE)) %>% #On donne un titre au graphique hc_title(text = "Repartition de la masse salariale par service en euros") }) #On transforme le tableau de donnees contenant la masse salariale par qualification et selon un 
etablissement en fonction output$qualifications <- renderTable(dbGetQuery(con,paste('Select "LQUALIFICATION" as "Qualifiquation",sum("MONTANT") as "Masse salariale" from table_analyse where "LIB_ETB" = ',input$qualifications,' group by "LQUALIFICATION" order by "LQUALIFICATION"',sep ="\'", collapse = NULL))) #On transforme la fonction contenant la masse salariale par qualification et selon un etablissement en graphique a barres verticales qualificationsR <- reactive(dbGetQuery(con,paste('Select "LQUALIFICATION" as "Qualification",sum("MONTANT") as "Masse salariale" from table_analyse where "LIB_ETB" = ',input$qualifications,' group by "LQUALIFICATION" order by "LQUALIFICATION"',sep ="\'", collapse = NULL))) #On transforme la fonction contenant la masse salariale par qualification et selon un etablissement en graphique a barres verticales output$qualificationsplot <- renderHighchart({ highchart() %>% #type correspond au type de graphique #name correspond a ce que l on souhaite afficher comme nom de serie, ici le nom du service #data correspond aux donnees numeriques hc_add_series(type = "column",name = input$qualifications, data = qualificationsR()$'Masse salariale') %>% #categories correspond a ce que l on souhaite afficher comme descriptif #labels = list(enabled = FALSE) permet de desactiver le nom des colonnes a la base du graphique car cela deviendrait illisible #Ici, categories est long car on cherche a afficher le nom de l etablissement avec sa masse salariale et la proportion que cela represente #L operation effectuee pour y arriver est de souder avec paste() le nom du service avec sa masse salariale et a la ligne le pourcentage que cela represente #La balise HTML <br/> est comprise par le package shiny et permet de revenir a la ligne #la fonction round() permet d arrondir un nombre hc_xAxis(categories = paste(qualificationsR()$Qualification,': ',qualificationsR()$'Masse salariale',' EUROS <br/>',round(qualificationsR()$`Masse 
salariale`/sum(qualificationsR()$`Masse salariale`)*100),'%'), labels = list(enabled = FALSE)) %>% #On donne un titre au graphique hc_title(text = "Repartition de la masse salariale par qualification en euros") }) #On transforme le tableau de donnees contenant la masse salariale par service et par etablissement en tableau de donnees exploitable par le package shiny #on place des conditions if pour que la requette se fasse si aucun nom ou prenom n est entre par l utilisateur #la fonction toupper() passe du texe en majuscule ce qui permet a la saisie de ne pas etre sensible a la casse output$employes <- renderTable(dbGetQuery(con,paste('Select "MATRICULE", "NOM", "PRENOM", sum("MONTANT") as "Masse salariale" from table_analyse where "LIB_ETB" = ',input$employes01,' and "LAFFECTATION" = ',input$employes02,if (!is.null(input$employes03)){paste(' and "NOM" like ',paste(toupper(input$employes03),'%',sep = "",collapse = NULL),sep = "\'",collapse = NULL)},if (!is.null(input$employes04)){paste(' and "PRENOM" like ',paste(toupper(input$employes04),'%',sep = "",collapse = NULL),sep = "\'",collapse = NULL)},' group by "MATRICULE", "NOM", "PRENOM" order by "MATRICULE", "NOM", "PRENOM"',sep ="\'", collapse = NULL))) } #On execute l applicatif shinyApp(ui, server)
/tdb_analyse.R
no_license
emonthieux/Groupe-3H
R
false
false
15,810
r
## app.R ## #chargement des packages necessaires pour l'applicatif #Interface du tableau de bord library(shinydashboard) #connection a la base de donnee library(RPostgreSQL) #Interface avec la base de donnee library(DBI) #graphiques library(highcharter) #connection a la base de donnees drv<-dbDriver("PostgreSQL") #parametres de la connection : db name est le nom de la base de donnees #host est l IP de connection #port est le port de connection de la base de donnees #user est l identifiant de connection a la base de donnee #password est le mot de passe de l identifiant de connection a la base de donnee con <- dbConnect(drv, dbname = "DB", host = "localhost", port = 5432, user = "XXX", password = "XXX") #chargement de la liste des etablissements (etb) et des differents services (serv) etb <- dbGetQuery(con,'Select distinct("LIB_ETB") as "ETABLISEMENT" from table_analyse order by "LIB_ETB"') serv <- dbGetQuery(con,'Select distinct("LAFFECTATION") as "AFFECTATION" from table_analyse order by "LAFFECTATION"') #chargement de la masse salariale par etablissement total <- dbGetQuery(con,'Select "LIB_ETB" as "Etablissement", SUM("MONTANT") as "Masse salariale" from table_analyse Group By "LIB_ETB" order by "LIB_ETB"') #chargement de la masse salariale par CDI, CDD et autres types de contrats CDI <- dbGetQuery(con,'Select "LIB_ETB" as "Etablissement", SUM("MONTANT") as "Masse salariale" from table_analyse where "CNATURECONTRAT" = \'CDI\' Group By "LIB_ETB" order by "LIB_ETB"') CDD <- dbGetQuery(con,'Select "LIB_ETB" as "Etablissement", SUM("MONTANT") as "Masse salariale" from table_analyse where "CNATURECONTRAT" = \'CDD\' Group By "LIB_ETB" order by "LIB_ETB"') reste <- dbGetQuery(con,'Select "LIB_ETB" as "Etablissement", SUM("MONTANT") as "Masse salariale" from table_analyse where "CNATURECONTRAT" != \'CDI\' and "CNATURECONTRAT" != \'CDD\' Group By "LIB_ETB" order by "LIB_ETB"') #creation de l interface utilisateur. 
#On cree une page ui <- dashboardPage( #L en tete de la page dashboardHeader(title = "Tableau de bord"), #La barre de selection de la page dashboardSidebar( sidebarMenu( menuItem("MASSE SALARIALE", tabName = "total", icon = icon("th")), menuItem("TYPES DE CONTRATS", tabName = "contrats", icon = icon("th")), menuItem("SERVICES", tabName = "services", icon = icon("th")), menuItem("QUALIFICATIONS DES METIERS", tabName = "qualifications", icon = icon("th")), menuItem("EMPLOYES", tabName = "employes", icon = icon("th")) ) ), #Le corp de la page dashboardBody( #Les onglets de la page tabItems( #Contenu du premier onglet tabItem(tabName = "total", #On cree une ligne fluidRow( #on rajoute une boite dans laquelle on place un titre et une autre boite contenant un tableau box(h4("Masse salariale totale par etablissement"),tableOutput("total") ) , #Toujours dans la ligne on cree un graphique dans une boite box(highchartOutput("totalplot", height = 300) ) ) ), #Contenu du deuxieme onglet tabItem(tabName = "contrats", #Ici on cree une ligne contenant deux box avec un tableau et un titre fluidRow( box(h4("Masse salariale des CDI par etablissement"),(tableOutput("CDI")) ) , box(h4("Masse salariale des CDD par etablissement"),(tableOutput("CDD")) ) ) , #Puis une autre ligne avec un tableau avec un titre fluidRow( box(h4("Masse salariale des autres types de contrats par etablissement"),tableOutput("reste") ) ) , #On fini la page avec une nouvelle ligne contenant deux box avec un graphique fluidRow( box(highchartOutput("CDIplot", height = 300)), box(highchartOutput("CDDplot", height = 300) ) ) ) , #Contenu du troisieme onglet tabItem(tabName = "services", #Une ligne avec un tableau, un selecteur en liste deroulante et un graphique fluidRow( #Le tableau box(h4("Masse salariale par service"),tableOutput("services") ), #Le selecteur box(selectizeInput("services", "Etablissement", choices = etb) ), #Le graphique box(highchartOutput("servicesplot", height = 500) ) ) ), #Contenu du 
quatrieme onglet tabItem(tabName = "qualifications", #Meme chose que pour le troisieme onglet fluidRow( box(h4("Masse salariale par qualification"),tableOutput("qualifications") ), box(selectizeInput("qualifications", "Etablissement", choices = etb) ), box(highchartOutput("qualificationsplot", height = 500) ) ) ), #Contenu du cinquieme onglet tabItem(tabName = "employes", #On cree une ligne fluidRow( #Une box avec titre avec un tableau box(h4("Masse salariale par employe"), tableOutput("employes") ), #Une box contenant 4 box avec un selecteur box(box(title = "Etablissements", #Un premier selecteur en liste deroulante selectizeInput("employes01", label = "SELECTIONNEZ UN ETABLISSEMENT", choices = (etb) ) ), box(title = "Services", #Un deuxieme selecteur en liste deroulante selectizeInput("employes02", label = "SELECTIONNEZ UN SERVICE", choices = (serv) ) ), box(title = "Nom", #Un troisieme selecteur avec un champ de texte textInput("employes03",label = "ENTREZ UN NOM") ), box(title = "Prenom", #Un quatrieme selecteur avec un champ de texte textInput("employes04",label = "ENTREZ UN PRENOM") ) ) ) ) ) ) ) #On cree la partie serveur de l applicatif #Les variables input et outpout permettent le dialogue entre le serveur et l interface utilisateur server <- function(input, output) { #On transforme le tableau de donnees contenant la masse salariale par etablissement en table output$total <- renderTable(total) #On transforme le tableau de donnees contenant la masse salariale par etablissement en graphique circulaire output$totalplot <- renderHighchart({ highchart() %>% hc_add_series_labels_values(type = "pie",labels = paste(total$Etablissement,': ',total$'Masse salariale',' EUROS <br/>',round(total$`Masse salariale`/sum(total$`Masse salariale`)*100),'%'), values = total$'Masse salariale') %>% hc_title(text = "Masse salariale par etablissement en euros") }) #On transforme les tableaux de donnees contenant la masse salariale par type de contrats et par etablissement en 
tables output$CDI <- renderTable(CDI) output$CDD <- renderTable(CDD) output$reste <- renderTable(reste) #On transforme les tableaux de donnees contenant la masse salariale pour par etablissement pour les CDI et les CDD en graphiques circulaires output$CDIplot <- renderHighchart({ highchart() %>% hc_add_series_labels_values(type = "pie",labels = paste(CDI$Etablissement,': ',CDI$'Masse salariale',' EUROS <br/>',round(CDI$`Masse salariale`/sum(CDI$`Masse salariale`)*100),'%'), values = CDI$'Masse salariale') %>% hc_title(text = "Masse salariale des CDI par etablissement en euros") }) output$CDDplot <- renderHighchart({ highchart() %>% #type correspond au type de graphique #labels correspond a ce que l on souhaite afficher comme descriptif #values correspond aux donnees numeriques #Ici, labels est long car on cherche a afficher le nom de l etablissement avec sa masse salariale et la proportion que cela represente #L operation effectuee pour y arriver est de souder avec paste() le nom du service avec sa masse salariale et a la ligne le pourcentage que cela represente #La balise HTML <br/> est comprise par le package shiny et permet de revenir a la ligne #la fonction round() permet d arrondir un nombre hc_add_series_labels_values(type = "pie",labels = paste(CDD$Etablissement,': ',CDD$'Masse salariale',' EUROS <br/>',round(CDD$`Masse salariale`/sum(CDD$`Masse salariale`)*100),'%'), values = CDD$'Masse salariale') %>% #On donne un titre au graphique hc_title(text = "Masse salariale des CDD par etablissement en euros") }) #On transforme le tableau de donnees contenant la masse salariale par service et selon un etablissement en table #La variable input$services correcpond a l etablissement choisi par le selecteur se nommant services output$services <- renderTable(dbGetQuery(con,paste('Select "LAFFECTATION" as "Service",sum("MONTANT") as "Masse salariale" from table_analyse where "LIB_ETB" = ',input$qualifications,' group by "LAFFECTATION" order by "LAFFECTATION"',sep ="\'", 
collapse = NULL))) #On transforme le tableau de donnees contenant la masse salariale par service et selon un etablissement en fonction #Cela est necessaire pour avoir des graphiques interactifs mais ne fonctionne pas avec les tables comme output$services servicesR <- reactive(dbGetQuery(con,paste('Select "LAFFECTATION" as "Service",sum("MONTANT") as "Masse salariale" from table_analyse where "LIB_ETB" = ',input$qualifications,' group by "LAFFECTATION" order by "LAFFECTATION"',sep ="\'", collapse = NULL))) #On transforme la fonction contenant la masse salariale selon un etablissement en graphique a barres verticales output$servicesplot <- renderHighchart({ highchart() %>% #type correspond au type de graphique #name correspond a ce que l on souhaite afficher comme nom de serie, ici le nom du service #data correspond aux donnees numeriques hc_add_series(type = "column",name = input$services, data = servicesR()$'Masse salariale') %>% #categories correspond a ce que l on souhaite afficher comme descriptif #labels = list(enabled = FALSE) permet de desactiver le nom des colonnes a la base du graphique car cela deviendrait illisible #Ici, categories est long car on cherche a afficher le nom de l etablissement avec sa masse salariale et la proportion que cela represente #L operation effectuee pour y arriver est de souder avec paste() le nom du service avec sa masse salariale et a la ligne le pourcentage que cela represente #La balise HTML <br/> est comprise par le package shiny et permet de revenir a la ligne #la fonction round() permet d arrondir un nombre hc_xAxis(categories = paste(servicesR()$Service,': ',servicesR()$'Masse salariale',' EUROS <br/>',round(servicesR()$`Masse salariale`/sum(servicesR()$`Masse salariale`)*100),'%'), labels = list(enabled = FALSE)) %>% #On donne un titre au graphique hc_title(text = "Repartition de la masse salariale par service en euros") }) #On transforme le tableau de donnees contenant la masse salariale par qualification et selon un 
etablissement en fonction output$qualifications <- renderTable(dbGetQuery(con,paste('Select "LQUALIFICATION" as "Qualifiquation",sum("MONTANT") as "Masse salariale" from table_analyse where "LIB_ETB" = ',input$qualifications,' group by "LQUALIFICATION" order by "LQUALIFICATION"',sep ="\'", collapse = NULL))) #On transforme la fonction contenant la masse salariale par qualification et selon un etablissement en graphique a barres verticales qualificationsR <- reactive(dbGetQuery(con,paste('Select "LQUALIFICATION" as "Qualification",sum("MONTANT") as "Masse salariale" from table_analyse where "LIB_ETB" = ',input$qualifications,' group by "LQUALIFICATION" order by "LQUALIFICATION"',sep ="\'", collapse = NULL))) #On transforme la fonction contenant la masse salariale par qualification et selon un etablissement en graphique a barres verticales output$qualificationsplot <- renderHighchart({ highchart() %>% #type correspond au type de graphique #name correspond a ce que l on souhaite afficher comme nom de serie, ici le nom du service #data correspond aux donnees numeriques hc_add_series(type = "column",name = input$qualifications, data = qualificationsR()$'Masse salariale') %>% #categories correspond a ce que l on souhaite afficher comme descriptif #labels = list(enabled = FALSE) permet de desactiver le nom des colonnes a la base du graphique car cela deviendrait illisible #Ici, categories est long car on cherche a afficher le nom de l etablissement avec sa masse salariale et la proportion que cela represente #L operation effectuee pour y arriver est de souder avec paste() le nom du service avec sa masse salariale et a la ligne le pourcentage que cela represente #La balise HTML <br/> est comprise par le package shiny et permet de revenir a la ligne #la fonction round() permet d arrondir un nombre hc_xAxis(categories = paste(qualificationsR()$Qualification,': ',qualificationsR()$'Masse salariale',' EUROS <br/>',round(qualificationsR()$`Masse 
salariale`/sum(qualificationsR()$`Masse salariale`)*100),'%'), labels = list(enabled = FALSE)) %>% #On donne un titre au graphique hc_title(text = "Repartition de la masse salariale par qualification en euros") }) #On transforme le tableau de donnees contenant la masse salariale par service et par etablissement en tableau de donnees exploitable par le package shiny #on place des conditions if pour que la requette se fasse si aucun nom ou prenom n est entre par l utilisateur #la fonction toupper() passe du texe en majuscule ce qui permet a la saisie de ne pas etre sensible a la casse output$employes <- renderTable(dbGetQuery(con,paste('Select "MATRICULE", "NOM", "PRENOM", sum("MONTANT") as "Masse salariale" from table_analyse where "LIB_ETB" = ',input$employes01,' and "LAFFECTATION" = ',input$employes02,if (!is.null(input$employes03)){paste(' and "NOM" like ',paste(toupper(input$employes03),'%',sep = "",collapse = NULL),sep = "\'",collapse = NULL)},if (!is.null(input$employes04)){paste(' and "PRENOM" like ',paste(toupper(input$employes04),'%',sep = "",collapse = NULL),sep = "\'",collapse = NULL)},' group by "MATRICULE", "NOM", "PRENOM" order by "MATRICULE", "NOM", "PRENOM"',sep ="\'", collapse = NULL))) } #On execute l applicatif shinyApp(ui, server)
# A test that mr_mash_simple also works for univariate linear # regression (i.e., r = 1). library(mvtnorm) library(varbvs) library(Rcpp) source("../code/misc.R") source("../code/bayes_mvr.R") source("../code/mr_mash_simple.R") sourceCpp("../code/mr_mash.cpp",verbose = TRUE) # SCRIPT PARAMETERS # ----------------- # Number of samples (n) and number of predictors (p). n <- 500 p <- 20 # True effects used to simulate the data. b <- c(-2,1,rep(0,p - 2)) # Variances in the mixture-of-normals prior on the regression # coefficients. s0 <- list(k1 = 1e-10,k2 = 4,k3 = 6,k4 = 5) # The mixture weights in the mixture-of-normals prior on the # regression coefficients. w0 <- c(0.1,0.6,0.2,0.1) k <- length(w0) # SIMULATE DATA # ------------- set.seed(1) X <- matrix(rnorm(n*p),n,p) X <- scale(X,scale = FALSE) # Simulate Y ~ MN(X*B,I,V). Note that matrix.normal from the MBSP # package appears to be much faster than rmatrixnorm from the # MixMatrix package. y <- drop(X %*% b + rnorm(n)) y <- y - mean(y) # FIT MR-MASH MODEL # ----------------- # Run 20 co-ordinate ascent updates. b0 <- rep(0,p) fit1 <- mr_mash_simple(X,y,1,s0,w0,b0,20) # Compare the posterior mean estimates of the regression coefficients # against the coefficients used to simulate the data. plot(b,fit1$B,pch = 20,xlab = "true",ylab = "estimated") abline(a = 0,b = 1,col = "skyblue",lty = "dotted") # Test the C++ code. fit2 <- mr_mash_simple(X,y,1,s0,w0,b0,20,version = "Rcpp") print(range(fit1$B - fit2$B)) # FIR VARBVSMIX MODEL # ------------------- s0 <- unlist(s0) s0[1] <- 0 out <- varbvsmix(X,NULL,y,s0,1,w0,matrix(0,p,k),matrix(0,p,k), update.sigma = FALSE,update.sa = FALSE,update.w = FALSE, maxiter = 20,tol = 0,drop.threshold = 0,verbose = FALSE) b <- with(out,rowSums(alpha * mu)) # Should be close to zero. print(max(abs(fit1$B - b)))
/scripts/demo_mr_mash_simple_univ.R
no_license
pcarbo/mr-mash
R
false
false
1,870
r
# A test that mr_mash_simple also works for univariate linear # regression (i.e., r = 1). library(mvtnorm) library(varbvs) library(Rcpp) source("../code/misc.R") source("../code/bayes_mvr.R") source("../code/mr_mash_simple.R") sourceCpp("../code/mr_mash.cpp",verbose = TRUE) # SCRIPT PARAMETERS # ----------------- # Number of samples (n) and number of predictors (p). n <- 500 p <- 20 # True effects used to simulate the data. b <- c(-2,1,rep(0,p - 2)) # Variances in the mixture-of-normals prior on the regression # coefficients. s0 <- list(k1 = 1e-10,k2 = 4,k3 = 6,k4 = 5) # The mixture weights in the mixture-of-normals prior on the # regression coefficients. w0 <- c(0.1,0.6,0.2,0.1) k <- length(w0) # SIMULATE DATA # ------------- set.seed(1) X <- matrix(rnorm(n*p),n,p) X <- scale(X,scale = FALSE) # Simulate Y ~ MN(X*B,I,V). Note that matrix.normal from the MBSP # package appears to be much faster than rmatrixnorm from the # MixMatrix package. y <- drop(X %*% b + rnorm(n)) y <- y - mean(y) # FIT MR-MASH MODEL # ----------------- # Run 20 co-ordinate ascent updates. b0 <- rep(0,p) fit1 <- mr_mash_simple(X,y,1,s0,w0,b0,20) # Compare the posterior mean estimates of the regression coefficients # against the coefficients used to simulate the data. plot(b,fit1$B,pch = 20,xlab = "true",ylab = "estimated") abline(a = 0,b = 1,col = "skyblue",lty = "dotted") # Test the C++ code. fit2 <- mr_mash_simple(X,y,1,s0,w0,b0,20,version = "Rcpp") print(range(fit1$B - fit2$B)) # FIR VARBVSMIX MODEL # ------------------- s0 <- unlist(s0) s0[1] <- 0 out <- varbvsmix(X,NULL,y,s0,1,w0,matrix(0,p,k),matrix(0,p,k), update.sigma = FALSE,update.sa = FALSE,update.w = FALSE, maxiter = 20,tol = 0,drop.threshold = 0,verbose = FALSE) b <- with(out,rowSums(alpha * mu)) # Should be close to zero. print(max(abs(fit1$B - b)))
library(dplyr) # this packages is use for data preparation (edit, remove, mutate, etc) library(stringr) # all functions deal with "NA"'s and zero length vectors library(purrr) # requirement packages for Functional Programming Tools library(rlang) # requirement packages for Rmarkdown library(DT) # interface to the JavaScript library DataTables (https://datatables.net/) library(r2d3) # D3 visualization library(shinydashboard) library(shiny) library(readxl) library(plotly) library(leaflet) library(leaflet.providers) library(rgdal) library(sf) library(viridis) library(hrbrthemes) library(htmltools) jmlkejahatan.res <- read_excel("jumlahkejahatan.xlsx") jmlkejahatan <- jmlkejahatan.res[-12] year_list <- jmlkejahatan %>% select(tahun) %>% unique() year_list <- as.list(c(year_list$tahun, "all_year")) %>% set_names(2017, 2018, 2019, "Semua") crime_list <- as.list(c(colnames(jmlkejahatan)[-c(1, 2)], "all_crime")) %>% set_names("Kejahatan terhadap Nyawa", "Kejahatan terhadap Fisik/ Badan", "Kejahatan terhadap Kesusilaan", "Kejahatan terhadap Kemerdekaan Orang", "Kejahatan terhadap Hak Milik/ Barang dengan Penggunaan Kekerasan", "Kejahatan terhadap Hak Milik/ Barang", "Kejahatan Terkait Narkotika", "Kejahatan Terkait Penipuan, Penggelapan, dan Korupsi", "Kejahatan terhadap Ketertiban Umum", "Semua") #MAP data2<-readOGR(dsn=".",layer="INDO_PROV_2016") ## FILTER 2018 DAN UBAH PROVINSI JADI HURUF BESAR BIAR BISA DI JOIN jumlahKejahatan2018<-jmlkejahatan.res %>%filter(tahun==2019) jumlahKejahatan2018$provinsi<-toupper(jumlahKejahatan2018$provinsi) data2018<-merge(data2,jumlahKejahatan2018,by.x="PROVINSI",by.y="provinsi") labels <- sprintf( "<strong>%s</strong><br/>%g Jumlah Kejahatan ", data2018@data$PROVINSI, data2018@data$total ) %>% lapply(htmltools::HTML) dt <- read_excel("jumlahkejahatan.xlsx", sheet = "Sheet2")
/global.R
no_license
rizkinazhif/Dashboard-Kelompok-7
R
false
false
2,124
r
library(dplyr) # this packages is use for data preparation (edit, remove, mutate, etc) library(stringr) # all functions deal with "NA"'s and zero length vectors library(purrr) # requirement packages for Functional Programming Tools library(rlang) # requirement packages for Rmarkdown library(DT) # interface to the JavaScript library DataTables (https://datatables.net/) library(r2d3) # D3 visualization library(shinydashboard) library(shiny) library(readxl) library(plotly) library(leaflet) library(leaflet.providers) library(rgdal) library(sf) library(viridis) library(hrbrthemes) library(htmltools) jmlkejahatan.res <- read_excel("jumlahkejahatan.xlsx") jmlkejahatan <- jmlkejahatan.res[-12] year_list <- jmlkejahatan %>% select(tahun) %>% unique() year_list <- as.list(c(year_list$tahun, "all_year")) %>% set_names(2017, 2018, 2019, "Semua") crime_list <- as.list(c(colnames(jmlkejahatan)[-c(1, 2)], "all_crime")) %>% set_names("Kejahatan terhadap Nyawa", "Kejahatan terhadap Fisik/ Badan", "Kejahatan terhadap Kesusilaan", "Kejahatan terhadap Kemerdekaan Orang", "Kejahatan terhadap Hak Milik/ Barang dengan Penggunaan Kekerasan", "Kejahatan terhadap Hak Milik/ Barang", "Kejahatan Terkait Narkotika", "Kejahatan Terkait Penipuan, Penggelapan, dan Korupsi", "Kejahatan terhadap Ketertiban Umum", "Semua") #MAP data2<-readOGR(dsn=".",layer="INDO_PROV_2016") ## FILTER 2018 DAN UBAH PROVINSI JADI HURUF BESAR BIAR BISA DI JOIN jumlahKejahatan2018<-jmlkejahatan.res %>%filter(tahun==2019) jumlahKejahatan2018$provinsi<-toupper(jumlahKejahatan2018$provinsi) data2018<-merge(data2,jumlahKejahatan2018,by.x="PROVINSI",by.y="provinsi") labels <- sprintf( "<strong>%s</strong><br/>%g Jumlah Kejahatan ", data2018@data$PROVINSI, data2018@data$total ) %>% lapply(htmltools::HTML) dt <- read_excel("jumlahkejahatan.xlsx", sheet = "Sheet2")
library(shiny) library(shinydashboard) library(dplyr) library(data.table) library(wrapr) library(ggplot2) # Define UI for application that draws a histogram ui <- dashboardPage( dashboardHeader(title = "Historical Player Performance Trends"), dashboardSidebar(), dashboardBody( fluidRow( #box(plotOutput("rcViolinAggregatesPlots",height = 250)), box( title = "Player Selection Filters", checkboxGroupInput("posSelection","Player Position(s) Selected", choices = c("P"="P","C"="C","1B"="1B","2B"="2B","3B"="3B","SS"="SS","OF"="OF")) # checkboxGroupInput("posSelection","Player Position(s) Selected",c("SS"="SS","1B"="1B")) #sliderInput("periodBegin","Beginning of Historical Period","minYear","maxYear","middleYear"), #sliderInput("periodEnd","End of Historical Period","minYear","maxYear","middleYear") ) ), fluidRow( box( plotOutput("perf_violins_yoe",width = "200%") ) ) ) ) # Define server logic required to draw a histogram server <- function(input, output, session) { # prep batting_enriched table # form posList, pass to output allPositions <- na.omit(unique(batting_enriched$primaryPos)) # output$posList <- allPositions#named_map_builder(allPositions,allPositions) posListMap = list() for (p in allPositions) { posListMap[[p]]=p } # output$posListMap <- renderValueBox(posListMap) # get minYear, middleYear, maxYear variables, pass to output minYear <- min(batting_enriched$yearID,na.rm = TRUE) maxYear <- max(batting_enriched$yearID,na.rm = TRUE) middleYear <- round(mean(c(minYear,maxYear))) # output$minYear <- minYear # output$maxYear <- maxYear # output$middleYear <- middleYear ## make violin plots of performance by age for the position(s) chosen output$perf_violins_yoe <- renderPlot( ggplot(batting_enriched %>% filter(primaryPos %in% input$posSelection) %>% mutate(YOE=as.factor(YOE)),aes(YOE,RC)) + geom_violin() ) } # Run the application shinyApp(ui = ui, server = server)
/Shiny/aggregatePlayerViz/app.R
no_license
tlortz/BaseballR
R
false
false
2,105
r
library(shiny) library(shinydashboard) library(dplyr) library(data.table) library(wrapr) library(ggplot2) # Define UI for application that draws a histogram ui <- dashboardPage( dashboardHeader(title = "Historical Player Performance Trends"), dashboardSidebar(), dashboardBody( fluidRow( #box(plotOutput("rcViolinAggregatesPlots",height = 250)), box( title = "Player Selection Filters", checkboxGroupInput("posSelection","Player Position(s) Selected", choices = c("P"="P","C"="C","1B"="1B","2B"="2B","3B"="3B","SS"="SS","OF"="OF")) # checkboxGroupInput("posSelection","Player Position(s) Selected",c("SS"="SS","1B"="1B")) #sliderInput("periodBegin","Beginning of Historical Period","minYear","maxYear","middleYear"), #sliderInput("periodEnd","End of Historical Period","minYear","maxYear","middleYear") ) ), fluidRow( box( plotOutput("perf_violins_yoe",width = "200%") ) ) ) ) # Define server logic required to draw a histogram server <- function(input, output, session) { # prep batting_enriched table # form posList, pass to output allPositions <- na.omit(unique(batting_enriched$primaryPos)) # output$posList <- allPositions#named_map_builder(allPositions,allPositions) posListMap = list() for (p in allPositions) { posListMap[[p]]=p } # output$posListMap <- renderValueBox(posListMap) # get minYear, middleYear, maxYear variables, pass to output minYear <- min(batting_enriched$yearID,na.rm = TRUE) maxYear <- max(batting_enriched$yearID,na.rm = TRUE) middleYear <- round(mean(c(minYear,maxYear))) # output$minYear <- minYear # output$maxYear <- maxYear # output$middleYear <- middleYear ## make violin plots of performance by age for the position(s) chosen output$perf_violins_yoe <- renderPlot( ggplot(batting_enriched %>% filter(primaryPos %in% input$posSelection) %>% mutate(YOE=as.factor(YOE)),aes(YOE,RC)) + geom_violin() ) } # Run the application shinyApp(ui = ui, server = server)
####################### ## FraseRDataSet ## ==================== #' FraseRDataSet #' #' This class is designed to store the whole FraseR data set #' needed for an analysis of a disease cohort #' #' @author Christian Mertes \email{mertes@@in.tum.de} setClass("FraseRDataSet", contains="RangedSummarizedExperiment", slots = list( name = "character", method = "character", parallel = "BiocParallelParam", bamParam = "ScanBamParam", strandSpecific = "logical", workingDir = "character", nonSplicedReads = "RangedSummarizedExperiment" ), prototype = list( name = "Data Analysis", method = "betaBin", parallel = SerialParam(), bamParam = ScanBamParam(mapqFilter=0), strandSpecific = FALSE, workingDir = file.path(Sys.getenv("HOME"), "FraseR"), nonSplicedReads = SummarizedExperiment(rowRanges=GRanges()) ) ) ## Validity ## ======== # # check sample annotation within the colData slot of the SE object # validateSampleAnnotation <- function(object) { sampleData <- as.data.table(colData(object)) if(!"sampleID" %in% colnames(sampleData)){ return("Please provide a 'sampleID' column with a ID for each sample.") } if(any(duplicated(sampleData$sampleID))){ return("The 'sampleID' column needs to be unique.") } if(!any("bamFile" %in% colnames(sampleData))){ return("Please provide a 'bamFile' column.") } if(any(samples(object) != rownames(colData(object)))){ return("Please set the rownames of your colData to the sampleIDs") } NULL } validateName <- function(object){ if(!isScalarCharacter(object@name)){ return("The name of the experiment should be of type 'character'.") } if(length(object@name) == 0){ return("The experiment name can not be empty.") } if(!grep("^[a-zA-Z0-9 ._-]+$", object@name, perl=TRUE)){ return(paste("For readabilty the name of the experiment should only ", "contain the following characters: 'a-zA-Z0-9 ._-'" )) } NULL } validateMethod <- function(object) { validMethods <- c("Fisher", "betaBin", "DESeq2", "Martin") if(!isScalarCharacter(object@method) || !object@method %in% 
validMethods) { return(paste0("The selected method must be one of the following: ", paste(validMethods, collapse=", "), "." )) } NULL } validateParallel <- function(object) { if(!is(object@parallel, "BiocParallelParam")) { return("The 'parallel' option must be a BiocParallelParam object.") } NULL } validateBamParam <- function(object) { if(class(scanBamParam(object)) != "ScanBamParam") { return("The 'bamParam' option must be a ScanBamParam object.") } NULL } validateStrandSpecific <- function(object) { if(!isScalarLogical(object@strandSpecific)) { return(paste("The 'strandSpecific' option must be 0L, 1L or 2L.")) } NULL } validateWorkingDir <- function(object) { if(!isScalarCharacter(object@workingDir)){ return(paste("The path to the working directory needs", "to be a set as a character." )) } if(object@workingDir == ""){ return("The working directory can not be empty.") } if(!dir.exists(object@workingDir)){ message(date(), ": The given working directory '", object@workingDir, "' does not exists. We will create it." ) dir.create(object@workingDir, recursive = TRUE) } if(file.access(object@workingDir, mode = 4) != 0){ return(paste("Make sure we can write to the given working directory '", object@workingDir, "'." )) } NULL } validateNonSplicedReadsType <- function(object) { if(class(object@nonSplicedReads) != "RangedSummarizedExperiment") { return("'nonSplicedReads' must be a RangedSummarizedExperiment object") } if(length(object) != 0 && dim(object@nonSplicedReads)[2] != dim(object)[2]){ return("The NSR dimensions are not correct. This is a internal error!") } ans <- validObject(object@nonSplicedReads) if(!isScalarLogical(ans) || ans == FALSE){ return(ans) } NULL } validateAssays <- function(object){ if(length(assayNames(object)) > 1){ if(any(duplicated(assayNames(object)))){ return( "FraseR enforces unique assay names! Please provie such names." 
) } } NULL } ## general validate function validateFraseRDataSet <- function(object) { c( validateSampleAnnotation(object), validateName(object), validateMethod(object), validateParallel(object), validateBamParam(object), validateStrandSpecific(object), validateWorkingDir(object), validateNonSplicedReadsType(object), validateAssays(object) ) } setValidity("FraseRDataSet", validateFraseRDataSet) ## Cosmetics (the show function) ## ============================= ## show method for FraseRDataSet showFraseRDataSet <- function(object) { if(dim(object)[2] < 1){ cat("This Fraser object does not contain any sample! Please add one.") return() } # taken from SummarizedExperiment show function scat <- function(fmt, vals=character(), exdent=2, ...){ vals <- ifelse(nzchar(vals), vals, "''") lbls <- paste(S4Vectors:::selectSome(vals), collapse=" ") txt <- sprintf(fmt, length(vals), lbls) cat(strwrap(txt, exdent=exdent, ...), sep="\n") } cat("-------------------- Sample data table -----------------\n") sampleData <- as.data.table(colData(object)) if(all(sapply(sampleData$bamFile, isScalarCharacter))){ sampleData$bamFile <- gsub("... 
[^/]+/", ".../", sapply(sampleData$bamFile, function(str){ if(nchar(str) <= 29) return(str) paste("...", substr(str, nchar(str) - 25, nchar(str))) }) ) } show(as_tibble(sampleData)) cat("\n") if(length(object) > 0){ cat(paste0("Number of samples: ", dim(object)[2]), "\n") cat(paste0("Number of junctions: ", length(object)), "\n") cat(paste0("Number of splice sites: ", length(nonSplicedReads(object))), "\n") scat("assays(%d): %s\n", assayNames(object)) cat("\n") } cat("----------------------- Settings -----------------------\n") cat(paste0("Analysis name: ", name(object)), "\n") cat(paste0("Statistical method: ", method(object)), "\n") cat(paste0("Analysis is strand specific: ", strandSpecific(object)), "\n") cat(paste0("Working directory: '", workingDir(object), "'"), "\n") cat("\n") cat("-------------------- Parallel backend ------------------\n") # show(parallel(object)) cat(paste0("Type: ", as.character(class(parallel(object))), "\tWorkers: ", bpworkers(parallel(object)), "\tTasks: ", bptasks(parallel(object)) )) cat("\n\n") cat("-------------------- BAM parameters --------------------\n") if(identical(scanBamParam(FraseRDataSet()), scanBamParam(object))){ cat(paste0("Default used with: ", "bamMapqFilter=", bamMapqFilter(scanBamParam(object)) )) } else { show(scanBamParam(object)) } cat("\n\n") } setMethod("show", "FraseRDataSet", function(object) { showFraseRDataSet(object) }) ## Constructor ## ========== #' #' The FraseR dataset object #' #' Constructs an FraseR object based on the given input. It can take only the #' annotation (colData) or count tables (junctions/spliceSites). #' #' @param colData A DataFrame containing the annotation of the samples #' @param junctions A matrix like object containing the raw counts for each #' junction. It requires the \code{start} and the \code{endID} #' column that identifies the corresponding splice site for #' the given junction. 
#' @param spliceSites A matrix like object containing the raw counts for each #' splice site. it requires the \code{spliceSiteID} and the #' \code{type} column that gives the ID and the type of the #' given splice site. The ID maps back to the junction. #' @param ... Any parameters corresponding to the slots and their possible #' values. See \linkS4class{FraseRDataSet} #' @return A FraseRDataSet object. #' @author Christian Mertes \email{mertes@@in.tum.de} #' @export #' @examples #' fraser <- FraseRDataSet() #' fraser <- countRNAData(createTestFraseRSettings()) FraseRDataSet <- function(colData=NULL, junctions=NULL, spliceSites=NULL, ...) { if(!is.null(colData)){ if(is.data.table(colData)){ colData <- DataFrame(colData) } if(is.null(rownames(colData))){ rownames(colData) <- colData[['sampleID']] } if(is.null(junctions) & is.null(spliceSites)) return(new("FraseRDataSet", colData=colData, ...)) if(is.null(junctions)){ stop("Please provide junctions counts if you provide ", "spliceSite counts.") } if(is.null(spliceSites)){ stop("Please provdie splice site counts if you provide ", "junction counts.") } if(is.data.frame(junctions)){ junctions <- makeGRangesFromDataFrame(junctions, keep.extra.columns=TRUE) } if(is.data.frame(spliceSites)){ spliceSites <- makeGRangesFromDataFrame(spliceSites, keep.extra.columns=TRUE) } nsr <- SummarizedExperiment( rowRanges=spliceSites[,c("spliceSiteID", "type")], assays=SimpleList(rawCountsSS=as.data.frame( mcols(spliceSites)[colData[,"sampleID"]]), a=NULL)[1]) se <- SummarizedExperiment( rowRanges=junctions[,c("startID", "endID")], colData=colData, assays=SimpleList(rawCountsJ=as.data.frame( mcols(junctions)[colData[,"sampleID"]]), a=NULL)[1]) return(new("FraseRDataSet", se, nonSplicedReads=nsr, ...)) } return(new("FraseRDataSet", ...)) }
/R/FraseRDataSet-class.R
no_license
ischeller/FraseR
R
false
false
10,494
r
####################### ## FraseRDataSet ## ==================== #' FraseRDataSet #' #' This class is designed to store the whole FraseR data set #' needed for an analysis of a disease cohort #' #' @author Christian Mertes \email{mertes@@in.tum.de} setClass("FraseRDataSet", contains="RangedSummarizedExperiment", slots = list( name = "character", method = "character", parallel = "BiocParallelParam", bamParam = "ScanBamParam", strandSpecific = "logical", workingDir = "character", nonSplicedReads = "RangedSummarizedExperiment" ), prototype = list( name = "Data Analysis", method = "betaBin", parallel = SerialParam(), bamParam = ScanBamParam(mapqFilter=0), strandSpecific = FALSE, workingDir = file.path(Sys.getenv("HOME"), "FraseR"), nonSplicedReads = SummarizedExperiment(rowRanges=GRanges()) ) ) ## Validity ## ======== # # check sample annotation within the colData slot of the SE object # validateSampleAnnotation <- function(object) { sampleData <- as.data.table(colData(object)) if(!"sampleID" %in% colnames(sampleData)){ return("Please provide a 'sampleID' column with a ID for each sample.") } if(any(duplicated(sampleData$sampleID))){ return("The 'sampleID' column needs to be unique.") } if(!any("bamFile" %in% colnames(sampleData))){ return("Please provide a 'bamFile' column.") } if(any(samples(object) != rownames(colData(object)))){ return("Please set the rownames of your colData to the sampleIDs") } NULL } validateName <- function(object){ if(!isScalarCharacter(object@name)){ return("The name of the experiment should be of type 'character'.") } if(length(object@name) == 0){ return("The experiment name can not be empty.") } if(!grep("^[a-zA-Z0-9 ._-]+$", object@name, perl=TRUE)){ return(paste("For readabilty the name of the experiment should only ", "contain the following characters: 'a-zA-Z0-9 ._-'" )) } NULL } validateMethod <- function(object) { validMethods <- c("Fisher", "betaBin", "DESeq2", "Martin") if(!isScalarCharacter(object@method) || !object@method %in% 
validMethods) { return(paste0("The selected method must be one of the following: ", paste(validMethods, collapse=", "), "." )) } NULL } validateParallel <- function(object) { if(!is(object@parallel, "BiocParallelParam")) { return("The 'parallel' option must be a BiocParallelParam object.") } NULL } validateBamParam <- function(object) { if(class(scanBamParam(object)) != "ScanBamParam") { return("The 'bamParam' option must be a ScanBamParam object.") } NULL } validateStrandSpecific <- function(object) { if(!isScalarLogical(object@strandSpecific)) { return(paste("The 'strandSpecific' option must be 0L, 1L or 2L.")) } NULL } validateWorkingDir <- function(object) { if(!isScalarCharacter(object@workingDir)){ return(paste("The path to the working directory needs", "to be a set as a character." )) } if(object@workingDir == ""){ return("The working directory can not be empty.") } if(!dir.exists(object@workingDir)){ message(date(), ": The given working directory '", object@workingDir, "' does not exists. We will create it." ) dir.create(object@workingDir, recursive = TRUE) } if(file.access(object@workingDir, mode = 4) != 0){ return(paste("Make sure we can write to the given working directory '", object@workingDir, "'." )) } NULL } validateNonSplicedReadsType <- function(object) { if(class(object@nonSplicedReads) != "RangedSummarizedExperiment") { return("'nonSplicedReads' must be a RangedSummarizedExperiment object") } if(length(object) != 0 && dim(object@nonSplicedReads)[2] != dim(object)[2]){ return("The NSR dimensions are not correct. This is a internal error!") } ans <- validObject(object@nonSplicedReads) if(!isScalarLogical(ans) || ans == FALSE){ return(ans) } NULL } validateAssays <- function(object){ if(length(assayNames(object)) > 1){ if(any(duplicated(assayNames(object)))){ return( "FraseR enforces unique assay names! Please provie such names." 
) } } NULL } ## general validate function validateFraseRDataSet <- function(object) { c( validateSampleAnnotation(object), validateName(object), validateMethod(object), validateParallel(object), validateBamParam(object), validateStrandSpecific(object), validateWorkingDir(object), validateNonSplicedReadsType(object), validateAssays(object) ) } setValidity("FraseRDataSet", validateFraseRDataSet) ## Cosmetics (the show function) ## ============================= ## show method for FraseRDataSet showFraseRDataSet <- function(object) { if(dim(object)[2] < 1){ cat("This Fraser object does not contain any sample! Please add one.") return() } # taken from SummarizedExperiment show function scat <- function(fmt, vals=character(), exdent=2, ...){ vals <- ifelse(nzchar(vals), vals, "''") lbls <- paste(S4Vectors:::selectSome(vals), collapse=" ") txt <- sprintf(fmt, length(vals), lbls) cat(strwrap(txt, exdent=exdent, ...), sep="\n") } cat("-------------------- Sample data table -----------------\n") sampleData <- as.data.table(colData(object)) if(all(sapply(sampleData$bamFile, isScalarCharacter))){ sampleData$bamFile <- gsub("... 
[^/]+/", ".../", sapply(sampleData$bamFile, function(str){ if(nchar(str) <= 29) return(str) paste("...", substr(str, nchar(str) - 25, nchar(str))) }) ) } show(as_tibble(sampleData)) cat("\n") if(length(object) > 0){ cat(paste0("Number of samples: ", dim(object)[2]), "\n") cat(paste0("Number of junctions: ", length(object)), "\n") cat(paste0("Number of splice sites: ", length(nonSplicedReads(object))), "\n") scat("assays(%d): %s\n", assayNames(object)) cat("\n") } cat("----------------------- Settings -----------------------\n") cat(paste0("Analysis name: ", name(object)), "\n") cat(paste0("Statistical method: ", method(object)), "\n") cat(paste0("Analysis is strand specific: ", strandSpecific(object)), "\n") cat(paste0("Working directory: '", workingDir(object), "'"), "\n") cat("\n") cat("-------------------- Parallel backend ------------------\n") # show(parallel(object)) cat(paste0("Type: ", as.character(class(parallel(object))), "\tWorkers: ", bpworkers(parallel(object)), "\tTasks: ", bptasks(parallel(object)) )) cat("\n\n") cat("-------------------- BAM parameters --------------------\n") if(identical(scanBamParam(FraseRDataSet()), scanBamParam(object))){ cat(paste0("Default used with: ", "bamMapqFilter=", bamMapqFilter(scanBamParam(object)) )) } else { show(scanBamParam(object)) } cat("\n\n") } setMethod("show", "FraseRDataSet", function(object) { showFraseRDataSet(object) }) ## Constructor ## ========== #' #' The FraseR dataset object #' #' Constructs an FraseR object based on the given input. It can take only the #' annotation (colData) or count tables (junctions/spliceSites). #' #' @param colData A DataFrame containing the annotation of the samples #' @param junctions A matrix like object containing the raw counts for each #' junction. It requires the \code{start} and the \code{endID} #' column that identifies the corresponding splice site for #' the given junction. 
#' @param spliceSites A matrix like object containing the raw counts for each #' splice site. it requires the \code{spliceSiteID} and the #' \code{type} column that gives the ID and the type of the #' given splice site. The ID maps back to the junction. #' @param ... Any parameters corresponding to the slots and their possible #' values. See \linkS4class{FraseRDataSet} #' @return A FraseRDataSet object. #' @author Christian Mertes \email{mertes@@in.tum.de} #' @export #' @examples #' fraser <- FraseRDataSet() #' fraser <- countRNAData(createTestFraseRSettings()) FraseRDataSet <- function(colData=NULL, junctions=NULL, spliceSites=NULL, ...) { if(!is.null(colData)){ if(is.data.table(colData)){ colData <- DataFrame(colData) } if(is.null(rownames(colData))){ rownames(colData) <- colData[['sampleID']] } if(is.null(junctions) & is.null(spliceSites)) return(new("FraseRDataSet", colData=colData, ...)) if(is.null(junctions)){ stop("Please provide junctions counts if you provide ", "spliceSite counts.") } if(is.null(spliceSites)){ stop("Please provdie splice site counts if you provide ", "junction counts.") } if(is.data.frame(junctions)){ junctions <- makeGRangesFromDataFrame(junctions, keep.extra.columns=TRUE) } if(is.data.frame(spliceSites)){ spliceSites <- makeGRangesFromDataFrame(spliceSites, keep.extra.columns=TRUE) } nsr <- SummarizedExperiment( rowRanges=spliceSites[,c("spliceSiteID", "type")], assays=SimpleList(rawCountsSS=as.data.frame( mcols(spliceSites)[colData[,"sampleID"]]), a=NULL)[1]) se <- SummarizedExperiment( rowRanges=junctions[,c("startID", "endID")], colData=colData, assays=SimpleList(rawCountsJ=as.data.frame( mcols(junctions)[colData[,"sampleID"]]), a=NULL)[1]) return(new("FraseRDataSet", se, nonSplicedReads=nsr, ...)) } return(new("FraseRDataSet", ...)) }
testlist <- list(A = structure(c(7.41975724086257e-251, 3.81575932257023e-236, 3.81571422914747e-236), .Dim = c(1L, 3L)), B = structure(0, .Dim = c(1L, 1L))) result <- do.call(multivariance:::match_rows,testlist) str(result)
/multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613125940-test.R
no_license
akhikolla/updatedatatype-list3
R
false
false
226
r
testlist <- list(A = structure(c(7.41975724086257e-251, 3.81575932257023e-236, 3.81571422914747e-236), .Dim = c(1L, 3L)), B = structure(0, .Dim = c(1L, 1L))) result <- do.call(multivariance:::match_rows,testlist) str(result)
library(babynames) library(dplyr) library(data.table) library(tictoc) library(microbenchmark) midt <- setDT(copy(babynames)) midf <- as.data.table(midt) #--- Ejemplo de condiciones que se buscan # Elizabeth == 8915 # Michael == 88514 # David == 86251 # Selecciono nombres y su frecuencia (primera aparición) aleatoriamente NUM_NOMBRES <- 1000 all_nam <- unique(midt[ , .(name)]) rnd_idx <- sample(1:nrow(all_nam), NUM_NOMBRES, replace = FALSE) mi_names <- all_nam[ rnd_idx, .(name)] mi_value <- midt[ name %chin% mi_names$name, .(n), by = name] %>% .[ , .SD[1], by = name] #--- Crear condiciones automáticamente #-- cond_end == Condiciones para data.table y dplyr #-- cond_bas == Condiciones para base NUM_CONDICIONES <- 50 cond_end <- vector() cond_bas <- vector() for (i in 1:NUM_CONDICIONES) { name_val <- mi_value$name[i] n_val <- mi_value$n[i] if (i < num_cond) { mi_cond <- paste("( name == '" , name_val, "' & n == ", n_val, " ) | ", sep = "") mi_cond2 <- paste("( midf$name == '" , name_val, "' & midf$n == ", n_val, " ) | ", sep = "") } else { mi_cond <- paste(" ( name == '" , name_val, "' & n == ", n_val, " )", sep = "") mi_cond2 <- paste(" ( midf$name == '" , name_val, "' & midf$n == ", n_val, " )", sep = "") } cond_end <- paste0(cond_end, mi_cond, collapse = "\n") cond_bas <- paste0(cond_bas, mi_cond2, collapse = "\n") } #------- COMPARACIONES --------------- #-- data.table tic() midt[ eval(parse(text = cond_end )), ] toc() #-- dplyr midf <- as.data.frame(midt) tic() midf %>% filter( eval(parse(text = cond_end )), ) toc() #--- base tic() midf[ eval(parse(text = cond_bas )), ] toc() #------- BENCHMARKINGS -------------- microbenchmark( data.table = midt[ eval(parse(text = cond_end )), ] , base = midf[ eval(parse(text = cond_bas )), ] , dplyr = midf %>% filter( eval(parse(text = cond_end )), ) , times = 25 )
/2021_06_Ayuda_filtros_bench_.R
no_license
coforfe/R-Help-es
R
false
false
1,931
r
library(babynames) library(dplyr) library(data.table) library(tictoc) library(microbenchmark) midt <- setDT(copy(babynames)) midf <- as.data.table(midt) #--- Ejemplo de condiciones que se buscan # Elizabeth == 8915 # Michael == 88514 # David == 86251 # Selecciono nombres y su frecuencia (primera aparición) aleatoriamente NUM_NOMBRES <- 1000 all_nam <- unique(midt[ , .(name)]) rnd_idx <- sample(1:nrow(all_nam), NUM_NOMBRES, replace = FALSE) mi_names <- all_nam[ rnd_idx, .(name)] mi_value <- midt[ name %chin% mi_names$name, .(n), by = name] %>% .[ , .SD[1], by = name] #--- Crear condiciones automáticamente #-- cond_end == Condiciones para data.table y dplyr #-- cond_bas == Condiciones para base NUM_CONDICIONES <- 50 cond_end <- vector() cond_bas <- vector() for (i in 1:NUM_CONDICIONES) { name_val <- mi_value$name[i] n_val <- mi_value$n[i] if (i < num_cond) { mi_cond <- paste("( name == '" , name_val, "' & n == ", n_val, " ) | ", sep = "") mi_cond2 <- paste("( midf$name == '" , name_val, "' & midf$n == ", n_val, " ) | ", sep = "") } else { mi_cond <- paste(" ( name == '" , name_val, "' & n == ", n_val, " )", sep = "") mi_cond2 <- paste(" ( midf$name == '" , name_val, "' & midf$n == ", n_val, " )", sep = "") } cond_end <- paste0(cond_end, mi_cond, collapse = "\n") cond_bas <- paste0(cond_bas, mi_cond2, collapse = "\n") } #------- COMPARACIONES --------------- #-- data.table tic() midt[ eval(parse(text = cond_end )), ] toc() #-- dplyr midf <- as.data.frame(midt) tic() midf %>% filter( eval(parse(text = cond_end )), ) toc() #--- base tic() midf[ eval(parse(text = cond_bas )), ] toc() #------- BENCHMARKINGS -------------- microbenchmark( data.table = midt[ eval(parse(text = cond_end )), ] , base = midf[ eval(parse(text = cond_bas )), ] , dplyr = midf %>% filter( eval(parse(text = cond_end )), ) , times = 25 )
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/DatabaseConnector.R \name{jdbcDrivers} \alias{jdbcDrivers} \title{How to download and use JDBC drivers for the various data platforms.} \description{ Below are instructions for downloading JDBC drivers for the various data platforms. Once downloaded use the \code{pathToDriver} argument in the \code{\link[=connect]{connect()}} or \code{\link[=createConnectionDetails]{createConnectionDetails()}} functions to point to the driver. Alternatively, you can set the 'DATABASECONNECTOR_JAR_FOLDER' environmental variable, for example in your .Renviron file (recommended). } \section{SQL Server, Oracle, PostgreSQL, PDW, Snowflake, Spark, RedShift, Azure Synapse, BigQuery}{ Use the \code{\link[=downloadJdbcDrivers]{downloadJdbcDrivers()}} function to download these drivers from the OHDSI GitHub pages. } \section{Netezza}{ Read the instructions \href{https://www.ibm.com/docs/en/SSULQD_7.2.1/com.ibm.nz.datacon.doc/t_datacon_setup_JDBC.html}{here} on how to obtain the Netezza JDBC driver. } \section{Impala}{ Go to \href{https://www.cloudera.com/downloads/connectors/impala/jdbc/2-5-5.html}{Cloudera's site}, pick your OS version, and click "GET IT NOW!'. Register, and you should be able to download the driver. } \section{SQLite}{ For SQLite we actually don't use a JDBC driver. Instead, we use the RSQLite package, which can be installed using \code{install.packages("RSQLite")}. }
/man/jdbcDrivers.Rd
permissive
cran/DatabaseConnector
R
false
true
1,496
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/DatabaseConnector.R \name{jdbcDrivers} \alias{jdbcDrivers} \title{How to download and use JDBC drivers for the various data platforms.} \description{ Below are instructions for downloading JDBC drivers for the various data platforms. Once downloaded use the \code{pathToDriver} argument in the \code{\link[=connect]{connect()}} or \code{\link[=createConnectionDetails]{createConnectionDetails()}} functions to point to the driver. Alternatively, you can set the 'DATABASECONNECTOR_JAR_FOLDER' environmental variable, for example in your .Renviron file (recommended). } \section{SQL Server, Oracle, PostgreSQL, PDW, Snowflake, Spark, RedShift, Azure Synapse, BigQuery}{ Use the \code{\link[=downloadJdbcDrivers]{downloadJdbcDrivers()}} function to download these drivers from the OHDSI GitHub pages. } \section{Netezza}{ Read the instructions \href{https://www.ibm.com/docs/en/SSULQD_7.2.1/com.ibm.nz.datacon.doc/t_datacon_setup_JDBC.html}{here} on how to obtain the Netezza JDBC driver. } \section{Impala}{ Go to \href{https://www.cloudera.com/downloads/connectors/impala/jdbc/2-5-5.html}{Cloudera's site}, pick your OS version, and click "GET IT NOW!'. Register, and you should be able to download the driver. } \section{SQLite}{ For SQLite we actually don't use a JDBC driver. Instead, we use the RSQLite package, which can be installed using \code{install.packages("RSQLite")}. }
#' Author: Julio Trecenti #' Subject: # library(tidyverse) library(magrittr) u <- "http://divulgacandcontas.tse.jus.br/divulga/#/candidato/2018/2022802018/BR/280000614517" r <- httr::GET(u) r %>% xml2::read_html() %>% xml2::xml_find_all("//img[@class='img-thumbnail img-responsive dvg-cand-foto ng-scope']") # pjs <- webdriver::run_phantomjs() # sess <- webdriver::Session$new(port = pjs$port) # sess$go(u) # sess$takeScreenshot() "RSelenium" library(reticulate) link <- xml2::read_html(py$obj) %>% xml2::xml_find_first("//img[@class='img-thumbnail img-responsive dvg-cand-foto ng-scope']") %>% xml2::xml_attr("src") httr::GET(link, httr::write_disk("data-raw/candidato.jpg")) # Import ----------------------------------------------------------------------- # Tidy ------------------------------------------------------------------------- # Visualize -------------------------------------------------------------------- # Model ------------------------------------------------------------------------ # Export ----------------------------------------------------------------------- # readr::write_rds(d, "")
/drafts/20200730_fotos_candidatos.R
no_license
Tai-Rocha/lives
R
false
false
1,129
r
#' Author: Julio Trecenti #' Subject: # library(tidyverse) library(magrittr) u <- "http://divulgacandcontas.tse.jus.br/divulga/#/candidato/2018/2022802018/BR/280000614517" r <- httr::GET(u) r %>% xml2::read_html() %>% xml2::xml_find_all("//img[@class='img-thumbnail img-responsive dvg-cand-foto ng-scope']") # pjs <- webdriver::run_phantomjs() # sess <- webdriver::Session$new(port = pjs$port) # sess$go(u) # sess$takeScreenshot() "RSelenium" library(reticulate) link <- xml2::read_html(py$obj) %>% xml2::xml_find_first("//img[@class='img-thumbnail img-responsive dvg-cand-foto ng-scope']") %>% xml2::xml_attr("src") httr::GET(link, httr::write_disk("data-raw/candidato.jpg")) # Import ----------------------------------------------------------------------- # Tidy ------------------------------------------------------------------------- # Visualize -------------------------------------------------------------------- # Model ------------------------------------------------------------------------ # Export ----------------------------------------------------------------------- # readr::write_rds(d, "")
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/routesAtStop.R \name{namedList_RoutesAtStops} \alias{namedList_RoutesAtStops} \title{Faster function to simplify gtfsFunctions::routeIDAtStops() to include only one record per stop with a column containing a list of all routes at stop.} \usage{ namedList_RoutesAtStops(gtfsObject) } \arguments{ \item{gtfsObject}{stops table from gtfsFunctions::routeIDAtStops() or stops.txt with one row per route-stop combo. MUST have "route_id" column} } \value{ Names list of all routes at stop where stop is name and routes are values } \description{ Faster function to simplify gtfsFunctions::routeIDAtStops() to include only one record per stop with a column containing a list of all routes at stop. }
/man/namedList_RoutesAtStops.Rd
permissive
b-tomhave/gtfsFunctions
R
false
true
770
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/routesAtStop.R \name{namedList_RoutesAtStops} \alias{namedList_RoutesAtStops} \title{Faster function to simplify gtfsFunctions::routeIDAtStops() to include only one record per stop with a column containing a list of all routes at stop.} \usage{ namedList_RoutesAtStops(gtfsObject) } \arguments{ \item{gtfsObject}{stops table from gtfsFunctions::routeIDAtStops() or stops.txt with one row per route-stop combo. MUST have "route_id" column} } \value{ Names list of all routes at stop where stop is name and routes are values } \description{ Faster function to simplify gtfsFunctions::routeIDAtStops() to include only one record per stop with a column containing a list of all routes at stop. }
#!/usr/bin/env Rscript # installing WGCNA: #source("http://bioconductor.org/biocLite.R") #biocLite(c("AnnotationDbi", "impute", "GO.db", "preprocessCore")) #BiocManager::install(c("AnnotationDbi", "impute", "GO.db", "preprocessCore")) #install.packages("flashClust",repos="http://cran.us.r-project.org") #install.packages("WGCNA",dependencies=TRUE,repos="http://cran.us.r-project.org") #install.packages("scales") #install.packages("assertthat") #install.packages("ape") library(WGCNA) library(flashClust) library(ape) library("assertthat") library("scales") #setwd("~/Desktop/WGCNA_A_digitifera") setwd("/home/stenger/stenger_data/EPIGENETIC_DATA/08_RNA_Seq_link") # The following setting is important, do not omit. options(stringsAsFactors=FALSE) ##Read in the expr data set, from HT-Seq count ## https://labs.genetics.ucla.edu/horvath/htdocs/CoexpressionNetwork/Rpackages/WGCNA/faq.html ## We then recommend a variance-stabilizing transformation. For example, ## package DESeq2 implements the function varianceStabilizingTransformation which we have found useful, ## but one could also start with normalized counts (or RPKM/FPKM data) and log-transform them using log2(x+1). ## For highly expressed features, the differences between full variance stabilization and a simple log transformation are small. 
# ## If using Excel, the formalua is : =LOG(B2+1; 2) # ### Bash script in local ## ##awk '{print $2}' count_Control_1_30a.txt > count_Control_1_30a_col.txt ##awk '{print $2}' count_Control_1_30b.txt > count_Control_1_30b_col.txt ##awk '{print $2}' count_Control_1_30c.txt > count_Control_1_30c_col.txt ##awk '{print $2}' count_Control_2_35a.txt > count_Control_2_35a_col.txt ##awk '{print $2}' count_Control_2_35b.txt > count_Control_2_35b_col.txt ##awk '{print $2}' count_Control_2_35c.txt > count_Control_2_35c_col.txt ##awk '{print $2}' count_Control_3_30a.txt > count_Control_3_30a_col.txt ##awk '{print $2}' count_Control_3_30b.txt > count_Control_3_30b_col.txt ##awk '{print $2}' count_Control_3_30c.txt > count_Control_3_30c_col.txt ##awk '{print $2}' count_Acclimation_1_31_5a.txt > count_Acclimation_1_31_5a_col.txt ##awk '{print $2}' count_Acclimation_1_31_5b.txt > count_Acclimation_1_31_5b_col.txt ##awk '{print $2}' count_Acclimation_1_31_5c.txt > count_Acclimation_1_31_5c_col.txt ##awk '{print $2}' count_Acclimation_2_35a.txt > count_Acclimation_2_35a_col.txt ##awk '{print $2}' count_Acclimation_2_35b.txt > count_Acclimation_2_35b_col.txt ##awk '{print $2}' count_Acclimation_2_35c.txt > count_Acclimation_2_35c_col.txt ##awk '{print $2}' count_Acclimation_3_30a.txt > count_Acclimation_3_30a_col.txt ##awk '{print $2}' count_Acclimation_3_30b.txt > count_Acclimation_3_30b_col.txt ##awk '{print $2}' count_Acclimation_3_30c.txt > count_Acclimation_3_30c_col.txt ## ##awk '{print $1}' count_Control_1_30a.txt > names_genes_col.txt ## ##paste names_genes_col.txt count_Control_1_30a_col.txt count_Control_1_30b_col.txt count_Control_1_30c_col.txt count_Control_2_35a_col.txt count_Control_2_35b_col.txt count_Control_2_35c_col.txt count_Control_3_30a_col.txt count_Control_3_30b_col.txt count_Control_3_30c_col.txt count_Acclimation_1_31_5a_col.txt count_Acclimation_1_31_5b_col.txt count_Acclimation_1_31_5c_col.txt count_Acclimation_2_35a_col.txt count_Acclimation_2_35b_col.txt 
count_Acclimation_2_35c_col.txt count_Acclimation_3_30a_col.txt count_Acclimation_3_30b_col.txt count_Acclimation_3_30c_col.txt > all_count.txt ## ##sed -i '1s/^/Genes Control_1_30a Control_1_30b Control_1_30c Control_2_35a Control_2_35b Control_2_35c Control_3_30a Control_3_30b Control_3_30c Acclimation_1_31_5a Acclimation_1_31_5b Acclimation_1_31_5c Acclimation_2_35a Acclimation_2_35b Acclimation_2_35c Acclimation_3_30a Acclimation_3_30b Acclimation_3_30c\n/' all_count.txt ## ### header: ### Genes Control_1_30a Control_1_30b Control_1_30c Control_2_35a Control_2_35b Control_2_35c Control_3_30a Control_3_30b Control_3_30c Acclimation_1_31_5a Acclimation_1_31_5b Acclimation_1_31_5c Acclimation_2_35a Acclimation_2_35b Acclimation_2_35c Acclimation_3_30a Acclimation_3_30b Acclimation_3_30c # # ## And then in excel: ## =LOG(B2+1; 2) # #logcpm_test<-read.table("logcpm.txt", header=T) ## Take a quick look at what is in the data set: #dim(logcpm_test) #head(logcpm_test) # # ## We first check for genes and samples with too many missing values #datExpr0 = as.data.frame(t(logcpm_test)) #gsg = goodSamplesGenes(datExpr0, verbose = 3); #gsg$allOK #If the last statement returns TRUE, all genes have passed the cuts # ## If not, we remove the offending genes and samples from the data : #if (!gsg$allOK) #{ # # Optionally, print the gene and sample names that were removed: # if (sum(!gsg$goodGenes)>0) # printFlush(paste("Removing genes:", paste(names(datExpr0)[!gsg$goodGenes], collapse = ", "))); # if (sum(!gsg$goodSamples)>0) # printFlush(paste("Removing samples:", paste(rownames(datExpr0)[!gsg$goodSamples], collapse = ", "))); # # Remove the offending genes and samples from the data: # datExpr0 = datExpr0[gsg$goodSamples, gsg$goodGenes] #} # # # ## Next we cluster the samples (in contrast to clustering genes that will come later) to see if there are any obvious outliers. 
#sampleTree = hclust(dist(datExpr0), method = "average"); ## Plot the sample tree: Open a graphic output window of size 12 by 9 inches ## The user should change the dimensions if the window is too large or too small. #sizeGrWindow(12,9) ##pdf(file = "Plots/sampleClustering.pdf", width = 12, height = 9); #par(cex = 0.6); #par(mar = c(0,4,2,0)) #plot(sampleTree, main = "Sample clustering to detect outliers", sub="", xlab="", cex.lab = 1.5, # cex.axis = 1.5, cex.main = 2) # #plot(sampleTree, main = "Sample clustering", sub="", xlab="", cex.lab = 1.5, # cex.axis = 1.5, cex.main = 2) # # # # # # ## Plot a line to show the cut #abline(h = 400, col = "red"); ## Determine cluster under the line #clust = cutreeStatic(sampleTree, cutHeight = 400, minSize = 10) #table(clust) ## clust 1 contains the samples we want to keep. #keepSamples = (clust==1) #datExpr = datExpr0[keepSamples, ] #nGenes = ncol(datExpr) #nSamples = nrow(datExpr) # #variancedatExpr=as.vector(apply(as.matrix(datExpr),2,var, na.rm=T)) #no.presentdatExpr=as.vector(apply(!is.na(as.matrix(datExpr)),2, sum) ) ## Another way of summarizing the number of pressent entries #table(no.presentdatExpr) # ## Keep only genes whose variance is non-zero and have at least 4 present entries #KeepGenes= variancedatExpr>0.05 #table(KeepGenes) #datExpr=datExpr[, KeepGenes] ##View(datExpr) #name_datExpr <-colnames(datExpr) #head(name_datExpr) # # # # #allTraits = read.table("trait.txt", header=T); #names(allTraits) # ## Form a data frame analogous to expression data that will hold the clinical traits. 
# #temp = rownames(datExpr); #traitRows = match(temp, allTraits$Ind); #datTraits = allTraits[traitRows, -1]; #rownames(datTraits) = allTraits[traitRows, 1]; #str(datTraits) # #collectGarbage(); # # ## Re-cluster samples #sampleTree2 = hclust(dist(datExpr), method = "average") ## Convert traits to a color representation: white means low, red means high, grey means missing entry #traitColors = numbers2colors(datTraits,signed= FALSE); ## Plot the sample dendrogram and the colors underneath. #pdf("dendo_heatmap.pdf",width=12,height=9) #par(mar=c(1, 10, 1, 1)) #plotDendroAndColors(sampleTree2, traitColors, # groupLabels = names(datTraits), # main = "Sample dendrogram and trait heatmap") # #dev.off() # # #save(datExpr, datTraits, file = "dataInput_subset.Rda") # # ########################### Module construction step-by-step ################################# # # # ##setting is important, do not omit. #options(stringsAsFactors = FALSE); ## Allow multi-threading within WGCNA. At present this call is necessary. ## Any error here may be ignored but you may want to update WGCNA if you see one. ## Caution: skip this line if you run RStudio or other third-party R environments. ## See note above. ##enableWGCNAThreads() ## Load the data saved in the first part #lnames = load(file = "dataInput_subset.Rda"); ##The variable lnames contains the names of loaded variables. #lnames # ## Very important for sft !! 
#allowWGCNAThreads() # #load(file = "dataInput_subset.Rda") ## Choose a set of soft-thresholding powers #powers = c(c(1:10), seq(from = 10, to=24, by=2)) ## Call the network topology analysis function #sft = pickSoftThreshold(datExpr, powerVector = powers, verbose = 5,networkType="signed") ## Plot the results: #sizeGrWindow(9, 5) #par(mfrow = c(1,2)); #cex1 = 0.9; ## Scale-free topology fit index as a function of the soft-thresholding power #plot(sft$fitIndices[,1], -sign(sft$fitIndices[,3])*sft$fitIndices[,2], # xlab="Soft Threshold (power)",ylab="Scale Free Topology Model Fit,signed R^2",type="n", # main = paste("Scale independence")); #text(sft$fitIndices[,1], -sign(sft$fitIndices[,3])*sft$fitIndices[,2], # labels=powers,cex=cex1,col="red"); ## this line corresponds to using an R^2 cut-off of h #abline(h=0.84,col="red") ## Mean connectivity as a function of the soft-thresholding power #plot(sft$fitIndices[,1], sft$fitIndices[,5], # xlab="Soft Threshold (power)",ylab="Mean Connectivity", type="n", # main = paste("Mean connectivity")) #text(sft$fitIndices[,1], sft$fitIndices[,5], labels=powers, cex=cex1,col="red") #save(sft,file="sft_signed.Rda") #View(sft$fitIndices) #pickSoftThreshold( # datExpr, # dataIsExpr = TRUE, # weights = NULL, # RsquaredCut = 0.85, # powerVector = c(seq(1, 10, by = 1), seq(12, 20, by = 2)), # removeFirst = FALSE, nBreaks = 10, blockSize = NULL, # corFnc = cor, corOptions = list(use = 'p'), # networkType = "unsigned", # moreNetworkConcepts = FALSE, # gcInterval = NULL, # verbose = 0, indent = 0) #softPower = 20; #reached 90 R2 # Here 20 = 0.80237680 # Error in cor_mat^power : non-numeric argument to binary operator #adjacency = adjacency(datExpr, power = "softPower", type="signed"); # Error in cor_mat^power : non-numeric argument to binary operator # --> Error in cor_mat^power : non-numeric argument to binary operator ##################### # making modules load(file = "dataInput_subset.Rda") load(file = "sft_signed.Rda") #s.th=18 # 
re-specify according to previous section s.th=20 # re-specify according to previous section # the following two lines take a long time, prepare to wait 15-20 min # (if you don;t want to wait but proceed with exercise, skip to next section - we have results of this one recorded already) adjacency = adjacency(datExpr, power = s.th, type="signed"); save(adjacency,file="adjacency.RData") TOM = TOMsimilarity(adjacency,TOMType="signed"); dissTOM = 1-TOM save(adjacency, TOM, dissTOM, file="adjacency_2.RData") # Call the hierarchical clustering function geneTree = flashClust(as.dist(dissTOM), method = "average"); # We like large modules, so we set the minimum module size relatively high: minModuleSize = 30; dynamicMods = cutreeDynamic(dendro = geneTree, distM = dissTOM, deepSplit = 2, pamRespectsDendro = FALSE, minClusterSize = minModuleSize); dynamicColors = labels2colors(dynamicMods) table(dynamicColors) datt = datExpr # Calculate eigengenes MEList = moduleEigengenes(datt, colors = dynamicColors) MEs = MEList$eigengenes # Calculate dissimilarity of module eigengenes MEDiss = 1-cor(MEs); METree = flashClust(as.dist(MEDiss), method = "average"); save(dynamicMods,dynamicColors,MEs,METree,geneTree,file="1stPassModules.RData") ######################### # merging modules: mm=load('1stPassModules.RData') mm library(WGCNA) lnames=load('wgcnaData.RData') # traits # head(datt) quartz() MEDissThres = 0.35 # in the first pass, set this to 0 - no merging (we want to see the module-traits heatmap first, then decide which modules are telling us the same story and better be merged) sizeGrWindow(7, 6) plot(METree, main = "Clustering of module eigengenes", xlab = "", sub = "") # Plot the cut line into the dendrogram abline(h=MEDissThres, col = "red") # on 2nd pass: does this cut height meet your merging goals? 
If not, reset MEDissThres and replot # Call an automatic merging function merge = mergeCloseModules(datt, dynamicColors, cutHeight = MEDissThres, verbose = 3) # The merged module colors mergedColors = merge$colors; # Eigengenes of the new merged modules: mergedMEs = merge$newMEs # plotting the fabulous ridiculogram quartz() plotDendroAndColors(geneTree, cbind(dynamicColors, mergedColors), c("Dynamic Tree Cut", "Merged dynamic"), dendroLabels = FALSE, hang = 0.03, addGuide = FALSE, guideHang = 0.05,lwd=0.3) # Rename to moduleColors moduleColors = mergedColors # Construct numerical labels corresponding to the colors colorOrder = c("grey", standardColors(50)); moduleLabels = match(moduleColors, colorOrder)-1; MEs = mergedMEs; # Calculate dissimilarity of module eigengenes quartz() MEDiss = 1-cor(MEs); # Cluster module eigengenes METree = flashClust(as.dist(MEDiss), method = "average"); # Plot the result sizeGrWindow(7, 6) plot(METree, main = "Clustering of module eigengenes", xlab = "", sub = "") # how many genes in each module? 
table(moduleColors) # Save module colors and labels for use in subsequent parts save(MEs, geneTree, moduleLabels, moduleColors, file = "networkdata_signed.RData") ################### # plotting correlations with traits: load(file = "networkdata_signed.RData") load(file = "wgcnaData.RData") # Define numbers of genes and samples nGenes = ncol(datt); nSamples = nrow(datt); # Recalculate MEs with color labels MEs0 = moduleEigengenes(datt, moduleColors)$eigengenes MEs = orderMEs(MEs0) # correlations of genes with eigengenes moduleGeneCor=cor(MEs,datt) moduleGenePvalue = corPvalueStudent(moduleGeneCor, nSamples); moduleTraitCor = cor(MEs, traits, use = "p"); moduleTraitPvalue = corPvalueStudent(moduleTraitCor, nSamples); # gene-trait correlations - a gene-by-gene heatmap corresponding to the droopy tree # (to make augmented ridiculogram as in mice-men-embryos paper) # quartz() # geneTraitCor = cor(datt, traits, use = "p"); # colnames(geneTraitCor) # geneTraitCor=geneTraitCor[geneTree$order,] # head(geneTraitCor) # labeledHeatmap(Matrix = geneTraitCor, # xLabels = colnames(geneTraitCor), # xLabelsAngle=90, # ySymbols = FALSE, # colorLabels = FALSE, # colors = blueWhiteRed(50), # setStdMargins = FALSE, cex.text = 0.5, # zlim = c(-1,1), # main = paste("Gene-trait relationships")) # module-trait correlations quartz() textMatrix = paste(signif(moduleTraitCor, 2), "\n(", signif(moduleTraitPvalue, 1), ")", sep = ""); dim(textMatrix) = dim(moduleTraitCor) par(mar = c(6, 8.5, 3, 3)); # Display the correlation values within a heatmap plot labeledHeatmap(Matrix = moduleTraitCor, xLabels = names(traits), yLabels = names(MEs), ySymbols = names(MEs), colorLabels = FALSE, colors = blueWhiteRed(50), textMatrix = textMatrix, setStdMargins = FALSE, cex.text = 0.5, zlim = c(-1,1), main = paste("Module-trait relationships")) print(data.frame(table(moduleColors))) # gives numbers of genes in each module # if it was first pass with no module merging, this is where you examine your heatmap # 
and dendrogram of module eigengenes to see where you would like to see # where you woudl like to set cut height (MEDissThres parameter) in the previous section # to merge modules that are talling the same story for your trait data # good way to do it is to find a group of similar modules in the heat map and then see # at which tree height they connect in the dendrogram. ############# # scatterplots of gene significance (correlation-based) vs kME load(file = "networkdata_signed.RData") load(file = "wgcnaData.RData"); traits table(moduleColors) whichTrait="Stress" nGenes = ncol(datt); nSamples = nrow(datt); selTrait = as.data.frame(traits[,whichTrait]); names(selTrait) = whichTrait # names (colors) of the modules modNames = substring(names(MEs), 3) geneModuleMembership = as.data.frame(signedKME(datt, MEs)); MMPvalue = as.data.frame(corPvalueStudent(as.matrix(geneModuleMembership), nSamples)); names(geneModuleMembership) = paste("MM", modNames, sep=""); names(MMPvalue) = paste("p.MM", modNames, sep=""); geneTraitSignificance = as.data.frame(cor(datt, selTrait, use = "p")); GSPvalue = as.data.frame(corPvalueStudent(as.matrix(geneTraitSignificance), nSamples)); names(geneTraitSignificance) = paste("GS.", names(selTrait), sep=""); names(GSPvalue) = paste("p.GS.", names(selTrait), sep=""); quartz() par(mfrow=c(3,3)) counter=0 for(module in modNames[1:length(modNames)]){ counter=counter+1 if (counter>9) { quartz() par(mfrow=c(3,3)) counter=1 } column = match(module, modNames); moduleGenes = moduleColors==module; #trr="heat resistance" verboseScatterplot(abs(geneModuleMembership[moduleGenes, column]), abs(geneTraitSignificance[moduleGenes, 1]), xlab = paste(module,"module membership"), ylab = paste("GS for", whichTrait), col = "grey50",mgp=c(2.3,1,0)) } ################ # eigengene-heatmap plot (sanity check - is the whole module driven by just one crazy sample?) 
# note: this part does not make much sense for unsigned modules load(file = "networkdata_signed.RData") load(file = "wgcnaData.RData"); which.module="darkturquoise" datME=MEs datExpr=datt quartz() ME=datME[, paste("ME",which.module, sep="")] par(mfrow=c(2,1), mar=c(0.3, 5.5, 3, 2)) plotMat(t(scale(datExpr[,moduleColors==which.module ]) ), nrgcols=30,rlabels=F,rcols=which.module, main=which.module, cex.main=2) par(mar=c(5, 4.2, 0, 0.7)) barplot(ME, col=which.module, main="", cex.main=2, ylab="eigengene expression",xlab="sample") length(datExpr[1,moduleColors==which.module ]) # number of genes in chosen module ################# # saving selected modules for GO and KOG analysis (two-parts: Fisher test, MWU test within-module) library(WGCNA) load(file = "networkdata_signed.RData") # moduleColors, MEs load(file = "wgcnaData.RData") # vsd table load(file = "../data4wgcna.RData") # vsd table # calculating modul memberships for all genes for all modules allkME =as.data.frame(signedKME(datt, MEs)) names(allkME)=gsub("kME","",names(allkME)) whichModule="darkslateblue" table(moduleColors==whichModule) # how many genes are in it? 
# Saving data for Fisher-MWU combo test (GO_MWU) inModuleBinary=as.numeric(moduleColors==whichModule) combo=data.frame("gene"=row.names(vsd.wg),"Fish_kME"=allkME[,whichModule]*inModuleBinary) write.csv(combo,file=paste(whichModule,".csv",sep=""),row.names=F,quote=F) ################ # plotting heatmap for named top-kME genes load(file = "networkdata_signed.RData") load(file = "wgcnaData.RData"); allkME =as.data.frame(signedKME(datt, MEs)) gg=read.table("../heatmaps/amil_iso2gene.tab",sep="\t") library(pheatmap) whichModule="darkslateblue" top=30 # number of named top-kME genes to plot datME=MEs datExpr=datt modcol=paste("kME",whichModule,sep="") sorted=vsd.wg[order(allkME[,modcol],decreasing=T),] head(sorted) # selection top N names genes, attaching gene names gnames=c();counts=0;hubs=c() for(i in 1:length(sorted[,1])) { if (row.names(sorted)[i] %in% gg$V1) { counts=counts+1 gn=gg[gg$V1==row.names(sorted)[i],2] gn=paste(gn,row.names(sorted)[i],sep=".") if (gn %in% gnames) { gn=paste(gn,counts,sep=".") } gnames=append(gnames,gn) hubs=data.frame(rbind(hubs,sorted[i,])) if (counts==top) {break} } } row.names(hubs)=gnames contrasting = colorRampPalette(rev(c("chocolate1","#FEE090","grey10", "cyan3","cyan")))(100) contrasting2 = colorRampPalette(rev(c("chocolate1","chocolate1","#FEE090","grey10", "cyan3","cyan","cyan")))(100) contrasting3 = colorRampPalette(rev(c("chocolate1","#FEE090","grey10", "cyan3","cyan","cyan")))(100) pheatmap(hubs,scale="row",col=contrasting2,border_color=NA,treeheight_col=0,cex=0.9,cluster_rows=F)
/00_scripts/17_WGCNA_on_RNA_Seq.R
no_license
PLStenger/Acropora_digitifera_BS_Seq
R
false
false
20,374
r
#!/usr/bin/env Rscript # installing WGCNA: #source("http://bioconductor.org/biocLite.R") #biocLite(c("AnnotationDbi", "impute", "GO.db", "preprocessCore")) #BiocManager::install(c("AnnotationDbi", "impute", "GO.db", "preprocessCore")) #install.packages("flashClust",repos="http://cran.us.r-project.org") #install.packages("WGCNA",dependencies=TRUE,repos="http://cran.us.r-project.org") #install.packages("scales") #install.packages("assertthat") #install.packages("ape") library(WGCNA) library(flashClust) library(ape) library("assertthat") library("scales") #setwd("~/Desktop/WGCNA_A_digitifera") setwd("/home/stenger/stenger_data/EPIGENETIC_DATA/08_RNA_Seq_link") # The following setting is important, do not omit. options(stringsAsFactors=FALSE) ##Read in the expr data set, from HT-Seq count ## https://labs.genetics.ucla.edu/horvath/htdocs/CoexpressionNetwork/Rpackages/WGCNA/faq.html ## We then recommend a variance-stabilizing transformation. For example, ## package DESeq2 implements the function varianceStabilizingTransformation which we have found useful, ## but one could also start with normalized counts (or RPKM/FPKM data) and log-transform them using log2(x+1). ## For highly expressed features, the differences between full variance stabilization and a simple log transformation are small. 
# ## If using Excel, the formalua is : =LOG(B2+1; 2) # ### Bash script in local ## ##awk '{print $2}' count_Control_1_30a.txt > count_Control_1_30a_col.txt ##awk '{print $2}' count_Control_1_30b.txt > count_Control_1_30b_col.txt ##awk '{print $2}' count_Control_1_30c.txt > count_Control_1_30c_col.txt ##awk '{print $2}' count_Control_2_35a.txt > count_Control_2_35a_col.txt ##awk '{print $2}' count_Control_2_35b.txt > count_Control_2_35b_col.txt ##awk '{print $2}' count_Control_2_35c.txt > count_Control_2_35c_col.txt ##awk '{print $2}' count_Control_3_30a.txt > count_Control_3_30a_col.txt ##awk '{print $2}' count_Control_3_30b.txt > count_Control_3_30b_col.txt ##awk '{print $2}' count_Control_3_30c.txt > count_Control_3_30c_col.txt ##awk '{print $2}' count_Acclimation_1_31_5a.txt > count_Acclimation_1_31_5a_col.txt ##awk '{print $2}' count_Acclimation_1_31_5b.txt > count_Acclimation_1_31_5b_col.txt ##awk '{print $2}' count_Acclimation_1_31_5c.txt > count_Acclimation_1_31_5c_col.txt ##awk '{print $2}' count_Acclimation_2_35a.txt > count_Acclimation_2_35a_col.txt ##awk '{print $2}' count_Acclimation_2_35b.txt > count_Acclimation_2_35b_col.txt ##awk '{print $2}' count_Acclimation_2_35c.txt > count_Acclimation_2_35c_col.txt ##awk '{print $2}' count_Acclimation_3_30a.txt > count_Acclimation_3_30a_col.txt ##awk '{print $2}' count_Acclimation_3_30b.txt > count_Acclimation_3_30b_col.txt ##awk '{print $2}' count_Acclimation_3_30c.txt > count_Acclimation_3_30c_col.txt ## ##awk '{print $1}' count_Control_1_30a.txt > names_genes_col.txt ## ##paste names_genes_col.txt count_Control_1_30a_col.txt count_Control_1_30b_col.txt count_Control_1_30c_col.txt count_Control_2_35a_col.txt count_Control_2_35b_col.txt count_Control_2_35c_col.txt count_Control_3_30a_col.txt count_Control_3_30b_col.txt count_Control_3_30c_col.txt count_Acclimation_1_31_5a_col.txt count_Acclimation_1_31_5b_col.txt count_Acclimation_1_31_5c_col.txt count_Acclimation_2_35a_col.txt count_Acclimation_2_35b_col.txt 
count_Acclimation_2_35c_col.txt count_Acclimation_3_30a_col.txt count_Acclimation_3_30b_col.txt count_Acclimation_3_30c_col.txt > all_count.txt ## ##sed -i '1s/^/Genes Control_1_30a Control_1_30b Control_1_30c Control_2_35a Control_2_35b Control_2_35c Control_3_30a Control_3_30b Control_3_30c Acclimation_1_31_5a Acclimation_1_31_5b Acclimation_1_31_5c Acclimation_2_35a Acclimation_2_35b Acclimation_2_35c Acclimation_3_30a Acclimation_3_30b Acclimation_3_30c\n/' all_count.txt ## ### header: ### Genes Control_1_30a Control_1_30b Control_1_30c Control_2_35a Control_2_35b Control_2_35c Control_3_30a Control_3_30b Control_3_30c Acclimation_1_31_5a Acclimation_1_31_5b Acclimation_1_31_5c Acclimation_2_35a Acclimation_2_35b Acclimation_2_35c Acclimation_3_30a Acclimation_3_30b Acclimation_3_30c # # ## And then in excel: ## =LOG(B2+1; 2) # #logcpm_test<-read.table("logcpm.txt", header=T) ## Take a quick look at what is in the data set: #dim(logcpm_test) #head(logcpm_test) # # ## We first check for genes and samples with too many missing values #datExpr0 = as.data.frame(t(logcpm_test)) #gsg = goodSamplesGenes(datExpr0, verbose = 3); #gsg$allOK #If the last statement returns TRUE, all genes have passed the cuts # ## If not, we remove the offending genes and samples from the data : #if (!gsg$allOK) #{ # # Optionally, print the gene and sample names that were removed: # if (sum(!gsg$goodGenes)>0) # printFlush(paste("Removing genes:", paste(names(datExpr0)[!gsg$goodGenes], collapse = ", "))); # if (sum(!gsg$goodSamples)>0) # printFlush(paste("Removing samples:", paste(rownames(datExpr0)[!gsg$goodSamples], collapse = ", "))); # # Remove the offending genes and samples from the data: # datExpr0 = datExpr0[gsg$goodSamples, gsg$goodGenes] #} # # # ## Next we cluster the samples (in contrast to clustering genes that will come later) to see if there are any obvious outliers. 
#sampleTree = hclust(dist(datExpr0), method = "average"); ## Plot the sample tree: Open a graphic output window of size 12 by 9 inches ## The user should change the dimensions if the window is too large or too small. #sizeGrWindow(12,9) ##pdf(file = "Plots/sampleClustering.pdf", width = 12, height = 9); #par(cex = 0.6); #par(mar = c(0,4,2,0)) #plot(sampleTree, main = "Sample clustering to detect outliers", sub="", xlab="", cex.lab = 1.5, # cex.axis = 1.5, cex.main = 2) # #plot(sampleTree, main = "Sample clustering", sub="", xlab="", cex.lab = 1.5, # cex.axis = 1.5, cex.main = 2) # # # # # # ## Plot a line to show the cut #abline(h = 400, col = "red"); ## Determine cluster under the line #clust = cutreeStatic(sampleTree, cutHeight = 400, minSize = 10) #table(clust) ## clust 1 contains the samples we want to keep. #keepSamples = (clust==1) #datExpr = datExpr0[keepSamples, ] #nGenes = ncol(datExpr) #nSamples = nrow(datExpr) # #variancedatExpr=as.vector(apply(as.matrix(datExpr),2,var, na.rm=T)) #no.presentdatExpr=as.vector(apply(!is.na(as.matrix(datExpr)),2, sum) ) ## Another way of summarizing the number of pressent entries #table(no.presentdatExpr) # ## Keep only genes whose variance is non-zero and have at least 4 present entries #KeepGenes= variancedatExpr>0.05 #table(KeepGenes) #datExpr=datExpr[, KeepGenes] ##View(datExpr) #name_datExpr <-colnames(datExpr) #head(name_datExpr) # # # # #allTraits = read.table("trait.txt", header=T); #names(allTraits) # ## Form a data frame analogous to expression data that will hold the clinical traits. 
# #temp = rownames(datExpr); #traitRows = match(temp, allTraits$Ind); #datTraits = allTraits[traitRows, -1]; #rownames(datTraits) = allTraits[traitRows, 1]; #str(datTraits) # #collectGarbage(); # # ## Re-cluster samples #sampleTree2 = hclust(dist(datExpr), method = "average") ## Convert traits to a color representation: white means low, red means high, grey means missing entry #traitColors = numbers2colors(datTraits,signed= FALSE); ## Plot the sample dendrogram and the colors underneath. #pdf("dendo_heatmap.pdf",width=12,height=9) #par(mar=c(1, 10, 1, 1)) #plotDendroAndColors(sampleTree2, traitColors, # groupLabels = names(datTraits), # main = "Sample dendrogram and trait heatmap") # #dev.off() # # #save(datExpr, datTraits, file = "dataInput_subset.Rda") # # ########################### Module construction step-by-step ################################# # # # ##setting is important, do not omit. #options(stringsAsFactors = FALSE); ## Allow multi-threading within WGCNA. At present this call is necessary. ## Any error here may be ignored but you may want to update WGCNA if you see one. ## Caution: skip this line if you run RStudio or other third-party R environments. ## See note above. ##enableWGCNAThreads() ## Load the data saved in the first part #lnames = load(file = "dataInput_subset.Rda"); ##The variable lnames contains the names of loaded variables. #lnames # ## Very important for sft !! 
#allowWGCNAThreads() # #load(file = "dataInput_subset.Rda") ## Choose a set of soft-thresholding powers #powers = c(c(1:10), seq(from = 10, to=24, by=2)) ## Call the network topology analysis function #sft = pickSoftThreshold(datExpr, powerVector = powers, verbose = 5,networkType="signed") ## Plot the results: #sizeGrWindow(9, 5) #par(mfrow = c(1,2)); #cex1 = 0.9; ## Scale-free topology fit index as a function of the soft-thresholding power #plot(sft$fitIndices[,1], -sign(sft$fitIndices[,3])*sft$fitIndices[,2], # xlab="Soft Threshold (power)",ylab="Scale Free Topology Model Fit,signed R^2",type="n", # main = paste("Scale independence")); #text(sft$fitIndices[,1], -sign(sft$fitIndices[,3])*sft$fitIndices[,2], # labels=powers,cex=cex1,col="red"); ## this line corresponds to using an R^2 cut-off of h #abline(h=0.84,col="red") ## Mean connectivity as a function of the soft-thresholding power #plot(sft$fitIndices[,1], sft$fitIndices[,5], # xlab="Soft Threshold (power)",ylab="Mean Connectivity", type="n", # main = paste("Mean connectivity")) #text(sft$fitIndices[,1], sft$fitIndices[,5], labels=powers, cex=cex1,col="red") #save(sft,file="sft_signed.Rda") #View(sft$fitIndices) #pickSoftThreshold( # datExpr, # dataIsExpr = TRUE, # weights = NULL, # RsquaredCut = 0.85, # powerVector = c(seq(1, 10, by = 1), seq(12, 20, by = 2)), # removeFirst = FALSE, nBreaks = 10, blockSize = NULL, # corFnc = cor, corOptions = list(use = 'p'), # networkType = "unsigned", # moreNetworkConcepts = FALSE, # gcInterval = NULL, # verbose = 0, indent = 0) #softPower = 20; #reached 90 R2 # Here 20 = 0.80237680 # Error in cor_mat^power : non-numeric argument to binary operator #adjacency = adjacency(datExpr, power = "softPower", type="signed"); # Error in cor_mat^power : non-numeric argument to binary operator # --> Error in cor_mat^power : non-numeric argument to binary operator ##################### # making modules load(file = "dataInput_subset.Rda") load(file = "sft_signed.Rda") #s.th=18 # 
re-specify according to previous section s.th=20 # re-specify according to previous section # the following two lines take a long time, prepare to wait 15-20 min # (if you don;t want to wait but proceed with exercise, skip to next section - we have results of this one recorded already) adjacency = adjacency(datExpr, power = s.th, type="signed"); save(adjacency,file="adjacency.RData") TOM = TOMsimilarity(adjacency,TOMType="signed"); dissTOM = 1-TOM save(adjacency, TOM, dissTOM, file="adjacency_2.RData") # Call the hierarchical clustering function geneTree = flashClust(as.dist(dissTOM), method = "average"); # We like large modules, so we set the minimum module size relatively high: minModuleSize = 30; dynamicMods = cutreeDynamic(dendro = geneTree, distM = dissTOM, deepSplit = 2, pamRespectsDendro = FALSE, minClusterSize = minModuleSize); dynamicColors = labels2colors(dynamicMods) table(dynamicColors) datt = datExpr # Calculate eigengenes MEList = moduleEigengenes(datt, colors = dynamicColors) MEs = MEList$eigengenes # Calculate dissimilarity of module eigengenes MEDiss = 1-cor(MEs); METree = flashClust(as.dist(MEDiss), method = "average"); save(dynamicMods,dynamicColors,MEs,METree,geneTree,file="1stPassModules.RData") ######################### # merging modules: mm=load('1stPassModules.RData') mm library(WGCNA) lnames=load('wgcnaData.RData') # traits # head(datt) quartz() MEDissThres = 0.35 # in the first pass, set this to 0 - no merging (we want to see the module-traits heatmap first, then decide which modules are telling us the same story and better be merged) sizeGrWindow(7, 6) plot(METree, main = "Clustering of module eigengenes", xlab = "", sub = "") # Plot the cut line into the dendrogram abline(h=MEDissThres, col = "red") # on 2nd pass: does this cut height meet your merging goals? 
If not, reset MEDissThres and replot # Call an automatic merging function merge = mergeCloseModules(datt, dynamicColors, cutHeight = MEDissThres, verbose = 3) # The merged module colors mergedColors = merge$colors; # Eigengenes of the new merged modules: mergedMEs = merge$newMEs # plotting the fabulous ridiculogram quartz() plotDendroAndColors(geneTree, cbind(dynamicColors, mergedColors), c("Dynamic Tree Cut", "Merged dynamic"), dendroLabels = FALSE, hang = 0.03, addGuide = FALSE, guideHang = 0.05,lwd=0.3) # Rename to moduleColors moduleColors = mergedColors # Construct numerical labels corresponding to the colors colorOrder = c("grey", standardColors(50)); moduleLabels = match(moduleColors, colorOrder)-1; MEs = mergedMEs; # Calculate dissimilarity of module eigengenes quartz() MEDiss = 1-cor(MEs); # Cluster module eigengenes METree = flashClust(as.dist(MEDiss), method = "average"); # Plot the result sizeGrWindow(7, 6) plot(METree, main = "Clustering of module eigengenes", xlab = "", sub = "") # how many genes in each module? 
table(moduleColors) # Save module colors and labels for use in subsequent parts save(MEs, geneTree, moduleLabels, moduleColors, file = "networkdata_signed.RData") ################### # plotting correlations with traits: load(file = "networkdata_signed.RData") load(file = "wgcnaData.RData") # Define numbers of genes and samples nGenes = ncol(datt); nSamples = nrow(datt); # Recalculate MEs with color labels MEs0 = moduleEigengenes(datt, moduleColors)$eigengenes MEs = orderMEs(MEs0) # correlations of genes with eigengenes moduleGeneCor=cor(MEs,datt) moduleGenePvalue = corPvalueStudent(moduleGeneCor, nSamples); moduleTraitCor = cor(MEs, traits, use = "p"); moduleTraitPvalue = corPvalueStudent(moduleTraitCor, nSamples); # gene-trait correlations - a gene-by-gene heatmap corresponding to the droopy tree # (to make augmented ridiculogram as in mice-men-embryos paper) # quartz() # geneTraitCor = cor(datt, traits, use = "p"); # colnames(geneTraitCor) # geneTraitCor=geneTraitCor[geneTree$order,] # head(geneTraitCor) # labeledHeatmap(Matrix = geneTraitCor, # xLabels = colnames(geneTraitCor), # xLabelsAngle=90, # ySymbols = FALSE, # colorLabels = FALSE, # colors = blueWhiteRed(50), # setStdMargins = FALSE, cex.text = 0.5, # zlim = c(-1,1), # main = paste("Gene-trait relationships")) # module-trait correlations quartz() textMatrix = paste(signif(moduleTraitCor, 2), "\n(", signif(moduleTraitPvalue, 1), ")", sep = ""); dim(textMatrix) = dim(moduleTraitCor) par(mar = c(6, 8.5, 3, 3)); # Display the correlation values within a heatmap plot labeledHeatmap(Matrix = moduleTraitCor, xLabels = names(traits), yLabels = names(MEs), ySymbols = names(MEs), colorLabels = FALSE, colors = blueWhiteRed(50), textMatrix = textMatrix, setStdMargins = FALSE, cex.text = 0.5, zlim = c(-1,1), main = paste("Module-trait relationships")) print(data.frame(table(moduleColors))) # gives numbers of genes in each module # if it was first pass with no module merging, this is where you examine your heatmap # 
and dendrogram of module eigengenes to see where you would like to see # where you woudl like to set cut height (MEDissThres parameter) in the previous section # to merge modules that are talling the same story for your trait data # good way to do it is to find a group of similar modules in the heat map and then see # at which tree height they connect in the dendrogram. ############# # scatterplots of gene significance (correlation-based) vs kME load(file = "networkdata_signed.RData") load(file = "wgcnaData.RData"); traits table(moduleColors) whichTrait="Stress" nGenes = ncol(datt); nSamples = nrow(datt); selTrait = as.data.frame(traits[,whichTrait]); names(selTrait) = whichTrait # names (colors) of the modules modNames = substring(names(MEs), 3) geneModuleMembership = as.data.frame(signedKME(datt, MEs)); MMPvalue = as.data.frame(corPvalueStudent(as.matrix(geneModuleMembership), nSamples)); names(geneModuleMembership) = paste("MM", modNames, sep=""); names(MMPvalue) = paste("p.MM", modNames, sep=""); geneTraitSignificance = as.data.frame(cor(datt, selTrait, use = "p")); GSPvalue = as.data.frame(corPvalueStudent(as.matrix(geneTraitSignificance), nSamples)); names(geneTraitSignificance) = paste("GS.", names(selTrait), sep=""); names(GSPvalue) = paste("p.GS.", names(selTrait), sep=""); quartz() par(mfrow=c(3,3)) counter=0 for(module in modNames[1:length(modNames)]){ counter=counter+1 if (counter>9) { quartz() par(mfrow=c(3,3)) counter=1 } column = match(module, modNames); moduleGenes = moduleColors==module; #trr="heat resistance" verboseScatterplot(abs(geneModuleMembership[moduleGenes, column]), abs(geneTraitSignificance[moduleGenes, 1]), xlab = paste(module,"module membership"), ylab = paste("GS for", whichTrait), col = "grey50",mgp=c(2.3,1,0)) } ################ # eigengene-heatmap plot (sanity check - is the whole module driven by just one crazy sample?) 
# note: this part does not make much sense for unsigned modules load(file = "networkdata_signed.RData") load(file = "wgcnaData.RData"); which.module="darkturquoise" datME=MEs datExpr=datt quartz() ME=datME[, paste("ME",which.module, sep="")] par(mfrow=c(2,1), mar=c(0.3, 5.5, 3, 2)) plotMat(t(scale(datExpr[,moduleColors==which.module ]) ), nrgcols=30,rlabels=F,rcols=which.module, main=which.module, cex.main=2) par(mar=c(5, 4.2, 0, 0.7)) barplot(ME, col=which.module, main="", cex.main=2, ylab="eigengene expression",xlab="sample") length(datExpr[1,moduleColors==which.module ]) # number of genes in chosen module ################# # saving selected modules for GO and KOG analysis (two-parts: Fisher test, MWU test within-module) library(WGCNA) load(file = "networkdata_signed.RData") # moduleColors, MEs load(file = "wgcnaData.RData") # vsd table load(file = "../data4wgcna.RData") # vsd table # calculating modul memberships for all genes for all modules allkME =as.data.frame(signedKME(datt, MEs)) names(allkME)=gsub("kME","",names(allkME)) whichModule="darkslateblue" table(moduleColors==whichModule) # how many genes are in it? 
# Saving data for Fisher-MWU combo test (GO_MWU) inModuleBinary=as.numeric(moduleColors==whichModule) combo=data.frame("gene"=row.names(vsd.wg),"Fish_kME"=allkME[,whichModule]*inModuleBinary) write.csv(combo,file=paste(whichModule,".csv",sep=""),row.names=F,quote=F) ################ # plotting heatmap for named top-kME genes load(file = "networkdata_signed.RData") load(file = "wgcnaData.RData"); allkME =as.data.frame(signedKME(datt, MEs)) gg=read.table("../heatmaps/amil_iso2gene.tab",sep="\t") library(pheatmap) whichModule="darkslateblue" top=30 # number of named top-kME genes to plot datME=MEs datExpr=datt modcol=paste("kME",whichModule,sep="") sorted=vsd.wg[order(allkME[,modcol],decreasing=T),] head(sorted) # selection top N names genes, attaching gene names gnames=c();counts=0;hubs=c() for(i in 1:length(sorted[,1])) { if (row.names(sorted)[i] %in% gg$V1) { counts=counts+1 gn=gg[gg$V1==row.names(sorted)[i],2] gn=paste(gn,row.names(sorted)[i],sep=".") if (gn %in% gnames) { gn=paste(gn,counts,sep=".") } gnames=append(gnames,gn) hubs=data.frame(rbind(hubs,sorted[i,])) if (counts==top) {break} } } row.names(hubs)=gnames contrasting = colorRampPalette(rev(c("chocolate1","#FEE090","grey10", "cyan3","cyan")))(100) contrasting2 = colorRampPalette(rev(c("chocolate1","chocolate1","#FEE090","grey10", "cyan3","cyan","cyan")))(100) contrasting3 = colorRampPalette(rev(c("chocolate1","#FEE090","grey10", "cyan3","cyan","cyan")))(100) pheatmap(hubs,scale="row",col=contrasting2,border_color=NA,treeheight_col=0,cex=0.9,cluster_rows=F)
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 1.08664898958344e-311, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L))) result <- do.call(CNull:::communities_individual_based_sampling_beta,testlist) str(result)
/CNull/inst/testfiles/communities_individual_based_sampling_beta/AFL_communities_individual_based_sampling_beta/communities_individual_based_sampling_beta_valgrind_files/1615829373-test.R
no_license
akhikolla/updatedatatype-list2
R
false
false
361
r
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 1.08664898958344e-311, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L))) result <- do.call(CNull:::communities_individual_based_sampling_beta,testlist) str(result)
testlist <- list(AgeVector = c(-4.73074171454048e-167, 2.2262381097027e-76, -9.12990429452974e-204, 5.97087417427845e-79, 4.7390525269307e-300, 6.58361441690132e-121, 3.58611068565168e-154, -2.94504776827523e-186, 2.62380314702636e-116, -6.78950518864266e+23, 6.99695749856012e-167, 86485.676793021, 1.11271562183704e+230, 1.94114173595984e-186, 1.44833381226225e-178, -6.75217876587581e-69, 1.17166524186752e-15, -4.66902120197297e-64, -1.96807327384856e+304, 4.43806122192432e-53, 9.29588680224717e-276, -6.49633240047463e-239, -1.22140819059424e-138, 5.03155164774999e-80, -6.36956558303921e-38, 7.15714506860012e-155, -1.05546603899445e-274, -3.66720914317747e-169, -6.94681701552128e+38, 2.93126040859825e-33, 2.03804078100055e-84, 3.62794352816579e+190, 3.84224576683191e+202, 2.90661893502594e+44, -5.43046915655589e-132, -1.22315376742253e-152), ExpressionMatrix = structure(c(4.80597147865938e+96, 6.97343932706536e+155, 1.3267342810479e+281, 1.34663897260867e+171, 1.76430141680543e+158, 1.20021255064002e-241, 1.72046093489436e+274, 4.64807629890539e-66, 3.23566990107388e-38, 3.70896378162114e-42, 1.09474740380531e+92, 7.49155705745727e-308, 3.26639180474928e+224, 3.21841801500177e-79, 4.26435540037564e-295, 1.40002857639358e+82, 47573397570345336, 2.00517157311369e-187, 2.74035572944044e+70, 2.89262435086883e-308, 6.65942057982148e-198, 1.10979548770043e-208, 1.40208057226312e-220, 6.25978904299555e-111, 1.06191688875218e+167, 1.1857452172049, 7.01135380962132e-157, 4.49610615342627e-308, 8.04053421408348e+261, 6.23220855980985e+275, 1.91601752509744e+141, 2.27737212344351e-244, 1.6315101795754e+126, 3.83196182917788e+160, 1.53445011275161e-192), .Dim = c(5L, 7L)), permutations = 415362983L) result <- do.call(myTAI:::cpp_bootMatrix,testlist) str(result)
/myTAI/inst/testfiles/cpp_bootMatrix/AFL_cpp_bootMatrix/cpp_bootMatrix_valgrind_files/1615764858-test.R
no_license
akhikolla/updatedatatype-list3
R
false
false
1,803
r
testlist <- list(AgeVector = c(-4.73074171454048e-167, 2.2262381097027e-76, -9.12990429452974e-204, 5.97087417427845e-79, 4.7390525269307e-300, 6.58361441690132e-121, 3.58611068565168e-154, -2.94504776827523e-186, 2.62380314702636e-116, -6.78950518864266e+23, 6.99695749856012e-167, 86485.676793021, 1.11271562183704e+230, 1.94114173595984e-186, 1.44833381226225e-178, -6.75217876587581e-69, 1.17166524186752e-15, -4.66902120197297e-64, -1.96807327384856e+304, 4.43806122192432e-53, 9.29588680224717e-276, -6.49633240047463e-239, -1.22140819059424e-138, 5.03155164774999e-80, -6.36956558303921e-38, 7.15714506860012e-155, -1.05546603899445e-274, -3.66720914317747e-169, -6.94681701552128e+38, 2.93126040859825e-33, 2.03804078100055e-84, 3.62794352816579e+190, 3.84224576683191e+202, 2.90661893502594e+44, -5.43046915655589e-132, -1.22315376742253e-152), ExpressionMatrix = structure(c(4.80597147865938e+96, 6.97343932706536e+155, 1.3267342810479e+281, 1.34663897260867e+171, 1.76430141680543e+158, 1.20021255064002e-241, 1.72046093489436e+274, 4.64807629890539e-66, 3.23566990107388e-38, 3.70896378162114e-42, 1.09474740380531e+92, 7.49155705745727e-308, 3.26639180474928e+224, 3.21841801500177e-79, 4.26435540037564e-295, 1.40002857639358e+82, 47573397570345336, 2.00517157311369e-187, 2.74035572944044e+70, 2.89262435086883e-308, 6.65942057982148e-198, 1.10979548770043e-208, 1.40208057226312e-220, 6.25978904299555e-111, 1.06191688875218e+167, 1.1857452172049, 7.01135380962132e-157, 4.49610615342627e-308, 8.04053421408348e+261, 6.23220855980985e+275, 1.91601752509744e+141, 2.27737212344351e-244, 1.6315101795754e+126, 3.83196182917788e+160, 1.53445011275161e-192), .Dim = c(5L, 7L)), permutations = 415362983L) result <- do.call(myTAI:::cpp_bootMatrix,testlist) str(result)
#' Tile the assembly-based contigs of a merged assembly/spike GRanges. #' #' refactored out of scan_spiked_bam for more explicit information flow #' #' @param gr the GRanges #' @param binwidth bin width to tile (default is 300) #' #' @return a GRanges of bins #' #' @examples #' #' bam <- system.file("extdata", "ex1.bam", package="Rsamtools", #' mustWork=TRUE) #' gr <- as(seqinfo_from_header(bam), "GRanges") #' genome(gr) <- "notspike" #' tile_bins(gr) #' #' #' @export tile_bins <- function(gr, binwidth=300L) { chroms <- subset(seqlevels(gr), genome(gr)[seqlevels(gr)] != "spike") si <- seqinfo(gr)[chroms] message("Tiling ", binwidth, "bp bins across the genome...", appendLF=FALSE) bins <- tileGenome(si, tilewidth=binwidth, cut.last.tile.in.chrom=TRUE) message("Done.") return(bins) }
/R/tile_bins.R
no_license
alexpcheng/spiky
R
false
false
857
r
#' Tile the assembly-based contigs of a merged assembly/spike GRanges. #' #' refactored out of scan_spiked_bam for more explicit information flow #' #' @param gr the GRanges #' @param binwidth bin width to tile (default is 300) #' #' @return a GRanges of bins #' #' @examples #' #' bam <- system.file("extdata", "ex1.bam", package="Rsamtools", #' mustWork=TRUE) #' gr <- as(seqinfo_from_header(bam), "GRanges") #' genome(gr) <- "notspike" #' tile_bins(gr) #' #' #' @export tile_bins <- function(gr, binwidth=300L) { chroms <- subset(seqlevels(gr), genome(gr)[seqlevels(gr)] != "spike") si <- seqinfo(gr)[chroms] message("Tiling ", binwidth, "bp bins across the genome...", appendLF=FALSE) bins <- tileGenome(si, tilewidth=binwidth, cut.last.tile.in.chrom=TRUE) message("Done.") return(bins) }
library(openintro) library(oibiostat) data(LEAP) data(COL) leap.summary = table(LEAP$overall.V60.outcome, LEAP$treatment.group) leap.summary.prop <- prop.table(leap.summary, 2) leap.summary.prop #leap seg bar myPDF("leapBarPlot.pdf", 6, 3.5) par(mar=c(2.5, 2.5, 0.5, 7), xpd=TRUE) barplot(leap.summary, col = COL[c(4, 1)], ylim = c(0, 280), width = 2) legend("topright", inset = c(-.32, 0), fill = COL[c(4, 1)], legend = rownames(leap.summary)) dev.off() #leap sta seg bar myPDF("leapBarPlotSta.pdf", 6, 3.5) par(mar=c(2.5, 2.5, 0.5, 7), xpd=TRUE) barplot(leap.summary.prop, col = COL[c(4, 1)], ylim = c(0, 1), width = 2) legend("topright", inset = c(-.32, 0), fill = COL[c(4, 1)], legend = rownames(leap.summary.prop)) dev.off()
/ch_intro_to_data_oi_biostat/figures/leapBarPlot/leapBarPlot.R
no_license
OI-Biostat/oi_biostat_text
R
false
false
753
r
library(openintro) library(oibiostat) data(LEAP) data(COL) leap.summary = table(LEAP$overall.V60.outcome, LEAP$treatment.group) leap.summary.prop <- prop.table(leap.summary, 2) leap.summary.prop #leap seg bar myPDF("leapBarPlot.pdf", 6, 3.5) par(mar=c(2.5, 2.5, 0.5, 7), xpd=TRUE) barplot(leap.summary, col = COL[c(4, 1)], ylim = c(0, 280), width = 2) legend("topright", inset = c(-.32, 0), fill = COL[c(4, 1)], legend = rownames(leap.summary)) dev.off() #leap sta seg bar myPDF("leapBarPlotSta.pdf", 6, 3.5) par(mar=c(2.5, 2.5, 0.5, 7), xpd=TRUE) barplot(leap.summary.prop, col = COL[c(4, 1)], ylim = c(0, 1), width = 2) legend("topright", inset = c(-.32, 0), fill = COL[c(4, 1)], legend = rownames(leap.summary.prop)) dev.off()
#this r script is intended to collect, manage,and analyze the data produced by the imagej script #seed_color_imagej_v9, it produces as output file that provides a description of each seed analyzed #author: Ian McNish, mcnis003@umn.edu #institution: University of Minnesota #last modified: 13-feb-2017 #set working directory, load libraries, import data files ---- setwd("/Users/ianmcnish/Documents/imageanalysis/Jan2017oatseedpics") library(sqldf) red_result_files = list.files(pattern="*red_results.csv") green_result_files = list.files(pattern="*green_results.csv") blue_result_files = list.files(pattern="*blue_results.csv") size_result_files = list.files(pattern="*size_results.csv") mask_color_result_files = list.files(pattern="*mask_color_results.csv") #import data files created by seed_color_imagej_vn.txt ---- red_data=NULL green_data=NULL blue_data=NULL size_data=NULL mask_color_data=NULL #red data import---- red_data=read.csv(red_result_files[1]) red_data$file=red_result_files[1] for (i in 2:length(red_result_files)) { temp_data=read.csv(red_result_files[i]) temp_data$file=red_result_files[i] red_data=rbind(red_data,temp_data) } red_data$file2=sub("*.csv","",red_data$file) red_data$job=sub("[a-z]*_[a-z]*","",red_data$file2) red_data$red_mean=red_data$Mean red_data$seed=red_data$X keeps <- c("job","seed","red_mean") red_data=red_data[ , (names(red_data) %in% keeps)] #green data import---- green_data=read.csv(green_result_files[1]) green_data$file=green_result_files[1] for (i in 2:length(green_result_files)) { temp_data=read.csv(green_result_files[i]) temp_data$file=green_result_files[i] green_data=rbind(green_data,temp_data) } green_data$file2=sub("*.csv","",green_data$file) green_data$job=sub("[a-z]*_[a-z]*","",green_data$file2) green_data$green_mean=green_data$Mean green_data$seed=green_data$X keeps <- c("job","seed","green_mean") green_data=green_data[ , (names(green_data) %in% keeps)] #blue data import---- blue_data=read.csv(blue_result_files[1]) 
blue_data$file=blue_result_files[1] for (i in 2:length(blue_result_files)) { temp_data=read.csv(blue_result_files[i]) temp_data$file=blue_result_files[i] blue_data=rbind(blue_data,temp_data) } blue_data$file2=sub("*.csv","",blue_data$file) blue_data$job=sub("[a-z]*_[a-z]*","",blue_data$file2) blue_data$blue_mean=blue_data$Mean blue_data$seed=blue_data$X keeps <- c("job","seed","blue_mean") blue_data=blue_data[ , (names(blue_data) %in% keeps)] #size_data import---- size_data=read.csv(size_result_files[1]) size_data$file=size_result_files[1] for (i in 2:length(size_result_files)) { temp_data=read.csv(size_result_files[i]) temp_data$file=size_result_files[i] size_data=rbind(size_data,temp_data) } size_data$file2=sub("*.csv","",size_data$file) size_data$job=sub("[a-z]*_[a-z]*","",size_data$file2) size_data$seed=size_data$X.1 keeps <- c("job","seed","Length.long.axis","Length.short.axis") size_data=size_data[ , (names(size_data) %in% keeps)] colnames(size_data)=c("seed_length","seed_width","job","seed") ##mask_color_data import---- mask_color_data=read.csv(mask_color_result_files[1]) mask_color_data$file=mask_color_result_files[1] for (i in 2:length(mask_color_result_files)) { temp_data=read.csv(mask_color_result_files[i]) temp_data$file=mask_color_result_files[i] mask_color_data=rbind(mask_color_data,temp_data) } mask_color_data$file2=sub("*.csv","",mask_color_data$file) mask_color_data$job=sub("_[a-z]*_[a-z]*_[a-z]*","",mask_color_data$file2) mask_color_data$seed=mask_color_data$X keeps <- c("job","seed","Mean") mask_color_data=mask_color_data[ , (names(mask_color_data) %in% keeps)] colnames(mask_color_data)=c("mask_mean_color","job","seed") #join results---- join1=merge(red_data,green_data) join2=merge(join1,blue_data) join3=merge(join2,size_data) seed_data=merge(join3,mask_color_data) #write results---- write.csv(seed_data,file = "seed_phenotypes.csv")
/old_versions/seed_color_r_v05.R
permissive
mcnis003/image-analysis
R
false
false
3,959
r
#this r script is intended to collect, manage,and analyze the data produced by the imagej script #seed_color_imagej_v9, it produces as output file that provides a description of each seed analyzed #author: Ian McNish, mcnis003@umn.edu #institution: University of Minnesota #last modified: 13-feb-2017 #set working directory, load libraries, import data files ---- setwd("/Users/ianmcnish/Documents/imageanalysis/Jan2017oatseedpics") library(sqldf) red_result_files = list.files(pattern="*red_results.csv") green_result_files = list.files(pattern="*green_results.csv") blue_result_files = list.files(pattern="*blue_results.csv") size_result_files = list.files(pattern="*size_results.csv") mask_color_result_files = list.files(pattern="*mask_color_results.csv") #import data files created by seed_color_imagej_vn.txt ---- red_data=NULL green_data=NULL blue_data=NULL size_data=NULL mask_color_data=NULL #red data import---- red_data=read.csv(red_result_files[1]) red_data$file=red_result_files[1] for (i in 2:length(red_result_files)) { temp_data=read.csv(red_result_files[i]) temp_data$file=red_result_files[i] red_data=rbind(red_data,temp_data) } red_data$file2=sub("*.csv","",red_data$file) red_data$job=sub("[a-z]*_[a-z]*","",red_data$file2) red_data$red_mean=red_data$Mean red_data$seed=red_data$X keeps <- c("job","seed","red_mean") red_data=red_data[ , (names(red_data) %in% keeps)] #green data import---- green_data=read.csv(green_result_files[1]) green_data$file=green_result_files[1] for (i in 2:length(green_result_files)) { temp_data=read.csv(green_result_files[i]) temp_data$file=green_result_files[i] green_data=rbind(green_data,temp_data) } green_data$file2=sub("*.csv","",green_data$file) green_data$job=sub("[a-z]*_[a-z]*","",green_data$file2) green_data$green_mean=green_data$Mean green_data$seed=green_data$X keeps <- c("job","seed","green_mean") green_data=green_data[ , (names(green_data) %in% keeps)] #blue data import---- blue_data=read.csv(blue_result_files[1]) 
blue_data$file=blue_result_files[1] for (i in 2:length(blue_result_files)) { temp_data=read.csv(blue_result_files[i]) temp_data$file=blue_result_files[i] blue_data=rbind(blue_data,temp_data) } blue_data$file2=sub("*.csv","",blue_data$file) blue_data$job=sub("[a-z]*_[a-z]*","",blue_data$file2) blue_data$blue_mean=blue_data$Mean blue_data$seed=blue_data$X keeps <- c("job","seed","blue_mean") blue_data=blue_data[ , (names(blue_data) %in% keeps)] #size_data import---- size_data=read.csv(size_result_files[1]) size_data$file=size_result_files[1] for (i in 2:length(size_result_files)) { temp_data=read.csv(size_result_files[i]) temp_data$file=size_result_files[i] size_data=rbind(size_data,temp_data) } size_data$file2=sub("*.csv","",size_data$file) size_data$job=sub("[a-z]*_[a-z]*","",size_data$file2) size_data$seed=size_data$X.1 keeps <- c("job","seed","Length.long.axis","Length.short.axis") size_data=size_data[ , (names(size_data) %in% keeps)] colnames(size_data)=c("seed_length","seed_width","job","seed") ##mask_color_data import---- mask_color_data=read.csv(mask_color_result_files[1]) mask_color_data$file=mask_color_result_files[1] for (i in 2:length(mask_color_result_files)) { temp_data=read.csv(mask_color_result_files[i]) temp_data$file=mask_color_result_files[i] mask_color_data=rbind(mask_color_data,temp_data) } mask_color_data$file2=sub("*.csv","",mask_color_data$file) mask_color_data$job=sub("_[a-z]*_[a-z]*_[a-z]*","",mask_color_data$file2) mask_color_data$seed=mask_color_data$X keeps <- c("job","seed","Mean") mask_color_data=mask_color_data[ , (names(mask_color_data) %in% keeps)] colnames(mask_color_data)=c("mask_mean_color","job","seed") #join results---- join1=merge(red_data,green_data) join2=merge(join1,blue_data) join3=merge(join2,size_data) seed_data=merge(join3,mask_color_data) #write results---- write.csv(seed_data,file = "seed_phenotypes.csv")
#' @title Regional (or local) parameter and quantile estimation #' @description Calculates regional (or local) parameters of a generalized extreme value (GEV) distribution #' using (trimmed) L-moments #' (see \link[TLMoments]{TLMoments} and \link[TLMoments]{parameters}) from a vector or matrix of observation. #' Based on these parameters, a p-quantile of the GEV will be calculated for the jth station. #' @param x vector or matrix of observations (rows: observations, d columns: stations). #' @param p a probability. #' @param j quantile and parameter estimation for the jth #' station (jth column of \code{x}). Irrelevant if is \code{x} is a vector. #' @param leftrim integer indicating lower trimming parameter (\eqn{\ge 0}). #' @param rightrim integer indicating upper trimming parameter (\eqn{\ge 0}). #' @param na.rm Should missing values be removed? #' @param ... additional arguments, see \link[TLMoments]{TLMoments}. #' @details The optimal weights will be calculated as described in "Kinsvater, Fried and Lilienthal (2015): #' Regional extreme value index estimation and a test of tail homogeneity, #' Environmetrics, DOI: 10.1002/env.2376, Section 3.2". If it's not possible to calculate #' optimal weights (negative eigenvaules of an estimated covarinace matrix), simple weights #' will be calculated: \eqn{w_j=\frac{n_j}{sum_{j=1}^d n_j}}{w_j=n_j/sum_{j=1}^d n_j} #' @return List of \itemize{ #' \item \code{quant} quantile calculation from an estimated GEV with a regional shape-parameter. #' \item \code{param} estimated parameter vector from a GEV (using L-moments or trimmed L-moments). 
#' \item \code{w} optimal or simple weighting (just returned if \code{x} is a matrix).} #' @examples #' library("evd") #' # sample observations of 75 years at one station: #' x <- rgev(75) # x is a vector #' RegioGEV(x=x, p=0.95) #' #' x2 <- c(NA, NA, x[1:60], NA, x[61:75]) # vector of observations with missing values #' RegioGEV(x=x2, p=0.95) # NAs will be removed #' #' # sample observations of 100 years at 4 stations: #' set.seed(1053) #' x <- matrix(rgev(400, 2, 1, 0.3), ncol=4) #' RegioGEV(x=x, p=0.9, j=3, leftrim=0, rightrim=0) # optimal weighting #' RegioGEV(x=x, p=0.9, j=3, leftrim=0, rightrim=1) # optimal weighting #' #' # With missing values: #' x2 <- x #' x2[c(54, 89, 300)] <- NA #' RegioGEV(x=x2, p=0.9, j=3, leftrim=0, rightrim=0) #' #' # sample again observations of 100 years at 4 stations: #' set.seed(958) #' x <- matrix(rgev(400, 2, 1, 0.3), ncol=4) #' RegioGEV(x=x, p=0.9, j=3, leftrim=0, rightrim=0) # simple weighting #' @export RegioGEV <- function(x, p, j=1, leftrim=0, rightrim=0, na.rm=TRUE, ...){ if(!all(p < 1 & p > 0)) stop("p must be a probability: p in (0,1)") # local case (d=1): if(is.vector(x)){ para <- TLMoments(x, leftrim=leftrim, rightrim=rightrim, na.rm=na.rm, ...) %>% parameters("gev") quan <- para %>% quantiles(p) return(list(quant=quan, param=para)) } # regional case (d>1): if(is.matrix(x)){ # estimate parameters from (T)L-moments: tlmom <- TLMoments(x, leftrim=leftrim, rightrim=rightrim, na.rm=na.rm, ...) para <- tlmom %>% parameters("gev") d <- ncol(x) # calculate weights w with method "xi_cov": Sigma_Inv <- solve(est_cov(para)[1:d*3, 1:d*3]) ev <- eigen(Sigma_Inv)$values if(any(ev < 0)){ n <- apply(x, 2, function(y) sum(!is.na(y))) # winter- and sommermaxima are observed pairwise or NA pairwise. 
w <- n/sum(n) # simple weighting if any eigenvalues < 0 wname <- "w" } else{ w <- rowSums(Sigma_Inv)/sum(Sigma_Inv) # optimal weighting wname <- "wopt" } # calculate the estimated regional xi xi <- w %*% para[3,] x1 <- x[,j] betas <- PWMs(x[,j], na.rm=TRUE) # calculate the estimated mu and sigma at site j if(leftrim==0 && rightrim==0){ sig <- (2*betas[2]-betas[1])*xi/(gamma(1-xi)*(2^xi-1)) mu <- betas[1]+ sig/xi * (1-gamma(1-xi)) } if(leftrim==0 && rightrim==1){ sig <- (4*betas[2]-betas[1]-3*betas[3])/(gamma(-xi)*(3^xi-2*2^xi+1)) mu <- 2*(betas[1]-betas[2]) + sig/xi - sig*gamma(-xi)*(2^xi-2) } # calculate p-quantile quan <- as.vector(evd::qgev(p, mu, sig, xi)) return(list(quant=quan, param=c(loc=mu, scale=sig, shape=xi), w=wname)) } } #' @title Seasonal regional (or local) parameter and quantile estimation #' @description Calculates regional (or local) parameters of a two-component GEV distribution (product of two GEVs) #' using (trimmed) L-moments (see \link[TLMoments]{TLMoments} and \link[TLMoments]{parameters}) #' from two vectors or two matrices of observation, e.g. winter and summer observations #' from one or more than one station. #' Based on these two parameter vectors, a p-quantile of the two-component GEV will #' be calculated for the jth station. #' @param x1 vector or matrix of observations from season 1 (rows: observations, columns: stations). #' @param x2 vector or matrix of observations from season 2 (rows: observations, columns: stations). #' @param p a probability. #' @param j quantile and parameter estimation for the jth station (jth column of \code{x}). #' Irrelevant if is \code{x1} and \code{x2} are vectors. #' @param leftrim integer indicating lower trimming parameter (\eqn{\ge 0}). #' @param rightrim integer indicating upper trimming parameter (\eqn{\ge 0}). #' @param na.rm Should missing values be removed? #' @param ... additional arguments, see \link[TLMoments]{TLMoments}. 
#' @return List of \itemize{ #' \item \code{quant} quantile calculation from an estimated two-component GEV with a #' regional (or local) shape-parameters. #' \item \code{param1} estimated parameter vector from season 1 from a GEV (using L-moments or trimmed L-moments). #' \item \code{param2} estimated parameter vector from season 2 from a GEV (using L-moments or trimmed L-moments).} #' @examples #' library("evd") #' # Seasonal observations of 80 years at one station: #' x1 <- rgev(80, 2, 1, 0.2) # observations from season 1 #' x2 <- rgev(80, 3, 1, 0.3) # observations from season 2 #' RegioGEVSeas(x1=x1, x2=x2, p=0.95) #' #' # Missing values in both seasons in the same year(s): #' x1a <- c(NA, x1, NA) #' x2a <- c(NA, x2, NA) #' RegioGEVSeas(x1a, x2a, p=0.99, leftrim=0, rightrim=0, na.rm=TRUE) #' #' # Missing values in both seasons in different year(s): #' x1b <- x1 #' x1b[c(4,19)] <- NA #' x2b <- x2 #' x2b[77] <- NA #' RegioGEVSeas(x1b, x2b, p=0.99, leftrim=0, rightrim=0, na.rm=TRUE) #' #' # Seasonal observations of 100 years at 4 stations: #' x1 <- matrix(rgev(400, 2, 1, 0.3), ncol=4) #' x2 <- matrix(rgev(400, 4, 1, 0.2), ncol=4) #' # estimate quantile for station 1 and 2 (consider the same shape-parameters): #' RegioGEVSeas(x1, x2, p=0.99, j=1, leftrim=0, rightrim=0) #' RegioGEVSeas(x1, x2, p=0.99, j=2, leftrim=0, rightrim=0) #' #' # With missing values: #' x3 <- x1 #' x4 <- x2 #' x3[c(54, 89, 300)] <- NA #' RegioGEVSeas(x3, x4, p=0.99, j=1, leftrim=0, rightrim=0) #' @export RegioGEVSeas <- function(x1, x2, p, j=1, leftrim=0, rightrim=0, na.rm=TRUE, ...){ if(!all(p < 1 & p > 0)) stop("p must be a probability: p in (0,1)") param1 <- RegioGEV(x1, p=p, j=j, leftrim=leftrim, rightrim=rightrim, na.rm=na.rm, ...)$param param2 <- RegioGEV(x2, p=p, j=j, leftrim=leftrim, rightrim=rightrim, na.rm=na.rm, ...)$param quan <- qGEVxGEV(p=p, param1, param2) return(list(quant=quan, param1=param1, param2=param2)) }
/R/regional_gev.R
no_license
cran/flood
R
false
false
7,595
r
#' @title Regional (or local) parameter and quantile estimation #' @description Calculates regional (or local) parameters of a generalized extreme value (GEV) distribution #' using (trimmed) L-moments #' (see \link[TLMoments]{TLMoments} and \link[TLMoments]{parameters}) from a vector or matrix of observation. #' Based on these parameters, a p-quantile of the GEV will be calculated for the jth station. #' @param x vector or matrix of observations (rows: observations, d columns: stations). #' @param p a probability. #' @param j quantile and parameter estimation for the jth #' station (jth column of \code{x}). Irrelevant if is \code{x} is a vector. #' @param leftrim integer indicating lower trimming parameter (\eqn{\ge 0}). #' @param rightrim integer indicating upper trimming parameter (\eqn{\ge 0}). #' @param na.rm Should missing values be removed? #' @param ... additional arguments, see \link[TLMoments]{TLMoments}. #' @details The optimal weights will be calculated as described in "Kinsvater, Fried and Lilienthal (2015): #' Regional extreme value index estimation and a test of tail homogeneity, #' Environmetrics, DOI: 10.1002/env.2376, Section 3.2". If it's not possible to calculate #' optimal weights (negative eigenvaules of an estimated covarinace matrix), simple weights #' will be calculated: \eqn{w_j=\frac{n_j}{sum_{j=1}^d n_j}}{w_j=n_j/sum_{j=1}^d n_j} #' @return List of \itemize{ #' \item \code{quant} quantile calculation from an estimated GEV with a regional shape-parameter. #' \item \code{param} estimated parameter vector from a GEV (using L-moments or trimmed L-moments). 
#' \item \code{w} optimal or simple weighting (just returned if \code{x} is a matrix).} #' @examples #' library("evd") #' # sample observations of 75 years at one station: #' x <- rgev(75) # x is a vector #' RegioGEV(x=x, p=0.95) #' #' x2 <- c(NA, NA, x[1:60], NA, x[61:75]) # vector of observations with missing values #' RegioGEV(x=x2, p=0.95) # NAs will be removed #' #' # sample observations of 100 years at 4 stations: #' set.seed(1053) #' x <- matrix(rgev(400, 2, 1, 0.3), ncol=4) #' RegioGEV(x=x, p=0.9, j=3, leftrim=0, rightrim=0) # optimal weighting #' RegioGEV(x=x, p=0.9, j=3, leftrim=0, rightrim=1) # optimal weighting #' #' # With missing values: #' x2 <- x #' x2[c(54, 89, 300)] <- NA #' RegioGEV(x=x2, p=0.9, j=3, leftrim=0, rightrim=0) #' #' # sample again observations of 100 years at 4 stations: #' set.seed(958) #' x <- matrix(rgev(400, 2, 1, 0.3), ncol=4) #' RegioGEV(x=x, p=0.9, j=3, leftrim=0, rightrim=0) # simple weighting #' @export RegioGEV <- function(x, p, j=1, leftrim=0, rightrim=0, na.rm=TRUE, ...){ if(!all(p < 1 & p > 0)) stop("p must be a probability: p in (0,1)") # local case (d=1): if(is.vector(x)){ para <- TLMoments(x, leftrim=leftrim, rightrim=rightrim, na.rm=na.rm, ...) %>% parameters("gev") quan <- para %>% quantiles(p) return(list(quant=quan, param=para)) } # regional case (d>1): if(is.matrix(x)){ # estimate parameters from (T)L-moments: tlmom <- TLMoments(x, leftrim=leftrim, rightrim=rightrim, na.rm=na.rm, ...) para <- tlmom %>% parameters("gev") d <- ncol(x) # calculate weights w with method "xi_cov": Sigma_Inv <- solve(est_cov(para)[1:d*3, 1:d*3]) ev <- eigen(Sigma_Inv)$values if(any(ev < 0)){ n <- apply(x, 2, function(y) sum(!is.na(y))) # winter- and sommermaxima are observed pairwise or NA pairwise. 
w <- n/sum(n) # simple weighting if any eigenvalues < 0 wname <- "w" } else{ w <- rowSums(Sigma_Inv)/sum(Sigma_Inv) # optimal weighting wname <- "wopt" } # calculate the estimated regional xi xi <- w %*% para[3,] x1 <- x[,j] betas <- PWMs(x[,j], na.rm=TRUE) # calculate the estimated mu and sigma at site j if(leftrim==0 && rightrim==0){ sig <- (2*betas[2]-betas[1])*xi/(gamma(1-xi)*(2^xi-1)) mu <- betas[1]+ sig/xi * (1-gamma(1-xi)) } if(leftrim==0 && rightrim==1){ sig <- (4*betas[2]-betas[1]-3*betas[3])/(gamma(-xi)*(3^xi-2*2^xi+1)) mu <- 2*(betas[1]-betas[2]) + sig/xi - sig*gamma(-xi)*(2^xi-2) } # calculate p-quantile quan <- as.vector(evd::qgev(p, mu, sig, xi)) return(list(quant=quan, param=c(loc=mu, scale=sig, shape=xi), w=wname)) } } #' @title Seasonal regional (or local) parameter and quantile estimation #' @description Calculates regional (or local) parameters of a two-component GEV distribution (product of two GEVs) #' using (trimmed) L-moments (see \link[TLMoments]{TLMoments} and \link[TLMoments]{parameters}) #' from two vectors or two matrices of observation, e.g. winter and summer observations #' from one or more than one station. #' Based on these two parameter vectors, a p-quantile of the two-component GEV will #' be calculated for the jth station. #' @param x1 vector or matrix of observations from season 1 (rows: observations, columns: stations). #' @param x2 vector or matrix of observations from season 2 (rows: observations, columns: stations). #' @param p a probability. #' @param j quantile and parameter estimation for the jth station (jth column of \code{x}). #' Irrelevant if is \code{x1} and \code{x2} are vectors. #' @param leftrim integer indicating lower trimming parameter (\eqn{\ge 0}). #' @param rightrim integer indicating upper trimming parameter (\eqn{\ge 0}). #' @param na.rm Should missing values be removed? #' @param ... additional arguments, see \link[TLMoments]{TLMoments}. 
#' @return List of \itemize{ #' \item \code{quant} quantile calculation from an estimated two-component GEV with a #' regional (or local) shape-parameters. #' \item \code{param1} estimated parameter vector from season 1 from a GEV (using L-moments or trimmed L-moments). #' \item \code{param2} estimated parameter vector from season 2 from a GEV (using L-moments or trimmed L-moments).} #' @examples #' library("evd") #' # Seasonal observations of 80 years at one station: #' x1 <- rgev(80, 2, 1, 0.2) # observations from season 1 #' x2 <- rgev(80, 3, 1, 0.3) # observations from season 2 #' RegioGEVSeas(x1=x1, x2=x2, p=0.95) #' #' # Missing values in both seasons in the same year(s): #' x1a <- c(NA, x1, NA) #' x2a <- c(NA, x2, NA) #' RegioGEVSeas(x1a, x2a, p=0.99, leftrim=0, rightrim=0, na.rm=TRUE) #' #' # Missing values in both seasons in different year(s): #' x1b <- x1 #' x1b[c(4,19)] <- NA #' x2b <- x2 #' x2b[77] <- NA #' RegioGEVSeas(x1b, x2b, p=0.99, leftrim=0, rightrim=0, na.rm=TRUE) #' #' # Seasonal observations of 100 years at 4 stations: #' x1 <- matrix(rgev(400, 2, 1, 0.3), ncol=4) #' x2 <- matrix(rgev(400, 4, 1, 0.2), ncol=4) #' # estimate quantile for station 1 and 2 (consider the same shape-parameters): #' RegioGEVSeas(x1, x2, p=0.99, j=1, leftrim=0, rightrim=0) #' RegioGEVSeas(x1, x2, p=0.99, j=2, leftrim=0, rightrim=0) #' #' # With missing values: #' x3 <- x1 #' x4 <- x2 #' x3[c(54, 89, 300)] <- NA #' RegioGEVSeas(x3, x4, p=0.99, j=1, leftrim=0, rightrim=0) #' @export RegioGEVSeas <- function(x1, x2, p, j=1, leftrim=0, rightrim=0, na.rm=TRUE, ...){ if(!all(p < 1 & p > 0)) stop("p must be a probability: p in (0,1)") param1 <- RegioGEV(x1, p=p, j=j, leftrim=leftrim, rightrim=rightrim, na.rm=na.rm, ...)$param param2 <- RegioGEV(x2, p=p, j=j, leftrim=leftrim, rightrim=rightrim, na.rm=na.rm, ...)$param quan <- qGEVxGEV(p=p, param1, param2) return(list(quant=quan, param1=param1, param2=param2)) }
# sfmisc utils 1.0.0 # utils ------------------------------------------------------------------- # nocov start # commonly used utility functions included from the package sfmisc #' Paste and Truncate #' #' @param x a vector #' @param width (maximum) width of result #' @param dots `character` scalar. String to use for ellipses #' @inheritParams paste #' #' @return a `character` scalar #' @noRd #' #' @examples #' ptrunc(month.abb) #' ptrunc(month.abb, month.name) #' ptrunc <- function( ..., width = 40L, sep = ", ", collapse = ", ", dots = " ..." ){ assert(width > 7L, "The minimum supported width is 8") x <- paste(..., sep = sep, collapse = collapse) sel <- vapply(x, nchar, integer(1), USE.NAMES = FALSE) > width x[sel] <- strtrim(x[sel], width = width - 4L) x[sel] <- paste0(gsub(",{0,1}\\s*$", "", x[sel]), dots) x } fmt_class <- function(x){ paste0("<", paste(x, collapse = "/"), ">") } #' @param x any \R object #' @param ignore subclasses to ignore #' @noRd class_fmt <- function(x, ignore = NULL){ fmt_class(setdiff(class(x), ignore)) } compact <- function(x){ x[!vapply(x, is.null, FALSE)] } walk <- function(.x, .f, ...){ for (i in seq_along(.x)){ .f(.x[[i]], ...) } invisible(.x) } # assertions -------------------------------------------------------------- #' Assert a condition #' #' A simpler and more efficient for [base::stopifnot()] that has an easy #' mechanism for supplying custom error messages. As opposed to `stopifnot()`, #' `assert()` only works with a single (scalar) assertions. #' #' @param cond `TRUE` or `FALSE` (without any attributes). `FALSE` will throw #' an exception with an automatically constructed error message (if `...` #' was not supplied). Anything else will throw an exception stating that #' `cond` was not valid. #' @param ... passed on to [stop()] #' @param call. 
passed on to [stop()] #' @param domain passed on to [stop()] #' #' @noRd #' #' @return TRUE on success #' #' @examples #' #' \dontrun{ #' assert(1 == 1) #' assert(1 == 2) #' } #' #' assert <- function( cond, ..., call. = FALSE, domain = NULL ){ if (identical(cond, TRUE)){ return(TRUE) } else if (identical(cond, FALSE)){ if (identical(length(list(...)), 0L)){ msg <- paste0("`", deparse(match.call()[[2]]), "`", " is not 'TRUE'") stop(msg, call. = call., domain = domain) } else { suppressWarnings( stop(..., call. = call., domain = domain) ) } } else { stop("Assertion must be either 'TRUE' or 'FALSE'") } } assert_namespace <- function(...){ res <- vapply(c(...), requireNamespace, logical(1), quietly = TRUE) if (all(res)){ return(invisible(TRUE)) } else { pkgs <- c(...) if (identical(length(pkgs), 1L)){ msg <- sprintf(paste( "This function requires the package '%s'. You can install it with", '`install.packages("%s")`.'), pkgs, pkgs ) } else { msg <- sprintf( paste( "This function requires the packages %s. You can install them with", "`install.packages(%s)`." 
), paste(names(res)[!res], collapse = ", "), deparse(names(res)) ) } } stop(msg) } # predicates -------------------------------------------------------------- is_error <- function(x){ inherits(x, "error") } is_try_error <- function(x){ inherits(x, "try-error") } is_scalar <- function(x){ identical(length(x), 1L) } is_POSIXct <- function(x){ inherits(x, "POSIXct") } is_scalar_POSIXct <- function(x){ is_POSIXct(x) && is_scalar(x) } is_POSIXlt <- function(x){ inherits(x, "POSIXlt") } is_scalar_POSIXlt <- function(x){ is_POSIXlt(x) && is_scalar(x) } is_POSIXt <- function(x){ inherits(x, "POSIXt") } is_scalar_POSIXt <- function(x){ is_POSIXt(x) && is_scalar(x) } is_Date <- function(x){ inherits(x, "Date") } is_scalar_Date <- function(x){ is_Date(x) && is_scalar(x) } is_scalar_list <- function(x){ is_list(x) && is_scalar(x) } is_scalar_atomic <- function(x){ is.atomic(x) && is_scalar(x) } is_scalar_logical <- function(x){ is.logical(x) && is_scalar(x) } is_scalar_integer <- function(x){ is.integer(x) && is_scalar(x) } is_scalar_factor <- function(x){ is.factor(x) && is_scalar(x) } is_scalar_list <- function(x){ is.list(x) && is_scalar(x) } is_scalar_numeric <- function(x){ is.numeric(x) && is_scalar(x) } is_scalar_character <- function(x){ is.character(x) && is_scalar(x) } is_vector <- function(x){ is.atomic(x) || is.list(x) } is_bool <- function(x){ is.logical(x) && !anyNA(x) } #' Check if Object is a Boolean #' #' Check wheter an object is either `TRUE` or `FALSE`. #' #' @param x Any \R Object. #' @return either `TRUE` or `FALSE` #' @noRd #' is_scalar_bool <- function(x){ identical(x, TRUE) || identical(x, FALSE) } #' Check if Object is Integer-like #' #' Check wheter an object is either `TRUE` or `FALSE`. #' #' @param x Any \R Object. 
#' @return either `TRUE` or `FALSE` #' @noRd #' is_integerish <- function(x){ if (!is.numeric(x)){ FALSE } else { all(as.integer(x) == x) } } is_scalar_integerish <- function(x){ is_scalar(x) && is_integerish(x) } is_n <- function(x){ is_scalar_integerish(x) && identical(x > 0, TRUE) } is_n0 <- function(x){ is_scalar_integerish(x) && identical(x >= 0, TRUE) } #' Check if Objects have the same length #' #' @param ... Any number of \R Objects. #' #' @return either `TRUE` or `FALSE` #' @noRd is_equal_length <- function(...){ lengths <- vapply(list(...), length, 1L) identical(length(unique(lengths)), 1L) } #' Check if Object has length 0 #' #' Check wheter an object is either `TRUE` or `FALSE`. #' #' @param x Any \R Object. #' @return either `TRUE` or `FALSE` #' @noRd #' is_empty <- function(x){ identical(length(x), 0L) } #' Check if a String is Blank #' #' Check wheter a character vector contains only of spaces #' #' @param x Any \R Object. #' @return either `TRUE` or `FALSE` #' @noRd #' is_blank <- function(x){ trimws(x) == "" } #' Test if a Vector or Combination of Vectors is a Candidate Key #' #' Checks if all elements of the atomic vector `x`, or the combination of #' all elements of `x` if `x` is a `list`, are unique and neither `NA` or #' `infinite`. 
#' #' @param x a atomic vector or a list of atomic vectors #' #' @return `TRUE/FALSE` #' @noRd #' #' @examples #' #' is_candidate_key(c(1, 2, 3)) #' is_candidate_key(c(1, 2, NA)) #' is_candidate_key(c(1, 2, Inf)) #' #' td <- data.frame( #' x = 1:10, #' y = 1:2, #' z = 1:5 #' ) #' #' is_candidate_key(list(td$x, td$z)) #' # a data.frame is just a special list #' is_candidate_key(td[, c("y", "z")]) is_candidate_key <- function(x){ if (is.atomic(x)){ # !is.infinite instead of is.finite because x can be a character vector length(x) > 1 && all(!is.infinite(x)) && !any(is.na(x)) && identical(length(unique(x)), length(x)) } else if (is.list(x)){ length(x) > 0 && length(x[[1]] > 0) && do.call(is_equal_length, x) && all(vapply(x, function(.x) all(!is.infinite(.x)), logical(1))) && all(vapply(x, function(.x) !any(is.na(.x)), logical(1))) && !any(duplicated(as.data.frame(x))) } } # https://modern-sql.com/feature/is-distinct-from is_not_distinct_from <- function(x, y){ ((x == y) & !is.na(x) & !is.na(y)) | (is.na(x) & is.na(y)) } is_distinct_from <- function(x, y){ ((x != y) & !is.na(x) & !is.na(y)) | (is.na(x) != is.na(y)) } is_windows_path <- function(x){ nchar(x) >= 2 & grepl("^[A-Za-z].*", x) & substr(x, 2, 2) == ":" } # equalish ---------------------------------------------------------------- #' Check for equality within a tolerance level #' #' #' #' @param x,y `numeric` vectors #' @param tolerance `numeric` scalar. tolerance level (absolute value). Defaults #' to `.Machine$double.eps^0.5` which is a sensible default for comparing #' floating point numbers. #' #' @return `equalish()` returns TRUE if the absolute difference between `x` and #' `y` is less than `tolerance`. 
#' @noRd #' @seealso [.Machine] #' #' #' @examples #' a <- 0.7 #' b <- 0.2 #' a - b == 0.5 #' equalish(a - b, 0.5) #' equalish <- function(x, y, tolerance = .Machine$double.eps ^ 0.5){ assert(is_scalar_numeric(tolerance) && tolerance >= 0) abs(x - y) < tolerance } #' @return `equalish_frac()` returns `TRUE` if the relative difference between #' `x` and `y` is smaller than `tolerance`. The relative difference is #' defined as `abs(x - y) / pmax(abs(x), abs(y))`. If both `x` and `y` are #' `0` the relative difference is not defined, but this function will still #' return `TRUE`. #' #' @noRd #' @examples #' #' equalish_frac(1000, 1010, tolerance = 0.01) #' equalish_frac(1000, 1010, tolerance = 0.009) #' equalish_frac(0, 0) #' equalish_frac <- function(x, y, tolerance = .Machine$double.eps ^ 0.5){ assert(is_scalar_numeric(tolerance) && tolerance >= 0) res <- abs(x - y) / pmax(abs(x), abs(y)) < tolerance res[x == 0 & y == 0] <- TRUE res } # all_are ----------------------------------------------------------------- #' Convert vector if identical elements to scalar #' #' Returns `unique(x)` if all elements of `x` are identical, throws an error if #' not. #' #' @inheritParams all_are_identical #' #' @return A scalar of the same type as `x` #' @noRd as_scalar <- function(x){ res <- unique(x) if (is_scalar(res)){ return(res) } else { stop("Not all elements of x are identical") } } #' Test if all elements of a vector are identical #' #' @param x any object that can be handled by [unique()] (usually a vector or #' list) #' @param empty_value Value to return if function is called on a vector of #' length 0 (e.g. `NULL`, `numeric()`, ...) 
#' #' @noRd #' @family special equality checks #' @return `TRUE/FALSE` #' #' @examples #' #' all_are_identical(c(1,2,3)) #' all_are_identical(c(1,1,1)) #' all_are_identical <- function(x, empty_value = FALSE) { assert(length(empty_value) <= 1) if (length(x) > 0L) { return(identical(length(unique(x)), 1L)) } else { if (is.null(x)){ warning("'x' is NULL") } else { warning("'x' is an empty vector") } return(empty_value) } } #' Test if all elements of a vector are unique #' #' @inheritParams all_are_identical #' #' @return TRUE/FALSE #' #' @noRd #' @family special equality checks #' #' @examples #' #' all_are_identical(c(1,2,3)) #' all_are_identical(c(1,1,1)) #' all_are_distinct <- function( x, empty_value = FALSE ){ assert(length(empty_value) <= 1) if (identical(length(x), 1L)) { return(TRUE) } else if (length(x) > 1L) { return(identical(length(unique(x)), length(x))) } else { if (is.null(x)){ warning("'x' is NULL") } else { warning("'x' is an empty vector") } return(empty_value) } } n_distinct <- function(x){ length(unique(x)) } # misc -------------------------------------------------------------------- pad_left <- function( x, width = max(nchar(paste(x))), pad = " " ){ diff <- pmax(width - nchar(paste(x)), 0L) padding <- vapply(diff, function(i) paste(rep.int(pad, i), collapse = ""), character(1)) paste0(padding, x) } pad_right <- function( x, width = max(nchar(paste(x))), pad = " " ){ diff <- pmax(width - nchar(paste(x)), 0L) padding <- vapply(diff, function(i) paste(rep.int(pad, i), collapse = ""), character(1)) paste0(x, padding) } `%||%` <- function(x, y){ if (is.null(x)) y else (x) } preview_object <- function( x, width = 32, brackets = c("(", ")"), quotes = c("`", "`"), dots = ".." 
){ if (!is.atomic(x)) return(class_fmt(x)) if (is.numeric(x)) x <- format(x, justify = "none", drop0trailing = TRUE, trim = TRUE) res <- ptrunc(x, collapse = ", ", width = width, dots = dots) if (length(x) > 1) res <- paste0(brackets[[1]], res, brackets[[2]]) else res <- paste0(quotes[[1]], res, quotes[[2]]) res } #' Clean up paths to make them comparable, inspired by fs::path_tidy #' #' @param x `character` vector #' #' @return a `character` vector #' @noRd path_tidy <- function(x){ x <- gsub("\\\\", "/", x) x <- gsub("(?!^)/+", "/", x, perl = TRUE) sel <- x != "/" x[sel] <- gsub("/$", "", x[sel]) sel <- is_windows_path(x) if (any(sel)){ clean_win <- function(.x){ substr(.x, 1, 1) <- toupper(substr(.x, 1 ,1)) .sel <- nchar(.x) == 2 .x[.sel] <- paste0(.x[.sel], "/") .x } x[sel] <- clean_win(x[sel]) } x } # nocov end
/R/utils-sfmisc.R
permissive
s-fleck/hammr
R
false
false
12,679
r
# sfmisc utils 1.0.0 # utils ------------------------------------------------------------------- # nocov start # commonly used utility functions included from the package sfmisc #' Paste and Truncate #' #' @param x a vector #' @param width (maximum) width of result #' @param dots `character` scalar. String to use for ellipses #' @inheritParams paste #' #' @return a `character` scalar #' @noRd #' #' @examples #' ptrunc(month.abb) #' ptrunc(month.abb, month.name) #' ptrunc <- function( ..., width = 40L, sep = ", ", collapse = ", ", dots = " ..." ){ assert(width > 7L, "The minimum supported width is 8") x <- paste(..., sep = sep, collapse = collapse) sel <- vapply(x, nchar, integer(1), USE.NAMES = FALSE) > width x[sel] <- strtrim(x[sel], width = width - 4L) x[sel] <- paste0(gsub(",{0,1}\\s*$", "", x[sel]), dots) x } fmt_class <- function(x){ paste0("<", paste(x, collapse = "/"), ">") } #' @param x any \R object #' @param ignore subclasses to ignore #' @noRd class_fmt <- function(x, ignore = NULL){ fmt_class(setdiff(class(x), ignore)) } compact <- function(x){ x[!vapply(x, is.null, FALSE)] } walk <- function(.x, .f, ...){ for (i in seq_along(.x)){ .f(.x[[i]], ...) } invisible(.x) } # assertions -------------------------------------------------------------- #' Assert a condition #' #' A simpler and more efficient for [base::stopifnot()] that has an easy #' mechanism for supplying custom error messages. As opposed to `stopifnot()`, #' `assert()` only works with a single (scalar) assertions. #' #' @param cond `TRUE` or `FALSE` (without any attributes). `FALSE` will throw #' an exception with an automatically constructed error message (if `...` #' was not supplied). Anything else will throw an exception stating that #' `cond` was not valid. #' @param ... passed on to [stop()] #' @param call. 
passed on to [stop()] #' @param domain passed on to [stop()] #' #' @noRd #' #' @return TRUE on success #' #' @examples #' #' \dontrun{ #' assert(1 == 1) #' assert(1 == 2) #' } #' #' assert <- function( cond, ..., call. = FALSE, domain = NULL ){ if (identical(cond, TRUE)){ return(TRUE) } else if (identical(cond, FALSE)){ if (identical(length(list(...)), 0L)){ msg <- paste0("`", deparse(match.call()[[2]]), "`", " is not 'TRUE'") stop(msg, call. = call., domain = domain) } else { suppressWarnings( stop(..., call. = call., domain = domain) ) } } else { stop("Assertion must be either 'TRUE' or 'FALSE'") } } assert_namespace <- function(...){ res <- vapply(c(...), requireNamespace, logical(1), quietly = TRUE) if (all(res)){ return(invisible(TRUE)) } else { pkgs <- c(...) if (identical(length(pkgs), 1L)){ msg <- sprintf(paste( "This function requires the package '%s'. You can install it with", '`install.packages("%s")`.'), pkgs, pkgs ) } else { msg <- sprintf( paste( "This function requires the packages %s. You can install them with", "`install.packages(%s)`." 
), paste(names(res)[!res], collapse = ", "), deparse(names(res)) ) } } stop(msg) } # predicates -------------------------------------------------------------- is_error <- function(x){ inherits(x, "error") } is_try_error <- function(x){ inherits(x, "try-error") } is_scalar <- function(x){ identical(length(x), 1L) } is_POSIXct <- function(x){ inherits(x, "POSIXct") } is_scalar_POSIXct <- function(x){ is_POSIXct(x) && is_scalar(x) } is_POSIXlt <- function(x){ inherits(x, "POSIXlt") } is_scalar_POSIXlt <- function(x){ is_POSIXlt(x) && is_scalar(x) } is_POSIXt <- function(x){ inherits(x, "POSIXt") } is_scalar_POSIXt <- function(x){ is_POSIXt(x) && is_scalar(x) } is_Date <- function(x){ inherits(x, "Date") } is_scalar_Date <- function(x){ is_Date(x) && is_scalar(x) } is_scalar_list <- function(x){ is_list(x) && is_scalar(x) } is_scalar_atomic <- function(x){ is.atomic(x) && is_scalar(x) } is_scalar_logical <- function(x){ is.logical(x) && is_scalar(x) } is_scalar_integer <- function(x){ is.integer(x) && is_scalar(x) } is_scalar_factor <- function(x){ is.factor(x) && is_scalar(x) } is_scalar_list <- function(x){ is.list(x) && is_scalar(x) } is_scalar_numeric <- function(x){ is.numeric(x) && is_scalar(x) } is_scalar_character <- function(x){ is.character(x) && is_scalar(x) } is_vector <- function(x){ is.atomic(x) || is.list(x) } is_bool <- function(x){ is.logical(x) && !anyNA(x) } #' Check if Object is a Boolean #' #' Check wheter an object is either `TRUE` or `FALSE`. #' #' @param x Any \R Object. #' @return either `TRUE` or `FALSE` #' @noRd #' is_scalar_bool <- function(x){ identical(x, TRUE) || identical(x, FALSE) } #' Check if Object is Integer-like #' #' Check wheter an object is either `TRUE` or `FALSE`. #' #' @param x Any \R Object. 
#' @return either `TRUE` or `FALSE` #' @noRd #' is_integerish <- function(x){ if (!is.numeric(x)){ FALSE } else { all(as.integer(x) == x) } } is_scalar_integerish <- function(x){ is_scalar(x) && is_integerish(x) } is_n <- function(x){ is_scalar_integerish(x) && identical(x > 0, TRUE) } is_n0 <- function(x){ is_scalar_integerish(x) && identical(x >= 0, TRUE) } #' Check if Objects have the same length #' #' @param ... Any number of \R Objects. #' #' @return either `TRUE` or `FALSE` #' @noRd is_equal_length <- function(...){ lengths <- vapply(list(...), length, 1L) identical(length(unique(lengths)), 1L) } #' Check if Object has length 0 #' #' Check wheter an object is either `TRUE` or `FALSE`. #' #' @param x Any \R Object. #' @return either `TRUE` or `FALSE` #' @noRd #' is_empty <- function(x){ identical(length(x), 0L) } #' Check if a String is Blank #' #' Check wheter a character vector contains only of spaces #' #' @param x Any \R Object. #' @return either `TRUE` or `FALSE` #' @noRd #' is_blank <- function(x){ trimws(x) == "" } #' Test if a Vector or Combination of Vectors is a Candidate Key #' #' Checks if all elements of the atomic vector `x`, or the combination of #' all elements of `x` if `x` is a `list`, are unique and neither `NA` or #' `infinite`. 
#' #' @param x a atomic vector or a list of atomic vectors #' #' @return `TRUE/FALSE` #' @noRd #' #' @examples #' #' is_candidate_key(c(1, 2, 3)) #' is_candidate_key(c(1, 2, NA)) #' is_candidate_key(c(1, 2, Inf)) #' #' td <- data.frame( #' x = 1:10, #' y = 1:2, #' z = 1:5 #' ) #' #' is_candidate_key(list(td$x, td$z)) #' # a data.frame is just a special list #' is_candidate_key(td[, c("y", "z")]) is_candidate_key <- function(x){ if (is.atomic(x)){ # !is.infinite instead of is.finite because x can be a character vector length(x) > 1 && all(!is.infinite(x)) && !any(is.na(x)) && identical(length(unique(x)), length(x)) } else if (is.list(x)){ length(x) > 0 && length(x[[1]] > 0) && do.call(is_equal_length, x) && all(vapply(x, function(.x) all(!is.infinite(.x)), logical(1))) && all(vapply(x, function(.x) !any(is.na(.x)), logical(1))) && !any(duplicated(as.data.frame(x))) } } # https://modern-sql.com/feature/is-distinct-from is_not_distinct_from <- function(x, y){ ((x == y) & !is.na(x) & !is.na(y)) | (is.na(x) & is.na(y)) } is_distinct_from <- function(x, y){ ((x != y) & !is.na(x) & !is.na(y)) | (is.na(x) != is.na(y)) } is_windows_path <- function(x){ nchar(x) >= 2 & grepl("^[A-Za-z].*", x) & substr(x, 2, 2) == ":" } # equalish ---------------------------------------------------------------- #' Check for equality within a tolerance level #' #' #' #' @param x,y `numeric` vectors #' @param tolerance `numeric` scalar. tolerance level (absolute value). Defaults #' to `.Machine$double.eps^0.5` which is a sensible default for comparing #' floating point numbers. #' #' @return `equalish()` returns TRUE if the absolute difference between `x` and #' `y` is less than `tolerance`. 
#' @noRd #' @seealso [.Machine] #' #' #' @examples #' a <- 0.7 #' b <- 0.2 #' a - b == 0.5 #' equalish(a - b, 0.5) #' equalish <- function(x, y, tolerance = .Machine$double.eps ^ 0.5){ assert(is_scalar_numeric(tolerance) && tolerance >= 0) abs(x - y) < tolerance } #' @return `equalish_frac()` returns `TRUE` if the relative difference between #' `x` and `y` is smaller than `tolerance`. The relative difference is #' defined as `abs(x - y) / pmax(abs(x), abs(y))`. If both `x` and `y` are #' `0` the relative difference is not defined, but this function will still #' return `TRUE`. #' #' @noRd #' @examples #' #' equalish_frac(1000, 1010, tolerance = 0.01) #' equalish_frac(1000, 1010, tolerance = 0.009) #' equalish_frac(0, 0) #' equalish_frac <- function(x, y, tolerance = .Machine$double.eps ^ 0.5){ assert(is_scalar_numeric(tolerance) && tolerance >= 0) res <- abs(x - y) / pmax(abs(x), abs(y)) < tolerance res[x == 0 & y == 0] <- TRUE res } # all_are ----------------------------------------------------------------- #' Convert vector if identical elements to scalar #' #' Returns `unique(x)` if all elements of `x` are identical, throws an error if #' not. #' #' @inheritParams all_are_identical #' #' @return A scalar of the same type as `x` #' @noRd as_scalar <- function(x){ res <- unique(x) if (is_scalar(res)){ return(res) } else { stop("Not all elements of x are identical") } } #' Test if all elements of a vector are identical #' #' @param x any object that can be handled by [unique()] (usually a vector or #' list) #' @param empty_value Value to return if function is called on a vector of #' length 0 (e.g. `NULL`, `numeric()`, ...) 
#' #' @noRd #' @family special equality checks #' @return `TRUE/FALSE` #' #' @examples #' #' all_are_identical(c(1,2,3)) #' all_are_identical(c(1,1,1)) #' all_are_identical <- function(x, empty_value = FALSE) { assert(length(empty_value) <= 1) if (length(x) > 0L) { return(identical(length(unique(x)), 1L)) } else { if (is.null(x)){ warning("'x' is NULL") } else { warning("'x' is an empty vector") } return(empty_value) } } #' Test if all elements of a vector are unique #' #' @inheritParams all_are_identical #' #' @return TRUE/FALSE #' #' @noRd #' @family special equality checks #' #' @examples #' #' all_are_identical(c(1,2,3)) #' all_are_identical(c(1,1,1)) #' all_are_distinct <- function( x, empty_value = FALSE ){ assert(length(empty_value) <= 1) if (identical(length(x), 1L)) { return(TRUE) } else if (length(x) > 1L) { return(identical(length(unique(x)), length(x))) } else { if (is.null(x)){ warning("'x' is NULL") } else { warning("'x' is an empty vector") } return(empty_value) } } n_distinct <- function(x){ length(unique(x)) } # misc -------------------------------------------------------------------- pad_left <- function( x, width = max(nchar(paste(x))), pad = " " ){ diff <- pmax(width - nchar(paste(x)), 0L) padding <- vapply(diff, function(i) paste(rep.int(pad, i), collapse = ""), character(1)) paste0(padding, x) } pad_right <- function( x, width = max(nchar(paste(x))), pad = " " ){ diff <- pmax(width - nchar(paste(x)), 0L) padding <- vapply(diff, function(i) paste(rep.int(pad, i), collapse = ""), character(1)) paste0(x, padding) } `%||%` <- function(x, y){ if (is.null(x)) y else (x) } preview_object <- function( x, width = 32, brackets = c("(", ")"), quotes = c("`", "`"), dots = ".." 
){ if (!is.atomic(x)) return(class_fmt(x)) if (is.numeric(x)) x <- format(x, justify = "none", drop0trailing = TRUE, trim = TRUE) res <- ptrunc(x, collapse = ", ", width = width, dots = dots) if (length(x) > 1) res <- paste0(brackets[[1]], res, brackets[[2]]) else res <- paste0(quotes[[1]], res, quotes[[2]]) res } #' Clean up paths to make them comparable, inspired by fs::path_tidy #' #' @param x `character` vector #' #' @return a `character` vector #' @noRd path_tidy <- function(x){ x <- gsub("\\\\", "/", x) x <- gsub("(?!^)/+", "/", x, perl = TRUE) sel <- x != "/" x[sel] <- gsub("/$", "", x[sel]) sel <- is_windows_path(x) if (any(sel)){ clean_win <- function(.x){ substr(.x, 1, 1) <- toupper(substr(.x, 1 ,1)) .sel <- nchar(.x) == 2 .x[.sel] <- paste0(.x[.sel], "/") .x } x[sel] <- clean_win(x[sel]) } x } # nocov end
% File man/L.o_MCMN.Rd \name{L.o_MCMN} \alias{L.o_MCMN} \title{Returns the mean number of customers of a MultiClass Mixed Network} \description{ Returns the mean number of customers of a MultiClass Mixed Network } \usage{ \method{L}{o_MCMN}(x, \dots) } \arguments{ \item{x}{a object of class o_MCMN} \item{\dots}{aditional arguments} } \details{Returns the mean number of customers of a MultiClass Mixed Network} \references{ [Lazowska84] Edward D. Lazowska, John Zahorjan, G. Scott Graham, and Kenneth C. Sevcik (1984).\cr \emph{Quantitative System Performance: Computer System Analysis Using Queueing Network Models}.\cr Prentice-Hall, Inc., Englewood Cliffs, New Jersey } \seealso{ \code{\link{QueueingModel.i_MCMN}}. } \examples{ ## See example in pag 147 in reference [Lazowska84] for more details. classes <- 4 vLambda <- c(1, 1/2) vNumber <- c(1, 1) vThink <- c(0, 0) nodes <- 2 vType <- c("Q", "Q") vVisit <- matrix(data=1, nrow=4, ncol=2) vService <- matrix(data=c(1/4, 1/2, 1/2, 1, 1/6, 1, 1, 4/3), nrow=4, ncol=2) i_mcmn1 <- NewInput.MCMN(classes, vLambda, vNumber, vThink, nodes, vType, vVisit, vService) # Build the model o_mcmn1 <- QueueingModel(i_mcmn1) L(o_mcmn1) } \keyword{MultiClass Mixed Network}
/man/L.o_MCMN.Rd
no_license
cran/queueing
R
false
false
1,240
rd
% File man/L.o_MCMN.Rd \name{L.o_MCMN} \alias{L.o_MCMN} \title{Returns the mean number of customers of a MultiClass Mixed Network} \description{ Returns the mean number of customers of a MultiClass Mixed Network } \usage{ \method{L}{o_MCMN}(x, \dots) } \arguments{ \item{x}{a object of class o_MCMN} \item{\dots}{aditional arguments} } \details{Returns the mean number of customers of a MultiClass Mixed Network} \references{ [Lazowska84] Edward D. Lazowska, John Zahorjan, G. Scott Graham, and Kenneth C. Sevcik (1984).\cr \emph{Quantitative System Performance: Computer System Analysis Using Queueing Network Models}.\cr Prentice-Hall, Inc., Englewood Cliffs, New Jersey } \seealso{ \code{\link{QueueingModel.i_MCMN}}. } \examples{ ## See example in pag 147 in reference [Lazowska84] for more details. classes <- 4 vLambda <- c(1, 1/2) vNumber <- c(1, 1) vThink <- c(0, 0) nodes <- 2 vType <- c("Q", "Q") vVisit <- matrix(data=1, nrow=4, ncol=2) vService <- matrix(data=c(1/4, 1/2, 1/2, 1, 1/6, 1, 1, 4/3), nrow=4, ncol=2) i_mcmn1 <- NewInput.MCMN(classes, vLambda, vNumber, vThink, nodes, vType, vVisit, vService) # Build the model o_mcmn1 <- QueueingModel(i_mcmn1) L(o_mcmn1) } \keyword{MultiClass Mixed Network}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/set_key_token.R \name{set_key_token} \alias{set_key_token} \title{Set Key-Token} \usage{ set_key_token(key_file = NULL, token_file = NULL, path = getwd()) } \arguments{ \item{key_file}{The name of the file which contains the Trello key} \item{token_file}{The name of the file which contains the user token} \item{path}{The path to the location of the user token file} } \description{ This function allows you to set the user key and token for API calls. You can either link to text files which contain the key and token, or you can leave the arguments blank and manually input the key and token when prompted } \keyword{repello}
/man/set_key_token.Rd
no_license
thomasgstewart/repello
R
false
true
710
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/set_key_token.R \name{set_key_token} \alias{set_key_token} \title{Set Key-Token} \usage{ set_key_token(key_file = NULL, token_file = NULL, path = getwd()) } \arguments{ \item{key_file}{The name of the file which contains the Trello key} \item{token_file}{The name of the file which contains the user token} \item{path}{The path to the location of the user token file} } \description{ This function allows you to set the user key and token for API calls. You can either link to text files which contain the key and token, or you can leave the arguments blank and manually input the key and token when prompted } \keyword{repello}
install.packages("statnet") library(statnet) install.packages("coda") library(coda) library(igraph) data <- read.csv("Social Network Analysis.csv", header=TRUE, sep = ",", stringsAsFactors = F) airport_code <- read.csv("Airport_Code.csv", header=TRUE, sep = ",", stringsAsFactors = F) data$codeshare <- NULL data$source.airport.id <- NULL data$destination.airport.id <- NULL data$airline.ID <- NULL data$equipment <- NULL #Remove the few observations where "stop" column is equal to 1 (we want only direct flights) data <- subset(data, data$stops<1) data$stops <- NULL #add columns with city and country per airport code set.seed(1) my_network2 <- graph.data.frame(data[,2:3], directed = TRUE) # don't run it takes too much time with entire data # plot(my_network2) #subset my_network3 <- graph.data.frame(data[1:50,2:3], directed = TRUE) #my_network3 <- graph_from_data_frame(data, directed = TRUE, vertices = NULL) 2nd method summary(my_network3) plot(my_network3, main="Airport routes", cex.main=0.8) # Plot network #flomodel_1 <- ergm(graph1~edges) #model1 <- ergm(flomarriage~edges) ############### library(readxl) library(statnet) library(coda) library(igraph) library(dplyr) library(intergraph) library(network) flights <- read.csv("Social Network Analysis.csv", header=TRUE, sep = ",", stringsAsFactors = F) airport_codes <- read_excel("airport-codes.xls", sheet = "Airport_Codes") flights$codeshare <- NULL flights$source.airport.id <- NULL flights$destination.airport.id <- NULL flights$airline.ID <- NULL flights$equipment <- NULL flights$stops <- NULL flights <- merge(flights, airport_codes, by.x = "source.airport", by.y = "Airport Code", all.x = T) names(flights) <- c("source.airport","airline","destination.apirport","Source_Country") flights <- merge(flights, airport_codes, by.x = "destination.apirport", by.y = "Airport Code", all.x = T) names(flights) <- c("destination.airport","source.airport","airline","Source_Country", "Destination_Country") flights$destination.airport <- 
NULL flights$source.airport <- NULL flights <- flights[((!is.na(flights$Source_Country)) & (!is.na(flights$Destination_Country))),] any(is.na(flights)) # Remove Domestic flights flights <- flights[flights$Source_Country != flights$Destination_Country,] # flights <- flights %>% # group_by(Source_Country, Destination_Country) %>% # summarize(count = n()) %>% # arrange(Source_Country, Destination_Country) set.seed(1) flights <- unique(flights) my_network2 <- graph.data.frame(flights[,2:3], directed = TRUE) plot(my_network2) my_network <- asNetwork(my_network2) my_ergm_01 <- ergm(my_network~edges) summary(my_ergm_01) my_ergm_01 <- ergm(my_network~ edges + triangle) summary(my_ergm_01)
/Siddhant/ERGM.R
no_license
lucasbonnett/SNA
R
false
false
2,736
r
install.packages("statnet") library(statnet) install.packages("coda") library(coda) library(igraph) data <- read.csv("Social Network Analysis.csv", header=TRUE, sep = ",", stringsAsFactors = F) airport_code <- read.csv("Airport_Code.csv", header=TRUE, sep = ",", stringsAsFactors = F) data$codeshare <- NULL data$source.airport.id <- NULL data$destination.airport.id <- NULL data$airline.ID <- NULL data$equipment <- NULL #Remove the few observations where "stop" column is equal to 1 (we want only direct flights) data <- subset(data, data$stops<1) data$stops <- NULL #add columns with city and country per airport code set.seed(1) my_network2 <- graph.data.frame(data[,2:3], directed = TRUE) # don't run it takes too much time with entire data # plot(my_network2) #subset my_network3 <- graph.data.frame(data[1:50,2:3], directed = TRUE) #my_network3 <- graph_from_data_frame(data, directed = TRUE, vertices = NULL) 2nd method summary(my_network3) plot(my_network3, main="Airport routes", cex.main=0.8) # Plot network #flomodel_1 <- ergm(graph1~edges) #model1 <- ergm(flomarriage~edges) ############### library(readxl) library(statnet) library(coda) library(igraph) library(dplyr) library(intergraph) library(network) flights <- read.csv("Social Network Analysis.csv", header=TRUE, sep = ",", stringsAsFactors = F) airport_codes <- read_excel("airport-codes.xls", sheet = "Airport_Codes") flights$codeshare <- NULL flights$source.airport.id <- NULL flights$destination.airport.id <- NULL flights$airline.ID <- NULL flights$equipment <- NULL flights$stops <- NULL flights <- merge(flights, airport_codes, by.x = "source.airport", by.y = "Airport Code", all.x = T) names(flights) <- c("source.airport","airline","destination.apirport","Source_Country") flights <- merge(flights, airport_codes, by.x = "destination.apirport", by.y = "Airport Code", all.x = T) names(flights) <- c("destination.airport","source.airport","airline","Source_Country", "Destination_Country") flights$destination.airport <- 
NULL flights$source.airport <- NULL flights <- flights[((!is.na(flights$Source_Country)) & (!is.na(flights$Destination_Country))),] any(is.na(flights)) # Remove Domestic flights flights <- flights[flights$Source_Country != flights$Destination_Country,] # flights <- flights %>% # group_by(Source_Country, Destination_Country) %>% # summarize(count = n()) %>% # arrange(Source_Country, Destination_Country) set.seed(1) flights <- unique(flights) my_network2 <- graph.data.frame(flights[,2:3], directed = TRUE) plot(my_network2) my_network <- asNetwork(my_network2) my_ergm_01 <- ergm(my_network~edges) summary(my_ergm_01) my_ergm_01 <- ergm(my_network~ edges + triangle) summary(my_ergm_01)
suppressMessages(library (shiny)) #library(markdown) #library(ggplot2) suppressMessages(library(DT)) suppressMessages(library(limma)) suppressMessages(library(shinydashboard)) suppressMessages(library(edgeR)) suppressMessages(library(RColorBrewer)) suppressMessages(library(tidyverse)) suppressMessages(library(fgsea)) suppressMessages(library(data.table)) #library(Glimma) suppressMessages(library(org.Hs.eg.db)) suppressMessages(library(msigdbr)) #library(shinyBS) suppressMessages(library(topconfects)) suppressMessages(library(enrichR)) suppressMessages(library(viper)) suppressMessages(library(dorothea)) suppressMessages(library("tmod")) suppressMessages(library(GEOquery)) suppressMessages(library(pheatmap)) suppressMessages(library(umap)) suppressMessages(library("maptools")) suppressMessages(library(dplyr)) suppressMessages(library(recount)) suppressMessages(library(SummarizedExperiment)) #suppressMessages(library("tximport")) #suppressMessages(library("scaterlegacy")) #suppressMessages(library("fastqcr")) Hs.GO <- msigdbr(species = "Homo sapiens", category = "C5") Hs.GOBP <- msigdbr(species = "Homo sapiens", category = "C5", subcategory = "BP") Hs.Reactome <- msigdbr(species = "Homo sapiens", category = "C2", subcategory = "CP:REACTOME") #Hs.motif <- msigdbr(species = "Homo sapiens", category = "C3") Hs.GOCC.full <- msigdbr(species = "Homo sapiens", category = "C5", subcategory = "CC") # Gene Ontology: Molecular Function (Full) Hs.GOMF.full <- msigdbr(species = "Homo sapiens", category = "C5", subcategory = "MF") # Human Phenotype Ontology Hs.HPO <- msigdbr(species = "Homo sapiens", category = "C5", subcategory = "HPO") Hs.Biocarta <- msigdbr(species = "Homo sapiens", category = "C2", subcategory = "CP:BIOCARTA") # KEGG Hs.KEGG <- msigdbr(species = "Homo sapiens", category = "C2", subcategory = "CP:KEGG") # Pathway Interaction Database Hs.PID <- msigdbr(species = "Homo sapiens", category = "C2", subcategory = "CP:PID") # WikiPathways Hs.WikiPathways <- 
msigdbr(species = "Homo sapiens", category = "C2", subcategory = "CP:WIKIPATHWAYS") # MSigDB Chemical and Genetic Perturbations Hs.CGP <- msigdbr(species = "Homo sapiens", category = "C2", subcategory = "CGP") # MSigDB Computational Genesets Hs.Comp <- msigdbr(species = "Homo sapiens", category = "C4") # MSigDB Oncogenic Signature Genesets Hs.Oncogenic <- msigdbr(species = "Homo sapiens", category = "C6") # MSigDB Immunologic signature Genesets Hs.Immune <- msigdbr(species = "Homo sapiens", category = "C7") # MSigDB Cell Types Hs.CellType <- msigdbr(species = "Homo sapiens", category = "C8") Hs.Hallmark <- msigdbr(species = "Homo sapiens", category = "H") #Hs.GOBP.Entrez <- split(x = as.numeric(Hs.GOBP$entrez_gene), f = Hs.GOBP$gs_name) Hs.GOBP.Symbol <- split(x = Hs.GOBP$gene_symbol, f = Hs.GOBP$gs_name) #Hs.Hallmark.Entrez <- Hs.Hallmark %>% split(x = .$entrez_gene, f = .$gs_name) Hs.Hallmark.Symbol <- Hs.Hallmark %>% split(x = .$gene_symbol, f = .$gs_name) #Hs.Reactome.Entrez <- Hs.Reactome %>% split(x = .$entrez_gene, f = .$gs_name) Hs.Reactome.Symbol <- Hs.Reactome %>% split(x = .$gene_symbol, f = .$gs_name) Hs.GO.Symbol <- Hs.GO %>% split(x = .$gene_symbol, f = .$gs_name) Hs.CellType.Symbol <- Hs.CellType %>% split(x = .$gene_symbol, f = .$gs_name) Hs.Immune.Symbol <- Hs.Immune %>% split(x = .$gene_symbol, f = .$gs_name) Hs.Oncogenic.Symbol <- Hs.Oncogenic %>% split(x = .$gene_symbol, f = .$gs_name) Hs.Comp.Symbol <- Hs.Comp %>% split(x = .$gene_symbol, f = .$gs_name) Hs.CGP.Symbol <- Hs.CGP %>% split(x = .$gene_symbol, f = .$gs_name) Hs.WikiPathways.Symbol <- Hs.WikiPathways %>% split(x = .$gene_symbol, f = .$gs_name) Hs.PID.Symbol <- Hs.PID %>% split(x = .$gene_symbol, f = .$gs_name) Hs.KEGG.Symbol <- Hs.KEGG %>% split(x = .$gene_symbol, f = .$gs_name) Hs.Biocarta.Symbol <- Hs.Biocarta %>% split(x = .$gene_symbol, f = .$gs_name) Hs.HPO.Symbol <- Hs.HPO %>% split(x = .$gene_symbol, f = .$gs_name) Hs.GOMF.Symbol <- Hs.GOMF.full %>% split(x = .$gene_symbol, f 
= .$gs_name) Hs.GOCC.full.Symbol <- Hs.GOCC.full %>% split(x = .$gene_symbol, f = .$gs_name) # For Enrichr Enrichrdbs <- listEnrichrDbs() dbs <- c("ENCODE_and_ChEA_Consensus_TFs_from_ChIP-X", "ENCODE_TF_ChIP-seq_2015", "ChEA_2016", "TRANSFAC_and_JASPAR_PWMs", "TargetScan_microRNA", "ARCHS4_TFs_Coexp", "TRRUST_Transcription_Factors_2019", "TargetScan_microRNA_2017", "miRTarBase_2017") dbs_ontology <- c("GO_Biological_Process_2018", "GO_Biological_Process_2017b", "GO_Molecular_Function_2018", "GO_Molecular_Function_2017b", "GO_Cellular_Component_2018", "GO_Cellular_Component_2017b", "MSigDB_Hallmark_2020", "Reactome_2016", "BioCarta_2016", "KEGG_2019_Human", "Panther_2016", "WikiPathways_2019_Human", "BioPlanet_2019") # For DoRothEA TF analysis # data(dorothea_hs, package = "dorothea") #Subset the regulon for stringency regulon_a = dorothea_hs %>% dplyr::filter(confidence %in% c("A")) regulon_b = dorothea_hs %>% dplyr::filter(confidence %in% c("A", "B")) regulon_c = dorothea_hs %>% dplyr::filter(confidence %in% c("A", "B", "C")) regulon_d = dorothea_hs %>% dplyr::filter(confidence %in% c("A", "B", "C", "D")) regulon_e = dorothea_hs %>% dplyr::filter(confidence %in% c("A", "B", "C", "D", "E")) ###fgsea TF # ENCODE-ChEA Consensus Hs.ECC <- qusage::read.gmt(file.path("./data", "ENCODE_and_ChEA_Consensus_TFs_from_ChIP-X.gmt")) # ChEA 2016 (Enrichr) Hs.ChEA2016 <- qusage::read.gmt(file.path("./data", "ChEA_2016.gmt")) # ENCODE 2015 (Enrichr) Hs.ENCODE <- qusage::read.gmt(file.path("./data", "ENCODE_TF_ChIP-seq_2015.gmt")) # ReMap ChIP-seq 2018 Human Hs.ReMap <- qusage::read.gmt(file.path("./data", "ReMap_ChIP-seq.gmt")) # TRRUST 2019 Human Hs.TRRUST <- qusage::read.gmt(file.path("./data", "TRRUST_Transcription_Factors_2019.gmt")) # ChEA3 Literature ChIP-Seq Hs.Literature <- qusage::read.gmt(file.path("./data", "Literature_ChIP-seq.gmt")) # TRANSFAC/JASPAR PWMs (Enrichr) Hs.TRANSFACJASPAR <- qusage::read.gmt(file.path("./data", "TRANSFAC_and_JASPAR_PWMs.gmt")) # Gene 
Transcription Regulation Database (GTRD v20.06) Hs.GTRD <- msigdbr(species = "Homo sapiens", category = "C3", subcategory = "TFT:GTRD") Hs.GTRD.sel = split(x = Hs.GTRD$gene_symbol, f = Hs.GTRD$gs_name) # MSigDB Legacy TF targets Hs.TFLegacy <- msigdbr(species = "Homo sapiens", category = "C3", subcategory = "TFT:TFT_Legacy") Hs.TFLegacy.sel = split(x = Hs.TFLegacy$gene_symbol, f = Hs.TFLegacy$gs_name) # miRTarBase 2017 Hs.miR <- qusage::read.gmt(file.path("./data", "miRTarBase_2017.gmt")) # miRNA TargetScan 2017 Hs.miRTargetScan <- qusage::read.gmt(file.path("./data", "TargetScan_microRNA_2017.gmt")) # miRDB v6.0 Hs.miRNA <- msigdbr(species = "Homo sapiens", category = "C3", subcategory = "MIR:MIRDB") Hs.miRNA.sel = split(x = Hs.miRNA$gene_symbol, f = Hs.miRNA$gs_name) ##cerno msig <- tmodImportMSigDB(file.path("./data", "msigdb_v7.2.xml")) #Select the Hallmark gene sets (you can selectwhichever geneset db you would like) Hallmark.sel <- msig$MODULES$Category == "H" # Gene Ontology Biological Process (MSigDB Filtered) GOBP.sel <- msig$MODULES$Subcategory == "C5_GO:BP" # Gene Ontology Biological Process (Full) GOBPfull.sel <- msig$MODULES$Subcategory == "GO:BP" #reactome Reactome.sel <- (msig$MODULES$Subcategory == "CP:REACTOME") | (msig$MODULES$Subcategory == "C2_CP:REACTOME") # Biocarta BioCARTA.sel <- msig$MODULES$Subcategory == "C2_CP:BIOCARTA" # Gene Ontology Molecular Function (MSigDB Filtered) GOMF.sel <- msig$MODULES$Subcategory == "C5_GO:MF" # Gene Ontology Molecular Function (Full) GOMFfull.sel <- msig$MODULES$Subcategory == "GO:MF" # Gene Ontology Cellular Compartment (MSigDB Filtered) GOCC.sel <- msig$MODULES$Subcategory == "C5_GO:CC" # Gene Ontology Cellular Compartment (Full) GOCCfull.sel <- msig$MODULES$Subcategory == "C5_GO:CC" # Human Phenotype Ontology HPO.sel <- msig$MODULES$Subcategory == "HPO" # KEGG Kegg.sel <- msig$MODULES$Subcategory == "CP:KEGG" # Pathway Interaction Database PID.sel <- msig$MODULES$Subcategory == "CP:PID" #Wikipathways 
Wiki.sel <- msig$MODULES$Subcategory == "CP:WIKIPATHWAYS" # MSigDB Chemical and Genetic Perturbations CGP.sel <- msig$MODULES$Subcategory == "C2_CGP" # MSigDB Computational Genesets CM.sel <- msig$MODULES$Subcategory == "CM" # MSigDB Oncogenic Signature Genesets Onc.sel <- msig$MODULES$Category == "C6" # MSigDB Immunologic signature Genesets Imm.sel <- msig$MODULES$Category == "C7" # MSigDB Cell Types CellType.sel <- msig$MODULES$Category == "C8" data(tmod) ECC.gmt <- qusage::read.gmt(file.path("./data", "ENCODE_and_ChEA_Consensus_TFs_from_ChIP-X.gmt")) ECC.MODULES <- as.data.frame(read.csv(file.path("./data", "ECCTFlist_MODULES.csv"))) ECC.MODULES2GENES <- as.list(ECC.gmt) rownames(ECC.MODULES) <- ECC.MODULES$ID # Make ReMap ChIP-Seq mset for use with tmod ReMap.gmt <- qusage::read.gmt(file.path("./data", "ReMap_ChIP-seq.gmt")) ReMap.MODULES <- as.data.frame(read.csv(file.path("./data", "ReMapTFlist_MODULES.csv"))) ReMap.MODULES2GENES <- as.list(ReMap.gmt) rownames(ReMap.MODULES) <- ReMap.MODULES$ID # TRRUST 2019 Human TRRUST.gmt <- qusage::read.gmt(file.path("./data", "TRRUST_Transcription_Factors_2019.gmt")) TRRUST.MODULES <- as.data.frame(read.csv(file.path("./data", "TRRUST_Transcription_Factors_2019_MODULES.csv"))) TRRUST.MODULES2GENES <- as.list(TRRUST.gmt) rownames(TRRUST.MODULES) <- TRRUST.MODULES$ID # TRANSFAC/JASPAR PWMs (Enrichr) TFJ.gmt <- qusage::read.gmt(file.path("./data", "TRANSFAC_and_JASPAR_PWMs.gmt")) TFJ.MODULES <- as.data.frame(read.csv(file.path("./data", "TRANSFAC_and_JASPAR_PWMs_MODULES.csv"))) TFJ.MODULES2GENES <- as.list(TFJ.gmt) rownames(TFJ.MODULES) <- TFJ.MODULES$ID # Gene Transcription Regulation Database (GTRD v20.06) GTRD.sel <- msig$MODULES$Subcategory == "TFT:GTRD" # miRTarBase 2017 miRTarBase.gmt <- qusage::read.gmt(file.path("./data", "miRTarBase_2017.gmt")) miRTarBase.MODULES <- as.data.frame(read.csv(file.path("./data", "miRTarBase_MODULES.csv"))) miRTarBase.MODULES2GENES <- as.list(miRTarBase.gmt) rownames(miRTarBase.MODULES) 
<- miRTarBase.MODULES$ID # use msetmiRTarBase # miRDB v6.0 MIRDB.sel <- msig$MODULES$Subcategory == "MIR:MIRDB" # create transcript to gene (t2g) # martGRCh38.99 <- biomaRt::useMart(biomart = "ENSEMBL_MART_ENSEMBL", # dataset = "hsapiens_gene_ensembl", # host = 'jan2020.archive.ensembl.org', # path="/biomart/martservice") # GRCh38.99t2g<- biomaRt::getBM(attributes = c("ensembl_transcript_id_version", # "ensembl_gene_id", # "external_gene_name"), # mart = martGRCh38.99) # GRCh38.99t2g <- dplyr::rename(GRCh38.99t2g, # TXNAME = ensembl_transcript_id_version, # ENSEMBL = ensembl_gene_id, # Symbol = external_gene_name)
/global.R
permissive
SubramaniamLab/DEGenR
R
false
false
11,548
r
suppressMessages(library (shiny)) #library(markdown) #library(ggplot2) suppressMessages(library(DT)) suppressMessages(library(limma)) suppressMessages(library(shinydashboard)) suppressMessages(library(edgeR)) suppressMessages(library(RColorBrewer)) suppressMessages(library(tidyverse)) suppressMessages(library(fgsea)) suppressMessages(library(data.table)) #library(Glimma) suppressMessages(library(org.Hs.eg.db)) suppressMessages(library(msigdbr)) #library(shinyBS) suppressMessages(library(topconfects)) suppressMessages(library(enrichR)) suppressMessages(library(viper)) suppressMessages(library(dorothea)) suppressMessages(library("tmod")) suppressMessages(library(GEOquery)) suppressMessages(library(pheatmap)) suppressMessages(library(umap)) suppressMessages(library("maptools")) suppressMessages(library(dplyr)) suppressMessages(library(recount)) suppressMessages(library(SummarizedExperiment)) #suppressMessages(library("tximport")) #suppressMessages(library("scaterlegacy")) #suppressMessages(library("fastqcr")) Hs.GO <- msigdbr(species = "Homo sapiens", category = "C5") Hs.GOBP <- msigdbr(species = "Homo sapiens", category = "C5", subcategory = "BP") Hs.Reactome <- msigdbr(species = "Homo sapiens", category = "C2", subcategory = "CP:REACTOME") #Hs.motif <- msigdbr(species = "Homo sapiens", category = "C3") Hs.GOCC.full <- msigdbr(species = "Homo sapiens", category = "C5", subcategory = "CC") # Gene Ontology: Molecular Function (Full) Hs.GOMF.full <- msigdbr(species = "Homo sapiens", category = "C5", subcategory = "MF") # Human Phenotype Ontology Hs.HPO <- msigdbr(species = "Homo sapiens", category = "C5", subcategory = "HPO") Hs.Biocarta <- msigdbr(species = "Homo sapiens", category = "C2", subcategory = "CP:BIOCARTA") # KEGG Hs.KEGG <- msigdbr(species = "Homo sapiens", category = "C2", subcategory = "CP:KEGG") # Pathway Interaction Database Hs.PID <- msigdbr(species = "Homo sapiens", category = "C2", subcategory = "CP:PID") # WikiPathways Hs.WikiPathways <- 
msigdbr(species = "Homo sapiens", category = "C2", subcategory = "CP:WIKIPATHWAYS") # MSigDB Chemical and Genetic Perturbations Hs.CGP <- msigdbr(species = "Homo sapiens", category = "C2", subcategory = "CGP") # MSigDB Computational Genesets Hs.Comp <- msigdbr(species = "Homo sapiens", category = "C4") # MSigDB Oncogenic Signature Genesets Hs.Oncogenic <- msigdbr(species = "Homo sapiens", category = "C6") # MSigDB Immunologic signature Genesets Hs.Immune <- msigdbr(species = "Homo sapiens", category = "C7") # MSigDB Cell Types Hs.CellType <- msigdbr(species = "Homo sapiens", category = "C8") Hs.Hallmark <- msigdbr(species = "Homo sapiens", category = "H") #Hs.GOBP.Entrez <- split(x = as.numeric(Hs.GOBP$entrez_gene), f = Hs.GOBP$gs_name) Hs.GOBP.Symbol <- split(x = Hs.GOBP$gene_symbol, f = Hs.GOBP$gs_name) #Hs.Hallmark.Entrez <- Hs.Hallmark %>% split(x = .$entrez_gene, f = .$gs_name) Hs.Hallmark.Symbol <- Hs.Hallmark %>% split(x = .$gene_symbol, f = .$gs_name) #Hs.Reactome.Entrez <- Hs.Reactome %>% split(x = .$entrez_gene, f = .$gs_name) Hs.Reactome.Symbol <- Hs.Reactome %>% split(x = .$gene_symbol, f = .$gs_name) Hs.GO.Symbol <- Hs.GO %>% split(x = .$gene_symbol, f = .$gs_name) Hs.CellType.Symbol <- Hs.CellType %>% split(x = .$gene_symbol, f = .$gs_name) Hs.Immune.Symbol <- Hs.Immune %>% split(x = .$gene_symbol, f = .$gs_name) Hs.Oncogenic.Symbol <- Hs.Oncogenic %>% split(x = .$gene_symbol, f = .$gs_name) Hs.Comp.Symbol <- Hs.Comp %>% split(x = .$gene_symbol, f = .$gs_name) Hs.CGP.Symbol <- Hs.CGP %>% split(x = .$gene_symbol, f = .$gs_name) Hs.WikiPathways.Symbol <- Hs.WikiPathways %>% split(x = .$gene_symbol, f = .$gs_name) Hs.PID.Symbol <- Hs.PID %>% split(x = .$gene_symbol, f = .$gs_name) Hs.KEGG.Symbol <- Hs.KEGG %>% split(x = .$gene_symbol, f = .$gs_name) Hs.Biocarta.Symbol <- Hs.Biocarta %>% split(x = .$gene_symbol, f = .$gs_name) Hs.HPO.Symbol <- Hs.HPO %>% split(x = .$gene_symbol, f = .$gs_name) Hs.GOMF.Symbol <- Hs.GOMF.full %>% split(x = .$gene_symbol, f 
= .$gs_name) Hs.GOCC.full.Symbol <- Hs.GOCC.full %>% split(x = .$gene_symbol, f = .$gs_name) # For Enrichr Enrichrdbs <- listEnrichrDbs() dbs <- c("ENCODE_and_ChEA_Consensus_TFs_from_ChIP-X", "ENCODE_TF_ChIP-seq_2015", "ChEA_2016", "TRANSFAC_and_JASPAR_PWMs", "TargetScan_microRNA", "ARCHS4_TFs_Coexp", "TRRUST_Transcription_Factors_2019", "TargetScan_microRNA_2017", "miRTarBase_2017") dbs_ontology <- c("GO_Biological_Process_2018", "GO_Biological_Process_2017b", "GO_Molecular_Function_2018", "GO_Molecular_Function_2017b", "GO_Cellular_Component_2018", "GO_Cellular_Component_2017b", "MSigDB_Hallmark_2020", "Reactome_2016", "BioCarta_2016", "KEGG_2019_Human", "Panther_2016", "WikiPathways_2019_Human", "BioPlanet_2019") # For DoRothEA TF analysis # data(dorothea_hs, package = "dorothea") #Subset the regulon for stringency regulon_a = dorothea_hs %>% dplyr::filter(confidence %in% c("A")) regulon_b = dorothea_hs %>% dplyr::filter(confidence %in% c("A", "B")) regulon_c = dorothea_hs %>% dplyr::filter(confidence %in% c("A", "B", "C")) regulon_d = dorothea_hs %>% dplyr::filter(confidence %in% c("A", "B", "C", "D")) regulon_e = dorothea_hs %>% dplyr::filter(confidence %in% c("A", "B", "C", "D", "E")) ###fgsea TF # ENCODE-ChEA Consensus Hs.ECC <- qusage::read.gmt(file.path("./data", "ENCODE_and_ChEA_Consensus_TFs_from_ChIP-X.gmt")) # ChEA 2016 (Enrichr) Hs.ChEA2016 <- qusage::read.gmt(file.path("./data", "ChEA_2016.gmt")) # ENCODE 2015 (Enrichr) Hs.ENCODE <- qusage::read.gmt(file.path("./data", "ENCODE_TF_ChIP-seq_2015.gmt")) # ReMap ChIP-seq 2018 Human Hs.ReMap <- qusage::read.gmt(file.path("./data", "ReMap_ChIP-seq.gmt")) # TRRUST 2019 Human Hs.TRRUST <- qusage::read.gmt(file.path("./data", "TRRUST_Transcription_Factors_2019.gmt")) # ChEA3 Literature ChIP-Seq Hs.Literature <- qusage::read.gmt(file.path("./data", "Literature_ChIP-seq.gmt")) # TRANSFAC/JASPAR PWMs (Enrichr) Hs.TRANSFACJASPAR <- qusage::read.gmt(file.path("./data", "TRANSFAC_and_JASPAR_PWMs.gmt")) # Gene 
Transcription Regulation Database (GTRD v20.06) Hs.GTRD <- msigdbr(species = "Homo sapiens", category = "C3", subcategory = "TFT:GTRD") Hs.GTRD.sel = split(x = Hs.GTRD$gene_symbol, f = Hs.GTRD$gs_name) # MSigDB Legacy TF targets Hs.TFLegacy <- msigdbr(species = "Homo sapiens", category = "C3", subcategory = "TFT:TFT_Legacy") Hs.TFLegacy.sel = split(x = Hs.TFLegacy$gene_symbol, f = Hs.TFLegacy$gs_name) # miRTarBase 2017 Hs.miR <- qusage::read.gmt(file.path("./data", "miRTarBase_2017.gmt")) # miRNA TargetScan 2017 Hs.miRTargetScan <- qusage::read.gmt(file.path("./data", "TargetScan_microRNA_2017.gmt")) # miRDB v6.0 Hs.miRNA <- msigdbr(species = "Homo sapiens", category = "C3", subcategory = "MIR:MIRDB") Hs.miRNA.sel = split(x = Hs.miRNA$gene_symbol, f = Hs.miRNA$gs_name) ##cerno msig <- tmodImportMSigDB(file.path("./data", "msigdb_v7.2.xml")) #Select the Hallmark gene sets (you can selectwhichever geneset db you would like) Hallmark.sel <- msig$MODULES$Category == "H" # Gene Ontology Biological Process (MSigDB Filtered) GOBP.sel <- msig$MODULES$Subcategory == "C5_GO:BP" # Gene Ontology Biological Process (Full) GOBPfull.sel <- msig$MODULES$Subcategory == "GO:BP" #reactome Reactome.sel <- (msig$MODULES$Subcategory == "CP:REACTOME") | (msig$MODULES$Subcategory == "C2_CP:REACTOME") # Biocarta BioCARTA.sel <- msig$MODULES$Subcategory == "C2_CP:BIOCARTA" # Gene Ontology Molecular Function (MSigDB Filtered) GOMF.sel <- msig$MODULES$Subcategory == "C5_GO:MF" # Gene Ontology Molecular Function (Full) GOMFfull.sel <- msig$MODULES$Subcategory == "GO:MF" # Gene Ontology Cellular Compartment (MSigDB Filtered) GOCC.sel <- msig$MODULES$Subcategory == "C5_GO:CC" # Gene Ontology Cellular Compartment (Full) GOCCfull.sel <- msig$MODULES$Subcategory == "C5_GO:CC" # Human Phenotype Ontology HPO.sel <- msig$MODULES$Subcategory == "HPO" # KEGG Kegg.sel <- msig$MODULES$Subcategory == "CP:KEGG" # Pathway Interaction Database PID.sel <- msig$MODULES$Subcategory == "CP:PID" #Wikipathways 
Wiki.sel <- msig$MODULES$Subcategory == "CP:WIKIPATHWAYS" # MSigDB Chemical and Genetic Perturbations CGP.sel <- msig$MODULES$Subcategory == "C2_CGP" # MSigDB Computational Genesets CM.sel <- msig$MODULES$Subcategory == "CM" # MSigDB Oncogenic Signature Genesets Onc.sel <- msig$MODULES$Category == "C6" # MSigDB Immunologic signature Genesets Imm.sel <- msig$MODULES$Category == "C7" # MSigDB Cell Types CellType.sel <- msig$MODULES$Category == "C8" data(tmod) ECC.gmt <- qusage::read.gmt(file.path("./data", "ENCODE_and_ChEA_Consensus_TFs_from_ChIP-X.gmt")) ECC.MODULES <- as.data.frame(read.csv(file.path("./data", "ECCTFlist_MODULES.csv"))) ECC.MODULES2GENES <- as.list(ECC.gmt) rownames(ECC.MODULES) <- ECC.MODULES$ID # Make ReMap ChIP-Seq mset for use with tmod ReMap.gmt <- qusage::read.gmt(file.path("./data", "ReMap_ChIP-seq.gmt")) ReMap.MODULES <- as.data.frame(read.csv(file.path("./data", "ReMapTFlist_MODULES.csv"))) ReMap.MODULES2GENES <- as.list(ReMap.gmt) rownames(ReMap.MODULES) <- ReMap.MODULES$ID # TRRUST 2019 Human TRRUST.gmt <- qusage::read.gmt(file.path("./data", "TRRUST_Transcription_Factors_2019.gmt")) TRRUST.MODULES <- as.data.frame(read.csv(file.path("./data", "TRRUST_Transcription_Factors_2019_MODULES.csv"))) TRRUST.MODULES2GENES <- as.list(TRRUST.gmt) rownames(TRRUST.MODULES) <- TRRUST.MODULES$ID # TRANSFAC/JASPAR PWMs (Enrichr) TFJ.gmt <- qusage::read.gmt(file.path("./data", "TRANSFAC_and_JASPAR_PWMs.gmt")) TFJ.MODULES <- as.data.frame(read.csv(file.path("./data", "TRANSFAC_and_JASPAR_PWMs_MODULES.csv"))) TFJ.MODULES2GENES <- as.list(TFJ.gmt) rownames(TFJ.MODULES) <- TFJ.MODULES$ID # Gene Transcription Regulation Database (GTRD v20.06) GTRD.sel <- msig$MODULES$Subcategory == "TFT:GTRD" # miRTarBase 2017 miRTarBase.gmt <- qusage::read.gmt(file.path("./data", "miRTarBase_2017.gmt")) miRTarBase.MODULES <- as.data.frame(read.csv(file.path("./data", "miRTarBase_MODULES.csv"))) miRTarBase.MODULES2GENES <- as.list(miRTarBase.gmt) rownames(miRTarBase.MODULES) 
<- miRTarBase.MODULES$ID # use msetmiRTarBase # miRDB v6.0 MIRDB.sel <- msig$MODULES$Subcategory == "MIR:MIRDB" # create transcript to gene (t2g) # martGRCh38.99 <- biomaRt::useMart(biomart = "ENSEMBL_MART_ENSEMBL", # dataset = "hsapiens_gene_ensembl", # host = 'jan2020.archive.ensembl.org', # path="/biomart/martservice") # GRCh38.99t2g<- biomaRt::getBM(attributes = c("ensembl_transcript_id_version", # "ensembl_gene_id", # "external_gene_name"), # mart = martGRCh38.99) # GRCh38.99t2g <- dplyr::rename(GRCh38.99t2g, # TXNAME = ensembl_transcript_id_version, # ENSEMBL = ensembl_gene_id, # Symbol = external_gene_name)
# load packages library(tidyverse) library(broom) # load data frame df <- read_csv("data/ht.csv") # fit model # tidy the fit with broom
/lm-with-broom.R
no_license
pos5737/the-regression-line
R
false
false
140
r
# load packages library(tidyverse) library(broom) # load data frame df <- read_csv("data/ht.csv") # fit model # tidy the fit with broom
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/emoticons.R \name{FindUnicodeEmojisSeq} \alias{FindUnicodeEmojisSeq} \title{Find Unicode emoji sequences} \usage{ FindUnicodeEmojisSeq(text, include.text = TRUE) } \arguments{ \item{text}{Text to find emoji sequences in.} \item{include.text}{If TRUE, includes the original text in the table.} } \value{ A data.table object with emoji sequences extracted from the text. } \description{ Find Unicode emoji sequences }
/man/FindUnicodeEmojisSeq.Rd
no_license
M3SOulu/EmoticonFindeR
R
false
true
495
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/emoticons.R \name{FindUnicodeEmojisSeq} \alias{FindUnicodeEmojisSeq} \title{Find Unicode emoji sequences} \usage{ FindUnicodeEmojisSeq(text, include.text = TRUE) } \arguments{ \item{text}{Text to find emoji sequences in.} \item{include.text}{If TRUE, includes the original text in the table.} } \value{ A data.table object with emoji sequences extracted from the text. } \description{ Find Unicode emoji sequences }
source("global.R") SETUP_COLORS <- c(ETC = MAIN_COLOR, IPWE = DARK_SECONDARY_COLOR, `limited IPWE` = DARK_SECONDARY_COLOR, FBTE = THIRD_COLOR, TE = SECONDARY_COLOR) labelSetups <- function(setup_comparison, type = c("limit", "method")) { setup_comparison[ batch_size == dplyr::nth(unique(batch_size), -2) & setup == "limited IPWE", label := scales::percent(limit, accuracy = limit * 100), .(sd, setup_detailed) ] } createConvexHullData <- function(setup_comparison, chosen_setup, n = 10000) { rbind( setup_comparison[setup == chosen_setup], data.table( mse = c(setup_comparison[, min(mse)], 9.9, 10), welfare = c(n / 2, setup_comparison[, max(welfare)], n / 2), setup = chosen_setup, bias = 0 ), fill = TRUE ) %>% .[, .SD[chull(mse, welfare)]] %>% .[mse <= 10] %>% .[order(mse)] } plotWelfareVsMSE <- function(setups, n = 10000, colors = SETUP_COLORS, mse_limits, legend_position) { ggplot(mapping = aes(welfare, mse, color = setup)) + geom_path( data = setups[setup != "limited IPWE"], aes(alpha = alpha), size = 1 ) + geom_polygon( data = createConvexHullData(setups, "limited IPWE", n), fill = SETUP_COLORS["IPWE"], color = NA, alpha = 0.3 ) + geom_path( data = createConvexHullData(setups, "limited IPWE", n), linetype = "dashed", alpha = 1 ) + scale_color_manual(values = colors, guide = FALSE) + scale_y_reverse() + coord_cartesian(ylim = mse_limits, xlim = c(n / 2, n)) + labs(x = "Welfare", y = "MSE") } # Illustration ---------------------------------------------------------------- # batch_size = 1000 putTogetherSetupsWithETC(10000, 10)[(batch_size == 1000 & limit == 0) | (batch_size == 10000 & setup == "ETC")] %>% labelSetups() %>% .[is.na(label), label := setup] %>% .[, method := ifelse(label %in% c("RCT", "ETC"), "traditional", "bandit")] %>% ggplot(aes(welfare, mse, color = method)) + geom_point(aes(shape = (bias < 0.02), alpha = alpha), size = 2) + geom_label_repel(aes(label = label), segment.size = 0.2, point.padding = 0.5, show.legend = FALSE) + # annotate("rect", xmin = 
5000, xmax = 7800, ymin = 2.6, ymax = 3.7, fill = "white") + annotate("label", x = 4900, y = 2.8, label = " Biased estimates are hollow", hjust = 0, size = 8 / .pt, label.size = 0) + scale_shape_manual(values = c(21, 16), guide = FALSE) + scale_color_manual(values = TWO_COLOR_SCHEME) + scale_y_reverse() + coord_cartesian(ylim = c(3.7, 0), xlim = c(5000, 10000)) + labs(x = "Welfare", y = "MSE") + theme( legend.title = element_blank(), legend.position = c(0.18, 0.14), legend.text = element_text(size = 8), legend.background = element_rect(fill = "white", size = 0) ) saveChart("illustration-welfare-vs-te-limited") # TS only setups <- putTogetherSetupsWithETC(10000, 10)[limit == 0 & setup == "TE"] ggplot(setups, aes(welfare, mse)) + geom_blank() + scale_y_reverse() + coord_cartesian(ylim = c(1.5, 0), xlim = c(5000, 10000)) + labs(x = "Welfare", y = "MSE") saveChart("illustration-welfare-vs-te-empty") ggplot(setups, aes(welfare, mse, color = setup, alpha = alpha)) + geom_path(aes(group = setup_detailed), size = 1) + geom_point(aes(shape = (bias < 0.02), size = batch_size)) + annotate( "label", x = 5100, y = 1.4, label = "Biased estimates are hollow \nPoint size is proportional to batch size", hjust = 0, size = 8 / .pt, label.size = 0 ) + scale_size_continuous(breaks = c(10, 100, 1000), range = c(0.5, 5), guide = FALSE) + scale_shape_manual(values = c(21, 16), guide = FALSE) + scale_color_manual(values = SETUP_COLORS, guide = FALSE) + scale_y_reverse() + coord_cartesian(ylim = c(1.5, 0), xlim = c(5000, 10000)) + labs(x = "Welfare", y = "MSE") saveChart("illustration-welfare-vs-te-batch-size-te-only") # TS + ETC + FBTE setups <- putTogetherSetupsWithETC(10000, 10)[(limit == 0 & setup != "IPWE") | (batch_size == 10000 & setup == "TE")] ggplot(setups, aes(welfare, mse, color = setup, alpha = alpha)) + geom_path(aes(group = setup_detailed), size = 1) + geom_point(aes(shape = (bias < 0.02), size = batch_size)) + annotate( "label", x = 5100, y = 1.4, label = "Biased estimates 
are hollow \nPoint size is proportional to batch size", hjust = 0, size = 8 / .pt, label.size = 0 ) + annotate( "label", x = 7500, y = 1, label = "ETC", color = SETUP_COLORS["ETC"] ) + annotate( "label", x = 9500, y = 1, label = "TS-FB", color = SETUP_COLORS["FBTE"] ) + annotate( "label", x = 9500, y = 0, label = "TS", color = SETUP_COLORS["TE"] ) + scale_size_continuous(breaks = c(10, 100, 1000), range = c(0.5, 5), guide = FALSE) + scale_shape_manual(values = c(21, 16), guide = FALSE) + scale_color_manual(values = SETUP_COLORS, guide = FALSE) + scale_y_reverse() + coord_cartesian(ylim = c(1.5, 0), xlim = c(5000, 10000)) + labs(x = "Welfare", y = "MSE (reversed)") saveChart("illustration-welfare-vs-te-batch-size") limited_setups <- putTogetherSetupsWithETC(10000, 10)[setup == "limited IPWE"] %>% labelSetups() ggplot(limited_setups, aes(welfare, mse, color = setup, alpha = alpha)) + geom_point(aes(shape = (bias < 0.02), size = batch_size)) + geom_path(aes(group = setup_detailed), size = 1) + geom_polygon( data = createConvexHullData(limited_setups, "limited IPWE", n = 10000), fill = SETUP_COLORS["IPWE"], color = NA, alpha = 0.3 ) + geom_path( data = createConvexHullData(limited_setups, "limited IPWE", n = 10000), linetype = "dashed", alpha = 1 ) + geom_label_repel( aes(label = label), nudge_x = -50, segment.size = 0.2, box.padding = 0.1, point.padding = 0.5, show.legend = FALSE, na.rm = TRUE ) + annotate( "text", x = 5100, y = 1.3, label = "Biased estimates are hollow \nPoint size is proportional to batch size \nThe shaded area shows the set of possible choices", hjust = 0, size = 8 / .pt ) + scale_size_continuous(breaks = c(10, 100, 1000), range = c(0.5, 5), guide = FALSE) + scale_shape_manual(values = c(`FALSE` = 1, `TRUE` = 16), guide = FALSE) + scale_color_manual(values = SETUP_COLORS, guide = FALSE) + scale_y_reverse() + coord_cartesian(ylim = c(1.5, 0), xlim = c(5000, 10000)) + labs(x = "Welfare", y = "MSE (reversed)") 
saveChart("illustration-welfare-vs-te-batch-size-limited") all_setups <- putTogetherSetupsWithETC(10000, 10)[!(setup %in% c("limited TE", "IPWE"))] plotWelfareVsMSE(all_setups, mse_limits = c(1.5, 0)) + annotate( "label", x = 7500, y = 1, label = "ETC", color = SETUP_COLORS["ETC"] ) + annotate( "label", x = 9500, y = 1, label = "TS-FB", color = SETUP_COLORS["FBTE"] ) + annotate( "label", x = 9500, y = 0, label = "TS", color = SETUP_COLORS["TE"] ) + annotate( "text", x = 7000, y = 0.25, label = "LTS-IPW", color = SETUP_COLORS["IPWE"] ) + annotate( "segment", x = 9000, xend = 9000, y = 1.1, yend = 1.4, arrow = arrow(length = unit(0.1,"cm")) ) + annotate( "text", x = 8500, y = 1.25, label = "Smaller \nbatches", size = 8 / .pt, hjust = 0.5 ) saveChart("illustration-welfare-vs-te-batch-size-full") # link by batch size # frontier: ~ mainly best batch size and play with limit all_setups %>% .[setup == "limited IPWE", setup_detailed := paste("limited IPWE", batch_size)] %>% .[setup == "limited IPWE", max_mse := max(mse), by = batch_size] %>% .[setup == "limited IPWE" & mse == max_mse, label := batch_size] %>% ggplot(aes(welfare, mse, color = setup, alpha = alpha)) + geom_point(aes(shape = (bias < 0.02), size = batch_size)) + geom_label_repel(aes(label = label), segment.size = 0.2, box.padding = 0.1, point.padding = 0.5, show.legend = FALSE, na.rm = TRUE) + geom_path(aes(group = setup_detailed), size = 1) + scale_size_continuous(breaks = c(10, 100, 1000), range = c(0.5, 3), guide = FALSE) + scale_shape_manual(values = c(`FALSE` = 1, `TRUE` = 16), guide = FALSE) + scale_color_manual(values = SETUP_COLORS, guide = FALSE) + scale_y_reverse() + coord_cartesian(ylim = c(3.7, 0), xlim = c(5000, 10000)) + labs(y = "MSE") n <- 10000 putTogetherSetupsWithETC(n, 30)[setup == "limited IPWE"] %>% .[, setup_detailed := paste("limited IPWE", batch_size)] %>% .[, max_mse := max(mse), by = batch_size] %>% .[mse == max_mse & batch_size %in% c(10, 100, 1000, 5000), label := batch_size] %>% 
.[order(mse)] %>% ggplot(aes(welfare, mse, color = factor(batch_size), alpha = alpha)) + geom_path(aes(group = setup_detailed)) + # geom_label_repel( # aes(label = label), # segment.size = 0.2, box.padding = 0.2, show.legend = FALSE, na.rm = TRUE) + scale_color_manual(values = generateGradientColors(BATCH_SIZES), guide = FALSE) + scale_y_reverse() + coord_cartesian(ylim = c(1.5, 0), xlim = c(n / 2, n)) + labs(y = "MSE") # other n --------------------------------------------------------------------- NS <- c(2000, 10000, 20000, 40000) walk(NS, ~{ all_setups <- putTogetherSetupsWithETC(.x, 10)[!(setup %in% c("limited TE", "IPWE"))] plotWelfareVsMSE(all_setups, n = .x, mse_limits = c(1.5, 0)) + annotate(geom = "label", label=glue("n == {.x}"), x = .x * 0.9, y = 1.25, parse = TRUE) saveChart(glue("welfare-vs-mse-n{.x}")) }) # other sd -------------------------------------------------------------------- walk(SDS, ~{ all_setups <- putTogetherSetupsWithETC(10000, .x)[!(setup %in% c("limited TE", "IPWE"))] plotWelfareVsMSE(all_setups, mse_limits = c(1.5, 0)) + annotate(geom = "label", label=glue("sigma == {.x}"), x = 9000, y = 1.25, parse = TRUE) saveChart(glue("welfare-vs-mse-sd{.x}")) })
/scripts/compare-outcome-and-te.R
no_license
divenyijanos/eliminating-bias-in-bandit
R
false
false
10,078
r
# Figure script: welfare vs. MSE trade-off charts for the bandit/experiment
# setups (ETC, TE, FBTE, IPWE, "limited IPWE").
# NOTE(review): helper objects (MAIN_COLOR, DARK_SECONDARY_COLOR, THIRD_COLOR,
# SECONDARY_COLOR, TWO_COLOR_SCHEME, putTogetherSetupsWithETC(), saveChart(),
# geom_label_repel, data.table/ggplot2 attaches) are assumed to come from
# global.R -- confirm there.
source("global.R")

# One colour per setup; "IPWE" and "limited IPWE" intentionally share a colour.
SETUP_COLORS <- c(
  ETC = MAIN_COLOR,
  IPWE = DARK_SECONDARY_COLOR,
  `limited IPWE` = DARK_SECONDARY_COLOR,
  FBTE = THIRD_COLOR,
  TE = SECONDARY_COLOR
)

# Adds (by reference, data.table `:=`) a percent label to the "limited IPWE"
# rows belonging to the second-to-last distinct batch size (in order of
# appearance), per (sd, setup_detailed) group.
# NOTE(review): the `type` argument is accepted but never used, and
# `accuracy = limit * 100` looks suspicious -- `accuracy` in
# scales::percent() is a rounding unit (e.g. 1 or 0.1), not a scale factor.
# Verify the intended label format.
labelSetups <- function(setup_comparison, type = c("limit", "method")) {
  setup_comparison[
    batch_size == dplyr::nth(unique(batch_size), -2) & setup == "limited IPWE",
    label := scales::percent(limit, accuracy = limit * 100),
    .(sd, setup_detailed)
  ]
}

# Builds the convex hull of the chosen setup's (mse, welfare) points, padded
# with synthetic corner points so the hull closes around the attainable
# region; restricted to mse <= 10 and ordered by mse. Used to shade the
# feasible set in the charts below.
createConvexHullData <- function(setup_comparison, chosen_setup, n = 10000) {
  rbind(
    setup_comparison[setup == chosen_setup],
    data.table(
      mse = c(setup_comparison[, min(mse)], 9.9, 10),
      welfare = c(n / 2, setup_comparison[, max(welfare)], n / 2),
      setup = chosen_setup,
      bias = 0
    ),
    fill = TRUE
  ) %>%
    .[, .SD[chull(mse, welfare)]] %>%
    .[mse <= 10] %>%
    .[order(mse)]
}

# Base chart: welfare (x) vs. reversed MSE (y) per setup, with the
# "limited IPWE" frontier drawn as a shaded convex hull plus dashed outline.
# NOTE(review): `legend_position` is accepted but never used, and
# `guide = FALSE` is deprecated in current ggplot2 (use guide = "none").
plotWelfareVsMSE <- function(setups, n = 10000, colors = SETUP_COLORS, mse_limits, legend_position) {
  ggplot(mapping = aes(welfare, mse, color = setup)) +
    geom_path(
      data = setups[setup != "limited IPWE"],
      aes(alpha = alpha),
      size = 1
    ) +
    geom_polygon(
      data = createConvexHullData(setups, "limited IPWE", n),
      fill = SETUP_COLORS["IPWE"],
      color = NA,
      alpha = 0.3
    ) +
    geom_path(
      data = createConvexHullData(setups, "limited IPWE", n),
      linetype = "dashed",
      alpha = 1
    ) +
    scale_color_manual(values = colors, guide = FALSE) +
    scale_y_reverse() +
    coord_cartesian(ylim = mse_limits, xlim = c(n / 2, n)) +
    labs(x = "Welfare", y = "MSE")
}

# Illustration ----------------------------------------------------------------

# batch_size = 1000
# Compare bandit setups (batch 1000, no limit) against ETC at full sample.
putTogetherSetupsWithETC(10000, 10)[(batch_size == 1000 & limit == 0) | (batch_size == 10000 & setup == "ETC")] %>%
  labelSetups() %>%
  .[is.na(label), label := setup] %>%
  .[, method := ifelse(label %in% c("RCT", "ETC"), "traditional", "bandit")] %>%
  ggplot(aes(welfare, mse, color = method)) +
  geom_point(aes(shape = (bias < 0.02), alpha = alpha), size = 2) +
  geom_label_repel(aes(label = label), segment.size = 0.2, point.padding = 0.5, show.legend = FALSE) +
  # annotate("rect", xmin = 5000, xmax = 7800, ymin = 2.6, ymax = 3.7, fill = "white") +
  annotate("label", x = 4900, y = 2.8, label = " Biased estimates are hollow", hjust = 0, size = 8 / .pt, label.size = 0) +
  scale_shape_manual(values = c(21, 16), guide = FALSE) +
  scale_color_manual(values = TWO_COLOR_SCHEME) +
  scale_y_reverse() +
  coord_cartesian(ylim = c(3.7, 0), xlim = c(5000, 10000)) +
  labs(x = "Welfare", y = "MSE") +
  theme(
    legend.title = element_blank(),
    legend.position = c(0.18, 0.14),
    legend.text = element_text(size = 8),
    legend.background = element_rect(fill = "white", size = 0)
  )
saveChart("illustration-welfare-vs-te-limited")

# TS only
setups <- putTogetherSetupsWithETC(10000, 10)[limit == 0 & setup == "TE"]
# Empty frame with the final axes only -- used as a "build-up" slide.
ggplot(setups, aes(welfare, mse)) +
  geom_blank() +
  scale_y_reverse() +
  coord_cartesian(ylim = c(1.5, 0), xlim = c(5000, 10000)) +
  labs(x = "Welfare", y = "MSE")
saveChart("illustration-welfare-vs-te-empty")

# Thompson-sampling curve alone, point size encodes batch size.
ggplot(setups, aes(welfare, mse, color = setup, alpha = alpha)) +
  geom_path(aes(group = setup_detailed), size = 1) +
  geom_point(aes(shape = (bias < 0.02), size = batch_size)) +
  annotate(
    "label", x = 5100, y = 1.4,
    label = "Biased estimates are hollow \nPoint size is proportional to batch size",
    hjust = 0, size = 8 / .pt, label.size = 0
  ) +
  scale_size_continuous(breaks = c(10, 100, 1000), range = c(0.5, 5), guide = FALSE) +
  scale_shape_manual(values = c(21, 16), guide = FALSE) +
  scale_color_manual(values = SETUP_COLORS, guide = FALSE) +
  scale_y_reverse() +
  coord_cartesian(ylim = c(1.5, 0), xlim = c(5000, 10000)) +
  labs(x = "Welfare", y = "MSE")
saveChart("illustration-welfare-vs-te-batch-size-te-only")

# TS + ETC + FBTE
setups <- putTogetherSetupsWithETC(10000, 10)[(limit == 0 & setup != "IPWE") | (batch_size == 10000 & setup == "TE")]
ggplot(setups, aes(welfare, mse, color = setup, alpha = alpha)) +
  geom_path(aes(group = setup_detailed), size = 1) +
  geom_point(aes(shape = (bias < 0.02), size = batch_size)) +
  annotate(
    "label", x = 5100, y = 1.4,
    label = "Biased estimates are hollow \nPoint size is proportional to batch size",
    hjust = 0, size = 8 / .pt, label.size = 0
  ) +
  annotate(
    "label", x = 7500, y = 1, label = "ETC", color = SETUP_COLORS["ETC"]
  ) +
  annotate(
    "label", x = 9500, y = 1, label = "TS-FB", color = SETUP_COLORS["FBTE"]
  ) +
  annotate(
    "label", x = 9500, y = 0, label = "TS", color = SETUP_COLORS["TE"]
  ) +
  scale_size_continuous(breaks = c(10, 100, 1000), range = c(0.5, 5), guide = FALSE) +
  scale_shape_manual(values = c(21, 16), guide = FALSE) +
  scale_color_manual(values = SETUP_COLORS, guide = FALSE) +
  scale_y_reverse() +
  coord_cartesian(ylim = c(1.5, 0), xlim = c(5000, 10000)) +
  labs(x = "Welfare", y = "MSE (reversed)")
saveChart("illustration-welfare-vs-te-batch-size")

# "limited IPWE" variants with percent labels and the shaded feasible set.
limited_setups <- putTogetherSetupsWithETC(10000, 10)[setup == "limited IPWE"] %>%
  labelSetups()
ggplot(limited_setups, aes(welfare, mse, color = setup, alpha = alpha)) +
  geom_point(aes(shape = (bias < 0.02), size = batch_size)) +
  geom_path(aes(group = setup_detailed), size = 1) +
  geom_polygon(
    data = createConvexHullData(limited_setups, "limited IPWE", n = 10000),
    fill = SETUP_COLORS["IPWE"],
    color = NA,
    alpha = 0.3
  ) +
  geom_path(
    data = createConvexHullData(limited_setups, "limited IPWE", n = 10000),
    linetype = "dashed",
    alpha = 1
  ) +
  geom_label_repel(
    aes(label = label),
    nudge_x = -50, segment.size = 0.2, box.padding = 0.1, point.padding = 0.5,
    show.legend = FALSE, na.rm = TRUE
  ) +
  annotate(
    "text", x = 5100, y = 1.3,
    label = "Biased estimates are hollow \nPoint size is proportional to batch size \nThe shaded area shows the set of possible choices",
    hjust = 0, size = 8 / .pt
  ) +
  scale_size_continuous(breaks = c(10, 100, 1000), range = c(0.5, 5), guide = FALSE) +
  scale_shape_manual(values = c(`FALSE` = 1, `TRUE` = 16), guide = FALSE) +
  scale_color_manual(values = SETUP_COLORS, guide = FALSE) +
  scale_y_reverse() +
  coord_cartesian(ylim = c(1.5, 0), xlim = c(5000, 10000)) +
  labs(x = "Welfare", y = "MSE (reversed)")
saveChart("illustration-welfare-vs-te-batch-size-limited")

# Full picture: every setup except "limited TE" and plain "IPWE", rendered
# with the shared plotWelfareVsMSE() base chart plus text/arrow callouts.
all_setups <- putTogetherSetupsWithETC(10000, 10)[!(setup %in% c("limited TE", "IPWE"))]
plotWelfareVsMSE(all_setups, mse_limits = c(1.5, 0)) +
  annotate(
    "label", x = 7500, y = 1, label = "ETC", color = SETUP_COLORS["ETC"]
  ) +
  annotate(
    "label", x = 9500, y = 1, label = "TS-FB", color = SETUP_COLORS["FBTE"]
  ) +
  annotate(
    "label", x = 9500, y = 0, label = "TS", color = SETUP_COLORS["TE"]
  ) +
  annotate(
    "text", x = 7000, y = 0.25, label = "LTS-IPW", color = SETUP_COLORS["IPWE"]
  ) +
  annotate(
    "segment", x = 9000, xend = 9000, y = 1.1, yend = 1.4,
    arrow = arrow(length = unit(0.1,"cm"))
  ) +
  annotate(
    "text", x = 8500, y = 1.25, label = "Smaller \nbatches", size = 8 / .pt, hjust = 0.5
  )
saveChart("illustration-welfare-vs-te-batch-size-full")

# link by batch size
# frontier: ~ mainly best batch size and play with limit
# NOTE(review): the next two charts appear exploratory -- they are never
# passed to saveChart(). The `:=` assignments below modify all_setups by
# reference (data.table semantics).
all_setups %>%
  .[setup == "limited IPWE", setup_detailed := paste("limited IPWE", batch_size)] %>%
  .[setup == "limited IPWE", max_mse := max(mse), by = batch_size] %>%
  .[setup == "limited IPWE" & mse == max_mse, label := batch_size] %>%
  ggplot(aes(welfare, mse, color = setup, alpha = alpha)) +
  geom_point(aes(shape = (bias < 0.02), size = batch_size)) +
  geom_label_repel(aes(label = label), segment.size = 0.2, box.padding = 0.1, point.padding = 0.5, show.legend = FALSE, na.rm = TRUE) +
  geom_path(aes(group = setup_detailed), size = 1) +
  scale_size_continuous(breaks = c(10, 100, 1000), range = c(0.5, 3), guide = FALSE) +
  scale_shape_manual(values = c(`FALSE` = 1, `TRUE` = 16), guide = FALSE) +
  scale_color_manual(values = SETUP_COLORS, guide = FALSE) +
  scale_y_reverse() +
  coord_cartesian(ylim = c(3.7, 0), xlim = c(5000, 10000)) +
  labs(y = "MSE")

# One frontier path per batch size, coloured along a gradient.
n <- 10000
putTogetherSetupsWithETC(n, 30)[setup == "limited IPWE"] %>%
  .[, setup_detailed := paste("limited IPWE", batch_size)] %>%
  .[, max_mse := max(mse), by = batch_size] %>%
  .[mse == max_mse & batch_size %in% c(10, 100, 1000, 5000), label := batch_size] %>%
  .[order(mse)] %>%
  ggplot(aes(welfare, mse, color = factor(batch_size), alpha = alpha)) +
  geom_path(aes(group = setup_detailed)) +
  # geom_label_repel(
  #   aes(label = label),
  #   segment.size = 0.2, box.padding = 0.2, show.legend = FALSE, na.rm = TRUE) +
  scale_color_manual(values = generateGradientColors(BATCH_SIZES), guide = FALSE) +
  scale_y_reverse() +
  coord_cartesian(ylim = c(1.5, 0), xlim = c(n / 2, n)) +
  labs(y = "MSE")

# other n ---------------------------------------------------------------------
# Re-render the main chart for several sample sizes.
NS <- c(2000, 10000, 20000, 40000)
walk(NS, ~{
  all_setups <- putTogetherSetupsWithETC(.x, 10)[!(setup %in% c("limited TE", "IPWE"))]
  plotWelfareVsMSE(all_setups, n = .x, mse_limits = c(1.5, 0)) +
    annotate(geom = "label", label=glue("n == {.x}"), x = .x * 0.9, y = 1.25, parse = TRUE)
  saveChart(glue("welfare-vs-mse-n{.x}"))
})

# other sd --------------------------------------------------------------------
# NOTE(review): SDS is assumed to be defined in global.R -- confirm.
walk(SDS, ~{
  all_setups <- putTogetherSetupsWithETC(10000, .x)[!(setup %in% c("limited TE", "IPWE"))]
  plotWelfareVsMSE(all_setups, mse_limits = c(1.5, 0)) +
    annotate(geom = "label", label=glue("sigma == {.x}"), x = 9000, y = 1.25, parse = TRUE)
  saveChart(glue("welfare-vs-mse-sd{.x}"))
})
#' Extract fixed-position categories from strings
#'
#' Extracts one or more fixed-width substrings ("categories") from a
#' character vector, e.g. to split a packed record code into its
#' component variables.
#'
#' @param data character vector of packed codes.
#' @param nstring positions to extract: a numeric vector of length 1
#'   (a single character) or length 2 (start/stop) to extract one
#'   variable, or a list of such vectors to extract several variables.
#' @param label optional column names for the extracted variables; used
#'   as-is when its length matches \code{length(nstring)}, otherwise used
#'   as a prefix pasted to the column index (with \code{NULL} yielding
#'   plain "1", "2", ...).
#'
#' @return A character vector when \code{nstring} is a vector, or a
#'   data.frame with one character column per element of \code{nstring}.
#' @export
estrai_cat <- function(data, nstring, label = NULL)
{
  # Extract one variable: a single position means "that one character",
  # two positions mean start/stop. Anything else is a caller error
  # (previously this left `x` undefined and crashed cryptically).
  extract_one <- function(pos) {
    if (length(pos) == 1) return(substr(data, pos, pos))
    if (length(pos) == 2) return(substr(data, pos[1], pos[2]))
    stop("each element of 'nstring' must have length 1 or 2", call. = FALSE)
  }

  if (!is.list(nstring)) {
    # Single variable requested: return a plain character vector.
    x <- extract_one(nstring)
  } else {
    # Several variables: fill a character matrix column by column.
    x <- matrix("", length(data), length(nstring))
    for (i in seq_along(nstring)) {
      x[, i] <- extract_one(nstring[[i]])
    }
    x <- as.data.frame(x)
    if (length(label) != length(nstring)) {
      # Wrong-length (or NULL) label: use it as a prefix for numbered names.
      colnames(x) <- paste(label, seq_len(ncol(x)), sep = "")
    } else {
      colnames(x) <- label
    }
  }
  return(x)
}
/R/estrai_cat.R
no_license
ablanda/Esame
R
false
false
783
r
#' Extract categories from fixed-position string codes
#'
#' @param data character vector to extract from.
#' @param nstring a numeric vector (length 1 or 2) to extract a single
#'   variable, or a list of such vectors to extract several variables.
#' @param label optional column names; used directly when its length matches
#'   \code{length(nstring)}, otherwise pasted as a prefix to column numbers.
#'
#' @return A character vector (single variable) or a data.frame
#'   (multiple variables).
#' @export
estrai_cat<-function(data,nstring,label=NULL)
{
  # Single variable: nstring is a plain vector of one or two positions.
  if (!is.list(nstring)) {
    if (length(nstring) == 1) out <- substr(data, nstring, nstring)
    if (length(nstring) == 2) out <- substr(data, nstring[1], nstring[2])
    return(out)
  }

  # Multiple variables: one matrix column per element of the position list.
  out <- matrix(0, length(data), length(nstring))
  for (k in seq_along(nstring)) {
    pos <- nstring[[k]]
    if (length(pos) == 1) out[, k] <- substr(data, pos, pos)
    if (length(pos) == 2) out[, k] <- substr(data, pos[1], pos[2])
  }
  out <- as.data.frame(out)

  # Use `label` verbatim only when it matches the number of columns;
  # otherwise treat it as a prefix (NULL -> bare "1", "2", ...).
  if (length(label) == length(nstring)) {
    colnames(out) <- label
  } else {
    colnames(out) <- paste(label, 1:ncol(out), sep = "")
  }
  return(out)
}
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/matrixplot_SC_functions.R
\name{plotCorValue_SC}
\alias{plotCorValue_SC}
\title{plotCorValue_SC()}
\usage{
plotCorValue_SC(xVar, yVar, cexText = 7.5, cexCI = 1.8,
  cor_use = "complete.obs", cor_method = "spearman")
}
\arguments{
\item{xVar, yVar}{variables for correlation score}
}
\value{
returns a spearman correlation score
}
\description{
Function calculates pairwise correlation between 2 variables
}
\details{
NA
}
\examples{
plotCorValue_SC()
}
\references{
https://hlplab.wordpress.com/2012/03/20/correlation-plot-matrices-using-the-ellipse-library/
}
/man/plotCorValue_SC.Rd
no_license
RubD/fluidSCqpcr
R
false
false
648
rd
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/matrixplot_SC_functions.R
\name{plotCorValue_SC}
\alias{plotCorValue_SC}
\title{plotCorValue_SC()}
\usage{
plotCorValue_SC(xVar, yVar, cexText = 7.5, cexCI = 1.8,
  cor_use = "complete.obs", cor_method = "spearman")
}
\arguments{
\item{xVar, yVar}{variables for correlation score}
}
\value{
returns a spearman correlation score
}
\description{
Function calculates pairwise correlation between 2 variables
}
\details{
NA
}
\examples{
plotCorValue_SC()
}
\references{
https://hlplab.wordpress.com/2012/03/20/correlation-plot-matrices-using-the-ellipse-library/
}
## plot2.R -- line chart of Global Active Power for 2007-02-01 / 2007-02-02,
## written to plot2.png (480x480).

## Load the measurements ("?" marks missing values, fields are
## semicolon-separated) and keep only the two target days.
power <- subset(
  read.csv("household_power_consumption.txt",
           header = TRUE, sep = ";", na.strings = "?"),
  Date %in% c("1/2/2007", "2/2/2007")
)

## Parse the character Date/Time columns into proper R classes, then swap
## them in under the original column names.
parsed_date <- as.Date(power$Date, "%d/%m/%Y")
parsed_datetime <- strptime(paste(power$Date, power$Time, sep = " "),
                            "%d/%m/%Y %T")
power$Date <- NULL
power$Time <- NULL
power$Date <- parsed_date
power$Time <- parsed_datetime

## Render the chart into the PNG device.
png(file = "plot2.png", width = 480, height = 480)
plot(power$Time, power$Global_active_power,
     type = "l",
     xlab = "",
     ylab = "Global Active Power (kilowatts)")
dev.off()
/plot2.R
no_license
Quakozzla/ExData_Plotting1
R
false
false
1,150
r
## Exploratory plot 2: Global Active Power over time, 1-2 Feb 2007.
## Reads household_power_consumption.txt and writes plot2.png.

## Read the full file ("?" means NA), then keep only the two days of interest.
full_data <- read.csv("household_power_consumption.txt",
                      na.strings = "?", sep = ";", header = TRUE)
power.consumption <- full_data[full_data$Date %in% c("1/2/2007", "2/2/2007"), ]

## Convert the date and time variables from strings to date/datetime values.
power.consumption$Date.convert <- as.Date(power.consumption$Date, "%d/%m/%Y")
power.consumption$Time.convert <- strptime(
  paste(power.consumption$Date, power.consumption$Time, sep = " "),
  "%d/%m/%Y %T"
)

## Drop the raw character columns and reinstate the converted ones under
## the original names.
power.consumption$Date <- NULL
power.consumption$Time <- NULL
names(power.consumption)[names(power.consumption) == "Date.convert"] <- "Date"
names(power.consumption)[names(power.consumption) == "Time.convert"] <- "Time"

## Draw the line chart into a 480x480 PNG device and close it.
png(file = "plot2.png", width = 480, height = 480)
plot(power.consumption$Time,
     power.consumption$Global_active_power,
     type = "l",
     xlab = "",
     ylab = "Global Active Power (kilowatts)")
dev.off()
##----------------------------------------------------------------------------##
## Tab: Clusters.
##----------------------------------------------------------------------------##

##----------------------------------------------------------------------------##
## Cluster tree.
##----------------------------------------------------------------------------##

# UI element: show the tree plot only when the loaded sample provides a
# cluster tree; otherwise fall back to a placeholder text output.
output[["clusters_tree_UI"]] <- renderUI({
  if ( !is.null(sample_data()$clusters$tree) ) {
    plotOutput("clusters_tree_plot")
  } else {
    textOutput("clusters_tree_text")
  }
})

# plot: cluster relationship tree rendered with ggtree, one coloured tip
# point per cluster (colours come from the reactive colour assignment).
output[["clusters_tree_plot"]] <- renderPlot({
  tree <- sample_data()$clusters$tree
  tree$tip.label <- paste0("Cluster ", tree$tip.label)
  colors_tree <- reactive_colors()$clusters
  ggplot(tree, aes(x, y)) +
    scale_y_reverse() +
    ggtree::geom_tree() +
    ggtree::theme_tree() +
    ggtree::geom_tiplab(size = 5, hjust = -0.2) +
    ggtree::geom_tippoint(color = colors_tree, shape = 16, size = 6) +
    # clip = 'off' plus extra right margin so tip labels are not cut off
    coord_cartesian(clip = 'off') +
    theme(plot.margin = unit(c(0,2.5,0,0), 'cm'))
})

# alternative text shown when no tree is available
output[["clusters_tree_text"]] <- renderText({
  "Data not available."
})

# info box: modal with panel documentation (content defined elsewhere)
observeEvent(input[["clusters_tree_info"]], {
  showModal(
    modalDialog(
      clusters_tree_info[["text"]],
      title = clusters_tree_info[["title"]],
      easyClose = TRUE,
      footer = NULL
    )
  )
})

##----------------------------------------------------------------------------##
## Clusters by samples.
##----------------------------------------------------------------------------##

# UI element: buttons -- two toggles: show composition in percent, and show
# the companion table below the bar chart.
output[["clusters_by_sample_UI_buttons"]] <- renderUI({
  tagList(
    shinyWidgets::materialSwitch(
      inputId = "clusters_by_sample_select_metric_for_bar_plot",
      label = "Show composition in percent [%]:",
      status = "primary",
      inline = TRUE
    ),
    shinyWidgets::materialSwitch(
      inputId = "clusters_by_sample_show_table",
      label = "Show table:",
      status = "primary",
      inline = TRUE
    )
  )
})

# UI element: rest -- the plot, plus the table only when its toggle is on.
output[["clusters_by_sample_UI_rest"]] <- renderUI({
  tagList(
    plotly::plotlyOutput("clusters_by_sample_plot"),
    {
      if ( !is.null(input[["clusters_by_sample_show_table"]]) &&
           input[["clusters_by_sample_show_table"]] == TRUE ) {
        DT::dataTableOutput("clusters_by_sample_table")
      }
    }
  )
})

# bar plot: cells per cluster, stacked by sample, in absolute counts or
# percent depending on the toggle.
# NOTE(review): calculateTableAB() is defined elsewhere; from its use here it
# returns a data.frame with `cluster`, `total_cell_count` and one count
# column per sample -- confirm in the shared utilities.
output[["clusters_by_sample_plot"]] <- plotly::renderPlotly({
  # calculate table (must be merged later if user chooses to display in %)
  temp_table_original <- calculateTableAB('cluster','sample')
  # process table: wide counts -> long (cluster, sample, cells)
  temp_table_to_plot <- temp_table_original %>%
    select(-total_cell_count) %>%
    reshape2::melt(id.vars = "cluster") %>%
    rename(sample = variable, cells = value)
  if ( input[['clusters_by_sample_select_metric_for_bar_plot']] != TRUE ) {
    # generate bar plot with actual cell counts
    temp_table_to_plot %>%
      plotly::plot_ly(
        x = ~cluster,
        y = ~cells,
        type = "bar",
        color = ~sample,
        colors = reactive_colors()$samples,
        hoverinfo = "text",
        text = ~paste0("<b>", .$sample, ": </b>", formatC(.$cells, big.mark = ','))
      ) %>%
      plotly::layout(
        xaxis = list(
          title = "",
          mirror = TRUE,
          showline = TRUE
        ),
        yaxis = list(
          title = "Number of cells",
          hoverformat = ".2f",
          mirror = TRUE,
          zeroline = FALSE,
          showline = TRUE
        ),
        barmode = "stack",
        hovermode = "compare"
      )
  } else {
    # normalize counts to 100% and generate bar plot in percent
    temp_table_to_plot %>%
      left_join(
        .,
        temp_table_original[ , c("cluster", "total_cell_count") ],
        by = "cluster"
      ) %>%
      mutate(pct = cells / total_cell_count * 100) %>%
      plotly::plot_ly(
        x = ~cluster,
        y = ~pct,
        type = "bar",
        color = ~sample,
        colors = reactive_colors()$samples,
        hoverinfo = "text",
        text = ~paste0("<b>", .$sample, ": </b>", format(round(.$pct, 1), nsmall = 1), "%")
      ) %>%
      plotly::layout(
        xaxis = list(
          title = "",
          mirror = TRUE,
          showline = TRUE
        ),
        yaxis = list(
          title = "Percentage [%]",
          range = c(0,100),
          hoverformat = ".2f",
          mirror = TRUE,
          zeroline = FALSE,
          showline = TRUE
        ),
        barmode = "stack",
        hovermode = "compare"
      )
  }
})

# table: same data as the bar plot, counts or percent per the toggle.
output[["clusters_by_sample_table"]] <- DT::renderDataTable({
  # generate table
  temp_table <- calculateTableAB('cluster','sample')
  if ( input[["clusters_by_sample_select_metric_for_bar_plot"]] == TRUE ) {
    # normalize counts to 100% percent (columns 1-2 are cluster/total)
    for ( i in 3:ncol(temp_table) ) {
      temp_table[,i] <- round(temp_table[,i] / temp_table$total_cell_count * 100, digits = 1)
    }
  }
  # process table and convert to DT
  temp_table %>%
    rename(
      Cluster = cluster,
      "# of cells" = total_cell_count
    ) %>%
    DT::datatable(
      filter = "none",
      selection = "none",
      escape = FALSE,
      autoHideNavigation = TRUE,
      rownames = FALSE,
      class = "cell-border stripe",
      options = list(
        scrollX = TRUE,
        sDom = '<"top">lrt<"bottom">ip',
        lengthMenu = c(20, 30, 50, 100),
        pageLength = 20
      )
    )
})

# info box
observeEvent(input[["clusters_by_sample_info"]], {
  showModal(
    modalDialog(
      clusters_by_sample_info[["text"]],
      title = clusters_by_sample_info[["title"]],
      easyClose = TRUE,
      footer = NULL
    )
  )
})

##----------------------------------------------------------------------------##
## nUMI
##----------------------------------------------------------------------------##

# UI element: violin plot if nUMI is present in the cell meta data.
output[["clusters_nUMI_UI"]] <- renderUI({
  if ( "nUMI" %in% names(sample_data()$cells) ) {
    plotly::plotlyOutput("clusters_nUMI_plot")
  } else {
    textOutput("clusters_nUMI_text")
  }
})

# box plot: transcript counts per cluster (violin with box + mean line).
output[["clusters_nUMI_plot"]] <- plotly::renderPlotly({
  plotly::plot_ly(
    sample_data()$cells,
    x = ~cluster,
    y = ~nUMI,
    type = "violin",
    box = list(
      visible = TRUE
    ),
    meanline = list(
      visible = TRUE
    ),
    color = ~cluster,
    colors = reactive_colors()$clusters,
    source = "subset",
    showlegend = FALSE,
    hoverinfo = "y",
    marker = list(
      size = 5
    )
  ) %>%
  plotly::layout(
    title = "",
    xaxis = list(
      title = "",
      mirror = TRUE,
      showline = TRUE
    ),
    yaxis = list(
      title = "Number of transcripts",
      hoverformat = ".0f",
      mirror = TRUE,
      showline = TRUE
    ),
    dragmode = "select",
    hovermode = "compare"
  )
})

# alternative text
output[["clusters_nUMI_text"]] <- renderText({
  "Data not available."
})

# info box
observeEvent(input[["clusters_nUMI_info"]], {
  showModal(
    modalDialog(
      clusters_nUMI_info[["text"]],
      title = clusters_nUMI_info[["title"]],
      easyClose = TRUE,
      footer = NULL
    )
  )
})

##----------------------------------------------------------------------------##
## nGene
##----------------------------------------------------------------------------##

# UI element: violin plot if nGene is present in the cell meta data.
output[["clusters_nGene_UI"]] <- renderUI({
  if ( "nGene" %in% names(sample_data()$cells) ) {
    plotly::plotlyOutput("clusters_nGene_plot")
  } else {
    textOutput("clusters_nGene_text")
  }
})

# box plot: expressed-gene counts per cluster.
output[["clusters_nGene_plot"]] <- plotly::renderPlotly({
  plotly::plot_ly(
    sample_data()$cells,
    x = ~cluster,
    y = ~nGene,
    type = "violin",
    box = list(
      visible = TRUE
    ),
    meanline = list(
      visible = TRUE
    ),
    color = ~cluster,
    colors = reactive_colors()$clusters,
    source = "subset",
    showlegend = FALSE,
    hoverinfo = "y",
    marker = list(
      size = 5
    )
  ) %>%
  plotly::layout(
    title = "",
    xaxis = list(
      title ="",
      mirror = TRUE,
      showline = TRUE
    ),
    yaxis = list(
      title = "Number of expressed genes",
      hoverformat = ".0f",
      mirror = TRUE,
      showline = TRUE
    ),
    dragmode = "select",
    hovermode = "compare"
  )
})

# alternative text
output[["clusters_nGene_text"]] <- renderText({
  "Data not available."
})

# info box
observeEvent(input[["clusters_nGene_info"]], {
  showModal(
    modalDialog(
      clusters_nGene_info[["text"]],
      title = clusters_nGene_info[["title"]],
      easyClose = TRUE,
      footer = NULL
    )
  )
})

##----------------------------------------------------------------------------##
## percent_mt
##----------------------------------------------------------------------------##

# UI element: violin plot if percent_mt is present in the cell meta data.
output[["clusters_percent_mt_UI"]] <- renderUI({
  if ( "percent_mt" %in% names(sample_data()$cells) ) {
    plotly::plotlyOutput("clusters_percent_mt_plot")
  } else {
    textOutput("clusters_percent_mt_text")
  }
})

# box plot: mitochondrial transcript fraction per cluster; the stored value
# is a fraction and is scaled to percent (*100) for display.
output[["clusters_percent_mt_plot"]] <- plotly::renderPlotly({
  plotly::plot_ly(
    sample_data()$cells,
    x = ~cluster,
    y = ~percent_mt*100,
    type = "violin",
    box = list(
      visible = TRUE
    ),
    meanline = list(
      visible = TRUE
    ),
    color = ~cluster,
    colors = reactive_colors()$clusters,
    source = "subset",
    showlegend = FALSE,
    hoverinfo = "y",
    marker = list(
      size = 5
    )
  ) %>%
  plotly::layout(
    title = "",
    xaxis = list(
      title = "",
      mirror = TRUE,
      showline = TRUE
    ),
    yaxis = list(
      title = "Percentage of transcripts [%]",
      range = c(0, 100),
      hoverformat = ".1f",
      mirror = TRUE,
      showline = TRUE
    ),
    dragmode = "select",
    hovermode = "compare"
  )
})

# alternative text
output[["clusters_percent_mt_text"]] <- renderText({
  "Data not available."
})

# info box
observeEvent(input[["clusters_percent_mt_info"]], {
  showModal(
    modalDialog(
      clusters_percent_mt_info[["text"]],
      title = clusters_percent_mt_info[["title"]],
      easyClose = TRUE,
      footer = NULL
    )
  )
})

##----------------------------------------------------------------------------##
## percent_ribo
##----------------------------------------------------------------------------##

# UI element: violin plot if percent_ribo is present in the cell meta data.
output[["clusters_percent_ribo_UI"]] <- renderUI({
  if ( "percent_ribo" %in% names(sample_data()$cells) ) {
    plotly::plotlyOutput("clusters_percent_ribo_plot")
  } else {
    textOutput("clusters_percent_ribo_text")
  }
})

# alternative text
output[["clusters_percent_ribo_text"]] <- renderText({
  "Data not available."
})

# box plot: ribosomal transcript fraction per cluster, scaled to percent.
output[["clusters_percent_ribo_plot"]] <- plotly::renderPlotly({
  plotly::plot_ly(
    sample_data()$cells,
    x = ~cluster,
    y = ~percent_ribo*100,
    type = "violin",
    box = list(
      visible = TRUE
    ),
    meanline = list(
      visible = TRUE
    ),
    color = ~cluster,
    colors = reactive_colors()$clusters,
    source = "subset",
    showlegend = FALSE,
    hoverinfo = "y",
    marker = list(
      size = 5
    )
  ) %>%
  plotly::layout(
    title = "",
    xaxis = list(
      title = "",
      mirror = TRUE,
      showline = TRUE
    ),
    yaxis = list(
      title = "Percentage of transcripts [%]",
      range = c(0, 100),
      hoverformat = ".2f",
      mirror = TRUE,
      showline = TRUE
    ),
    dragmode = "select",
    hovermode = "compare"
  )
})

# info box
observeEvent(input[["clusters_percent_ribo_info"]], {
  showModal(
    modalDialog(
      clusters_percent_ribo_info[["text"]],
      title = clusters_percent_ribo_info[["title"]],
      easyClose = TRUE,
      footer = NULL
    )
  )
})

##----------------------------------------------------------------------------##
## cell cycle: Seurat
##----------------------------------------------------------------------------##

# UI element: buttons -- only shown when Seurat cell cycle assignments exist.
output[["clusters_by_cell_cycle_seurat_UI_buttons"]] <- renderUI({
  if ( "cell_cycle_seurat" %in% colnames(sample_data()$cells) ) {
    tagList(
      shinyWidgets::materialSwitch(
        inputId = "clusters_by_cell_cycle_seurat_select_metric_for_bar_plot",
        label = "Show composition in percent [%]:",
        status = "primary",
        inline = TRUE
      ),
      shinyWidgets::materialSwitch(
        inputId = "clusters_by_cell_cycle_seurat_show_table",
        label = "Show table:",
        status = "primary",
        inline = TRUE
      )
    )
  } else {
    textOutput("clusters_by_cell_cycle_seurat_text")
  }
})

# UI element: rest
output[["clusters_by_cell_cycle_seurat_UI_rest"]] <- renderUI({
  if ( "cell_cycle_seurat" %in% colnames(sample_data()$cells) ) {
    tagList(
      plotly::plotlyOutput("clusters_by_cell_cycle_seurat_plot"),
      {
        if ( !is.null(input[["clusters_by_cell_cycle_seurat_show_table"]]) &&
             input[["clusters_by_cell_cycle_seurat_show_table"]] == TRUE ) {
          DT::dataTableOutput("clusters_by_cell_cycle_seurat_table")
        }
      }
    )
  }
})

# bar plot: cell cycle phase composition per cluster (Seurat assignments),
# phases ordered G1 -> S -> G2M.
output[["clusters_by_cell_cycle_seurat_plot"]] <- plotly::renderPlotly({
  temp_table_original <- calculateTableAB('cluster','cell_cycle_seurat')
  temp_table_to_plot <- temp_table_original %>%
    select(-total_cell_count) %>%
    reshape2::melt(id.vars = "cluster") %>%
    rename(phase = variable, cells = value) %>%
    mutate(phase = factor(phase, levels = c("G1", "S", "G2M")))
  if ( input[['clusters_by_cell_cycle_seurat_select_metric_for_bar_plot']] != TRUE ) {
    temp_table_to_plot %>%
      plotly::plot_ly(
        x = ~cluster,
        y = ~cells,
        type = "bar",
        color = ~phase,
        colors = cell_cycle_colorset,
        hoverinfo = "text",
        text = ~paste0("<b>", .$phase, ": </b>", formatC(.$cells, big.mark = ','))
      ) %>%
      plotly::layout(
        xaxis = list(
          title = "",
          mirror = TRUE,
          showline = TRUE
        ),
        yaxis = list(
          title = "Number of cells",
          hoverformat = ".2f",
          mirror = TRUE,
          zeroline = FALSE,
          showline = TRUE
        ),
        barmode = "stack",
        hovermode = "compare"
      )
  } else {
    temp_table_to_plot %>%
      left_join(
        .,
        temp_table_original[ , c("cluster", "total_cell_count") ],
        by = "cluster"
      ) %>%
      mutate(pct = cells / total_cell_count * 100) %>%
      plotly::plot_ly(
        x = ~cluster,
        y = ~pct,
        type = "bar",
        color = ~phase,
        colors = cell_cycle_colorset,
        hoverinfo = "text",
        text = ~paste0("<b>", .$phase, ": </b>", format(round(.$pct, 1), nsmall = 1), "%")
      ) %>%
      plotly::layout(
        xaxis = list(
          title = "",
          mirror = TRUE,
          showline = TRUE
        ),
        yaxis = list(
          title = "Percentage [%]",
          range = c(0,100),
          hoverformat = ".2f",
          mirror = TRUE,
          zeroline = FALSE,
          showline = TRUE
        ),
        barmode = "stack",
        hovermode = "compare"
      )
  }
})

# table
output[["clusters_by_cell_cycle_seurat_table"]] <- DT::renderDataTable({
  temp_table <- calculateTableAB('cluster','cell_cycle_seurat')
  if ( input[["clusters_by_cell_cycle_seurat_select_metric_for_bar_plot"]] == TRUE ) {
    for ( i in 3:ncol(temp_table) ) {
      temp_table[,i] <- round(temp_table[,i] / temp_table$total_cell_count * 100, digits = 1)
    }
  }
  temp_table %>%
    rename(
      Cluster = cluster,
      "# of cells" = total_cell_count
    ) %>%
    DT::datatable(
      filter = "none",
      selection = "none",
      escape = FALSE,
      autoHideNavigation = TRUE,
      rownames = FALSE,
      class = "cell-border stripe",
      options = list(
        scrollX = TRUE,
        sDom = '<"top">lrt<"bottom">ip',
        lengthMenu = c(15, 30, 50, 100),
        pageLength = 15
      )
    )
})

# alternative text
output[["clusters_by_cell_cycle_seurat_text"]] <- renderText({
  "Data not available."
})

# info box
observeEvent(input[["clusters_by_cell_cycle_seurat_info"]], {
  showModal(
    modalDialog(
      clusters_by_cell_cycle_seurat_info[["text"]],
      title = clusters_by_cell_cycle_seurat_info[["title"]],
      easyClose = TRUE,
      footer = NULL
    )
  )
})

##----------------------------------------------------------------------------##
## cell cycle: Cyclone
##----------------------------------------------------------------------------##

# UI element: buttons -- only shown when Cyclone cell cycle assignments exist.
output[["clusters_by_cell_cycle_cyclone_UI_buttons"]] <- renderUI({
  if ( "cell_cycle_cyclone" %in% colnames(sample_data()$cells) ) {
    tagList(
      shinyWidgets::materialSwitch(
        inputId = "clusters_by_cell_cycle_cyclone_select_metric_for_bar_plot",
        label = "Show composition in percent [%]:",
        status = "primary",
        inline = TRUE
      ),
      shinyWidgets::materialSwitch(
        inputId = "clusters_by_cell_cycle_cyclone_show_table",
        label = "Show table:",
        status = "primary",
        inline = TRUE
      )
    )
  } else {
    textOutput("clusters_by_cell_cycle_cyclone_text")
  }
})

# UI element: rest
output[["clusters_by_cell_cycle_cyclone_UI_rest"]] <- renderUI({
  if ( "cell_cycle_cyclone" %in% colnames(sample_data()$cells) ) {
    tagList(
      plotly::plotlyOutput("clusters_by_cell_cycle_cyclone_plot"),
      {
        if ( !is.null(input[["clusters_by_cell_cycle_cyclone_show_table"]]) &&
             input[["clusters_by_cell_cycle_cyclone_show_table"]] == TRUE ) {
          DT::dataTableOutput("clusters_by_cell_cycle_cyclone_table")
        }
      }
    )
  }
})

# bar plot: phase composition per cluster (Cyclone assignments); "-" is the
# extra level for unassigned cells.
output[["clusters_by_cell_cycle_cyclone_plot"]] <- plotly::renderPlotly({
  temp_table_original <- calculateTableAB('cluster','cell_cycle_cyclone')
  temp_table_to_plot <- temp_table_original %>%
    select(-total_cell_count) %>%
    reshape2::melt(id.vars = "cluster") %>%
    rename(phase = variable, cells = value) %>%
    mutate(phase = factor(phase, levels = c("G1", "S", "G2M", "-")))
  if ( input[['clusters_by_cell_cycle_cyclone_select_metric_for_bar_plot']] != TRUE ) {
    temp_table_to_plot %>%
      plotly::plot_ly(
        x = ~cluster,
        y = ~cells,
        type = "bar",
        color = ~phase,
        colors = cell_cycle_colorset,
        hoverinfo = "text",
        text = ~paste0("<b>", .$phase, ": </b>", formatC(.$cells, big.mark = ','))
      ) %>%
      plotly::layout(
        xaxis = list(
          title = "",
          mirror = TRUE,
          showline = TRUE
        ),
        yaxis = list(
          title = "Number of cells",
          hoverformat = ".2f",
          mirror = TRUE,
          zeroline = FALSE,
          showline = TRUE
        ),
        barmode = "stack",
        hovermode = "compare"
      )
  } else {
    temp_table_to_plot %>%
      left_join(
        .,
        temp_table_original[ , c("cluster", "total_cell_count") ],
        by = "cluster"
      ) %>%
      mutate(pct = cells / total_cell_count * 100) %>%
      plotly::plot_ly(
        x = ~cluster,
        y = ~pct,
        type = "bar",
        color = ~phase,
        colors = cell_cycle_colorset,
        hoverinfo = "text",
        text = ~paste0("<b>", .$phase, ": </b>", format(round(.$pct, 1), nsmall = 1), "%")
      ) %>%
      plotly::layout(
        xaxis = list(
          title = "",
          mirror = TRUE,
          showline = TRUE
        ),
        yaxis = list(
          title = "Percentage [%]",
          range = c(0,100),
          hoverformat = ".2f",
          mirror = TRUE,
          zeroline = FALSE,
          showline = TRUE
        ),
        barmode = "stack",
        hovermode = "compare"
      )
  }
})

# table
output[["clusters_by_cell_cycle_cyclone_table"]] <- DT::renderDataTable({
  temp_table <- calculateTableAB('cluster','cell_cycle_cyclone')
  if ( input[["clusters_by_cell_cycle_cyclone_select_metric_for_bar_plot"]] == TRUE ) {
    for ( i in 3:ncol(temp_table) ) {
      temp_table[,i] <- round(temp_table[,i] / temp_table$total_cell_count * 100, digits = 1)
    }
  }
  temp_table %>%
    rename(
      Cluster = cluster,
      "# of cells" = total_cell_count
    ) %>%
    DT::datatable(
      filter = "none",
      selection = "none",
      escape = FALSE,
      autoHideNavigation = TRUE,
      rownames = FALSE,
      class = "cell-border stripe",
      options = list(
        scrollX = TRUE,
        sDom = '<"top">lrt<"bottom">ip',
        lengthMenu = c(15, 30, 50, 100),
        pageLength = 15
      )
    )
})

# alternative text
output[["clusters_by_cell_cycle_cyclone_text"]] <- renderText({
  "Data not available."
})

# info box
observeEvent(input[["clusters_by_cell_cycle_cyclone_info"]], {
  showModal(
    modalDialog(
      clusters_by_cell_cycle_cyclone_info[["text"]],
      title = clusters_by_cell_cycle_cyclone_info[["title"]],
      easyClose = TRUE,
      footer = NULL
    )
  )
})
/inst/shiny/v1.2/clusters/server.R
permissive
whtns/cerebroApp
R
false
false
20,505
r
##----------------------------------------------------------------------------## ## Tab: Clusters. ##----------------------------------------------------------------------------## ##----------------------------------------------------------------------------## ## Cluster tree. ##----------------------------------------------------------------------------## # UI element output[["clusters_tree_UI"]] <- renderUI({ if ( !is.null(sample_data()$clusters$tree) ) { plotOutput("clusters_tree_plot") } else { textOutput("clusters_tree_text") } }) # plot output[["clusters_tree_plot"]] <- renderPlot({ tree <- sample_data()$clusters$tree tree$tip.label <- paste0("Cluster ", tree$tip.label) colors_tree <- reactive_colors()$clusters ggplot(tree, aes(x, y)) + scale_y_reverse() + ggtree::geom_tree() + ggtree::theme_tree() + ggtree::geom_tiplab(size = 5, hjust = -0.2) + ggtree::geom_tippoint(color = colors_tree, shape = 16, size = 6) + coord_cartesian(clip = 'off') + theme(plot.margin = unit(c(0,2.5,0,0), 'cm')) }) # alternative text output[["clusters_tree_text"]] <- renderText({ "Data not available." }) # info box observeEvent(input[["clusters_tree_info"]], { showModal( modalDialog( clusters_tree_info[["text"]], title = clusters_tree_info[["title"]], easyClose = TRUE, footer = NULL ) ) }) ##----------------------------------------------------------------------------## ## Clusters by samples. 
##----------------------------------------------------------------------------## # UI element: buttons output[["clusters_by_sample_UI_buttons"]] <- renderUI({ tagList( shinyWidgets::materialSwitch( inputId = "clusters_by_sample_select_metric_for_bar_plot", label = "Show composition in percent [%]:", status = "primary", inline = TRUE ), shinyWidgets::materialSwitch( inputId = "clusters_by_sample_show_table", label = "Show table:", status = "primary", inline = TRUE ) ) }) # UI element: rest output[["clusters_by_sample_UI_rest"]] <- renderUI({ tagList( plotly::plotlyOutput("clusters_by_sample_plot"), { if ( !is.null(input[["clusters_by_sample_show_table"]]) && input[["clusters_by_sample_show_table"]] == TRUE ) { DT::dataTableOutput("clusters_by_sample_table") } } ) }) # bar plot output[["clusters_by_sample_plot"]] <- plotly::renderPlotly({ # calculate table (must be merged later if user chooses to display in %) temp_table_original <- calculateTableAB('cluster','sample') # process table temp_table_to_plot <- temp_table_original %>% select(-total_cell_count) %>% reshape2::melt(id.vars = "cluster") %>% rename(sample = variable, cells = value) if ( input[['clusters_by_sample_select_metric_for_bar_plot']] != TRUE ) { # generate bar plot with actual cell counts temp_table_to_plot %>% plotly::plot_ly( x = ~cluster, y = ~cells, type = "bar", color = ~sample, colors = reactive_colors()$samples, hoverinfo = "text", text = ~paste0("<b>", .$sample, ": </b>", formatC(.$cells, big.mark = ',')) ) %>% plotly::layout( xaxis = list( title = "", mirror = TRUE, showline = TRUE ), yaxis = list( title = "Number of cells", hoverformat = ".2f", mirror = TRUE, zeroline = FALSE, showline = TRUE ), barmode = "stack", hovermode = "compare" ) } else { # normalize counts to 100% and generate bar plot in percent temp_table_to_plot %>% left_join( ., temp_table_original[ , c("cluster", "total_cell_count") ], by = "cluster" ) %>% mutate(pct = cells / total_cell_count * 100) %>% plotly::plot_ly( x = 
~cluster, y = ~pct, type = "bar", color = ~sample, colors = reactive_colors()$samples, hoverinfo = "text", text = ~paste0("<b>", .$sample, ": </b>", format(round(.$pct, 1), nsmall = 1), "%") ) %>% plotly::layout( xaxis = list( title = "", mirror = TRUE, showline = TRUE ), yaxis = list( title = "Percentage [%]", range = c(0,100), hoverformat = ".2f", mirror = TRUE, zeroline = FALSE, showline = TRUE ), barmode = "stack", hovermode = "compare" ) } }) # table output[["clusters_by_sample_table"]] <- DT::renderDataTable({ # generate table temp_table <- calculateTableAB('cluster','sample') if ( input[["clusters_by_sample_select_metric_for_bar_plot"]] == TRUE ) { # normalize counts to 100% percent for ( i in 3:ncol(temp_table) ) { temp_table[,i] <- round(temp_table[,i] / temp_table$total_cell_count * 100, digits = 1) } } # process table and convert to DT temp_table %>% rename( Cluster = cluster, "# of cells" = total_cell_count ) %>% DT::datatable( filter = "none", selection = "none", escape = FALSE, autoHideNavigation = TRUE, rownames = FALSE, class = "cell-border stripe", options = list( scrollX = TRUE, sDom = '<"top">lrt<"bottom">ip', lengthMenu = c(20, 30, 50, 100), pageLength = 20 ) ) }) # info box observeEvent(input[["clusters_by_sample_info"]], { showModal( modalDialog( clusters_by_sample_info[["text"]], title = clusters_by_sample_info[["title"]], easyClose = TRUE, footer = NULL ) ) }) ##----------------------------------------------------------------------------## ## nUMI ##----------------------------------------------------------------------------## # UI element output[["clusters_nUMI_UI"]] <- renderUI({ if ( "nUMI" %in% names(sample_data()$cells) ) { plotly::plotlyOutput("clusters_nUMI_plot") } else { textOutput("clusters_nUMI_text") } }) # box plot output[["clusters_nUMI_plot"]] <- plotly::renderPlotly({ plotly::plot_ly( sample_data()$cells, x = ~cluster, y = ~nUMI, type = "violin", box = list( visible = TRUE ), meanline = list( visible = TRUE ), color = 
~cluster, colors = reactive_colors()$clusters, source = "subset", showlegend = FALSE, hoverinfo = "y", marker = list( size = 5 ) ) %>% plotly::layout( title = "", xaxis = list( title = "", mirror = TRUE, showline = TRUE ), yaxis = list( title = "Number of transcripts", hoverformat = ".0f", mirror = TRUE, showline = TRUE ), dragmode = "select", hovermode = "compare" ) }) # alternative text output[["clusters_nUMI_text"]] <- renderText({ "Data not available." }) # info box observeEvent(input[["clusters_nUMI_info"]], { showModal( modalDialog( clusters_nUMI_info[["text"]], title = clusters_nUMI_info[["title"]], easyClose = TRUE, footer = NULL ) ) }) ##----------------------------------------------------------------------------## ## nGene ##----------------------------------------------------------------------------## # UI element output[["clusters_nGene_UI"]] <- renderUI({ if ( "nGene" %in% names(sample_data()$cells) ) { plotly::plotlyOutput("clusters_nGene_plot") } else { textOutput("clusters_nGene_text") } }) # box plot output[["clusters_nGene_plot"]] <- plotly::renderPlotly({ plotly::plot_ly( sample_data()$cells, x = ~cluster, y = ~nGene, type = "violin", box = list( visible = TRUE ), meanline = list( visible = TRUE ), color = ~cluster, colors = reactive_colors()$clusters, source = "subset", showlegend = FALSE, hoverinfo = "y", marker = list( size = 5 ) ) %>% plotly::layout( title = "", xaxis = list( title ="", mirror = TRUE, showline = TRUE ), yaxis = list( title = "Number of expressed genes", hoverformat = ".0f", mirror = TRUE, showline = TRUE ), dragmode = "select", hovermode = "compare" ) }) # alternative text output[["clusters_nGene_text"]] <- renderText({ "Data not available." 
}) # info box observeEvent(input[["clusters_nGene_info"]], { showModal( modalDialog( clusters_nGene_info[["text"]], title = clusters_nGene_info[["title"]], easyClose = TRUE, footer = NULL ) ) }) ##----------------------------------------------------------------------------## ## percent_mt ##----------------------------------------------------------------------------## # UI element output[["clusters_percent_mt_UI"]] <- renderUI({ if ( "percent_mt" %in% names(sample_data()$cells) ) { plotly::plotlyOutput("clusters_percent_mt_plot") } else { textOutput("clusters_percent_mt_text") } }) # box plot output[["clusters_percent_mt_plot"]] <- plotly::renderPlotly({ plotly::plot_ly( sample_data()$cells, x = ~cluster, y = ~percent_mt*100, type = "violin", box = list( visible = TRUE ), meanline = list( visible = TRUE ), color = ~cluster, colors = reactive_colors()$clusters, source = "subset", showlegend = FALSE, hoverinfo = "y", marker = list( size = 5 ) ) %>% plotly::layout( title = "", xaxis = list( title = "", mirror = TRUE, showline = TRUE ), yaxis = list( title = "Percentage of transcripts [%]", range = c(0, 100), hoverformat = ".1f", mirror = TRUE, showline = TRUE ), dragmode = "select", hovermode = "compare" ) }) # alternative text output[["clusters_percent_mt_text"]] <- renderText({ "Data not available." 
}) # info box observeEvent(input[["clusters_percent_mt_info"]], { showModal( modalDialog( clusters_percent_mt_info[["text"]], title = clusters_percent_mt_info[["title"]], easyClose = TRUE, footer = NULL ) ) }) ##----------------------------------------------------------------------------## ## percent_ribo ##----------------------------------------------------------------------------## # UI element output[["clusters_percent_ribo_UI"]] <- renderUI({ if ( "percent_ribo" %in% names(sample_data()$cells) ) { plotly::plotlyOutput("clusters_percent_ribo_plot") } else { textOutput("clusters_percent_ribo_text") } }) # alternative text output[["clusters_percent_ribo_text"]] <- renderText({ "Data not available." }) # box plot output[["clusters_percent_ribo_plot"]] <- plotly::renderPlotly({ plotly::plot_ly( sample_data()$cells, x = ~cluster, y = ~percent_ribo*100, type = "violin", box = list( visible = TRUE ), meanline = list( visible = TRUE ), color = ~cluster, colors = reactive_colors()$clusters, source = "subset", showlegend = FALSE, hoverinfo = "y", marker = list( size = 5 ) ) %>% plotly::layout( title = "", xaxis = list( title = "", mirror = TRUE, showline = TRUE ), yaxis = list( title = "Percentage of transcripts [%]", range = c(0, 100), hoverformat = ".2f", mirror = TRUE, showline = TRUE ), dragmode = "select", hovermode = "compare" ) }) # info box observeEvent(input[["clusters_percent_ribo_info"]], { showModal( modalDialog( clusters_percent_ribo_info[["text"]], title = clusters_percent_ribo_info[["title"]], easyClose = TRUE, footer = NULL ) ) }) ##----------------------------------------------------------------------------## ## cell cycle: Seurat ##----------------------------------------------------------------------------## # UI element: buttons output[["clusters_by_cell_cycle_seurat_UI_buttons"]] <- renderUI({ if ( "cell_cycle_seurat" %in% colnames(sample_data()$cells) ) { tagList( shinyWidgets::materialSwitch( inputId = 
"clusters_by_cell_cycle_seurat_select_metric_for_bar_plot", label = "Show composition in percent [%]:", status = "primary", inline = TRUE ), shinyWidgets::materialSwitch( inputId = "clusters_by_cell_cycle_seurat_show_table", label = "Show table:", status = "primary", inline = TRUE ) ) } else { textOutput("clusters_by_cell_cycle_seurat_text") } }) # UI element: rest output[["clusters_by_cell_cycle_seurat_UI_rest"]] <- renderUI({ if ( "cell_cycle_seurat" %in% colnames(sample_data()$cells) ) { tagList( plotly::plotlyOutput("clusters_by_cell_cycle_seurat_plot"), { if ( !is.null(input[["clusters_by_cell_cycle_seurat_show_table"]]) && input[["clusters_by_cell_cycle_seurat_show_table"]] == TRUE ) { DT::dataTableOutput("clusters_by_cell_cycle_seurat_table") } } ) } }) # bar plot output[["clusters_by_cell_cycle_seurat_plot"]] <- plotly::renderPlotly({ temp_table_original <- calculateTableAB('cluster','cell_cycle_seurat') temp_table_to_plot <- temp_table_original %>% select(-total_cell_count) %>% reshape2::melt(id.vars = "cluster") %>% rename(phase = variable, cells = value) %>% mutate(phase = factor(phase, levels = c("G1", "S", "G2M"))) if ( input[['clusters_by_cell_cycle_seurat_select_metric_for_bar_plot']] != TRUE ) { temp_table_to_plot %>% plotly::plot_ly( x = ~cluster, y = ~cells, type = "bar", color = ~phase, colors = cell_cycle_colorset, hoverinfo = "text", text = ~paste0("<b>", .$phase, ": </b>", formatC(.$cells, big.mark = ',')) ) %>% plotly::layout( xaxis = list( title = "", mirror = TRUE, showline = TRUE ), yaxis = list( title = "Number of cells", hoverformat = ".2f", mirror = TRUE, zeroline = FALSE, showline = TRUE ), barmode = "stack", hovermode = "compare" ) } else { temp_table_to_plot %>% left_join( ., temp_table_original[ , c("cluster", "total_cell_count") ], by = "cluster" ) %>% mutate(pct = cells / total_cell_count * 100) %>% plotly::plot_ly( x = ~cluster, y = ~pct, type = "bar", color = ~phase, colors = cell_cycle_colorset, hoverinfo = "text", text = 
~paste0("<b>", .$phase, ": </b>", format(round(.$pct, 1), nsmall = 1), "%") ) %>% plotly::layout( xaxis = list( title = "", mirror = TRUE, showline = TRUE ), yaxis = list( title = "Percentage [%]", range = c(0,100), hoverformat = ".2f", mirror = TRUE, zeroline = FALSE, showline = TRUE ), barmode = "stack", hovermode = "compare" ) } }) # table output[["clusters_by_cell_cycle_seurat_table"]] <- DT::renderDataTable({ temp_table <- calculateTableAB('cluster','cell_cycle_seurat') if ( input[["clusters_by_cell_cycle_seurat_select_metric_for_bar_plot"]] == TRUE ) { for ( i in 3:ncol(temp_table) ) { temp_table[,i] <- round(temp_table[,i] / temp_table$total_cell_count * 100, digits = 1) } } temp_table %>% rename( Cluster = cluster, "# of cells" = total_cell_count ) %>% DT::datatable( filter = "none", selection = "none", escape = FALSE, autoHideNavigation = TRUE, rownames = FALSE, class = "cell-border stripe", options = list( scrollX = TRUE, sDom = '<"top">lrt<"bottom">ip', lengthMenu = c(15, 30, 50, 100), pageLength = 15 ) ) }) # alternative text output[["clusters_by_cell_cycle_seurat_text"]] <- renderText({ "Data not available." 
}) # info box observeEvent(input[["clusters_by_cell_cycle_seurat_info"]], { showModal( modalDialog( clusters_by_cell_cycle_seurat_info[["text"]], title = clusters_by_cell_cycle_seurat_info[["title"]], easyClose = TRUE, footer = NULL ) ) }) ##----------------------------------------------------------------------------## ## cell cycle: Cyclone ##----------------------------------------------------------------------------## # UI element: buttons output[["clusters_by_cell_cycle_cyclone_UI_buttons"]] <- renderUI({ if ( "cell_cycle_cyclone" %in% colnames(sample_data()$cells) ) { tagList( shinyWidgets::materialSwitch( inputId = "clusters_by_cell_cycle_cyclone_select_metric_for_bar_plot", label = "Show composition in percent [%]:", status = "primary", inline = TRUE ), shinyWidgets::materialSwitch( inputId = "clusters_by_cell_cycle_cyclone_show_table", label = "Show table:", status = "primary", inline = TRUE ) ) } else { textOutput("clusters_by_cell_cycle_cyclone_text") } }) # UI element: rest output[["clusters_by_cell_cycle_cyclone_UI_rest"]] <- renderUI({ if ( "cell_cycle_cyclone" %in% colnames(sample_data()$cells) ) { tagList( plotly::plotlyOutput("clusters_by_cell_cycle_cyclone_plot"), { if ( !is.null(input[["clusters_by_cell_cycle_cyclone_show_table"]]) && input[["clusters_by_cell_cycle_cyclone_show_table"]] == TRUE ) { DT::dataTableOutput("clusters_by_cell_cycle_cyclone_table") } } ) } }) # bar plot output[["clusters_by_cell_cycle_cyclone_plot"]] <- plotly::renderPlotly({ temp_table_original <- calculateTableAB('cluster','cell_cycle_cyclone') temp_table_to_plot <- temp_table_original %>% select(-total_cell_count) %>% reshape2::melt(id.vars = "cluster") %>% rename(phase = variable, cells = value) %>% mutate(phase = factor(phase, levels = c("G1", "S", "G2M", "-"))) if ( input[['clusters_by_cell_cycle_cyclone_select_metric_for_bar_plot']] != TRUE ) { temp_table_to_plot %>% plotly::plot_ly( x = ~cluster, y = ~cells, type = "bar", color = ~phase, colors = 
cell_cycle_colorset, hoverinfo = "text", text = ~paste0("<b>", .$phase, ": </b>", formatC(.$cells, big.mark = ',')) ) %>% plotly::layout( xaxis = list( title = "", mirror = TRUE, showline = TRUE ), yaxis = list( title = "Number of cells", hoverformat = ".2f", mirror = TRUE, zeroline = FALSE, showline = TRUE ), barmode = "stack", hovermode = "compare" ) } else { temp_table_to_plot %>% left_join( ., temp_table_original[ , c("cluster", "total_cell_count") ], by = "cluster" ) %>% mutate(pct = cells / total_cell_count * 100) %>% plotly::plot_ly( x = ~cluster, y = ~pct, type = "bar", color = ~phase, colors = cell_cycle_colorset, hoverinfo = "text", text = ~paste0("<b>", .$phase, ": </b>", format(round(.$pct, 1), nsmall = 1), "%") ) %>% plotly::layout( xaxis = list( title = "", mirror = TRUE, showline = TRUE ), yaxis = list( title = "Percentage [%]", range = c(0,100), hoverformat = ".2f", mirror = TRUE, zeroline = FALSE, showline = TRUE ), barmode = "stack", hovermode = "compare" ) } }) # table output[["clusters_by_cell_cycle_cyclone_table"]] <- DT::renderDataTable({ temp_table <- calculateTableAB('cluster','cell_cycle_cyclone') if ( input[["clusters_by_cell_cycle_cyclone_select_metric_for_bar_plot"]] == TRUE ) { for ( i in 3:ncol(temp_table) ) { temp_table[,i] <- round(temp_table[,i] / temp_table$total_cell_count * 100, digits = 1) } } temp_table %>% rename( Cluster = cluster, "# of cells" = total_cell_count ) %>% DT::datatable( filter = "none", selection = "none", escape = FALSE, autoHideNavigation = TRUE, rownames = FALSE, class = "cell-border stripe", options = list( scrollX = TRUE, sDom = '<"top">lrt<"bottom">ip', lengthMenu = c(15, 30, 50, 100), pageLength = 15 ) ) }) # alternative text output[["clusters_by_cell_cycle_cyclone_text"]] <- renderText({ "Data not available." 
}) # info box observeEvent(input[["clusters_by_cell_cycle_cyclone_info"]], { showModal( modalDialog( clusters_by_cell_cycle_cyclone_info[["text"]], title = clusters_by_cell_cycle_cyclone_info[["title"]], easyClose = TRUE, footer = NULL ) ) })
# Exploratory data analysis - course project 1 - plot 4 # Getting the data # Download the data and place it wherever you want, following line asks you to choose the file. data_full<-read.csv(file.choose(), header=T, sep=';', na.strings="?", nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"') data_full$Date<-as.Date(data_full$Date,format="%d/%m/%Y") # Subsetting required data. data<-subset(data_full,subset=(Date >= "2007-02-01" & Date <= "2007-02-02")) rm(data_full) # Converting dates. datetime<-paste(as.Date(data$Date), data$Time) data$Datetime<-as.POSIXct(datetime) # Creating plot #4. par(mfrow=c(2,2), mar=c(4,4,2,1), oma=c(0,0,2,0)) with(data, { plot(Global_active_power~Datetime, type="l", ylab="Global Active Power", xlab="") plot(Voltage~Datetime, type="l", ylab="Voltage", xlab="") plot(Sub_metering_1~Datetime, type="l", ylab="Energy Sub Metering", xlab="") lines(Sub_metering_2~Datetime,col='Red') lines(Sub_metering_3~Datetime,col='Blue') legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2, bty="n", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")) plot(Global_reactive_power~Datetime, type="l", ylab="Global Rective Power",xlab="") }) # Saving plot #4 to file. dev.copy(png, file="plot4.png", height=480, width=480) dev.off()
/plot4.R
no_license
jrybin/project1
R
false
false
1,400
r
# Exploratory data analysis - course project 1 - plot 4 # Getting the data # Download the data and place it wherever you want, following line asks you to choose the file. data_full<-read.csv(file.choose(), header=T, sep=';', na.strings="?", nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"') data_full$Date<-as.Date(data_full$Date,format="%d/%m/%Y") # Subsetting required data. data<-subset(data_full,subset=(Date >= "2007-02-01" & Date <= "2007-02-02")) rm(data_full) # Converting dates. datetime<-paste(as.Date(data$Date), data$Time) data$Datetime<-as.POSIXct(datetime) # Creating plot #4. par(mfrow=c(2,2), mar=c(4,4,2,1), oma=c(0,0,2,0)) with(data, { plot(Global_active_power~Datetime, type="l", ylab="Global Active Power", xlab="") plot(Voltage~Datetime, type="l", ylab="Voltage", xlab="") plot(Sub_metering_1~Datetime, type="l", ylab="Energy Sub Metering", xlab="") lines(Sub_metering_2~Datetime,col='Red') lines(Sub_metering_3~Datetime,col='Blue') legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2, bty="n", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")) plot(Global_reactive_power~Datetime, type="l", ylab="Global Rective Power",xlab="") }) # Saving plot #4 to file. dev.copy(png, file="plot4.png", height=480, width=480) dev.off()
install.packages("keras") library(keras) install_keras() remove.packages("keras") Sys.getenv("R_LIBS_USER") library(keras) mnist <- dataset_mnist() x_train <- mnist$train$x y_train <- mnist$train$y x_test <- mnist$test$x y_test <- mnist$test$y # reshape x_train <- array_reshape(x_train, c(nrow(x_train), 784)) x_test <- array_reshape(x_test, c(nrow(x_test), 784)) # rescale x_train <- x_train / 255 x_test <- x_test / 255 y_train <- to_categorical(y_train, 10) y_test <- to_categorical(y_test, 10) model <- keras_model_sequential() model %>% layer_dense(units = 256, activation = 'relu', input_shape = c(784)) %>% layer_dropout(rate = 0.4) %>% layer_dense(units = 128, activation = 'relu') %>% layer_dropout(rate = 0.3) %>% layer_dense(units = 10, activation = 'softmax') summary(model) model %>% compile( loss = 'categorical_crossentropy', optimizer = optimizer_rmsprop(), metrics = c('accuracy') ) history <- model %>% fit( x_train, y_train, epochs = 30, batch_size = 128, validation_split = 0.2 ) plot(history) model %>% evaluate(x_test, y_test) model %>% predict_classes(x_test)
/Koding R/tutorial/keras.R
no_license
rizkanns/lstm-forecast-rainfall
R
false
false
1,120
r
install.packages("keras") library(keras) install_keras() remove.packages("keras") Sys.getenv("R_LIBS_USER") library(keras) mnist <- dataset_mnist() x_train <- mnist$train$x y_train <- mnist$train$y x_test <- mnist$test$x y_test <- mnist$test$y # reshape x_train <- array_reshape(x_train, c(nrow(x_train), 784)) x_test <- array_reshape(x_test, c(nrow(x_test), 784)) # rescale x_train <- x_train / 255 x_test <- x_test / 255 y_train <- to_categorical(y_train, 10) y_test <- to_categorical(y_test, 10) model <- keras_model_sequential() model %>% layer_dense(units = 256, activation = 'relu', input_shape = c(784)) %>% layer_dropout(rate = 0.4) %>% layer_dense(units = 128, activation = 'relu') %>% layer_dropout(rate = 0.3) %>% layer_dense(units = 10, activation = 'softmax') summary(model) model %>% compile( loss = 'categorical_crossentropy', optimizer = optimizer_rmsprop(), metrics = c('accuracy') ) history <- model %>% fit( x_train, y_train, epochs = 30, batch_size = 128, validation_split = 0.2 ) plot(history) model %>% evaluate(x_test, y_test) model %>% predict_classes(x_test)
# # market-cap.R, 15 Jul 16 # # Data from: # Daily chart for April 21 2015 on The Economist webpage # Economist {Data team} # # Example from: # Empirical Software Engineering using R # Derek M. Jones source("ESEUR_config.r") library("plyr") plot_layout(2, 1) pal_col=rainbow(3) total_valu=function(df) { if (nrow(df) != 281) print(df) # t=data.frame(year=df$x[3:140], ticker=df$ticker[3:140], t=data.frame(year=1980+df$x[3:140]*(35/475), ticker=df$ticker[3:140], market_cap=df$y[280:143]-df$y[3:140]) return(t) } plot_lines=function(df) { lines(df$year, df$market_cap) } plot_ticker=function(tick_str, col_str) { tick_mc=subset(mc, ticker == tick_str) lines(tick_mc$year, tick_mc$market_cap, col=col_str) return(tick_mc) } plot_percent=function(tick_str, col_str) { tick_mc=subset(mc, ticker == tick_str) lines(tick_mc$year, 100*tick_mc$market_cap/tick_mc$tech_market_cap, col=col_str) } sum_market_cap=function(df) { return(sum(df$market_cap)) } m_cap=read.csv(paste0(ESEUR_dir, "ecosystem/real-market-cap.csv.xz"), as.is=TRUE) # ibm=subset(m_cap, ticker == "IBM") # ibm_1=subset(ibm, round == 1) # ibm_2=subset(ibm, round == 2) # plot(ibm_1$x, max(ibm_1$y)-ibm_1$y, col="red") #lines(ibm_2$x, max(ibm_2$y)-ibm_2$y, col="blue") # lines(ibm_2$x[1:140], max(ibm_2$y)-ibm_2$y[1:140], col="green") # lines(ibm_2$x[280:141]+6.76, max(ibm_2$y)-ibm_2$y[280:141], col="red") # lines(ibm_2$x[278:141], max(ibm_2$y)-ibm_2$y[280:143], col="black") # Ignore the Year/January only data valu_bounds=subset(m_cap, round == 2) mc=ddply(valu_bounds, .(ticker), total_valu) plot(0, type="n", xlim=c(1980, 2015), ylim=c(0, 70), xlab="Date", ylab="Market cap\n") # d_ply(mc, .(ticker), plot_lines) aapl=plot_ticker("AAPL", pal_col[1]) ibm=plot_ticker("IBM", pal_col[2]) MSFT=plot_ticker("MSFT", pal_col[3]) tmc=ddply(mc, .(year), sum_market_cap) mc$tech_market_cap=tmc$V1 plot(0, type="n", xlim=c(1980, 2015), ylim=c(0, 80), xlab="Date", ylab="Tech market capital share (%)\n") aapl=plot_percent("AAPL", pal_col[1]) 
ibm=plot_percent("IBM", pal_col[2]) MSFT=plot_percent("MSFT", pal_col[3]) legend(x="topright", legend=rev(c("Apple", "IBM", "Microsoft")), bty="n", fill=rev(pal_col), cex=1.1)
/ecosystem/market-cap.R
no_license
montahdaya/ESEUR-code-data
R
false
false
2,217
r
# # market-cap.R, 15 Jul 16 # # Data from: # Daily chart for April 21 2015 on The Economist webpage # Economist {Data team} # # Example from: # Empirical Software Engineering using R # Derek M. Jones source("ESEUR_config.r") library("plyr") plot_layout(2, 1) pal_col=rainbow(3) total_valu=function(df) { if (nrow(df) != 281) print(df) # t=data.frame(year=df$x[3:140], ticker=df$ticker[3:140], t=data.frame(year=1980+df$x[3:140]*(35/475), ticker=df$ticker[3:140], market_cap=df$y[280:143]-df$y[3:140]) return(t) } plot_lines=function(df) { lines(df$year, df$market_cap) } plot_ticker=function(tick_str, col_str) { tick_mc=subset(mc, ticker == tick_str) lines(tick_mc$year, tick_mc$market_cap, col=col_str) return(tick_mc) } plot_percent=function(tick_str, col_str) { tick_mc=subset(mc, ticker == tick_str) lines(tick_mc$year, 100*tick_mc$market_cap/tick_mc$tech_market_cap, col=col_str) } sum_market_cap=function(df) { return(sum(df$market_cap)) } m_cap=read.csv(paste0(ESEUR_dir, "ecosystem/real-market-cap.csv.xz"), as.is=TRUE) # ibm=subset(m_cap, ticker == "IBM") # ibm_1=subset(ibm, round == 1) # ibm_2=subset(ibm, round == 2) # plot(ibm_1$x, max(ibm_1$y)-ibm_1$y, col="red") #lines(ibm_2$x, max(ibm_2$y)-ibm_2$y, col="blue") # lines(ibm_2$x[1:140], max(ibm_2$y)-ibm_2$y[1:140], col="green") # lines(ibm_2$x[280:141]+6.76, max(ibm_2$y)-ibm_2$y[280:141], col="red") # lines(ibm_2$x[278:141], max(ibm_2$y)-ibm_2$y[280:143], col="black") # Ignore the Year/January only data valu_bounds=subset(m_cap, round == 2) mc=ddply(valu_bounds, .(ticker), total_valu) plot(0, type="n", xlim=c(1980, 2015), ylim=c(0, 70), xlab="Date", ylab="Market cap\n") # d_ply(mc, .(ticker), plot_lines) aapl=plot_ticker("AAPL", pal_col[1]) ibm=plot_ticker("IBM", pal_col[2]) MSFT=plot_ticker("MSFT", pal_col[3]) tmc=ddply(mc, .(year), sum_market_cap) mc$tech_market_cap=tmc$V1 plot(0, type="n", xlim=c(1980, 2015), ylim=c(0, 80), xlab="Date", ylab="Tech market capital share (%)\n") aapl=plot_percent("AAPL", pal_col[1]) 
ibm=plot_percent("IBM", pal_col[2]) MSFT=plot_percent("MSFT", pal_col[3]) legend(x="topright", legend=rev(c("Apple", "IBM", "Microsoft")), bty="n", fill=rev(pal_col), cex=1.1)
#' Plot indices of abundance and associated quantities. #' #' Plot indices of abundance and associated quantities. #' #' #' @param replist list created by \code{SS_output} #' @param subplots vector controlling which subplots to create #' @param plot plot to active plot device? #' @param print print to PNG files? #' @param fleets optional vector to subset fleets for which plots will be made #' @param fleetnames optional replacement for fleenames used in data file #' @param smooth add smoothed line to plots of observed vs. expected sample #' sizes #' @param add add to existing plot (not yet implemented) #' @param datplot make plot of data only? #' @param labels vector of labels for plots (titles and axis labels) #' @param col1 vector of colors for points in each season for time series plot. #' Default is red for single season models and a rainbow using the #' rich.colors.short function for multiple seasons. #' @param col2 vector of colors for points in each season for obs. vs. exp. #' plot. Default is blue for single season models and a rainbow using the #' rich.colors.short function for multiple seasons. #' @param col3 color of line showing expected index in time series plot. #' Default is blue. #' @param col4 color of smoother shown in obs. vs. exp. plots. Default is red. #' @param pch1 single value or vector of plotting characters (pch parameter) #' for time-series plots of index fit. Default=21. #' @param pch2 single value or vector of plotting characters (pch parameter) #' for sample size plots of index fit. Default=16. #' @param cex character expansion factor for points showing observed values. #' Default=1. #' @param bg Background color for points with pch=21. 
#' @param legend add a legend to seasonal colors (only for seasonal models) #' @param legendloc add a legend to seasonal colors (default is "topright") #' @param seasnames optional vector of names for each season to replace #' defaults if a legend is used #' @param pwidth width of plot written to PNG file #' @param pheight height of plot written to PNG file #' @param punits units for PNG file #' @param res resolution for PNG file #' @param ptsize ptsize for PNG file #' @param cex.main character expansion for plot titles #' @param addmain switch which allows the plot title to be left off #' @param plotdir directory where PNG files will be written. by default it will #' be the directory where the model was run. #' @param minyr First year to show in plot (for zooming in on a subset of #' values) #' @param maxyr Last year to show in plot (for zooming in on a subset of #' values) #' @param verbose report progress to R GUI? #' @author Ian Stewart, Ian Taylor #' @export #' @seealso \code{\link{SS_plots}}, \code{\link{SS_output}} #' @keywords hplot SSplotIndices <- function(replist,subplots=1:9, plot=TRUE,print=FALSE, fleets="all",fleetnames="default", smooth=TRUE,add=FALSE,datplot=FALSE, labels=c("Year", #1 "Index", #2 "Observed index", #3 "Expected index", #4 "Log index", #5 "Log observed index", #6 "Log expected index", #7 "Standardized index", #8 "Catchability (Q)", #9 "Time-varying catchability", #10 "Vulnerable biomass", #11 "Catchability vs. 
vulnerable biomass"), #12 col1="default", col2="default", col3="blue", col4="red", pch1=21, pch2=16, cex=1, bg="white", legend=TRUE, legendloc="topright", seasnames=NULL, pwidth=7,pheight=7,punits="in",res=300,ptsize=12,cex.main=1, addmain=TRUE,plotdir="default", minyr=NULL, maxyr=NULL, verbose=TRUE) { cpue <- replist$cpue if(is.null(dim(cpue))){ cat("skipping index plots: no CPUE data in this model\n") return() } pngfun <- function(filename,caption=NA){ png(filename=file,width=pwidth,height=pheight, units=punits,res=res,pointsize=ptsize) plotinfo <- rbind(plotinfo,data.frame(file=file,caption=caption)) return(plotinfo) } plotinfo <- NULL if(length(grep("supr_per",cpue$Supr_Per))){ cat("Note: some indices have superperiods. Values will be plotted in year/season associated with data in report file.\n") cpue <- cpue[!is.na(cpue$Dev),] } FleetNames <- replist$FleetNames nfleets <- replist$nfleets nseasons <- replist$nseasons if(nseasons>1){ # if seasons, put CPUE at season midpoint cpue$YrSeas <- cpue$Yr + (cpue$Seas - 0.5)/nseasons }else{ # if no seasons, put at integer year value cpue$YrSeas <- cpue$Yr } if(plotdir=="default") plotdir <- replist$inputs$dir if(fleetnames[1]=="default") fleetnames <- FleetNames if(fleets[1]=="all"){ fleets <- 1:nfleets }else{ if(length(intersect(fleets,1:nfleets))!=length(fleets)){ return("Input 'fleets' should be 'all' or a vector of values between 1 and nfleets.") }} # subset fleets as requested fleetvec <- intersect(fleets, unique(as.numeric(cpue$FleetNum))) # use fancy colors only if any index spans more than one season usecol <- FALSE for(ifleet in fleetvec){ if(length(unique(cpue$Seas[cpue$Obs > 0 & cpue$FleetNum==ifleet])) > 1){ usecol <- TRUE }else{ legend=FALSE } } if(col1[1]=="default"){ colvec1 <- "black" if(usecol & nseasons==4) colvec1 <- c("blue4","green3","orange2","red3") if(usecol & !nseasons %in% c(1,4)) colvec1 <- rich.colors.short(nseasons) }else{ colvec1 <- col1 } if(col2[1]=="default"){ colvec2 <- "blue" 
if(usecol & nseasons==4) colvec2 <- c("blue4","green3","orange2","red3") if(usecol & !nseasons %in% c(1,4)) colvec2 <- rich.colors.short(nseasons) }else{ colvec2 <- col2 } if(is.null(seasnames)) seasnames <- paste("Season",1:nseasons,sep="") allcpue <- data.frame() # loop over fleets for(ifleet in fleetvec){ Fleet <- fleetnames[ifleet] cpueuse <- cpue[cpue$Obs > 0 & cpue$FleetNum==ifleet,] cpueuse <- cpueuse[order(cpueuse$YrSeas),] # look for time-vary time <- diff(range(cpueuse$Calc_Q))>0 # look for time-varying effective Q time2 <- diff(range(cpueuse$Eff_Q))>0 # Teresa's model had NA values in Eff_Q for unknown reasons # line below will allow model to play on if(is.na(time2)) time2 <- FALSE x <- cpueuse$YrSeas y <- cpueuse$Obs z <- cpueuse$Exp include <- !is.na(cpueuse$Like) if(usecol) s <- cpueuse$Seas else s <- 1 # only use colorvector if more than 1 season if(datplot){ cpueuse$Index <- rep(ifleet,length(cpueuse$YrSeas)) cpueuse$stdvalue <- cpueuse$Obs/mean(cpueuse$Obs) tempcpue <- cbind(cpueuse$Index,cpueuse$YrSeas,cpueuse$Obs,cpueuse$stdvalue) colnames(tempcpue) <- c("Index","year","value","stdvalue") allcpue <- rbind(allcpue,tempcpue) } uiw <- qlnorm(.975,meanlog=log(y),sdlog=cpueuse$SE) - y if(max(uiw)==Inf){ cat("!warning: removing upper interval on indices with infinite upper quantile values\n", " check the uncertainty inputs to for the indices\n") uiw[uiw==Inf] <- 1000*max(cpueuse$Obs[uiw==Inf]) } liw <- y - qlnorm(.025,meanlog=log(y),sdlog=cpueuse$SE) npoints <- length(z) main=paste(labels[2], Fleet,sep=" ") if(!addmain) main <- "" addlegend <- function(pch, colvec){ names <- paste(seasnames,"observations") } # print(cbind(x, y, liw, uiw)) # debugging line cpuefun1 <- function(addexpected=TRUE){ # plot of time-series of observed and expected (if requested) xlim <- c(max(minyr,min(x)),min(maxyr,max(x))) if(!add) plot(x=x[include], y=y[include], type='n', xlab=labels[1], ylab=labels[2], main=main, cex.main=cex.main, xlim=xlim, 
ylim=c(0,max(y+uiw,na.rm=TRUE))) plotCI(x=x[include],y=y[include],sfrac=0.005,uiw=uiw[include],liw=liw[include], ylo=0,col=colvec1[s], main=main,cex.main=cex.main,lty=1,add=TRUE,pch=pch1, bg=bg,cex=cex) abline(h=0,col="grey") if(addexpected) lines(x,z,lwd=2,col=col3) if(legend & length(colvec1)>1) legend(x=legendloc, legend=seasnames, pch=pch1, col=colvec1, cex=cex) } cpuefun2 <- function(){ # plot of observed vs. expected with smoother if(!add) plot(y[include],z[include],xlab=labels[3],main=main,cex.main=cex.main, ylim=c(0,max(z)),xlim=c(0,max(y)),ylab=labels[4]) points(y[include],z[include],col=colvec2[s],pch=pch2,cex=cex) abline(h=0,col="grey") lines(x=c(0,max(z[include])),y=c(0,max(z[include]))) if(smooth && npoints > 6 && diff(range(y))>0){ psmooth <- loess(z[include]~y[include],degree=1) lines(psmooth$x[order(psmooth$x)],psmooth$fit[order(psmooth$x)], lwd=1.2,col=col4,lty="dashed") } if(legend & length(colvec2)>1) legend(x=legendloc, legend=seasnames, pch=pch2, col=colvec2, cex=cex) } if(plot){ if(1 %in% subplots & datplot) cpuefun1(addexpected=FALSE) if(2 %in% subplots) cpuefun1() if(3 %in% subplots) cpuefun2() } if(print){ if(1 %in% subplots & datplot){ file <- paste(plotdir,"/index1_cpuedata_",Fleet,".png",sep="") caption <- paste("Index data for",Fleet) plotinfo <- pngfun(file=file, caption=caption) cpuefun1(addexpected=FALSE) dev.off() } if(2 %in% subplots){ file <- paste(plotdir,"/index2_cpuefit_",Fleet,".png",sep="") caption <- paste("Fit to index data for",Fleet) plotinfo <- pngfun(file=file, caption=caption) cpuefun1() dev.off() } if(3 %in% subplots){ file <- paste(plotdir,"/index3_cpuecheck_",Fleet,".png",sep="") caption <- paste("Observed vs. 
expected index values with smoother for",Fleet) plotinfo <- pngfun(file=file, caption=caption) cpuefun2() dev.off() } } # same plots again in log space (someday should create generalized set of commands) main <- paste(labels[5], Fleet, sep=" ") if(!addmain) main <- "" uiw <- qnorm(.975,mean=log(y),sd=cpueuse$SE) - log(y) liw <- log(y) - qnorm(.025,mean=log(y),sd=cpueuse$SE) cpuefun3 <- function(addexpected=TRUE){ # plot of time-series of log(observed) and log(expected) (if requested) xlim <- c(max(minyr,min(x)),min(maxyr,max(x))) if(!add) plot(x=x[include], y=log(y[include]), type='n', xlab=labels[1], ylab=labels[5], main=main, cex.main=cex.main, xlim=xlim, ylim=range(log(y[include])-liw[include], log(y[include])+uiw[include],na.rm=TRUE)) plotCI(x=x[include],y=log(y[include]),sfrac=0.005,uiw=uiw[include], liw=liw[include], col=colvec1[s],lty=1,add=TRUE,pch=pch1,bg=bg,cex=cex) if(addexpected) lines(x,log(z),lwd=2,col=col3) if(length(colvec1)>1) legend(x=legendloc, legend=seasnames, pch=pch1, col=colvec1, cex=cex) } cpuefun4 <- function(){ # plot of log(observed) vs. 
log(expected) with smoother if(!add) plot(log(y[include]),log(z[include]),type='n',xlab=labels[6],main=main, cex.main=cex.main,ylab=labels[7]) points(log(y[include]),log(z[include]),col=colvec2[s],pch=pch2) lines(x=range(log(z[include])),y=range(log(z[include]))) if(smooth && npoints > 6 && diff(range(y))>0){ psmooth <- loess(log(z[include])~log(y[include]),degree=1) lines(psmooth$x[order(psmooth$x)],psmooth$fit[order(psmooth$x)], lwd=1.2,col=col4,lty="dashed")} if(length(colvec2)>1) legend(x=legendloc, legend=seasnames, pch=pch2, col=colvec2, cex=cex) } cpuefun5 <- function(){ # plot of time-varying catchability (if present) main <- paste(labels[10], Fleet, sep=" ") if(!addmain) main <- "" q <- cpueuse$Calc_Q if(!add) plot(x,q,type='o',xlab=labels[1],main=main, cex.main=cex.main,ylab=labels[9], col=colvec2[1],pch=pch2) } cpuefun6 <- function(){ # plot of time-varying catchability (if present) main <- paste(labels[12], Fleet, sep=" ") if(!addmain) main <- "" v <- cpueuse$Vuln_bio q1 <- cpueuse$Calc_Q q2 <- cpueuse$Eff_Q if(all(q1==q2)) ylab <- labels[9] else ylab <- "Effective catchability" if(!add) plot(v,q2,type='o',xlab=labels[11],main=main, cex.main=cex.main,ylab=ylab, col=colvec2[1],pch=pch2) } if(plot){ if(4 %in% subplots & datplot) cpuefun3(addexpected=FALSE) if(5 %in% subplots) cpuefun3() if(6 %in% subplots) cpuefun4() if(7 %in% subplots & time) cpuefun5() if(8 %in% subplots & time2) cpuefun6() } if(print){ if(4 %in% subplots & datplot){ file <- paste(plotdir,"/index4_logcpuedata_",Fleet,".png",sep="") caption <- paste("Log index data for",Fleet) plotinfo <- pngfun(file=file, caption=caption) cpuefun3(addexpected=FALSE) dev.off() } if(5 %in% subplots){ file <- paste(plotdir,"/index5_logcpuefit_",Fleet,".png",sep="") caption <- paste("Fit to index data on log scale for",Fleet) plotinfo <- pngfun(file=file, caption=caption) cpuefun3() dev.off() } if(6 %in% subplots){ file <- paste(plotdir,"/index6_logcpuecheck_",Fleet,".png",sep="") caption <- 
paste("log(observed) vs. log(expected) index values with smoother for",Fleet) plotinfo <- pngfun(file=file, caption=caption) cpuefun4() dev.off() } if(7 %in% subplots & time){ file <- paste(plotdir,"/index7_timevaryingQ_",Fleet,".png",sep="") caption <- paste("Timeseries of catchability for",Fleet) plotinfo <- pngfun(file=file, caption=caption) cpuefun5() dev.off() } if(8 %in% subplots & time){ file <- paste(plotdir,"/index8_Q_vs_Vuln_bio_",Fleet,".png",sep="") caption <- paste("Catchability vs. vulnerable biomass for fleet ",Fleet,"<br> \n", "This plot should illustrate curvature of nonlinear catchability relationship<br> \n", "Or reveal patterns associated with random-walk catchability<br> \n", "It was inspired by Jim Thorson, so blame him if you don't like it.",sep="") plotinfo <- pngfun(file=file, caption=caption) cpuefun6() dev.off() } } } # nfleets ### New the standardized plot of all CPUE indices if(datplot==TRUE & nrow(allcpue)>0){ all_cpue_fun <- function(){ main="All cpue plot" if(!addmain) main <- "" xlim <- c(min(allcpue$year,na.rm=TRUE)-1,max(allcpue$year,na.rm=TRUE)+1) # change range if requested xlim[1] <- max(xlim[1],minyr) xlim[2] <- min(xlim[2],maxyr) ylim <- c(range(allcpue$stdvalue,na.rm=TRUE)) usecols <- rich.colors.short(max(allcpue$Index,na.rm=TRUE)) if(max(allcpue$Index,na.rm=TRUE) >= 2){ usecols <- rich.colors.short(max(allcpue$Index,na.rm=TRUE)+1)[-1] } if(!add) plot(0, type="n", xlab=labels[1], main=main, cex.main=cex.main, col=usecols[1], ylab=labels[8], xlim=xlim,ylim=ylim) for(ifleet in fleetvec){ points(x=allcpue$year[allcpue$Index==ifleet],y=allcpue$stdvalue[allcpue$Index==ifleet], pch=pch2,col=usecols[ifleet], cex=cex, lwd=0.4,lty="dashed", type="o") } } # end all_cpue_fun if(plot & (9 %in% subplots)){all_cpue_fun()} if(print & (9 %in% subplots)){ file <- paste(plotdir,"/index9_standcpueall",".png",sep="") caption <- "Standardized indices overlaid" plotinfo <- pngfun(file=file, caption=caption) all_cpue_fun() dev.off()} } # end 
datplot if(!is.null(plotinfo)) plotinfo$category <- "Index" return(invisible(plotinfo)) } # end function
/R/SSplotIndices.R
no_license
huihualee/r4ss
R
false
false
16,630
r
#' Plot indices of abundance and associated quantities. #' #' Plot indices of abundance and associated quantities. #' #' #' @param replist list created by \code{SS_output} #' @param subplots vector controlling which subplots to create #' @param plot plot to active plot device? #' @param print print to PNG files? #' @param fleets optional vector to subset fleets for which plots will be made #' @param fleetnames optional replacement for fleenames used in data file #' @param smooth add smoothed line to plots of observed vs. expected sample #' sizes #' @param add add to existing plot (not yet implemented) #' @param datplot make plot of data only? #' @param labels vector of labels for plots (titles and axis labels) #' @param col1 vector of colors for points in each season for time series plot. #' Default is red for single season models and a rainbow using the #' rich.colors.short function for multiple seasons. #' @param col2 vector of colors for points in each season for obs. vs. exp. #' plot. Default is blue for single season models and a rainbow using the #' rich.colors.short function for multiple seasons. #' @param col3 color of line showing expected index in time series plot. #' Default is blue. #' @param col4 color of smoother shown in obs. vs. exp. plots. Default is red. #' @param pch1 single value or vector of plotting characters (pch parameter) #' for time-series plots of index fit. Default=21. #' @param pch2 single value or vector of plotting characters (pch parameter) #' for sample size plots of index fit. Default=16. #' @param cex character expansion factor for points showing observed values. #' Default=1. #' @param bg Background color for points with pch=21. 
#' @param legend add a legend to seasonal colors (only for seasonal models) #' @param legendloc add a legend to seasonal colors (default is "topright") #' @param seasnames optional vector of names for each season to replace #' defaults if a legend is used #' @param pwidth width of plot written to PNG file #' @param pheight height of plot written to PNG file #' @param punits units for PNG file #' @param res resolution for PNG file #' @param ptsize ptsize for PNG file #' @param cex.main character expansion for plot titles #' @param addmain switch which allows the plot title to be left off #' @param plotdir directory where PNG files will be written. by default it will #' be the directory where the model was run. #' @param minyr First year to show in plot (for zooming in on a subset of #' values) #' @param maxyr Last year to show in plot (for zooming in on a subset of #' values) #' @param verbose report progress to R GUI? #' @author Ian Stewart, Ian Taylor #' @export #' @seealso \code{\link{SS_plots}}, \code{\link{SS_output}} #' @keywords hplot SSplotIndices <- function(replist,subplots=1:9, plot=TRUE,print=FALSE, fleets="all",fleetnames="default", smooth=TRUE,add=FALSE,datplot=FALSE, labels=c("Year", #1 "Index", #2 "Observed index", #3 "Expected index", #4 "Log index", #5 "Log observed index", #6 "Log expected index", #7 "Standardized index", #8 "Catchability (Q)", #9 "Time-varying catchability", #10 "Vulnerable biomass", #11 "Catchability vs. 
vulnerable biomass"), #12 col1="default", col2="default", col3="blue", col4="red", pch1=21, pch2=16, cex=1, bg="white", legend=TRUE, legendloc="topright", seasnames=NULL, pwidth=7,pheight=7,punits="in",res=300,ptsize=12,cex.main=1, addmain=TRUE,plotdir="default", minyr=NULL, maxyr=NULL, verbose=TRUE) { cpue <- replist$cpue if(is.null(dim(cpue))){ cat("skipping index plots: no CPUE data in this model\n") return() } pngfun <- function(filename,caption=NA){ png(filename=file,width=pwidth,height=pheight, units=punits,res=res,pointsize=ptsize) plotinfo <- rbind(plotinfo,data.frame(file=file,caption=caption)) return(plotinfo) } plotinfo <- NULL if(length(grep("supr_per",cpue$Supr_Per))){ cat("Note: some indices have superperiods. Values will be plotted in year/season associated with data in report file.\n") cpue <- cpue[!is.na(cpue$Dev),] } FleetNames <- replist$FleetNames nfleets <- replist$nfleets nseasons <- replist$nseasons if(nseasons>1){ # if seasons, put CPUE at season midpoint cpue$YrSeas <- cpue$Yr + (cpue$Seas - 0.5)/nseasons }else{ # if no seasons, put at integer year value cpue$YrSeas <- cpue$Yr } if(plotdir=="default") plotdir <- replist$inputs$dir if(fleetnames[1]=="default") fleetnames <- FleetNames if(fleets[1]=="all"){ fleets <- 1:nfleets }else{ if(length(intersect(fleets,1:nfleets))!=length(fleets)){ return("Input 'fleets' should be 'all' or a vector of values between 1 and nfleets.") }} # subset fleets as requested fleetvec <- intersect(fleets, unique(as.numeric(cpue$FleetNum))) # use fancy colors only if any index spans more than one season usecol <- FALSE for(ifleet in fleetvec){ if(length(unique(cpue$Seas[cpue$Obs > 0 & cpue$FleetNum==ifleet])) > 1){ usecol <- TRUE }else{ legend=FALSE } } if(col1[1]=="default"){ colvec1 <- "black" if(usecol & nseasons==4) colvec1 <- c("blue4","green3","orange2","red3") if(usecol & !nseasons %in% c(1,4)) colvec1 <- rich.colors.short(nseasons) }else{ colvec1 <- col1 } if(col2[1]=="default"){ colvec2 <- "blue" 
if(usecol & nseasons==4) colvec2 <- c("blue4","green3","orange2","red3") if(usecol & !nseasons %in% c(1,4)) colvec2 <- rich.colors.short(nseasons) }else{ colvec2 <- col2 } if(is.null(seasnames)) seasnames <- paste("Season",1:nseasons,sep="") allcpue <- data.frame() # loop over fleets for(ifleet in fleetvec){ Fleet <- fleetnames[ifleet] cpueuse <- cpue[cpue$Obs > 0 & cpue$FleetNum==ifleet,] cpueuse <- cpueuse[order(cpueuse$YrSeas),] # look for time-vary time <- diff(range(cpueuse$Calc_Q))>0 # look for time-varying effective Q time2 <- diff(range(cpueuse$Eff_Q))>0 # Teresa's model had NA values in Eff_Q for unknown reasons # line below will allow model to play on if(is.na(time2)) time2 <- FALSE x <- cpueuse$YrSeas y <- cpueuse$Obs z <- cpueuse$Exp include <- !is.na(cpueuse$Like) if(usecol) s <- cpueuse$Seas else s <- 1 # only use colorvector if more than 1 season if(datplot){ cpueuse$Index <- rep(ifleet,length(cpueuse$YrSeas)) cpueuse$stdvalue <- cpueuse$Obs/mean(cpueuse$Obs) tempcpue <- cbind(cpueuse$Index,cpueuse$YrSeas,cpueuse$Obs,cpueuse$stdvalue) colnames(tempcpue) <- c("Index","year","value","stdvalue") allcpue <- rbind(allcpue,tempcpue) } uiw <- qlnorm(.975,meanlog=log(y),sdlog=cpueuse$SE) - y if(max(uiw)==Inf){ cat("!warning: removing upper interval on indices with infinite upper quantile values\n", " check the uncertainty inputs to for the indices\n") uiw[uiw==Inf] <- 1000*max(cpueuse$Obs[uiw==Inf]) } liw <- y - qlnorm(.025,meanlog=log(y),sdlog=cpueuse$SE) npoints <- length(z) main=paste(labels[2], Fleet,sep=" ") if(!addmain) main <- "" addlegend <- function(pch, colvec){ names <- paste(seasnames,"observations") } # print(cbind(x, y, liw, uiw)) # debugging line cpuefun1 <- function(addexpected=TRUE){ # plot of time-series of observed and expected (if requested) xlim <- c(max(minyr,min(x)),min(maxyr,max(x))) if(!add) plot(x=x[include], y=y[include], type='n', xlab=labels[1], ylab=labels[2], main=main, cex.main=cex.main, xlim=xlim, 
ylim=c(0,max(y+uiw,na.rm=TRUE))) plotCI(x=x[include],y=y[include],sfrac=0.005,uiw=uiw[include],liw=liw[include], ylo=0,col=colvec1[s], main=main,cex.main=cex.main,lty=1,add=TRUE,pch=pch1, bg=bg,cex=cex) abline(h=0,col="grey") if(addexpected) lines(x,z,lwd=2,col=col3) if(legend & length(colvec1)>1) legend(x=legendloc, legend=seasnames, pch=pch1, col=colvec1, cex=cex) } cpuefun2 <- function(){ # plot of observed vs. expected with smoother if(!add) plot(y[include],z[include],xlab=labels[3],main=main,cex.main=cex.main, ylim=c(0,max(z)),xlim=c(0,max(y)),ylab=labels[4]) points(y[include],z[include],col=colvec2[s],pch=pch2,cex=cex) abline(h=0,col="grey") lines(x=c(0,max(z[include])),y=c(0,max(z[include]))) if(smooth && npoints > 6 && diff(range(y))>0){ psmooth <- loess(z[include]~y[include],degree=1) lines(psmooth$x[order(psmooth$x)],psmooth$fit[order(psmooth$x)], lwd=1.2,col=col4,lty="dashed") } if(legend & length(colvec2)>1) legend(x=legendloc, legend=seasnames, pch=pch2, col=colvec2, cex=cex) } if(plot){ if(1 %in% subplots & datplot) cpuefun1(addexpected=FALSE) if(2 %in% subplots) cpuefun1() if(3 %in% subplots) cpuefun2() } if(print){ if(1 %in% subplots & datplot){ file <- paste(plotdir,"/index1_cpuedata_",Fleet,".png",sep="") caption <- paste("Index data for",Fleet) plotinfo <- pngfun(file=file, caption=caption) cpuefun1(addexpected=FALSE) dev.off() } if(2 %in% subplots){ file <- paste(plotdir,"/index2_cpuefit_",Fleet,".png",sep="") caption <- paste("Fit to index data for",Fleet) plotinfo <- pngfun(file=file, caption=caption) cpuefun1() dev.off() } if(3 %in% subplots){ file <- paste(plotdir,"/index3_cpuecheck_",Fleet,".png",sep="") caption <- paste("Observed vs. 
expected index values with smoother for",Fleet) plotinfo <- pngfun(file=file, caption=caption) cpuefun2() dev.off() } } # same plots again in log space (someday should create generalized set of commands) main <- paste(labels[5], Fleet, sep=" ") if(!addmain) main <- "" uiw <- qnorm(.975,mean=log(y),sd=cpueuse$SE) - log(y) liw <- log(y) - qnorm(.025,mean=log(y),sd=cpueuse$SE) cpuefun3 <- function(addexpected=TRUE){ # plot of time-series of log(observed) and log(expected) (if requested) xlim <- c(max(minyr,min(x)),min(maxyr,max(x))) if(!add) plot(x=x[include], y=log(y[include]), type='n', xlab=labels[1], ylab=labels[5], main=main, cex.main=cex.main, xlim=xlim, ylim=range(log(y[include])-liw[include], log(y[include])+uiw[include],na.rm=TRUE)) plotCI(x=x[include],y=log(y[include]),sfrac=0.005,uiw=uiw[include], liw=liw[include], col=colvec1[s],lty=1,add=TRUE,pch=pch1,bg=bg,cex=cex) if(addexpected) lines(x,log(z),lwd=2,col=col3) if(length(colvec1)>1) legend(x=legendloc, legend=seasnames, pch=pch1, col=colvec1, cex=cex) } cpuefun4 <- function(){ # plot of log(observed) vs. 
log(expected) with smoother if(!add) plot(log(y[include]),log(z[include]),type='n',xlab=labels[6],main=main, cex.main=cex.main,ylab=labels[7]) points(log(y[include]),log(z[include]),col=colvec2[s],pch=pch2) lines(x=range(log(z[include])),y=range(log(z[include]))) if(smooth && npoints > 6 && diff(range(y))>0){ psmooth <- loess(log(z[include])~log(y[include]),degree=1) lines(psmooth$x[order(psmooth$x)],psmooth$fit[order(psmooth$x)], lwd=1.2,col=col4,lty="dashed")} if(length(colvec2)>1) legend(x=legendloc, legend=seasnames, pch=pch2, col=colvec2, cex=cex) } cpuefun5 <- function(){ # plot of time-varying catchability (if present) main <- paste(labels[10], Fleet, sep=" ") if(!addmain) main <- "" q <- cpueuse$Calc_Q if(!add) plot(x,q,type='o',xlab=labels[1],main=main, cex.main=cex.main,ylab=labels[9], col=colvec2[1],pch=pch2) } cpuefun6 <- function(){ # plot of time-varying catchability (if present) main <- paste(labels[12], Fleet, sep=" ") if(!addmain) main <- "" v <- cpueuse$Vuln_bio q1 <- cpueuse$Calc_Q q2 <- cpueuse$Eff_Q if(all(q1==q2)) ylab <- labels[9] else ylab <- "Effective catchability" if(!add) plot(v,q2,type='o',xlab=labels[11],main=main, cex.main=cex.main,ylab=ylab, col=colvec2[1],pch=pch2) } if(plot){ if(4 %in% subplots & datplot) cpuefun3(addexpected=FALSE) if(5 %in% subplots) cpuefun3() if(6 %in% subplots) cpuefun4() if(7 %in% subplots & time) cpuefun5() if(8 %in% subplots & time2) cpuefun6() } if(print){ if(4 %in% subplots & datplot){ file <- paste(plotdir,"/index4_logcpuedata_",Fleet,".png",sep="") caption <- paste("Log index data for",Fleet) plotinfo <- pngfun(file=file, caption=caption) cpuefun3(addexpected=FALSE) dev.off() } if(5 %in% subplots){ file <- paste(plotdir,"/index5_logcpuefit_",Fleet,".png",sep="") caption <- paste("Fit to index data on log scale for",Fleet) plotinfo <- pngfun(file=file, caption=caption) cpuefun3() dev.off() } if(6 %in% subplots){ file <- paste(plotdir,"/index6_logcpuecheck_",Fleet,".png",sep="") caption <- 
paste("log(observed) vs. log(expected) index values with smoother for",Fleet) plotinfo <- pngfun(file=file, caption=caption) cpuefun4() dev.off() } if(7 %in% subplots & time){ file <- paste(plotdir,"/index7_timevaryingQ_",Fleet,".png",sep="") caption <- paste("Timeseries of catchability for",Fleet) plotinfo <- pngfun(file=file, caption=caption) cpuefun5() dev.off() } if(8 %in% subplots & time){ file <- paste(plotdir,"/index8_Q_vs_Vuln_bio_",Fleet,".png",sep="") caption <- paste("Catchability vs. vulnerable biomass for fleet ",Fleet,"<br> \n", "This plot should illustrate curvature of nonlinear catchability relationship<br> \n", "Or reveal patterns associated with random-walk catchability<br> \n", "It was inspired by Jim Thorson, so blame him if you don't like it.",sep="") plotinfo <- pngfun(file=file, caption=caption) cpuefun6() dev.off() } } } # nfleets ### New the standardized plot of all CPUE indices if(datplot==TRUE & nrow(allcpue)>0){ all_cpue_fun <- function(){ main="All cpue plot" if(!addmain) main <- "" xlim <- c(min(allcpue$year,na.rm=TRUE)-1,max(allcpue$year,na.rm=TRUE)+1) # change range if requested xlim[1] <- max(xlim[1],minyr) xlim[2] <- min(xlim[2],maxyr) ylim <- c(range(allcpue$stdvalue,na.rm=TRUE)) usecols <- rich.colors.short(max(allcpue$Index,na.rm=TRUE)) if(max(allcpue$Index,na.rm=TRUE) >= 2){ usecols <- rich.colors.short(max(allcpue$Index,na.rm=TRUE)+1)[-1] } if(!add) plot(0, type="n", xlab=labels[1], main=main, cex.main=cex.main, col=usecols[1], ylab=labels[8], xlim=xlim,ylim=ylim) for(ifleet in fleetvec){ points(x=allcpue$year[allcpue$Index==ifleet],y=allcpue$stdvalue[allcpue$Index==ifleet], pch=pch2,col=usecols[ifleet], cex=cex, lwd=0.4,lty="dashed", type="o") } } # end all_cpue_fun if(plot & (9 %in% subplots)){all_cpue_fun()} if(print & (9 %in% subplots)){ file <- paste(plotdir,"/index9_standcpueall",".png",sep="") caption <- "Standardized indices overlaid" plotinfo <- pngfun(file=file, caption=caption) all_cpue_fun() dev.off()} } # end 
datplot if(!is.null(plotinfo)) plotinfo$category <- "Index" return(invisible(plotinfo)) } # end function
# Convert wind vector components into direction and speed.
#
# Args:
#   u, v: numeric vectors of the eastward/northward wind components.
# Returns:
#   A data.frame with columns `wd` (direction in degrees, in (0, 360],
#   rounded to 2 dp) and `ws` (speed, rounded to 2 dp).
uv2wdws <- function(u, v) {
  rad2deg <- 180 / pi
  # mathematical angle of the (u, v) vector, degrees CCW from east
  math_angle <- atan2(v, u) * rad2deg
  # fold non-positive angles up into (0, 360]
  compass <- ifelse(math_angle > 0, math_angle, math_angle + 360)
  # rotate/reflect into the meteorological direction convention
  wd <- round(ifelse(compass < 270, 270 - compass, 270 - compass + 360), 2)
  # speed is the vector magnitude
  ws <- round(sqrt(u^2 + v^2), 2)
  as.data.frame(cbind(wd, ws))
}
/R/misc/salabim/uv2wdws.R
no_license
jrminter/snippets
R
false
false
340
r
# Convert wind vector components (u = eastward, v = northward) into wind
# direction and speed.
#
# Args:
#   u, v: numeric vectors of wind components.
# Returns:
#   data.frame with columns `wd` (direction in degrees, in (0, 360],
#   rounded to 2 dp) and `ws` (speed, rounded to 2 dp).
# NOTE(review): appears to follow the meteorological "direction the wind
# comes from" convention — confirm against the calling code.
uv2wdws <- function(u,v) {
  # helper: radians -> degrees
  degrees <- function(radians) 180 * radians / pi
  # mathematical angle of the vector, measured CCW from east
  mathdegs <- degrees(atan2(v, u))
  # fold non-positive angles up into (0, 360]
  wdcalc <- ifelse (mathdegs>0, mathdegs, mathdegs+360)
  # rotate/reflect into the compass direction convention
  wd <- ifelse (wdcalc<270, 270-wdcalc, 270-wdcalc+360)
  wd <- round(wd,2)
  # speed is the vector magnitude
  ws <- sqrt(u^2 + v^2)
  ws <- round(ws, 2)
  return(as.data.frame(cbind(wd, ws)))
}
## plot3.R — plot the three sub-metering series over time and save to PNG.
## NOTE(review): assumes `mydatawanted` (with columns Sub_metering_1..3 and
## MyDataTime) was built by an earlier script — confirm the loading step
## runs before this one.

# Coerce the sub-metering columns to numeric (via character, in case they
# were read as factors)
mydatawanted$Sub_metering_1 <- as.numeric(as.character(mydatawanted$Sub_metering_1))
mydatawanted$Sub_metering_2 <- as.numeric(as.character(mydatawanted$Sub_metering_2))
mydatawanted$Sub_metering_3 <- as.numeric(as.character(mydatawanted$Sub_metering_3))

# Open a 480x480 px PNG device and draw the three series in one panel
png(filename='./plot3.png',width=480,height=480,units='px')
with(mydatawanted,plot(MyDataTime,Sub_metering_1,type="l",xlab="",ylab = "Energy sub metering",col="black"))
with(mydatawanted,lines(MyDataTime,Sub_metering_2,col="red"))
with(mydatawanted,lines(MyDataTime,Sub_metering_3,col="blue"))

# Legend matching the three series' colors
mylegend <- c('Sub_metering_1','Sub_metering_2','Sub_metering_3')
mycolor <- c('black','red','blue')
legend("topright",legend=mylegend,col=mycolor,lty="solid")
dev.off()

## My system is in French so there are some differences:
## Jeu for Jeudi for Thu
## Ven for Vendredi for Fri
## Sam for Samedi for Sat
/Exploratory Data Analysis/Week1/plot3.R
no_license
istik/Data-Science-JHU
R
false
false
864
r
mydatawanted$Sub_metering_1 <- as.numeric(as.character(mydatawanted$Sub_metering_1)) mydatawanted$Sub_metering_2 <- as.numeric(as.character(mydatawanted$Sub_metering_2)) mydatawanted$Sub_metering_3 <- as.numeric(as.character(mydatawanted$Sub_metering_3)) png(filename='./plot3.png',width=480,height=480,units='px') with(mydatawanted,plot(MyDataTime,Sub_metering_1,type="l",xlab="",ylab = "Energy sub metering",col="black")) with(mydatawanted,lines(MyDataTime,Sub_metering_2,col="red")) with(mydatawanted,lines(MyDataTime,Sub_metering_3,col="blue")) mylegend <- c('Sub_metering_1','Sub_metering_2','Sub_metering_3') mycolor <- c('black','red','blue') legend("topright",legend=mylegend,col=mycolor,lty="solid") dev.off() ## My system is in French so there are some differences: ## Jeu for Jeudi for Thu ## Ven for Vendredi for Fri ## Sam for Samedi for Sat
###################### Función ####################
# Cleaning and treatment of the original billing (facturacion) base to
# create the month staging table, saved as an RDS file.
#
# Relies on project globals defined elsewhere: `external_path`, `meta_path`,
# `dictionary_path`, `os.path.join`, the `%!in%` operator, and the
# data.table / stringr / magrittr / readxl packages being attached.
#' @param yearmonth : yyyymm cut month to process
#' @param original_path : path field where original base_list[[i]] places
#' @param staging_path : path field where staging base_list[[i]] places
#' @return : staging table (written as RDS to staging_path)
fac_staging_maker <- function(yearmonth, original_path, staging_path) {
  # Read csv file names and extract their numeric yyyymm stamp
  files <- list.files(path = paste0(external_path, "/FacturacionTC"), pattern = ".csv")
  position <- paste0(sapply(str_extract_all(files, "[0-9]+"), "[[", 1)) %>% as.numeric

  # comparing cut months from user to master files created
  if (FALSE %in% (yearmonth %in% position)) {
    stop("Does not exist table for some of the months specified")
  }
  df_position <- data.frame(files = files, position = position)
  files <- df_position[df_position$position == yearmonth, ]
  files <- as.character(files[[1]])

  # Load billing data (everything as character; coerced below)
  print("Cargando data de facturacion")
  facturacion <- fread(
    paste0(external_path, "/FacturacionTC/", files),
    colClasses = "character",
    na.strings = c("", "NA")
  )

  # Lower-case the column names
  names(facturacion) <- tolower(names(facturacion))

  # Confirm the required variables are present
  meta <- os.path.join(meta_path, "facturacion/meta.csv") %>% fread()
  if (sum(meta$variables %!in% names(facturacion)) > 0) {
    print("Facturacion no contiene alguna de las variables necesarias")
  }

  # Keep only the variables listed in the metadata
  facturacion <- facturacion[, mget(meta$variables)]
  rm(meta)
  gc()

  # Drop rows whose quantity field is the "**" placeholder
  facturacion <- facturacion[!(cant == "**")]

  # Numeric conversions
  num_vars <- c("plazo", "cant", "valor")
  facturacion[, (num_vars) := lapply(.SD, as.numeric), .SDcols = num_vars]
  print(paste("La tabla de facturacion tiene", nrow(facturacion), "columnas"))

  # Factor conversions
  fact_vars <- c("clave", "tipo")
  facturacion[, (fact_vars) := lapply(.SD, as.factor), .SDcols = fact_vars]
  rm(fact_vars)
  gc()

  # Rename variables
  setnames(facturacion, old = "codin", new = "cod_int")
  setnames(facturacion, old = "plazo", new = "cuotas")

  # Load the MCC dictionary (merchant-category info for the transactions)
  dicc_mcc <- as.data.table(read_xlsx(path = paste0(dictionary_path, "/MCC.xlsx"), col_names = TRUE))
  names(dicc_mcc) <- tolower(names(dicc_mcc))
  mcc_vars <- c("mcc", "desc_mcc")
  dicc_mcc[, (mcc_vars) := lapply(.SD, as.factor), .SDcols = mcc_vars]

  # Report how many mcc codes do not match the dictionary
  no_cruce <- facturacion[mcc %!in% dicc_mcc$mcc]
  if (nrow(no_cruce) > 0) {
    print(paste("No cruzan", nrow(no_cruce), "filas de la(s) categoría(s)",
                unique(no_cruce[, clave]), "de tipo", unique(no_cruce[, tipo])))
  }

  # Join billing with the mcc dictionary
  fac <- merge(facturacion, dicc_mcc, by = "mcc", all.x = TRUE)
  # BUGFIX: the original `facturacion[is.na(desc_mcc) := ...]` is invalid
  # data.table syntax (`:=` inside `i`) and targeted the wrong table —
  # `desc_mcc` only exists in the merged `fac`. Also guarded the scalar
  # `if`, which errored whenever `no_cruce` had 0 or >1 rows.
  if (nrow(no_cruce) > 0 &&
      all(no_cruce$clave == "Compras" & no_cruce$tipo == "INTERNACIONAL")) {
    fac[is.na(desc_mcc), desc_mcc := "Compras Internacionales"]
  }

  ################ Variable-creation stage #################
  # Required variables: purchase value (mean, median, mode); installments
  # (mean, median, mode); number of transactions; portfolio-purchase dummy;
  # number of national purchases; max installments; number of
  # international purchases.

  # Statistical mode helper (renamed so it no longer shadows base::mode)
  stat_mode <- function(x) {
    ux <- unique(x)
    ux[which.max(tabulate(match(x, ux)))]
  }

  # Portfolio-purchase dummy (Ccartera), national and international
  # purchase dummies (logical comparison coerced to 0/1 directly instead of
  # the original ifelse("1","0") + as.numeric round trip — same result)
  facturacion[, dummy_ccartera := as.numeric(clave == "Ccartera")]
  facturacion[, dummy_comp_nal := as.numeric(clave == "Compras" & tipo == "NACIONAL")]
  facturacion[, dummy_comp_int := as.numeric(clave == "Compras" & tipo == "INTERNACIONAL")]

  print("Creando tabla de facturacion con variables necesarias.")
  fac_tc <- facturacion[, .(
    trx = sum(cant),                       # number of transactions
    vlr_pro = mean(valor),                 # mean purchase value
    vlr_median = median(valor),            # median purchase value
    vlr_mode = stat_mode(valor),           # modal purchase value
    cuotas_pro = mean(cuotas),             # mean installments
    cuotas_median = median(cuotas),        # median installments
    cuotas_mode = stat_mode(cuotas),       # modal installments
    max_cuota = max(cuotas),               # maximum installments
    dummy_ccartera = sum(dummy_ccartera),  # count here; dummified below
    total_comp_nal = sum(dummy_comp_nal),  # national purchases
    total_comp_int = sum(dummy_comp_int)   # international purchases
  ), by = .(cod_int)][order(cod_int, -trx)]
  print(paste("Con respecto a la tabla de facturacion original, al agrupar por codigo interno quedan",
              nrow(fac_tc), "filas"))

  # Collapse the portfolio-purchase count into a 0/1 dummy per client
  fac_tc[, dummy_ccartera := ifelse(dummy_ccartera > 0, "1", "0")]

  # BUGFIX: the original ran two identical bare ifelse() duplicate checks
  # whose results were silently discarded; report the (single) check.
  print(ifelse((fac_tc[duplicated(cod_int)] %>% nrow()) == 0,
               "No hay duplicados en la tabla final. Todo good.",
               "Hay duplicados en la tabla final :("))

  #############################################################################
  ## Save data (local renamed from `save`, which shadowed base::save)
  save_name <- paste0("fact_", position[position == yearmonth])
  saveRDS(fac_tc, file = os.path.join(staging_path, paste0(save_name, ".rds")))
  print("Proceso finalizado")
}
/scripts/table_creation/fac_staging_maker.R
permissive
carolinart/churn-model
R
false
false
6,650
r
###################### Función #################### # Cleaning and treatment of original bases to create month staging table #' @param original_path : path field where original base_list[[i]] places #' @param staging_path : path field where staging base_list[[i]] places #' @return : staging table #' @param yearmonth : yyyymm fac_staging_maker <- function(yearmonth, original_path, staging_path) { #Leer archivos formato csv y extraer caracteres numéricos files <- list.files(path = paste0(external_path, "/FacturacionTC"), pattern = ".csv") position <- paste0(sapply(str_extract_all(files, "[0-9]+"), "[[", 1)) %>% as.numeric # comparing cut months from user to master files created if (FALSE %in% (yearmonth %in% position)) { stop("Does not exist table for some of the months specified") } df_position <- data.frame(files = files, position = position) files <- df_position[df_position$position == yearmonth,] files <- as.character(files[[1]]) #Leer datos print("Cargando data de facturacion") facturacion <- fread( paste0(external_path, "/FacturacionTC/", files), colClasses = "character", na.strings = c("", "NA") ) #Reducir tamaño names names(facturacion) <- tolower(names(facturacion)) #Confirmar variables meta <- os.path.join(meta_path, "facturacion/meta.csv") %>% fread() if (sum(meta$variables %!in% names(facturacion)) > 0) { print("Facturacion no contiene alguna de las variables necesarias") } #Seleccionar variables con metadata facturacion <- facturacion[, mget(meta$variables)] rm(meta) gc() facturacion <- facturacion[!(cant == "**")] #Conversion variables numericas num_vars <- c("plazo", "cant", "valor") facturacion[, (num_vars) := lapply(.SD, as.numeric), .SDcols = num_vars] print(paste("La tabla de facturacion tiene", nrow(facturacion),"columnas")) #Conversion de variables factor fact_vars <- c("clave", "tipo") facturacion[, (fact_vars) := lapply(.SD, as.factor), .SDcols = fact_vars] rm(fact_vars) gc() #Cambio nombres de variables setnames(facturacion, old = "codin", new 
= "cod_int") setnames(facturacion, old = "plazo", new = "cuotas") #Cargar diccionario MCC (contiene info sobre los codigos de comercio de las trx) dicc_mcc <- as.data.table(read_xlsx(path = paste0(dictionary_path, "/MCC.xlsx"), col_names = T)) names(dicc_mcc) <- tolower(names(dicc_mcc)) #Conversion tipo de variables mcc_vars <- c("mcc", "desc_mcc") dicc_mcc[, (mcc_vars) := lapply(.SD, as.factor), .SDcols = mcc_vars] #Ver cuantos mcc no cruzan con facturacion no_cruce <- facturacion[mcc %!in% dicc_mcc$mcc] if((nrow(no_cruce)>0)==TRUE){print(paste("No cruzan", nrow(no_cruce), "filas de la(s) categoría(s)", unique(no_cruce[, clave]), "de tipo", unique(no_cruce[, tipo])))} #Union de facturacion con el diccionario mcc fac <- merge(facturacion, dicc_mcc, by = "mcc", all.x = T) if(no_cruce[, clave] == "Compras" & no_cruce[, tipo] == "INTERNACIONAL"){facturacion[is.na(desc_mcc) := "Compras Internacionales"]} ################ Etapa de creacion de variables ################# #' Se requiere la creacion de las siguientes variables: #' Valores de compra: (media, mediana y moda) #' Cuotas: (media, mediana y moda) #' Numero de transacciones #' Dummy de compra de cartera #' Numero de compras nacionales #' Maxima cuota de compras nacionales #' Numero de compras internacionales ##Funcion para hallar la moda #' @usage mode(x) mode <- function(x) { ux <- unique(x) ux[which.max(tabulate(match(x, ux)))] } ##'Crear dummy de compra de cartera. 
Para ello en la tabla que se crea a continuacion se debe hacer la suma de compra ##'de cartera y luego crear la dummy facturacion[, dummy_ccartera := ifelse(clave == "Ccartera", "1", "0")] facturacion$dummy_ccartera <- as.numeric(facturacion$dummy_ccartera) ##Creacion variables numero de compras nacionales #1) Crear dummy de compra nacional facturacion[, dummy_comp_nal := ifelse(clave == "Compras" & tipo == "NACIONAL", "1", "0")] facturacion$dummy_comp_nal <- as.numeric(facturacion$dummy_comp_nal) ##Creacion variable numero de compras internacionales #1) Crear dummy de compras internacionales facturacion[, dummy_comp_int := ifelse(clave== "Compras" & tipo == "INTERNACIONAL", "1", "0")] facturacion$dummy_comp_int <- as.numeric(facturacion$dummy_comp_int) print("Creando tabla de facturacion con variables necesarias.") fac_tc <- facturacion[, .( ##Numero de transacciones trx = sum(cant), ##Promedio del valor vlr_pro = mean(valor), ##Mediana del valor vlr_median = median(valor), ##Moda del valor vlr_mode = mode(valor), ##Promedio de cuotas cuotas_pro = mean(cuotas), ##Mediana de las cuotas cuotas_median = median(cuotas), ##Moda de las cuotas cuotas_mode = mode(cuotas), ##Maxima cuota max_cuota = max(cuotas), ##Añadir dummy de compra de cartera dummy_ccartera = sum(dummy_ccartera), ##Numero de compras nacionales total_comp_nal = sum(dummy_comp_nal), ##Numero de compras internacionales total_comp_int = sum(dummy_comp_int) ), by = .(cod_int)][order(cod_int,-trx)] print(paste("Con respecto a la tabla de facturacion original, al agrupar por codigo interno quedan", nrow(fac_tc), "filas")) ##Crear dummy de compra de cartera fac_tc[, dummy_ccartera := ifelse(dummy_ccartera > 0, "1", "0")] #Verificar que no haya duplicados de codigo interno ifelse((fac_tc[duplicated(cod_int)] %>% nrow()) == 0, "No hay duplicados en la version 1 de la tabla final. 
Todo good.", "Hay duplicados en la version 1 de la tabla final :(") #Comprobar que no haya duplicados ifelse((fac_tc[duplicated(cod_int)] %>% nrow()) == 0, "No hay duplicados en la tabla final. Todo good.", "Hay duplicados en la tabla final :(") ############################################################################# ##Save data save <- list(paste0("fact_", position[position == yearmonth])) saveRDS((fac_tc), file = os.path.join(staging_path, paste0(save, ".rds"))) print("Proceso finalizado") }
# Pie-chart demo: open the help page and run the built-in examples
?pie
example(pie)

# NCKU full-time faculty statistics, academic year 105 (ROC calendar)
# Data source: HR office faculty/staff headcount table
t1 <- c(644, 379, 267, 28, 26)
pie(t1)

# Label the slices (Professor, Associate Prof., Assistant Prof., Lecturer,
# Other); labels are kept in the original Chinese
names(t1) <- c("教授", "副教授", "助理教授", "講師", "其他")
pie(t1)
pie(t1, clockwise=TRUE)

# Proportions, then percentages rounded to 2 dp, then "%"-suffixed labels
# (outer parentheses make each assignment print its value)
(t2 <- t1/sum(t1))
(t2 <- round(t1/sum(t1) * 100, 2))
(t2 <- paste0(round(t1/sum(t1) * 100, 2), "%"))

# Final chart: percentage labels, custom colors, legend of faculty ranks
pie(t1, labels=t2, col=c("darkgoldenrod1", "darkolivegreen2", "salmon", "cadetblue2", "brown2"))
legend("topleft", names(t1), cex=0.7, fill=c("darkgoldenrod1", "darkolivegreen2", "salmon", "cadetblue2", "brown2"))
/12/ex5-1-5_pie.R
no_license
harute931507/R_practice
R
false
false
624
r
?pie example(pie) # 成大 105 學年度專任教師統計資料 # 資料來源: 人事室教職員人數統計表 t1 <- c(644, 379, 267, 28, 26) pie(t1) names(t1) <- c("教授", "副教授", "助理教授", "講師", "其他") pie(t1) pie(t1, clockwise=TRUE) (t2 <- t1/sum(t1)) (t2 <- round(t1/sum(t1) * 100, 2)) (t2 <- paste0(round(t1/sum(t1) * 100, 2), "%")) pie(t1, labels=t2, col=c("darkgoldenrod1", "darkolivegreen2", "salmon", "cadetblue2", "brown2")) legend("topleft", names(t1), cex=0.7, fill=c("darkgoldenrod1", "darkolivegreen2", "salmon", "cadetblue2", "brown2"))
# Read the fixed vocabulary list from vocab.txt and return the words.
# (R port of the Octave/MATLAB course helper — the "cell array" here is a
# plain character vector.)
#
# Returns:
#   character vector of vocabulary words, in file order.
getVocabList <- function() {
  #GETVOCABLIST reads the fixed vocabulary list in vocab.txt and returns a
  #cell array of the words
  #   vocabList <- GETVOCABLIST() reads the fixed vocabulary list in vocab.txt
  #   and returns a cell array of the words in vocabList.

  ## Read the fixed vocabulary list: vocab.txt is tab-separated, with the
  ## word in the second column; read relative to the working directory.
  vocabList <- read.table('vocab.txt',sep = '\t',stringsAsFactors = FALSE)[,2]
  vocabList
}
/starter/6/getVocabList.R
no_license
faridcher/ml-course
R
false
false
406
r
getVocabList <- function() { #GETVOCABLIST reads the fixed vocabulary list in vocab.txt and returns a #cell array of the words # vocabList <- GETVOCABLIST() reads the fixed vocabulary list in vocab.txt # and returns a cell array of the words in vocabList. ## Read the fixed vocabulary list vocabList <- read.table('vocab.txt',sep = '\t',stringsAsFactors = FALSE)[,2] vocabList }
# Install libpq (for RPostgreSQL) # On Mac OS X (using Homebrew) # $ brew install postgresql # On Debian / Ubuntu # $ sudo apt-get install libpq-dev # On Redhat / CentOS # $ sudo yum install postgresql-devel # On Windows: This step is not needed # RPostgreSQL # On platforms other than Windows install.packages("RPostgreSQL", type="source") # On Windows install.packages("RPostgreSQL") install.packages(c("dplyr", "PivotalR", "MonetDB.R", "scidb"))
/book/packt/R.High.Performance.Programming/chapter-09/09-00_install_packages.R
permissive
xenron/sandbox-da-r
R
false
false
450
r
# Install libpq (for RPostgreSQL) # On Mac OS X (using Homebrew) # $ brew install postgresql # On Debian / Ubuntu # $ sudo apt-get install libpq-dev # On Redhat / CentOS # $ sudo yum install postgresql-devel # On Windows: This step is not needed # RPostgreSQL # On platforms other than Windows install.packages("RPostgreSQL", type="source") # On Windows install.packages("RPostgreSQL") install.packages(c("dplyr", "PivotalR", "MonetDB.R", "scidb"))
## This pair of functions will help to cache the inverse of a matrix ## makeCacheMatrix is a that returns a list of functions for set, get, setinverse, and getinverse makeCacheMatrix <- function(x = matrix()) { i <- NULL set <- function(y) { x <<- y i <<- NULL } get <- function() x setinverse <- function(inverse) i <<- inverse getinverse <- function() i list(set = set, get = get, setinverse = setinverse, getinverse = getinverse) } ## cacheSolve will return a matrix that is the inverse of 'x' ## If the inverse is cached, it will return it, ## otherwise it will calculate the inverse, cache it, and return it. cacheSolve <- function(x, ...) { i <- x$getinverse() ## if i is not null, it will return the cached data if(!is.null(i)) { message("getting cached data") return(i) } ## if i is not cached, it will set the inverse and return it data <- x$get() i <- solve(data, ...) x$setinverse(i) i }
/cachematrix.R
no_license
rainbowsaurus/ProgrammingAssignment2
R
false
false
1,019
r
## This pair of functions will help to cache the inverse of a matrix ## makeCacheMatrix is a that returns a list of functions for set, get, setinverse, and getinverse makeCacheMatrix <- function(x = matrix()) { i <- NULL set <- function(y) { x <<- y i <<- NULL } get <- function() x setinverse <- function(inverse) i <<- inverse getinverse <- function() i list(set = set, get = get, setinverse = setinverse, getinverse = getinverse) } ## cacheSolve will return a matrix that is the inverse of 'x' ## If the inverse is cached, it will return it, ## otherwise it will calculate the inverse, cache it, and return it. cacheSolve <- function(x, ...) { i <- x$getinverse() ## if i is not null, it will return the cached data if(!is.null(i)) { message("getting cached data") return(i) } ## if i is not cached, it will set the inverse and return it data <- x$get() i <- solve(data, ...) x$setinverse(i) i }
#### Archivo cloud.txt #### cloud <- read.table(file = "cloud.txt", header = T) colnames(cloud) str(cloud) class(cloud) cloud$Ispc cloud$Cloudpt #### Histograma #### help(hist) hist(cloud$Cloudpt) h_cloudpt <- hist(cloud$Cloudpt) h_cloudpt$breaks h_cloudpt$counts range(cloud$Cloudpt) b1 <- 20:34 help(seq) b2 <- seq(20, 34, 1) hist(cloud$Cloudpt , breaks = b1) #### boxplot #### help("boxplot") boxplot(cloud$Cloudpt) bp_cloudpt <- boxplot(cloud$Cloudpt)
/Ejemplo06.R
no_license
azchr4ds/Clase2R4DS
R
false
false
472
r
#### Archivo cloud.txt #### cloud <- read.table(file = "cloud.txt", header = T) colnames(cloud) str(cloud) class(cloud) cloud$Ispc cloud$Cloudpt #### Histograma #### help(hist) hist(cloud$Cloudpt) h_cloudpt <- hist(cloud$Cloudpt) h_cloudpt$breaks h_cloudpt$counts range(cloud$Cloudpt) b1 <- 20:34 help(seq) b2 <- seq(20, 34, 1) hist(cloud$Cloudpt , breaks = b1) #### boxplot #### help("boxplot") boxplot(cloud$Cloudpt) bp_cloudpt <- boxplot(cloud$Cloudpt)
######## # Author: Zane Wolf # Date Created: 9/25/2017 # Purpose: To explore and clean NYC Crime data file. # Transformed some categorical variables to indicator variables # Filled in or deleted mising Location data # Organized and parsed time variables, added indicator variables # Filled in missing Crime Data # Date Last Updated: 11/18/2017 ######## #################################################################################################################################################### # HOUSEKEEPING #################################################################################################################################################### rm(list=ls()) options(stringsAsFactors = FALSE) setwd("C:/Users/zane/Documents/School/Classes/Harvard/OEB 201 Experimental Design and Stats/NYC_Crime") # libraries library(tidyverse) #includes ggplot2, tibble, tidyr, readr, purrr, dplyr library(plyr) library(stringr) library(lubridate) #################################################################################################################################################### # IMPORT #################################################################################################################################################### nyc <- read_csv("NYPD_Complaint_Data_Historic.csv") View(nyc) headers <- names(nyc) #################################################################################################################################################### # DATA CLEANING #################################################################################################################################################### #select for Completed Crimes as.data.frame(table(nyc$CRM_ATPT_CPTD_CD)) #find out just how many were attempted....negligible, ~90,000 out of 5.5million unique(nyc$CRM_ATPT_CPTD_CD) #Completed, attempted, NA nyc <- subset(nyc, CRM_ATPT_CPTD_CD=="COMPLETED") # delete the following columns: CMPLNT_TO_DT, CMPLNT_TO_TM, RPT_DT, X_COORD_CD, 
Y_COORD_CD, # and Lat_Lon bad_vars <- names(nyc) %in% c("CRM_ATPT_CPTD_CD", "ADDR_PCT_CD", "CMPLNT_NUM", "CMPLNT_TO_DT", "CMPLNT_TO_TM", "RPT_DT", "X_COORD_CD", "Y_COORD_CD", "Lat_Lon") nyc <- nyc[!bad_vars] ######################################################### INDICATOR VARIABLES ################################################################# #Reduce parks to binary #if it occurred in a park, doesn't matter which park, -> 1 #if it did not occur in a park -> 0 nyc$PARKS_NM[is.na(nyc$PARKS_NM)] <- 0 nyc$PARKS_NM[nyc$PARKS_NM!=0] <- 1 #Reduce housing developments to binary #if it occurred in a housing development -> 1 #if it did not -> 0 nyc$HADEVELOPT[is.na(nyc$HADEVELOPT)] <- 0 nyc$HADEVELOPT[nyc$HADEVELOPT!=0] <- 1 #Reduce jurisdiction to binary #if NY Police Department -> 1 #if any other department -> 0 # unique(nyc$JURIS_DESC) # [1] "1" "N.Y. HOUSING POLICE" "N.Y. TRANSIT POLICE" # [4] "N.Y. STATE POLICE" "DEPT OF CORRECTIONS" "TRI-BORO BRDG TUNNL" # [7] "OTHER" "PORT AUTHORITY" "NYC PARKS" # [10] "HEALTH & HOSP CORP" "METRO NORTH" "LONG ISLAND RAILRD" # [13] "STATN IS RAPID TRANS" "N.Y. STATE PARKS" "U.S. PARK POLICE" # [16] "NEW YORK CITY SHERIFF OFFICE" "NYS DEPT TAX AND FINANCE" "AMTRACK" # [19] "CONRAIL" "POLICE DEPT NYC" "FIRE DEPT (FIRE MARSHAL)" # [22] "NYC DEPT ENVIRONMENTAL PROTECTION" "SEA GATE POLICE DEPT" "DISTRICT ATTORNEY OFFICE" # [25] "NYS DEPT ENVIRONMENTAL CONSERVATION" nyc$JURIS_DESC[nyc$JURIS_DESC=='N.Y. 
POLICE DEPT'] <- 1 nyc$JURIS_DESC[nyc$JURIS_DESC!=1] <- 0 ######################################################### LOCATION ##################################################################### # ~~~~~~~~~~~~~~~~~~~~~ MISSING DATA ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #Reduce location of occurance description to binary #if inside -> 1 #if anything else -> 0 # as.data.frame(table(nyc$LOC_OF_OCCUR_DESC)) # Var1 Freq # 1 FRONT OF 1276501 # 2 INSIDE 2731916 # 3 OPPOSITE OF 151301 # 4 OUTSIDE 2962 # 5 REAR OF 118900 sum(is.na(nyc$BORO_NM)) #75 nyc <- nyc[!with(nyc, is.na(nyc$BORO_NM) | is.na(nyc$CMPLNT_FR_TM)),] #define all othe others nyc$LOC_OF_OCCUR_DESC[nyc$LOC_OF_OCCUR_DESC!='INSIDE'] <- 0 nyc$LOC_OF_OCCUR_DESC[nyc$LOC_OF_OCCUR_DESC=='INSIDE'] <- 1 #there are still a bunch of NAs...277,996 to be exact sum(is.na(nyc$LOC_OF_OCCUR_DESC)) #based on first 100 entries, I'm setting anything with a premise description as Street, Park/playground, or Gas Station as outside nyc$LOC_OF_OCCUR_DESC[nyc$PREM_TYP_DESC=='STREET'] <- 0 nyc$LOC_OF_OCCUR_DESC[nyc$PREM_TYP_DESC=='PARK/PLAYGROUND'] <- 0 nyc$LOC_OF_OCCUR_DESC[nyc$PREM_TYP_DESC=='GAS STATION'] <- 0 sum(is.na(nyc$LOC_OF_OCCUR_DESC)) #which type of premise descriptions have NA as location description.... unique(nyc$PREM_TYP_DESC[is.na(nyc$LOC_OF_OCCUR_DESC)]) #interesting, there are some that have NA for both...how many? sum(is.na(nyc$LOC_OF_OCCUR_DESC)&is.na(nyc$PREM_TYP_DESC)) #...5,897 #are they all from the same borrow? as.data.frame(table(nyc$BORO_NM[(is.na(nyc$LOC_OF_OCCUR_DESC)&is.na(nyc$PREM_TYP_DESC))])) #they seem to be pretty evenly distributed, except for staten island. as.data.frame(table(nyc$BORO_NM)) #but staten island has a lower amount of crimes reported anyway, so having a reduced number of missing location seems to be in proportion #since part of my goal is to model based off of location, crime reports without locational description data is rather useless. 
#I'm going to go ahead and delete the 5,897 incidences without that data nyc <- nyc[!with(nyc,is.na(nyc$LOC_OF_OCCUR_DESC) & is.na(nyc$PREM_TYP_DESC)),] #now back to the others... na_premises <- unique(nyc$PREM_TYP_DESC[is.na(nyc$LOC_OF_OCCUR_DESC)]) # [1] "OTHER" "TRANSIT - NYC SUBWAY" "RESIDENCE - APT. HOUSE" "BUS STOP" # [5] "GROCERY/BODEGA" "TUNNEL" "RESIDENCE-HOUSE" "BRIDGE" # [9] "AIRPORT TERMINAL" "PUBLIC BUILDING" "FOOD SUPERMARKET" "BUS (NYC TRANSIT)" # [13] "OPEN AREAS (OPEN LOTS)" "PARKING LOT/GARAGE (PUBLIC)" "PARKING LOT/GARAGE (PRIVATE)" "HIGHWAY/PARKWAY" # [17] "DRY CLEANER/LAUNDRY" "HOTEL/MOTEL" "CLOTHING/BOUTIQUE" "STORAGE FACILITY" # [21] "COMMERCIAL BUILDING" "BAR/NIGHT CLUB" "CONSTRUCTION SITE" "FAST FOOD" # [25] "BANK" "CHAIN STORE" "TAXI (LIVERY LICENSED)" "HOSPITAL" # [29] "SMALL MERCHANT" "TAXI (YELLOW LICENSED)" "TAXI/LIVERY (UNLICENSED)" "TRANSIT FACILITY (OTHER)" # [33] "BUS TERMINAL" "PUBLIC SCHOOL" "BUS (OTHER)" "RESTAURANT/DINER" # [37] "BEAUTY & NAIL SALON" "MARINA/PIER" NA "RESIDENCE - PUBLIC HOUSING" # [41] "DEPARTMENT STORE" "CANDY STORE" "TELECOMM. STORE" "STORE UNCLASSIFIED" # [45] "DRUG STORE" "GYM/FITNESS FACILITY" "CHURCH" "BOOK/CARD" # [49] "CHECK CASHING BUSINESS" "ABANDONED BUILDING" "SYNAGOGUE" "LIQUOR STORE" # [53] "OTHER HOUSE OF WORSHIP" "DOCTOR/DENTIST OFFICE" "FACTORY/WAREHOUSE" "ATM" # [57] "PRIVATE/PAROCHIAL SCHOOL" "CEMETERY" "JEWELRY" "SOCIAL CLUB/POLICY" # [61] "VARIETY STORE" "TRAMWAY" "FERRY/FERRY TERMINAL" "PHOTO/COPY" # [65] "VIDEO STORE" "SHOE" "MOSQUE" "LOAN COMPANY" #going to set the following as outside.... na_prem_outside <- na_premises[c(2,4,6,8,12:16,23,27,29:33,35,38,55,57,61,62)] na_premises[c(2,4,6,8,12:16,23,27,29:33,35,38,55,57,61,62)] nyc$LOC_OF_OCCUR_DESC[with(nyc, nyc$PREM_TYP_DESC %in% na_prem_outside & is.na(nyc$LOC_OF_OCCUR_DESC))] <- 0 # #set these as the inside... 
na_prem_inside <- na_premises[c(3,5,7,9:11,17:22,24:26,28,34,36,37,39:54,56,58:60,63,64,66,67)] na_prem_inside nyc$LOC_OF_OCCUR_DESC[with(nyc, nyc$PREM_TYP_DESC %in% na_prem_inside & is.na(nyc$LOC_OF_OCCUR_DESC))] <- 1 # #what's left....hmm, Just 'SHOE' and 'OTHER'....what the hell type is shoe? maybe shoe store?? unique(nyc$PREM_TYP_DESC[is.na(nyc$LOC_OF_OCCUR_DESC)]) as.data.frame(table(nyc$PREM_TYP_DESC[is.na(nyc$LOC_OF_OCCUR_DESC)])) #set 'SHOE' to inside....why not, only a couple hundred of them nyc$LOC_OF_OCCUR_DESC[nyc$PREM_TYP_DESC=='SHOE'] <- 1 #so what happens at Other?? as.data.frame(table(nyc$PD_DESC[is.na(nyc$LOC_OF_OCCUR_DESC)])) # a freaking lot # lets just set it all to outside, the crimes with high frequencies (>500 occurrences) are ones that occur in 'open areas' #--makes sense, nondescript open area as 'other' nyc$LOC_OF_OCCUR_DESC[nyc$PREM_TYP_DESC=='OTHER'] <- 1 #no more 'missing' data and all locations set to either inside (1) or outside (0)...whew. ############################################################ TIME ####################################################################### #Time for Time #want to bin time into 1-hour segments #divide Hour:Minute:Second into three separate columns nyc$Time <- nyc$CMPLNT_FR_TM library(chron) nyc$Time2 <- 60 * 24 * as.numeric(times(nyc$Time)) nyc <- separate(nyc, CMPLNT_FR_TM, sep= ":", into=c("Hour", "Minute", "Second"), fill='right', remove=FALSE) #separate date, similarly, into Month, Day, Year nyc <- separate(nyc, CMPLNT_FR_DT, sep= "/", into=c("Month", "Day", "Year"), fill='right', remove=FALSE) as.data.frame(table(nyc$Year)) #DATA DELETION #this database was supposed to be 2006-2016, but there are years here from 1905 and 1015 (prob a typo). Gonna delete the few thousand from before 2006 #2005 is also a little skewed, though. Even though it has 10,000+ events, all the other years have nearly half a million data points. 
yearsIWant <- c("2006","2007","2008","2009","2010","2011","2012","2013","2014","2015","2016") nyc <- subset(nyc, nyc$Year %in% yearsIWant) nyc <- subset(nyc, !nyc$Day=="01") unique(nyc$Day) #create month-day column for use in holiday determination nyc$MonthDay <- paste( nyc$Month, nyc$Day, sep="-" ) #deal with date...convert to standard format nyc$Date <- as.Date(nyc$CMPLNT_FR_DT, "%m/%d/%Y") #find out day of week nyc$DayName <- weekdays(as.Date(nyc$Date)) #find out weekday or weekend daysoftheweek <- c("Monday", "Tuesday", "Wednesday", "Thursday", "Friday") daysoftheweekend <- c("Saturday", "Sunday") #create indicator variable, where 0 is a weekday and 1 is a weekend. nyc$Weekend <- as.integer(nyc$DayName %in% daysoftheweekend) #some holidays are always the same day holidays_list <- c("01-01", "02-14", "07-04", "09-04", "10-31", "12-25" ) easter_list <- c("2006-04-23", "2007-04-08", "2008-03-27", "2009-04-19", "2010-04-04", "2011-04-24", "2012-04-15", "2013-05-05", "2014-04-20", "2015-04-12") thanksgiving_list <- c("2005-11-24", "2006-11-23", "2007-11-22", "2008-11-27", "2009-11-26", "2010-11-25", "2011-11-24", "2012-11-22", "2013-11-28", "2014-11-27", "2015-11-26", "2016-11-24") #create Holiday indicator variable, 0 if not holiday, 1 if it matches any of the holidays specified above nyc$Holiday <- as.integer(nyc$MonthDay %in% holidays_list, nyc$CMPLNT_FR_DT %in% easter_list, nyc$CMPLNT_FR_DT %in% thanksgiving_list) # > as.data.frame(table(nyc$Holiday)) # Var1 Freq # 1 0 5375860 # 2 1 93413 ############################################################ CRIME ####################################################################### unique(nyc$OFNS_DESC) #...71 different classifiers unique(nyc$PD_DESC) #...410 different classifiers unique(nyc$LAW_CAT_CD) #...3 different classifiers #well, 71 is a lot better than 410... #I'm not sure there's anything between 3 and 71 without loosing a lot of data. 71 will have to do. 
# so I guess I'm looking at a hierarchical (between boros) multinomial (unordered categorial crime type) model? # ~~~~~~~~~~~~~~~~~~~~~ MISSING DATA ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #oh look...NAs. Yay. # sum(is.na(nyc$OFNS_DESC)) # as.data.frame(table(nyc$PD_DESC[is.na(nyc$OFNS_DESC)])) #for the 56 cases where the OFNS_DESC is NA but the PD_DESC is not, I could substitute the PD_DESC for the OFNS_DESC #...or I could go through and find previous incidences where the PD_DESC is the same OFNS_DESC is not NA, and substitute that OFNS_DESC for the NA #probably the better way to go....but probs a lot more involved. Ugh. #solution....this takes a bit. Just FYI. #Feel free to delete the message(i) lines if you don't want to see a bazilion numbers on your screen #I use it as a progress measurement, but that's just me for (i in 1:nrow(nyc)){ if (is.na(nyc$OFNS_DESC[i])){ crimetype=nyc$PD_DESC[i] othercrimetypes=unique(nyc$OFNS_DESC[nyc$PD_DESC==crimetype]) if (length(othercrimetypes)==2){ nyc$OFNS_DESC[i] <- othercrimetypes[2] message(i) } else if (is.na(othercrimetypes)){ nyc$OFNS_DESC[i] <- nyc$PD_DESC[i] message(i) } } } # sum(is.na(nyc$OFNS_DESC)) # # #no more OFNS_DESC NAs. Yay! 
#reorder the columns to put similar variables together #don't care about Minute or Second, so not including those #nixing PD_DESC since I'm going to use OFNS_DESC as my response variable nyc_clean <- nyc[,c("CMPLNT_FR_DT", "Time2","Date", "Month", "Day", "Year", "MonthDay", "Holiday", "DayName", "Weekend", "CMPLNT_FR_TM", "Hour", "OFNS_DESC", "LAW_CAT_CD", "JURIS_DESC", "BORO_NM", "LOC_OF_OCCUR_DESC", "PARKS_NM", "HADEVELOPT")] #################################################################################################################################################### # CRIME CONDENSATION #################################################################################################################################################### #need to par 72 crime types down to something manageable. #get rid of boring crimes useless_crimes <- c("ABORTION", "AGRICULTURE & MRKTS LAW-UNCLASSIFIED", "ALCOHOLIC BEVERAGE CONTROL LAW", "ANTICIPATORY OFFENSES", "CHILD ABANDONMENT/NON SUPPORT", "DISORDERLY CONDUCT", "DISRUPTION OF A RELIGIOUS SERV", "ENDAN WELFARE INCOMP", "ESCAPE 3", "FORTUNE TELLING", "GAMBLING", "JOSTLING", "NEW YORK CITY HEALTH CODE", "NYS LAWS-UNCLASSIFIED FELONY", "NYS LAWS-UNCLASSIFIED VIOLATION", "OTHER STATE LAWS", "OTHER STATE LAWS (NON PENAL LA", "OTHER STATE LAWS (NON PENAL LAW)", "OTHER TRAFFIC INFRACTION", "PROSTITUTION & RELATED OFFENSES", "THEFT,RELATED OFFENSES,UNCLASS", "UNDER THE INFLUENCE OF DRUGS", "UNLAWFUL POSS. WEAP. ON SCHOOL") nyc_2 <- subset(nyc_clean, !nyc_clean$OFNS_DESC %in% useless_crimes) #Combine similar crimes #So I originally did this with a for loop and it ran for DAYS. Not joking. I'm never running a for loop in R ever again if I can help it. #the original for-loop is commented and located at the bottom of this code. #these apply functions literally took about a minute to finish. versus FOUR FREAKING DAYS. Never ever again. Lesson learned. 
var <- "OFNS_DESC" cd_old <- c("UNAUTHORIZED USE OF A VEHICLE", "VEHICLE AND TRAFFIC LAWS") nyc_2[,var] <- sapply(nyc_2[,var],function(x) ifelse(x %in% cd_old, "VEHICLE/TRAFFIC LAWS RELATED",x)) cd_old_2 <- c("HOMICIDE-NEGLIGENT,UNCLASSIFIE", "HOMICIDE-NEGLIGENT-VEHICLE", "MURDER & NON-NEGL. MANSLAUGHTER") nyc_2[,var] <- sapply(nyc_2[,var],function(x) ifelse(x %in% cd_old_2, "MURDER",x)) cd_old_3 <- c("OFFENSES AGAINST PUBLIC ADMINI", "OFF. AGNST PUB ORD SENSBLTY &", "OFFENSES AGAINST MARRIAGE UNCL", "OFFENSES AGAINST PUBLIC SAFETY") nyc_2[,var] <- sapply(nyc_2[,var],function(x) ifelse(x %in% cd_old_3, "MISC. OFFENSES",x)) cd_old_4 <- c("RAPE", "SEX CRIMES") nyc_2[,var] <- sapply(nyc_2[,var],function(x) ifelse(x %in% cd_old_4, "RAPE OR SEX CRIME",x)) cd_old_5 <- c("ADMINISTRATIVE CODES", "ADMINISTRATIVE CODES") nyc_2[,var] <- sapply(nyc_2[,var],function(x) ifelse(x %in% cd_old_5, "MISCELLANEOUS PENAL LAW",x)) cd_old_6 <- c("BURGLAR'S TOOLS", "BURGLARY") nyc_2[,var] <- sapply(nyc_2[,var],function(x) ifelse(x %in% cd_old_6, "BURGLARY RELATED",x)) cd_old_7 <- c("FRAUDS", "FRAUDULENT ACCOSTING", "OFFENSES INVOLVING FRAUD") nyc_2[,var] <- sapply(nyc_2[,var],function(x) ifelse(x %in% cd_old_7, "FRAUD RELATED",x)) cd_old_8 <- c("GRAND LARCENY", "PETIT LARCENY") nyc_2[,var] <- sapply(nyc_2[,var],function(x) ifelse(x %in% cd_old_8, "GRAND/PETIT LARCENY",x)) cd_old_9 <- c("GRAND LARCENY OF MOTOR VEHICLE", "PETIT LARCENY OF MOTOR VEHICLE") nyc_2[,var] <- sapply(nyc_2[,var],function(x) ifelse(x %in% cd_old_9, "VEHICULAR GRAND/PETIT LARCENY",x)) cd_old_10 <- c("INTOXICATED & IMPAIRED DRIVING", "INTOXICATED/IMPAIRED DRIVING") nyc_2[,var] <- sapply(nyc_2[,var],function(x) ifelse(x %in% cd_old_10, "DUI",x)) cd_old_11 <- c("KIDNAPPING", "KIDNAPPING AND RELATED OFFENSES", "KIDNAPPING & RELATED OFFENSES") nyc_2[,var] <- sapply(nyc_2[,var],function(x) ifelse(x %in% cd_old_11, "KIDNAPPING RELATED",x)) cd_old_12 <- c("LOITERING", "LOITERING FOR DRUG PURPOSES", "LOITERING FOR 
PROSTITUTION OR", "LOITERING/DEVIATE SEX", "LOITERING/GAMBLING (CARDS, DIC") nyc_2[,var] <- sapply(nyc_2[,var],function(x) ifelse(x %in% cd_old_12, "LOITERING RELATED",x)) cd_old_13 <- c("OFFENSES AGAINST THE PERSON", "OFFENSES RELATED TO CHILDREN") nyc_2[,var] <- sapply(nyc_2[,var],function(x) ifelse(x %in% cd_old_13, "OFFENSES AGAINST HUMANS",x)) cd_old_14 <- c("OTHER OFFENSES RELATED TO THEF", "THEFT-FRAUD", "THEFT OF SERVICES") nyc_2[,var] <- sapply(nyc_2[,var],function(x) ifelse(x %in% cd_old_14, "THEFT RELATED",x)) as.data.frame(table(nyc_2$OFNS_DESC)) #down to only 26 crimes. Sweet. #narrow it down to modeling differences between Misc. Offenses and criminal mischief #the ratio between these two, which one is more likely, varies by hours and neighborhoods nyc_M <- subset(nyc_2, nyc_2$LAW_CAT_CD =="MISDEMEANOR") #ultimately an unnecessary step, but part of the original thought process nyc_M_small <- subset(nyc_M, nyc_M$OFNS_DESC %in% c("MISC. OFFENSES", "CRIMINAL MISCHIEF & RELATED OF")) nyc_2010 <- subset(nyc_M_small, nyc_M_small$Year==2010) #################################################################################################################################################### # SAVE CLEANED DATA FILE #################################################################################################################################################### write.csv(nyc_2010, "NYPD_Crime_Data_CLEAN_2010.csv") #################################################################################################################################################### # OLD CODE #################################################################################################################################################### #Here is the original for loop: # for (i in 1906650:nrow(nyc_clean)){ # if (nyc_clean$OFNS_DESC[i] == "ADMINISTRATIVE CODE" || nyc_clean$OFNS_DESC == "ADMINISTRATIVE CODES"){ # nyc_clean$OFNS_DESC[i] <- "MISCELLANEOUS PENAL LAW" # message(i) # } 
else if (nyc_clean$OFNS_DESC[i] == "BURGLAR'S TOOLS" || nyc_clean$OFNS_DESC[i] == "BURGLARY"){ # nyc_clean$OFNS_DESC[i] <- "BURGLARY RELATED" # message(i) # } else if (nyc_clean$OFNS_DESC[i] == "FRAUDS" || nyc_clean$OFNS_DESC[i] == "FRAUDULENT ACCOSTING" || nyc_clean$OFNS_DESC[i] == "OFFENSES INVOLVING FRAUD"){ # nyc_clean$OFNS_DESC[i] <- "FRAUD RELATED" # message(i) # } else if (nyc_clean$OFNS_DESC[i] == "GRAND LARCENY" || nyc_clean$OFNS_DESC[i] == "PETIT LARCENY"){ # nyc_clean$OFNS_DESC[i] <- "GRAND/PETIT LARCENY" # message(i) # } else if (nyc_clean$OFNS_DESC[i] == "GRAND LARCENY OF MOTOR VEHICLE" || nyc_clean$OFNS_DESC[i] == "PETIT LARCENY OF MOTOR VEHICLE"){ # nyc_clean$OFNS_DESC[i] <- "VEHICULAR GRAND/PETIT LARCENY" # message(i) # } else if (nyc_clean$OFNS_DESC[i] == "HOMICIDE-NEGLIGENT-VEHICLE" || nyc_clean$OFNS_DESC[i] == "HOMICIDE-NEGLIGENT,UNCLASSIFIED" ||nyc_clean$OFNS_DESC[i] == "MURDER & NON-NEGL. MANSLAUGHTER"){ # nyc_clean$OFNS_DESC[i] <- "MURDER" # message(i) # } else if (nyc_clean$OFNS_DESC[i] == "INTOXICATED & IMPAIRED DRIVING" || nyc_clean$OFNS_DESC[i] == "INTOXICATED/IMPAIRED DRIVING"){ # nyc_clean$OFNS_DESC[i] <- "DUI" # message(i) # } else if (nyc_clean$OFNS_DESC[i] == "KIDNAPPING" || nyc_clean$OFNS_DESC[i] == "KIDNAPPING AND RELATED OFFENSES"|| nyc_clean$OFNS_DESC[i] == "KIDNAPPING & RELATED OFFENSES"){ # nyc_clean$OFNS_DESC[i] <- "KIDNAPPING RELATED" # message(i) # } else if (nyc_clean$OFNS_DESC[i] == "LOITERING" || nyc_clean$OFNS_DESC[i] == "LOITERING FOR DRUG PURPOSES"|| nyc_clean$OFNS_DESC[i] == "LOITERING FOR PROSTITUTION OR"|| nyc_clean$OFNS_DESC[i] == "LOITERING/DEVIATE SEX"|| nyc_clean$OFNS_DESC[i] == "LOITERING/GAMBLING (CARDS, DIC"){ # nyc_clean$OFNS_DESC[i] <- "LOITERING RELATED" # message(i) # } else if (nyc_clean$OFNS_DESC[i] == "OFF. 
AGNST PUB ORD SENSBLTY &" || nyc_clean$OFNS_DESC[i] == "OFFENSES AGAINST MARRIAGE UNCL"|| nyc_clean$OFNS_DESC[i] == "OFFENSES AGINST PUBLIC ADMINI"|| nyc_clean$OFNS_DESC[i] == "OFFENSES AGAINST PUBLIC SAFETY"){ # nyc_clean$OFNS_DESC[i] <- "MISC. OFFENSES" # message(i) # } else if (nyc_clean$OFNS_DESC[i] == "OFFENSES AGAINST THE PERSON" || nyc_clean$OFNS_DESC[i] == "OFFENSES RELATED TO CHILDREN"){ # nyc_clean$OFNS_DESC[i] <- "OFFENSES AGAINST HUMANS" # message(i) # } else if (nyc_clean$OFNS_DESC[i] == "OTHER OFFENSES RELATED TO THEF" || nyc_clean$OFNS_DESC[i] == "THEFT-FRAUD"|| nyc_clean$OFNS_DESC[i] == "THEFT OF SERVICES"){ # nyc_clean$OFNS_DESC[i] <- "THEFT RELATED" # message(i) # } else if (nyc_clean$OFNS_DESC[i] == "RAPE" || nyc_clean$OFNS_DESC[i] == "SEX CRIMES"){ # nyc_clean$OFNS_DESC[i] <- "RAPE OR SEX CRIME" # message(i) # } # }
/finalprojects/Zane/cleaning_NYC_Crime_data.R
no_license
lizzieinclass/oeb201
R
false
false
23,690
r
######## # Author: Zane Wolf # Date Created: 9/25/2017 # Purpose: To explore and clean NYC Crime data file. # Transformed some categorical variables to indicator variables # Filled in or deleted mising Location data # Organized and parsed time variables, added indicator variables # Filled in missing Crime Data # Date Last Updated: 11/18/2017 ######## #################################################################################################################################################### # HOUSEKEEPING #################################################################################################################################################### rm(list=ls()) options(stringsAsFactors = FALSE) setwd("C:/Users/zane/Documents/School/Classes/Harvard/OEB 201 Experimental Design and Stats/NYC_Crime") # libraries library(tidyverse) #includes ggplot2, tibble, tidyr, readr, purrr, dplyr library(plyr) library(stringr) library(lubridate) #################################################################################################################################################### # IMPORT #################################################################################################################################################### nyc <- read_csv("NYPD_Complaint_Data_Historic.csv") View(nyc) headers <- names(nyc) #################################################################################################################################################### # DATA CLEANING #################################################################################################################################################### #select for Completed Crimes as.data.frame(table(nyc$CRM_ATPT_CPTD_CD)) #find out just how many were attempted....negligible, ~90,000 out of 5.5million unique(nyc$CRM_ATPT_CPTD_CD) #Completed, attempted, NA nyc <- subset(nyc, CRM_ATPT_CPTD_CD=="COMPLETED") # delete the following columns: CMPLNT_TO_DT, CMPLNT_TO_TM, RPT_DT, X_COORD_CD, 
Y_COORD_CD, # and Lat_Lon bad_vars <- names(nyc) %in% c("CRM_ATPT_CPTD_CD", "ADDR_PCT_CD", "CMPLNT_NUM", "CMPLNT_TO_DT", "CMPLNT_TO_TM", "RPT_DT", "X_COORD_CD", "Y_COORD_CD", "Lat_Lon") nyc <- nyc[!bad_vars] ######################################################### INDICATOR VARIABLES ################################################################# #Reduce parks to binary #if it occurred in a park, doesn't matter which park, -> 1 #if it did not occur in a park -> 0 nyc$PARKS_NM[is.na(nyc$PARKS_NM)] <- 0 nyc$PARKS_NM[nyc$PARKS_NM!=0] <- 1 #Reduce housing developments to binary #if it occurred in a housing development -> 1 #if it did not -> 0 nyc$HADEVELOPT[is.na(nyc$HADEVELOPT)] <- 0 nyc$HADEVELOPT[nyc$HADEVELOPT!=0] <- 1 #Reduce jurisdiction to binary #if NY Police Department -> 1 #if any other department -> 0 # unique(nyc$JURIS_DESC) # [1] "1" "N.Y. HOUSING POLICE" "N.Y. TRANSIT POLICE" # [4] "N.Y. STATE POLICE" "DEPT OF CORRECTIONS" "TRI-BORO BRDG TUNNL" # [7] "OTHER" "PORT AUTHORITY" "NYC PARKS" # [10] "HEALTH & HOSP CORP" "METRO NORTH" "LONG ISLAND RAILRD" # [13] "STATN IS RAPID TRANS" "N.Y. STATE PARKS" "U.S. PARK POLICE" # [16] "NEW YORK CITY SHERIFF OFFICE" "NYS DEPT TAX AND FINANCE" "AMTRACK" # [19] "CONRAIL" "POLICE DEPT NYC" "FIRE DEPT (FIRE MARSHAL)" # [22] "NYC DEPT ENVIRONMENTAL PROTECTION" "SEA GATE POLICE DEPT" "DISTRICT ATTORNEY OFFICE" # [25] "NYS DEPT ENVIRONMENTAL CONSERVATION" nyc$JURIS_DESC[nyc$JURIS_DESC=='N.Y. 
POLICE DEPT'] <- 1 nyc$JURIS_DESC[nyc$JURIS_DESC!=1] <- 0 ######################################################### LOCATION ##################################################################### # ~~~~~~~~~~~~~~~~~~~~~ MISSING DATA ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #Reduce location of occurance description to binary #if inside -> 1 #if anything else -> 0 # as.data.frame(table(nyc$LOC_OF_OCCUR_DESC)) # Var1 Freq # 1 FRONT OF 1276501 # 2 INSIDE 2731916 # 3 OPPOSITE OF 151301 # 4 OUTSIDE 2962 # 5 REAR OF 118900 sum(is.na(nyc$BORO_NM)) #75 nyc <- nyc[!with(nyc, is.na(nyc$BORO_NM) | is.na(nyc$CMPLNT_FR_TM)),] #define all othe others nyc$LOC_OF_OCCUR_DESC[nyc$LOC_OF_OCCUR_DESC!='INSIDE'] <- 0 nyc$LOC_OF_OCCUR_DESC[nyc$LOC_OF_OCCUR_DESC=='INSIDE'] <- 1 #there are still a bunch of NAs...277,996 to be exact sum(is.na(nyc$LOC_OF_OCCUR_DESC)) #based on first 100 entries, I'm setting anything with a premise description as Street, Park/playground, or Gas Station as outside nyc$LOC_OF_OCCUR_DESC[nyc$PREM_TYP_DESC=='STREET'] <- 0 nyc$LOC_OF_OCCUR_DESC[nyc$PREM_TYP_DESC=='PARK/PLAYGROUND'] <- 0 nyc$LOC_OF_OCCUR_DESC[nyc$PREM_TYP_DESC=='GAS STATION'] <- 0 sum(is.na(nyc$LOC_OF_OCCUR_DESC)) #which type of premise descriptions have NA as location description.... unique(nyc$PREM_TYP_DESC[is.na(nyc$LOC_OF_OCCUR_DESC)]) #interesting, there are some that have NA for both...how many? sum(is.na(nyc$LOC_OF_OCCUR_DESC)&is.na(nyc$PREM_TYP_DESC)) #...5,897 #are they all from the same borrow? as.data.frame(table(nyc$BORO_NM[(is.na(nyc$LOC_OF_OCCUR_DESC)&is.na(nyc$PREM_TYP_DESC))])) #they seem to be pretty evenly distributed, except for staten island. as.data.frame(table(nyc$BORO_NM)) #but staten island has a lower amount of crimes reported anyway, so having a reduced number of missing location seems to be in proportion #since part of my goal is to model based off of location, crime reports without locational description data is rather useless. 
#I'm going to go ahead and delete the 5,897 incidences without that data nyc <- nyc[!with(nyc,is.na(nyc$LOC_OF_OCCUR_DESC) & is.na(nyc$PREM_TYP_DESC)),] #now back to the others... na_premises <- unique(nyc$PREM_TYP_DESC[is.na(nyc$LOC_OF_OCCUR_DESC)]) # [1] "OTHER" "TRANSIT - NYC SUBWAY" "RESIDENCE - APT. HOUSE" "BUS STOP" # [5] "GROCERY/BODEGA" "TUNNEL" "RESIDENCE-HOUSE" "BRIDGE" # [9] "AIRPORT TERMINAL" "PUBLIC BUILDING" "FOOD SUPERMARKET" "BUS (NYC TRANSIT)" # [13] "OPEN AREAS (OPEN LOTS)" "PARKING LOT/GARAGE (PUBLIC)" "PARKING LOT/GARAGE (PRIVATE)" "HIGHWAY/PARKWAY" # [17] "DRY CLEANER/LAUNDRY" "HOTEL/MOTEL" "CLOTHING/BOUTIQUE" "STORAGE FACILITY" # [21] "COMMERCIAL BUILDING" "BAR/NIGHT CLUB" "CONSTRUCTION SITE" "FAST FOOD" # [25] "BANK" "CHAIN STORE" "TAXI (LIVERY LICENSED)" "HOSPITAL" # [29] "SMALL MERCHANT" "TAXI (YELLOW LICENSED)" "TAXI/LIVERY (UNLICENSED)" "TRANSIT FACILITY (OTHER)" # [33] "BUS TERMINAL" "PUBLIC SCHOOL" "BUS (OTHER)" "RESTAURANT/DINER" # [37] "BEAUTY & NAIL SALON" "MARINA/PIER" NA "RESIDENCE - PUBLIC HOUSING" # [41] "DEPARTMENT STORE" "CANDY STORE" "TELECOMM. STORE" "STORE UNCLASSIFIED" # [45] "DRUG STORE" "GYM/FITNESS FACILITY" "CHURCH" "BOOK/CARD" # [49] "CHECK CASHING BUSINESS" "ABANDONED BUILDING" "SYNAGOGUE" "LIQUOR STORE" # [53] "OTHER HOUSE OF WORSHIP" "DOCTOR/DENTIST OFFICE" "FACTORY/WAREHOUSE" "ATM" # [57] "PRIVATE/PAROCHIAL SCHOOL" "CEMETERY" "JEWELRY" "SOCIAL CLUB/POLICY" # [61] "VARIETY STORE" "TRAMWAY" "FERRY/FERRY TERMINAL" "PHOTO/COPY" # [65] "VIDEO STORE" "SHOE" "MOSQUE" "LOAN COMPANY" #going to set the following as outside.... na_prem_outside <- na_premises[c(2,4,6,8,12:16,23,27,29:33,35,38,55,57,61,62)] na_premises[c(2,4,6,8,12:16,23,27,29:33,35,38,55,57,61,62)] nyc$LOC_OF_OCCUR_DESC[with(nyc, nyc$PREM_TYP_DESC %in% na_prem_outside & is.na(nyc$LOC_OF_OCCUR_DESC))] <- 0 # #set these as the inside... 
na_prem_inside <- na_premises[c(3,5,7,9:11,17:22,24:26,28,34,36,37,39:54,56,58:60,63,64,66,67)] na_prem_inside nyc$LOC_OF_OCCUR_DESC[with(nyc, nyc$PREM_TYP_DESC %in% na_prem_inside & is.na(nyc$LOC_OF_OCCUR_DESC))] <- 1 # #what's left....hmm, Just 'SHOE' and 'OTHER'....what the hell type is shoe? maybe shoe store?? unique(nyc$PREM_TYP_DESC[is.na(nyc$LOC_OF_OCCUR_DESC)]) as.data.frame(table(nyc$PREM_TYP_DESC[is.na(nyc$LOC_OF_OCCUR_DESC)])) #set 'SHOE' to inside....why not, only a couple hundred of them nyc$LOC_OF_OCCUR_DESC[nyc$PREM_TYP_DESC=='SHOE'] <- 1 #so what happens at Other?? as.data.frame(table(nyc$PD_DESC[is.na(nyc$LOC_OF_OCCUR_DESC)])) # a freaking lot # lets just set it all to outside, the crimes with high frequencies (>500 occurrences) are ones that occur in 'open areas' #--makes sense, nondescript open area as 'other' nyc$LOC_OF_OCCUR_DESC[nyc$PREM_TYP_DESC=='OTHER'] <- 1 #no more 'missing' data and all locations set to either inside (1) or outside (0)...whew. ############################################################ TIME ####################################################################### #Time for Time #want to bin time into 1-hour segments #divide Hour:Minute:Second into three separate columns nyc$Time <- nyc$CMPLNT_FR_TM library(chron) nyc$Time2 <- 60 * 24 * as.numeric(times(nyc$Time)) nyc <- separate(nyc, CMPLNT_FR_TM, sep= ":", into=c("Hour", "Minute", "Second"), fill='right', remove=FALSE) #separate date, similarly, into Month, Day, Year nyc <- separate(nyc, CMPLNT_FR_DT, sep= "/", into=c("Month", "Day", "Year"), fill='right', remove=FALSE) as.data.frame(table(nyc$Year)) #DATA DELETION #this database was supposed to be 2006-2016, but there are years here from 1905 and 1015 (prob a typo). Gonna delete the few thousand from before 2006 #2005 is also a little skewed, though. Even though it has 10,000+ events, all the other years have nearly half a million data points. 
yearsIWant <- c("2006","2007","2008","2009","2010","2011","2012","2013","2014","2015","2016") nyc <- subset(nyc, nyc$Year %in% yearsIWant) nyc <- subset(nyc, !nyc$Day=="01") unique(nyc$Day) #create month-day column for use in holiday determination nyc$MonthDay <- paste( nyc$Month, nyc$Day, sep="-" ) #deal with date...convert to standard format nyc$Date <- as.Date(nyc$CMPLNT_FR_DT, "%m/%d/%Y") #find out day of week nyc$DayName <- weekdays(as.Date(nyc$Date)) #find out weekday or weekend daysoftheweek <- c("Monday", "Tuesday", "Wednesday", "Thursday", "Friday") daysoftheweekend <- c("Saturday", "Sunday") #create indicator variable, where 0 is a weekday and 1 is a weekend. nyc$Weekend <- as.integer(nyc$DayName %in% daysoftheweekend) #some holidays are always the same day holidays_list <- c("01-01", "02-14", "07-04", "09-04", "10-31", "12-25" ) easter_list <- c("2006-04-23", "2007-04-08", "2008-03-27", "2009-04-19", "2010-04-04", "2011-04-24", "2012-04-15", "2013-05-05", "2014-04-20", "2015-04-12") thanksgiving_list <- c("2005-11-24", "2006-11-23", "2007-11-22", "2008-11-27", "2009-11-26", "2010-11-25", "2011-11-24", "2012-11-22", "2013-11-28", "2014-11-27", "2015-11-26", "2016-11-24") #create Holiday indicator variable, 0 if not holiday, 1 if it matches any of the holidays specified above nyc$Holiday <- as.integer(nyc$MonthDay %in% holidays_list, nyc$CMPLNT_FR_DT %in% easter_list, nyc$CMPLNT_FR_DT %in% thanksgiving_list) # > as.data.frame(table(nyc$Holiday)) # Var1 Freq # 1 0 5375860 # 2 1 93413 ############################################################ CRIME ####################################################################### unique(nyc$OFNS_DESC) #...71 different classifiers unique(nyc$PD_DESC) #...410 different classifiers unique(nyc$LAW_CAT_CD) #...3 different classifiers #well, 71 is a lot better than 410... #I'm not sure there's anything between 3 and 71 without loosing a lot of data. 71 will have to do. 
# so I guess I'm looking at a hierarchical (between boros) multinomial (unordered categorial crime type) model? # ~~~~~~~~~~~~~~~~~~~~~ MISSING DATA ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #oh look...NAs. Yay. # sum(is.na(nyc$OFNS_DESC)) # as.data.frame(table(nyc$PD_DESC[is.na(nyc$OFNS_DESC)])) #for the 56 cases where the OFNS_DESC is NA but the PD_DESC is not, I could substitute the PD_DESC for the OFNS_DESC #...or I could go through and find previous incidences where the PD_DESC is the same OFNS_DESC is not NA, and substitute that OFNS_DESC for the NA #probably the better way to go....but probs a lot more involved. Ugh. #solution....this takes a bit. Just FYI. #Feel free to delete the message(i) lines if you don't want to see a bazilion numbers on your screen #I use it as a progress measurement, but that's just me for (i in 1:nrow(nyc)){ if (is.na(nyc$OFNS_DESC[i])){ crimetype=nyc$PD_DESC[i] othercrimetypes=unique(nyc$OFNS_DESC[nyc$PD_DESC==crimetype]) if (length(othercrimetypes)==2){ nyc$OFNS_DESC[i] <- othercrimetypes[2] message(i) } else if (is.na(othercrimetypes)){ nyc$OFNS_DESC[i] <- nyc$PD_DESC[i] message(i) } } } # sum(is.na(nyc$OFNS_DESC)) # # #no more OFNS_DESC NAs. Yay! 
#reorder the columns to put similar variables together #don't care about Minute or Second, so not including those #nixing PD_DESC since I'm going to use OFNS_DESC as my response variable nyc_clean <- nyc[,c("CMPLNT_FR_DT", "Time2","Date", "Month", "Day", "Year", "MonthDay", "Holiday", "DayName", "Weekend", "CMPLNT_FR_TM", "Hour", "OFNS_DESC", "LAW_CAT_CD", "JURIS_DESC", "BORO_NM", "LOC_OF_OCCUR_DESC", "PARKS_NM", "HADEVELOPT")] #################################################################################################################################################### # CRIME CONDENSATION #################################################################################################################################################### #need to par 72 crime types down to something manageable. #get rid of boring crimes useless_crimes <- c("ABORTION", "AGRICULTURE & MRKTS LAW-UNCLASSIFIED", "ALCOHOLIC BEVERAGE CONTROL LAW", "ANTICIPATORY OFFENSES", "CHILD ABANDONMENT/NON SUPPORT", "DISORDERLY CONDUCT", "DISRUPTION OF A RELIGIOUS SERV", "ENDAN WELFARE INCOMP", "ESCAPE 3", "FORTUNE TELLING", "GAMBLING", "JOSTLING", "NEW YORK CITY HEALTH CODE", "NYS LAWS-UNCLASSIFIED FELONY", "NYS LAWS-UNCLASSIFIED VIOLATION", "OTHER STATE LAWS", "OTHER STATE LAWS (NON PENAL LA", "OTHER STATE LAWS (NON PENAL LAW)", "OTHER TRAFFIC INFRACTION", "PROSTITUTION & RELATED OFFENSES", "THEFT,RELATED OFFENSES,UNCLASS", "UNDER THE INFLUENCE OF DRUGS", "UNLAWFUL POSS. WEAP. ON SCHOOL") nyc_2 <- subset(nyc_clean, !nyc_clean$OFNS_DESC %in% useless_crimes) #Combine similar crimes #So I originally did this with a for loop and it ran for DAYS. Not joking. I'm never running a for loop in R ever again if I can help it. #the original for-loop is commented and located at the bottom of this code. #these apply functions literally took about a minute to finish. versus FOUR FREAKING DAYS. Never ever again. Lesson learned. 
var <- "OFNS_DESC" cd_old <- c("UNAUTHORIZED USE OF A VEHICLE", "VEHICLE AND TRAFFIC LAWS") nyc_2[,var] <- sapply(nyc_2[,var],function(x) ifelse(x %in% cd_old, "VEHICLE/TRAFFIC LAWS RELATED",x)) cd_old_2 <- c("HOMICIDE-NEGLIGENT,UNCLASSIFIE", "HOMICIDE-NEGLIGENT-VEHICLE", "MURDER & NON-NEGL. MANSLAUGHTER") nyc_2[,var] <- sapply(nyc_2[,var],function(x) ifelse(x %in% cd_old_2, "MURDER",x)) cd_old_3 <- c("OFFENSES AGAINST PUBLIC ADMINI", "OFF. AGNST PUB ORD SENSBLTY &", "OFFENSES AGAINST MARRIAGE UNCL", "OFFENSES AGAINST PUBLIC SAFETY") nyc_2[,var] <- sapply(nyc_2[,var],function(x) ifelse(x %in% cd_old_3, "MISC. OFFENSES",x)) cd_old_4 <- c("RAPE", "SEX CRIMES") nyc_2[,var] <- sapply(nyc_2[,var],function(x) ifelse(x %in% cd_old_4, "RAPE OR SEX CRIME",x)) cd_old_5 <- c("ADMINISTRATIVE CODES", "ADMINISTRATIVE CODES") nyc_2[,var] <- sapply(nyc_2[,var],function(x) ifelse(x %in% cd_old_5, "MISCELLANEOUS PENAL LAW",x)) cd_old_6 <- c("BURGLAR'S TOOLS", "BURGLARY") nyc_2[,var] <- sapply(nyc_2[,var],function(x) ifelse(x %in% cd_old_6, "BURGLARY RELATED",x)) cd_old_7 <- c("FRAUDS", "FRAUDULENT ACCOSTING", "OFFENSES INVOLVING FRAUD") nyc_2[,var] <- sapply(nyc_2[,var],function(x) ifelse(x %in% cd_old_7, "FRAUD RELATED",x)) cd_old_8 <- c("GRAND LARCENY", "PETIT LARCENY") nyc_2[,var] <- sapply(nyc_2[,var],function(x) ifelse(x %in% cd_old_8, "GRAND/PETIT LARCENY",x)) cd_old_9 <- c("GRAND LARCENY OF MOTOR VEHICLE", "PETIT LARCENY OF MOTOR VEHICLE") nyc_2[,var] <- sapply(nyc_2[,var],function(x) ifelse(x %in% cd_old_9, "VEHICULAR GRAND/PETIT LARCENY",x)) cd_old_10 <- c("INTOXICATED & IMPAIRED DRIVING", "INTOXICATED/IMPAIRED DRIVING") nyc_2[,var] <- sapply(nyc_2[,var],function(x) ifelse(x %in% cd_old_10, "DUI",x)) cd_old_11 <- c("KIDNAPPING", "KIDNAPPING AND RELATED OFFENSES", "KIDNAPPING & RELATED OFFENSES") nyc_2[,var] <- sapply(nyc_2[,var],function(x) ifelse(x %in% cd_old_11, "KIDNAPPING RELATED",x)) cd_old_12 <- c("LOITERING", "LOITERING FOR DRUG PURPOSES", "LOITERING FOR 
PROSTITUTION OR", "LOITERING/DEVIATE SEX", "LOITERING/GAMBLING (CARDS, DIC") nyc_2[,var] <- sapply(nyc_2[,var],function(x) ifelse(x %in% cd_old_12, "LOITERING RELATED",x)) cd_old_13 <- c("OFFENSES AGAINST THE PERSON", "OFFENSES RELATED TO CHILDREN") nyc_2[,var] <- sapply(nyc_2[,var],function(x) ifelse(x %in% cd_old_13, "OFFENSES AGAINST HUMANS",x)) cd_old_14 <- c("OTHER OFFENSES RELATED TO THEF", "THEFT-FRAUD", "THEFT OF SERVICES") nyc_2[,var] <- sapply(nyc_2[,var],function(x) ifelse(x %in% cd_old_14, "THEFT RELATED",x)) as.data.frame(table(nyc_2$OFNS_DESC)) #down to only 26 crimes. Sweet. #narrow it down to modeling differences between Misc. Offenses and criminal mischief #the ratio between these two, which one is more likely, varies by hours and neighborhoods nyc_M <- subset(nyc_2, nyc_2$LAW_CAT_CD =="MISDEMEANOR") #ultimately an unnecessary step, but part of the original thought process nyc_M_small <- subset(nyc_M, nyc_M$OFNS_DESC %in% c("MISC. OFFENSES", "CRIMINAL MISCHIEF & RELATED OF")) nyc_2010 <- subset(nyc_M_small, nyc_M_small$Year==2010) #################################################################################################################################################### # SAVE CLEANED DATA FILE #################################################################################################################################################### write.csv(nyc_2010, "NYPD_Crime_Data_CLEAN_2010.csv") #################################################################################################################################################### # OLD CODE #################################################################################################################################################### #Here is the original for loop: # for (i in 1906650:nrow(nyc_clean)){ # if (nyc_clean$OFNS_DESC[i] == "ADMINISTRATIVE CODE" || nyc_clean$OFNS_DESC == "ADMINISTRATIVE CODES"){ # nyc_clean$OFNS_DESC[i] <- "MISCELLANEOUS PENAL LAW" # message(i) # } 
else if (nyc_clean$OFNS_DESC[i] == "BURGLAR'S TOOLS" || nyc_clean$OFNS_DESC[i] == "BURGLARY"){ # nyc_clean$OFNS_DESC[i] <- "BURGLARY RELATED" # message(i) # } else if (nyc_clean$OFNS_DESC[i] == "FRAUDS" || nyc_clean$OFNS_DESC[i] == "FRAUDULENT ACCOSTING" || nyc_clean$OFNS_DESC[i] == "OFFENSES INVOLVING FRAUD"){ # nyc_clean$OFNS_DESC[i] <- "FRAUD RELATED" # message(i) # } else if (nyc_clean$OFNS_DESC[i] == "GRAND LARCENY" || nyc_clean$OFNS_DESC[i] == "PETIT LARCENY"){ # nyc_clean$OFNS_DESC[i] <- "GRAND/PETIT LARCENY" # message(i) # } else if (nyc_clean$OFNS_DESC[i] == "GRAND LARCENY OF MOTOR VEHICLE" || nyc_clean$OFNS_DESC[i] == "PETIT LARCENY OF MOTOR VEHICLE"){ # nyc_clean$OFNS_DESC[i] <- "VEHICULAR GRAND/PETIT LARCENY" # message(i) # } else if (nyc_clean$OFNS_DESC[i] == "HOMICIDE-NEGLIGENT-VEHICLE" || nyc_clean$OFNS_DESC[i] == "HOMICIDE-NEGLIGENT,UNCLASSIFIED" ||nyc_clean$OFNS_DESC[i] == "MURDER & NON-NEGL. MANSLAUGHTER"){ # nyc_clean$OFNS_DESC[i] <- "MURDER" # message(i) # } else if (nyc_clean$OFNS_DESC[i] == "INTOXICATED & IMPAIRED DRIVING" || nyc_clean$OFNS_DESC[i] == "INTOXICATED/IMPAIRED DRIVING"){ # nyc_clean$OFNS_DESC[i] <- "DUI" # message(i) # } else if (nyc_clean$OFNS_DESC[i] == "KIDNAPPING" || nyc_clean$OFNS_DESC[i] == "KIDNAPPING AND RELATED OFFENSES"|| nyc_clean$OFNS_DESC[i] == "KIDNAPPING & RELATED OFFENSES"){ # nyc_clean$OFNS_DESC[i] <- "KIDNAPPING RELATED" # message(i) # } else if (nyc_clean$OFNS_DESC[i] == "LOITERING" || nyc_clean$OFNS_DESC[i] == "LOITERING FOR DRUG PURPOSES"|| nyc_clean$OFNS_DESC[i] == "LOITERING FOR PROSTITUTION OR"|| nyc_clean$OFNS_DESC[i] == "LOITERING/DEVIATE SEX"|| nyc_clean$OFNS_DESC[i] == "LOITERING/GAMBLING (CARDS, DIC"){ # nyc_clean$OFNS_DESC[i] <- "LOITERING RELATED" # message(i) # } else if (nyc_clean$OFNS_DESC[i] == "OFF. 
AGNST PUB ORD SENSBLTY &" || nyc_clean$OFNS_DESC[i] == "OFFENSES AGAINST MARRIAGE UNCL"|| nyc_clean$OFNS_DESC[i] == "OFFENSES AGINST PUBLIC ADMINI"|| nyc_clean$OFNS_DESC[i] == "OFFENSES AGAINST PUBLIC SAFETY"){ # nyc_clean$OFNS_DESC[i] <- "MISC. OFFENSES" # message(i) # } else if (nyc_clean$OFNS_DESC[i] == "OFFENSES AGAINST THE PERSON" || nyc_clean$OFNS_DESC[i] == "OFFENSES RELATED TO CHILDREN"){ # nyc_clean$OFNS_DESC[i] <- "OFFENSES AGAINST HUMANS" # message(i) # } else if (nyc_clean$OFNS_DESC[i] == "OTHER OFFENSES RELATED TO THEF" || nyc_clean$OFNS_DESC[i] == "THEFT-FRAUD"|| nyc_clean$OFNS_DESC[i] == "THEFT OF SERVICES"){ # nyc_clean$OFNS_DESC[i] <- "THEFT RELATED" # message(i) # } else if (nyc_clean$OFNS_DESC[i] == "RAPE" || nyc_clean$OFNS_DESC[i] == "SEX CRIMES"){ # nyc_clean$OFNS_DESC[i] <- "RAPE OR SEX CRIME" # message(i) # } # }
context('check summary covr') test_that('covr_summary',{ expect_s3_class(covrpage::covr_summary(covrpage:::covr_test),'tbl_df') expect_error(covrpage::covr_summary()) })
/inst/covrpage_benchmark/tests/testthat/test-covrfuns.R
no_license
Robinlovelace/covrpage
R
false
false
181
r
context('check summary covr') test_that('covr_summary',{ expect_s3_class(covrpage::covr_summary(covrpage:::covr_test),'tbl_df') expect_error(covrpage::covr_summary()) })
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/XinyuJiangToolsRfunctions.R \name{func4} \alias{func4} \title{Weighted mean, var, sd} \usage{ func4(d) } \arguments{ \item{d}{data.frame} } \value{ list } \description{ Computes the weighted mean, var, sd } \examples{ data(d) func4(d) }
/man/func4.Rd
no_license
Xinyu-Jiang/XinyuJiangTools
R
false
true
315
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/XinyuJiangToolsRfunctions.R \name{func4} \alias{func4} \title{Weighted mean, var, sd} \usage{ func4(d) } \arguments{ \item{d}{data.frame} } \value{ list } \description{ Computes the weighted mean, var, sd } \examples{ data(d) func4(d) }
rm(list=ls()) setwd('~/Desktop/R/Coursera/04_Exploratory Data Analysis') df <- read.table("household_power_consumption.txt", header = TRUE, sep = ";") df$Date = as.Date(df$Date, "%d/%m/%Y") df2 <- df[df$Date=="2007-02-01" | df$Date=="2007-02-02",] df2 <- transform(df2, DateTime=as.POSIXct(paste(df2$Date, df2$Time)), "%d/%m/%Y %H:%M:%S") df2$Global_active_power <- as.character(df2$Global_active_power) df2$Global_active_power <- as.numeric(df2$Global_active_power) df2$Global_reactive_power <- as.character(df2$Global_reactive_power) df2$Global_reactive_power <- as.numeric(df2$Global_reactive_power) df2$Voltage <- as.character(df2$Voltage) df2$Voltage <- as.numeric(df2$Voltage) df2$Sub_metering_1 <- as.character(df2$Sub_metering_1) df2$Sub_metering_1 <- as.numeric(df2$Sub_metering_1) df2$Sub_metering_2 <- as.character(df2$Sub_metering_2) df2$Sub_metering_2 <- as.numeric(df2$Sub_metering_2) df2$Sub_metering_3 <- as.character(df2$Sub_metering_3) df2$Sub_metering_3 <- as.numeric(df2$Sub_metering_3) par(mfrow = c(2,2)) plot(df2$DateTime,df2$Global_active_power, type="l", xlab="", ylab="Global Active Power") plot(df2$DateTime,df2$Voltage, type="l", xlab="", ylab="Voltage") plot(df2$DateTime, df2$Sub_metering_1, type = 'l', ylab = "Energy sub metering", xlab = "") lines(df2$DateTime, df2$Sub_metering_2, col = "red") lines(df2$DateTime, df2$Sub_metering_3, col = "blue") legend("topright", lty = c(1,1), lwd = c(1,1), col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), bty = "n", cex = 0.5) plot(df2$DateTime,df2$Global_reactive_power, type="l", xlab="", ylab="Global Reactive Power") dev.copy(png, file = "plot4.png", width = 480, height = 480) dev.off()
/plot4.R
no_license
sc1234567/ExData_Plotting1
R
false
false
1,727
r
rm(list=ls()) setwd('~/Desktop/R/Coursera/04_Exploratory Data Analysis') df <- read.table("household_power_consumption.txt", header = TRUE, sep = ";") df$Date = as.Date(df$Date, "%d/%m/%Y") df2 <- df[df$Date=="2007-02-01" | df$Date=="2007-02-02",] df2 <- transform(df2, DateTime=as.POSIXct(paste(df2$Date, df2$Time)), "%d/%m/%Y %H:%M:%S") df2$Global_active_power <- as.character(df2$Global_active_power) df2$Global_active_power <- as.numeric(df2$Global_active_power) df2$Global_reactive_power <- as.character(df2$Global_reactive_power) df2$Global_reactive_power <- as.numeric(df2$Global_reactive_power) df2$Voltage <- as.character(df2$Voltage) df2$Voltage <- as.numeric(df2$Voltage) df2$Sub_metering_1 <- as.character(df2$Sub_metering_1) df2$Sub_metering_1 <- as.numeric(df2$Sub_metering_1) df2$Sub_metering_2 <- as.character(df2$Sub_metering_2) df2$Sub_metering_2 <- as.numeric(df2$Sub_metering_2) df2$Sub_metering_3 <- as.character(df2$Sub_metering_3) df2$Sub_metering_3 <- as.numeric(df2$Sub_metering_3) par(mfrow = c(2,2)) plot(df2$DateTime,df2$Global_active_power, type="l", xlab="", ylab="Global Active Power") plot(df2$DateTime,df2$Voltage, type="l", xlab="", ylab="Voltage") plot(df2$DateTime, df2$Sub_metering_1, type = 'l', ylab = "Energy sub metering", xlab = "") lines(df2$DateTime, df2$Sub_metering_2, col = "red") lines(df2$DateTime, df2$Sub_metering_3, col = "blue") legend("topright", lty = c(1,1), lwd = c(1,1), col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), bty = "n", cex = 0.5) plot(df2$DateTime,df2$Global_reactive_power, type="l", xlab="", ylab="Global Reactive Power") dev.copy(png, file = "plot4.png", width = 480, height = 480) dev.off()
# DownloadAndUnzipData.R # Coursera course "Getting and Cleaning Data" # Course project library(LaF) ReadAndMergeData <- function() { # Assuming that the necessary files were downloaded already, they are # imported and merged into a single data frame. # # To save time reading the training and test sets, only the required columns # are imported. Futhermore the activity labels will be included into the new # dataset in a human readable format. Also the column names are edited to # be more human readable. # # Returns: # Data frame with the merged data set containing type (training/test), # subject, activity and the measurements. # Read variable labels of measurements label <- read.delim("UCI HAR Dataset/features.txt", header=FALSE, sep=" ", col.names=c("id", "text"), colClasses=c("NULL", "character")) # Assemble necessary column indices for reading training and test sets column.indices <- grep("mean|std", label$text) # Edit column names to make them human readable label$text <- gsub("\\(\\)", "", label$text) label$text <- gsub("^t", "TimeSignal.", label$text) label$text <- gsub("^f", "FrequencySignal.", label$text) label$text <- gsub("Acc", "Accelerometer", label$text) label$text <- gsub("Gyro", "Gyroscope", label$text) label$text <- gsub("Jerk", ".Jerk", label$text) label$text <- gsub("Mag", ".Magnitude", label$text) label$text <- gsub("(Body)\\1", "\\1", label$text) # import training and test set laf.output <- laf_open_fwf("UCI HAR Dataset/train/X_train.txt", column_types=rep("numeric", nrow(label)), column_widths=rep(16, nrow(label)), column_names=as.character(label$text)) training.set <- laf.output[, column.indices] laf.output <- laf_open_fwf("UCI HAR Dataset/test/X_test.txt", column_types=rep("numeric", nrow(label)), column_widths=rep(16, nrow(label)), column_names=as.character(label$text)) test.set <- laf.output[, column.indices] # merging all training data subject <- read.delim("UCI HAR Dataset/train/subject_train.txt", header=FALSE, col.names="subject") 
activity <- read.delim("UCI HAR Dataset/train/y_train.txt", header=FALSE, col.names="activity") training.set <- cbind(set.type=rep("training", nrow(training.set)), subject, activity, training.set) # merging all test data subject <- read.delim("UCI HAR Dataset/test/subject_test.txt", header=FALSE, col.names="subject") activity <- read.delim("UCI HAR Dataset/test/y_test.txt", header=FALSE, col.names="activity") test.set <- cbind(set.type=rep("test", nrow(test.set)), subject, activity, test.set) # mergint trainings and test data merged.set <- rbind(training.set, test.set) merged.set$subject <- factor(merged.set$subject) # Making activity info human readable label <- read.delim("UCI HAR Dataset/activity_labels.txt", header=FALSE, sep=" ", col.names=c("id", "activity")) merged.set$activity <- factor(merged.set$activity, levels=label$id, labels=label$activity) merged.set }
/ReadAndMergeData.R
no_license
tegriebe/GetData_Cleaning
R
false
false
3,568
r
# DownloadAndUnzipData.R # Coursera course "Getting and Cleaning Data" # Course project library(LaF) ReadAndMergeData <- function() { # Assuming that the necessary files were downloaded already, they are # imported and merged into a single data frame. # # To save time reading the training and test sets, only the required columns # are imported. Futhermore the activity labels will be included into the new # dataset in a human readable format. Also the column names are edited to # be more human readable. # # Returns: # Data frame with the merged data set containing type (training/test), # subject, activity and the measurements. # Read variable labels of measurements label <- read.delim("UCI HAR Dataset/features.txt", header=FALSE, sep=" ", col.names=c("id", "text"), colClasses=c("NULL", "character")) # Assemble necessary column indices for reading training and test sets column.indices <- grep("mean|std", label$text) # Edit column names to make them human readable label$text <- gsub("\\(\\)", "", label$text) label$text <- gsub("^t", "TimeSignal.", label$text) label$text <- gsub("^f", "FrequencySignal.", label$text) label$text <- gsub("Acc", "Accelerometer", label$text) label$text <- gsub("Gyro", "Gyroscope", label$text) label$text <- gsub("Jerk", ".Jerk", label$text) label$text <- gsub("Mag", ".Magnitude", label$text) label$text <- gsub("(Body)\\1", "\\1", label$text) # import training and test set laf.output <- laf_open_fwf("UCI HAR Dataset/train/X_train.txt", column_types=rep("numeric", nrow(label)), column_widths=rep(16, nrow(label)), column_names=as.character(label$text)) training.set <- laf.output[, column.indices] laf.output <- laf_open_fwf("UCI HAR Dataset/test/X_test.txt", column_types=rep("numeric", nrow(label)), column_widths=rep(16, nrow(label)), column_names=as.character(label$text)) test.set <- laf.output[, column.indices] # merging all training data subject <- read.delim("UCI HAR Dataset/train/subject_train.txt", header=FALSE, col.names="subject") 
activity <- read.delim("UCI HAR Dataset/train/y_train.txt", header=FALSE, col.names="activity") training.set <- cbind(set.type=rep("training", nrow(training.set)), subject, activity, training.set) # merging all test data subject <- read.delim("UCI HAR Dataset/test/subject_test.txt", header=FALSE, col.names="subject") activity <- read.delim("UCI HAR Dataset/test/y_test.txt", header=FALSE, col.names="activity") test.set <- cbind(set.type=rep("test", nrow(test.set)), subject, activity, test.set) # mergint trainings and test data merged.set <- rbind(training.set, test.set) merged.set$subject <- factor(merged.set$subject) # Making activity info human readable label <- read.delim("UCI HAR Dataset/activity_labels.txt", header=FALSE, sep=" ", col.names=c("id", "activity")) merged.set$activity <- factor(merged.set$activity, levels=label$id, labels=label$activity) merged.set }
pacman::p_load(dplyr, readxl, ggplot2, scales, ggpubr, fpc, doParallel, reticulate) memory.limit() #rna는 정상 세포와 암세포 각각의 RNA 발현량을 담고 있음 -> 원발암 유전자를 찾아라 #단순 proportion으로만 했음 rna <- read_excel("Data/rna.xlsx") colnames(rna) <- c("gene", "normal", "cancer") rna <- rna[rna$normal >= 1e-08 & rna$cancer >= 1e-08,] ggplot(data=rna, aes(x=log10(normal), y=log10(cancer))) + #log scale geom_point() + geom_abline(slope=1, intercept=0) rna$pro <- log10(rna$cancer/rna$normal) ggplot(data=rna, aes(x=pro)) + geom_density(color="darkblue", fill="lightblue") ggboxplot(data=rna, y="pro") qqnorm(rna$pro) cancer_gene_proportion <- c(arrange(rna, pro)[1:15,]$gene, arrange(rna, desc(pro))[1:15,]$gene) median(rna$pro) #DBSCAN, python으로 했음 rna_plot <- bind_cols(rna, py$predict) rna_plot$predict <- rna_plot$predict + 2 rna_plot$predict <- as.factor(rna_plot$predict) ggplot(rna_plot, aes(x=log10(normal), y=log10(cancer), group=predict, color=predict)) + geom_point() + scale_color_manual(values=c("grey", "#E69F00", "#56B4E9", "skyblue", "green", "red", "yellow")) + geom_abline(slope=1, intercept=0) cancer_gene_DBSCAN <- py$cancer_gene_DBSCAN intersect(cancer_gene_DBSCAN, cancer_gene_proportion) rna[rna$gene %in% intersect(cancer_gene_DBSCAN, cancer_gene_proportion),] #여기서 proportion은 극단치를 잡아냈을 뿐임을 알 수 있다.
/Scripts/Detection of cancer.R
no_license
Cytotoxicity/Detect-cancer-gene
R
false
false
1,444
r
pacman::p_load(dplyr, readxl, ggplot2, scales, ggpubr, fpc, doParallel, reticulate) memory.limit() #rna는 정상 세포와 암세포 각각의 RNA 발현량을 담고 있음 -> 원발암 유전자를 찾아라 #단순 proportion으로만 했음 rna <- read_excel("Data/rna.xlsx") colnames(rna) <- c("gene", "normal", "cancer") rna <- rna[rna$normal >= 1e-08 & rna$cancer >= 1e-08,] ggplot(data=rna, aes(x=log10(normal), y=log10(cancer))) + #log scale geom_point() + geom_abline(slope=1, intercept=0) rna$pro <- log10(rna$cancer/rna$normal) ggplot(data=rna, aes(x=pro)) + geom_density(color="darkblue", fill="lightblue") ggboxplot(data=rna, y="pro") qqnorm(rna$pro) cancer_gene_proportion <- c(arrange(rna, pro)[1:15,]$gene, arrange(rna, desc(pro))[1:15,]$gene) median(rna$pro) #DBSCAN, python으로 했음 rna_plot <- bind_cols(rna, py$predict) rna_plot$predict <- rna_plot$predict + 2 rna_plot$predict <- as.factor(rna_plot$predict) ggplot(rna_plot, aes(x=log10(normal), y=log10(cancer), group=predict, color=predict)) + geom_point() + scale_color_manual(values=c("grey", "#E69F00", "#56B4E9", "skyblue", "green", "red", "yellow")) + geom_abline(slope=1, intercept=0) cancer_gene_DBSCAN <- py$cancer_gene_DBSCAN intersect(cancer_gene_DBSCAN, cancer_gene_proportion) rna[rna$gene %in% intersect(cancer_gene_DBSCAN, cancer_gene_proportion),] #여기서 proportion은 극단치를 잡아냈을 뿐임을 알 수 있다.
library(paleotree) ### Name: seqTimeList ### Title: Construct a Stochastic Sequenced Time-List from an Unsequenced ### Time-List ### Aliases: seqTimeList ### ** Examples #Simulate some fossil ranges with simFossilRecord set.seed(444) record <- simFossilRecord(p = 0.1, q = 0.1, nruns = 1, nTotalTaxa = c(60,80), nExtant = 0) taxa <- fossilRecord2fossilTaxa(record) #simulate a fossil record with imperfect sampling with sampleRanges() rangesCont <- sampleRanges(taxa,r = 0.1) #Now let's use binTimeData to get ranges in discrete overlapping intervals #via pre-set intervals input presetIntervals <- cbind(c(1000,995,990,980,970,975,960,950,940,930,900,890,888,879,875), c(995,989,960,975,960,950,930,930,930,900,895,888,880,875,870)) rangesDisc1 <- binTimeData(rangesCont,int.times = presetIntervals) seqLists <- seqTimeList(rangesDisc1,nruns = 10) seqLists$nTaxa seqLists$nIntervals #apply freqRat as an example analysis sapply(seqLists$timeLists,freqRat) #notice the zero and infinite freqRat estimates? What's going on? freqRat(seqLists$timeLists[[4]],plot = TRUE) #too few taxa of two or three interval durations for the ratio to work properly #perhaps ignore these estimates #with weighted selection of intervals seqLists <- seqTimeList(rangesDisc1,nruns = 10,weightSampling = TRUE) seqLists$nTaxa seqLists$nIntervals sapply(seqLists$timeLists,freqRat) #didn't have much effect in this simulated example
/data/genthat_extracted_code/paleotree/examples/seqTimeList.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
1,437
r
library(paleotree) ### Name: seqTimeList ### Title: Construct a Stochastic Sequenced Time-List from an Unsequenced ### Time-List ### Aliases: seqTimeList ### ** Examples #Simulate some fossil ranges with simFossilRecord set.seed(444) record <- simFossilRecord(p = 0.1, q = 0.1, nruns = 1, nTotalTaxa = c(60,80), nExtant = 0) taxa <- fossilRecord2fossilTaxa(record) #simulate a fossil record with imperfect sampling with sampleRanges() rangesCont <- sampleRanges(taxa,r = 0.1) #Now let's use binTimeData to get ranges in discrete overlapping intervals #via pre-set intervals input presetIntervals <- cbind(c(1000,995,990,980,970,975,960,950,940,930,900,890,888,879,875), c(995,989,960,975,960,950,930,930,930,900,895,888,880,875,870)) rangesDisc1 <- binTimeData(rangesCont,int.times = presetIntervals) seqLists <- seqTimeList(rangesDisc1,nruns = 10) seqLists$nTaxa seqLists$nIntervals #apply freqRat as an example analysis sapply(seqLists$timeLists,freqRat) #notice the zero and infinite freqRat estimates? What's going on? freqRat(seqLists$timeLists[[4]],plot = TRUE) #too few taxa of two or three interval durations for the ratio to work properly #perhaps ignore these estimates #with weighted selection of intervals seqLists <- seqTimeList(rangesDisc1,nruns = 10,weightSampling = TRUE) seqLists$nTaxa seqLists$nIntervals sapply(seqLists$timeLists,freqRat) #didn't have much effect in this simulated example
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bam_processing.R \name{tss_aggregate} \alias{tss_aggregate} \title{Aggregate TSSs} \usage{ tss_aggregate(experiment) } \arguments{ \item{experiment}{tsr explorer object} } \description{ Aggregate overlapping TSSs into a total sum score. }
/man/tss_aggregate.Rd
no_license
vpbrendel/TSRexploreR
R
false
true
317
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bam_processing.R \name{tss_aggregate} \alias{tss_aggregate} \title{Aggregate TSSs} \usage{ tss_aggregate(experiment) } \arguments{ \item{experiment}{tsr explorer object} } \description{ Aggregate overlapping TSSs into a total sum score. }
# Rebuilds the basic data folders for the Anchovy79 GADGET assessment.
# Revived 27 April 2021 to work around a fleet-related error; for that fix,
# close R and reinstall the packages below:
#devtools::install_github("mareframe/mfdb", ref = "3.x")
#devtools::install_github("mareframe/mfdb", ref="3.x")
#devtools::install_github('hafro/rgadget')
#remotes::install_github('mareframe/mfdb', ref = 'd4bbd4e')
#
# Units: weights are in kilos; numbers must be multiplied by 1000 to be in
# individual units rather than thousands. Catchdistribution covers the
# surveys; the acoustic surveys contribute biomass only.
#setwd("~/GADGET/Assessment/Assessment2020_pandemia")
#setwd("~/GADGET/Assessment/Assessment_2021")

library(mfdb)
library(gdata)       # read.xls()
library(XLConnect)   # readWorksheetFromFile(), used throughout
#library(plyr)
library(reshape2)

## Create a gadget directory, define some defaults to use with our queries below
exists("mdb_compi")  # NOTE(review): result unused; presumably a leftover interactive check

#mdb_comp <- mfdb('Ibera', db_params=list(dbname="mf"))
#mfdb('Ibera',destroy_schema = TRUE)#, db_params=list(dbname="mf"))
mdb_compi <- mfdb('Ibera.duckdb') #, db_params=list(dbname="mf"))
#gd <- gadget_directory("Anchovy2021_withLDandALK2020_1_2_data")
#gd <- gadget_directory("Anchovy2021_withLD2018_2019_1_2_andALKpelago2020")

# Single model area: division "IXa", size 13000
area <- data.frame(1, "IXa", 13000)
colnames(area) <- c("id", "name", "size")

# Constant dummy temperature series (5 degrees) for every month 1988-2020,
# required by mfdb even when temperature is not used by the model
yearsb <- c(1988:2020)
month <- rep(1:12, times = length(yearsb))
year <- rep(yearsb, times = 1, length.out = NA, each = 12)
areacell <- rep("IXa", times = length(yearsb) * 12)
id <- rep(1, times = length(yearsb) * 12)
temperatureb <- rep(5, times = length(yearsb) * 12)
temperature <- data.frame(id, year, month, areacell, temperatureb)
colnames(temperature) <- c("id", "year", "month", "areacell", "temperature")

# Source spreadsheets (absolute local paths -- TODO(review): make configurable)
#pelago = read.xls("/home/marga/GADGET/DATOS/pelago_99_14_simp.xls")  # the Portuguese surveys
#ecocadiz = read.xls("/home/marga/GADGET/DATOS/ecocadiz_04_13_simp.xls")  # the Spanish ones; 2013 must match the ecocadiz0813 total
sar <- read.xls("/home/marga/GADGET/DATOS/sar_98_07.xls")
catch_age <- read.xls("/home/marga/GADGET/DATOS/catch_at_age_numbers88_13.xls")
# Catch-at-length / catch-at-weight inputs (1988-2013)
length_age <- read.xls("/home/marga/GADGET/DATOS/catch_length_at_age_88_13_3.xls")
#weight_age=read.csv("/home/marga/GADGET/DATOS/catches_weight_at_age_kg88_13_3.csv",strip.white = TRUE, sep=",", na.strings= c("999", "NA", " ", ""),stringsAsFactors=FALSE)
#weight_age=read.csv("/home/marga/GADGET/DATOS/catches_weight_at_age_kg88_13_2.csv")
weight_age <- read.xls("/home/marga/GADGET/DATOS/catches_weight_at_age_kg88_13_5.xls")

# Portuguese catches 1989-2016 from the corrected WGPELA exchange file
Catches_Ptg <- read.xls("/home/marga/GADGET/DATOS/Algarve/ANE_1989_2016 WGPELA_ENVIO_CORRIGIDO.xls")

library(dplyr)   # was require(); library() fails loudly if the package is missing
library(tidyr)

# Total tonnes per YEAR x SEASON for ICES subdivision 27.9.a.s.a (Algarve),
# filling year/season combinations absent from the file with 0
Catches_Algarve <- Catches_Ptg %>%
  filter(AREATYPE == "27.9.a.s.a") %>%
  group_by(YEAR, SEASON) %>%
  summarise(totalton = sum(CATON)) %>%
  ungroup() %>%
  complete(YEAR, SEASON, fill = list(totalton = 0))

# Append years not present in the exchange file.
# 2017 quarterly landings were reported in kg, hence * 0.001 to get tonnes:
#   Q1 10079.6 kg, Q2 1953.5 kg, Q3 12871.0 kg, Q4 1206.6 kg
Catches_Algarve <- add_row(Catches_Algarve, YEAR = 2017, SEASON = 1:4,
                           totalton = c(10079.6, 1953.5, 12871, 1206.6) * 0.001)

# 2018, also in kg (source: LandingsPortugal2018, sheet Folha1, region "Sul"):
#   Q1 1431.3 kg, Q2 11785.3 kg, Q3 52035.7 kg, Q4 83.6 kg
Catches_Algarve <- add_row(Catches_Algarve, YEAR = 2018, SEASON = 1:4,
                           totalton = c(1431.3, 11785.3, 52035.7, 83.6) * 0.001)

# 2019 is already in tonnes
# (see /home/marga/GADGET/DATOS/Algarve/Landings_Metier_9aS_alg_2019.xlsx)
Catches_Algarve <- add_row(Catches_Algarve, YEAR = 2019, SEASON = 1:4,
                           totalton = c(0, 0, 4, 109))

# 2020 was in kg but already converted (* 0.001) inside the source Excel
Catches_Algarve <- add_row(Catches_Algarve, YEAR = 2020, SEASON = 1:4,
                           totalton = c(1.6059, 67.2002, 73.5084, 12.23))

# Last year for which the biomass-to-numbers conversion is performed
# (one less than the current assessment year)
#Year4bio2num<-2018
Year4bio2num <- 2020
NUMPT<-data.frame(YEAR=rep(1989:Year4bio2num,each=4), SEASON=rep(1:4,each=length(1989:Year4bio2num)),NUMBERPT=rep(0,each=length(1989:Year4bio2num)*4),NUMBERSP=rep(0,each=length(1989:Year4bio2num)*4)) BIOMTOT<-data.frame(YEAR=rep(1989:Year4bio2num,each=4), SEASON=rep(1:4,each=length(1989:Year4bio2num)),TONPT=rep(0,each=length(1989:Year4bio2num)*4),TONSP=rep(0,each=length(1989:Year4bio2num)*4)) a<-1 for (i in 1989:2013){ for (j in 1:4){ bioq<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/Taledas_allfleets_1988_2016/BOQUERON_ALK_",i,".xls",sep=""), sheet = paste(j,"Q",sep=""), header = F, startCol = 9, startRow = 2, endCol = 9, endRow = 2, useCachedValues=T)*0.001 #Para que quede en toneladas (está en kilos) numberq<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/Taledas_allfleets_1988_2016/BOQUERON_ALK_",i,".xls",sep=""), sheet = paste(j,"Q",sep=""), header = F, startCol = 9, startRow = 38, endCol = 9, endRow = 38, useCachedValues=T) biopt<-Catches_Algarve %>% filter(YEAR==i,SEASON==j)%>%select(totalton) numpt<-biopt$totalton*numberq/bioq NUMPT[a,]<-c(i,j,numpt,numberq) BIOMTOT[a,]<-c(i,j,biopt,bioq) a<-a+1 } } ##actualizar loop hasta for (i in 2014:Year4bio2num){ for (j in 1:4){ bioq<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/Taledas_allfleets_1988_2016/BOQUERON_ALK_",i,".xls",sep=""), sheet = paste(j,"Q",sep=""), header = F, startCol = 9, startRow = 2, endCol = 9, endRow = 2, useCachedValues=T)*0.001 #Para que quede en toneladas numberq<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/Taledas_allfleets_1988_2016/BOQUERON_ALK_",i,".xls",sep=""), sheet = paste(j,"Q",sep=""), header = F, startCol = 9, startRow = 40, endCol = 9, endRow = 40, useCachedValues=T) biopt<-Catches_Algarve %>% filter(YEAR==i,SEASON==j)%>%select(totalton) numpt<-biopt$totalton*numberq/bioq BIOMTOT[a,]<-c(i,j,biopt,bioq) #en toneladas NUMPT[a,]<-c(i,j,numpt,numberq) a<-a+1 } } CatchesAllFleetsPTSP<-NUMPT%>%mutate(ALL=NUMBERPT+NUMBERSP) #Trato de leer todas las 
tablas en un loop #pelagos #La del 2013 y 2014 es la del IEO porque IPIMAR sale la cosa rara, esto no es alk, esto está ponderado mybiglist <- list() for (i in c(1999,2001:2003,2005:2009)){ name<-paste("Pelago",i,sep="") a<-readWorksheetFromFile("/home/marga/GADGET/DATOS/Acústica en 9aS_TallasyEdades.xls", sheet = paste(i,"Port",sep=""), header = TRUE, startCol = 13, startRow = 5, endCol = 17, endRow = 35, useCachedValues=TRUE) names(a)<-c("length","0", "1", "2", "3") a$year<-i a$month<-4 a$type<-"pelago_comp" a[is.na(a)] <- 0 mybiglist[[name]] <- a } for (i in c(2010,2013,2014)){ name<-paste("Pelago",i,sep="") a<-readWorksheetFromFile("/home/marga/GADGET/DATOS/Acústica en 9aS_TallasyEdades.xls", sheet = paste(i,"Port",sep=""), header = TRUE, startCol = 11, startRow = 5, endCol = 15, endRow = 35, useCachedValues=TRUE) b<-readWorksheetFromFile("/home/marga/GADGET/DATOS/Acústica en 9aS_TallasyEdades.xls", sheet = paste(i,"Port",sep=""), header = F, startCol = 9, startRow = 2, endCol = 9, endRow = 2, useCachedValues=TRUE) names(a)<-c("length","0", "1", "2", "3") a$total<-b[1,1]*0.001 #pasarlo a tons que luego se multiplica por 1000 para que quede en Kg a$year<-i a$month<-4 a$type<-"pelago_comp" a[is.na(a)] <- 0 mybiglist[[name]] <- a } mybiglist$Pelago1999$month<-3 mybiglist$Pelago2001$month<-3 mybiglist$Pelago2002$month<-3 mybiglist$Pelago2003$month<-2 #para 2017 no hay datos de ALK para Algarve así que pongo sólo lo de Cádiz, esto no va aquí esto está ponderado for (i in c(2015:2021)){ name<-paste("Pelago",i,sep="") a<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/PELAGO/Pelago",i,".xls",sep=""), sheet = "Sheet1", header = TRUE, startCol = 1, startRow = 6, endCol = 5, endRow = 26, useCachedValues=TRUE) b<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/PELAGO/Pelago",i,".xls",sep=""), sheet = "Sheet1", header = F, startCol = 10, startRow = 5, endCol = 10, endRow = 5, useCachedValues=TRUE) names(a)<-c("length","0", "1", "2", "3") a$year<-i a$month<-4 
a$type<-"pelago_comp" a$total<-b[1,1] a[is.na(a)] <- 0 mybiglist[[name]] <- a } df<-plyr::ldply(mybiglist,data.frame) pelago_bio_pre<-cbind(df[7], df[10]*1000, df[8]) pelago_bio_pre<-unique(na.omit(pelago_bio_pre)) df<-df[-10] names(df)<-c(".id", "length", "0", "1", "2", "3", "year", "month", "type") pelago_comp<-df #only_Length_distribution_pelago mybiglist <- list() for (i in c(1999,2001:2003,2005:2009)){ name<-paste("Pelago",i,sep="") a<-readWorksheetFromFile("/home/marga/GADGET/DATOS/Acústica en 9aS_TallasyEdades.xls", sheet = paste(i,"Port",sep=""), header = TRUE, startCol = 10, startRow = 5, endCol = 11, endRow = 35, useCachedValues=TRUE) names(a)<-c("length","count") a$year<-i a$month<-4-3 a$type<-"pelago_comp_ld" a[is.na(a)] <- 0 mybiglist[[name]] <- a } for (i in c(2010,2013,2014)){ name<-paste("Pelago",i,sep="") a<-readWorksheetFromFile("/home/marga/GADGET/DATOS/Acústica en 9aS_TallasyEdades.xls", sheet = paste(i,"Port",sep=""), header = TRUE, startCol = 8, startRow = 5, endCol = 9, endRow = 35, useCachedValues=TRUE) names(a)<-c("length","count") a$year<-i a$month<-4-3 a$type<-"pelago_comp_ld" a[is.na(a)] <- 0 mybiglist[[name]] <- a } mybiglist$Pelago1999$month<-11 #3+3 mybiglist$Pelago1999$year<-1998 mybiglist$Pelago2001$month<-11 #3+3 mybiglist$Pelago2001$year<-2000 mybiglist$Pelago2002$month<-11#3+3 mybiglist$Pelago2002$year<-2001 mybiglist$Pelago2003$month<-10 #2+3 mybiglist$Pelago2003$year <-2002 name<-"Pelago2015" #en millones a<-readWorksheetFromFile("/home/marga/GADGET/DATOS/PELAGO/ANE_PELAGO15_Results.xls", sheet = "9a S", header = TRUE, startCol = 13, startRow = 2, endCol = 14, endRow = 33, useCachedValues=TRUE) i<-2015 # b<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/PELAGO/Pelago",i,".xls",sep=""), sheet = "Sheet1", # header = F, startCol = 10, # startRow = 5, endCol = 10, # endRow = 5, useCachedValues=TRUE) names(a)<-c("length","count") a$year<-2015 a$month<-4-3 a$type<-"pelago_comp_ld" #a$total<-b[1,1] a[is.na(a)] <- 0 
a$count<-a$count*1000000 mybiglist[[name]] <- a name<-"Pelago2016" #en miles a<-readWorksheetFromFile("/home/marga/GADGET/DATOS/PELAGO/PELAGO16_ANE_RESULTS_FINAL.xls", sheet = "AbundanceBiomassANE_FINAL", header = TRUE, startCol = 1, startRow = 66, endCol = 2, endRow = 86, useCachedValues=TRUE) i<-2016 # b<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/PELAGO/Pelago",i,".xls",sep=""), sheet = "Sheet1", # header = F, startCol = 10, # startRow = 5, endCol = 10, # endRow = 5, useCachedValues=TRUE) names(a)<-c("length","count") a$year<-2016 a$month<-4-3 a$type<-"pelago_comp_ld" #a$total<-b[1,1] a[is.na(a)] <- 0 a$count<-a$count*1000 mybiglist[[name]] <- a #######Agregar pelago 2017 ld name<-"Pelago2017" #en miles a<-readWorksheetFromFile("/home/marga/GADGET/DATOS/PELAGO/results-ane-pel17_fv.xlsx", sheet = "CAD", header = TRUE, startCol = 1, startRow = 49, endCol = 2, endRow = 73, useCachedValues=TRUE) i<-2017 # b<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/PELAGO/Pelago",i,".xls",sep=""), sheet = "Sheet1", # header = F, startCol = 10, # startRow = 5, endCol = 10, # endRow = 5, useCachedValues=TRUE) names(a)<-c("length","count") a$year<-2017 a$month<-4-3 a$type<-"pelago_comp_ld" #a$total<-b[1,1] a[is.na(a)] <- 0 a$count<-a$count*1000 mybiglist[[name]] <- a #######Agregar pelago 2018 ld name<-"Pelago2018" #en miles a<-readWorksheetFromFile("/home/marga/GADGET/DATOS/PELAGO/results-ANE-PEL18.xlsx", sheet = "algarve+cadiz", header = TRUE, startCol = 1, startRow = 1, endCol = 2, endRow = 30, useCachedValues=TRUE) i<-2018 # b<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/PELAGO/Pelago",i,".xls",sep=""), sheet = "Sheet1", # header = F, startCol = 10, # startRow = 5, endCol = 10, # endRow = 5, useCachedValues=TRUE) names(a)<-c("length","count") a$year<-2018 a$month<-4-3 a$type<-"pelago_comp_ld" #a$total<-b[1,1] a[is.na(a)] <- 0 a$count<-a$count*1000 mybiglist[[name]] <- a #######Agregar pelago 2019 ld name<-"Pelago2019" #en miles 
a<-readWorksheetFromFile("/home/marga/GADGET/DATOS/PELAGO/results-ANE-PEL19.xlsx", sheet = "algarve+cadiz", header = TRUE, startCol = 1, startRow = 1, endCol = 2, endRow = 30, useCachedValues=TRUE) i<-2019 # b<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/PELAGO/Pelago",i,".xls",sep=""), sheet = "Sheet1", # header = F, startCol = 10, # startRow = 5, endCol = 10, # endRow = 5, useCachedValues=TRUE) names(a)<-c("length","count") a$year<-2019 a$month<-4-3 a$type<-"pelago_comp_ld" #a$total<-b[1,1] a[is.na(a)] <- 0 a$count<-a$count*1000 mybiglist[[name]] <- a #######Agregar pelago 2020 ld name<-"Pelago2020" #en miles a<-readWorksheetFromFile("/home/marga/GADGET/DATOS/PELAGO/results-ANE-PEL20.xlsx", sheet = "algarve+cadiz", header = TRUE, startCol = 1, startRow = 1, endCol = 2, endRow = 30, useCachedValues=TRUE) i<-2020 # b<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/PELAGO/Pelago",i,".xls",sep=""), sheet = "Sheet1", # header = F, startCol = 10, # startRow = 5, endCol = 10, # endRow = 5, useCachedValues=TRUE) names(a)<-c("length","count") a$year<-2020 a$month<-4-3 a$type<-"pelago_comp_ld" #a$total<-b[1,1] a[is.na(a)] <- 0 a$count<-a$count*1000 mybiglist[[name]] <- a #######Agregar pelago 2021 ld name<-"Pelago2021" #en miles a<-readWorksheetFromFile("/home/marga/GADGET/DATOS/PELAGO/results-ANE-PEL21.xlsx", sheet = "algarve+cadiz", header = TRUE, startCol = 1, startRow = 1, endCol = 2, endRow = 30, useCachedValues=TRUE) i<-2021 # b<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/PELAGO/Pelago",i,".xls",sep=""), sheet = "Sheet1", # header = F, startCol = 10, # startRow = 5, endCol = 10, # endRow = 5, useCachedValues=TRUE) names(a)<-c("length","count") a$year<-2021 a$month<-4-3 a$type<-"pelago_comp_ld" #a$total<-b[1,1] a[is.na(a)] <- 0 a$count<-a$count*1000 mybiglist[[name]] <- a # name<-"Pelago2000" #en miles # d<-readWorksheetFromFile("/home/marga/GADGET/DATOS/PELAGO/biq2000.xls", sheet = "ALgarve", # header = TRUE, startCol = 1, # startRow = 1, 
endCol = 2, # endRow = 30, useCachedValues=TRUE) # c<-readWorksheetFromFile("/home/marga/GADGET/DATOS/PELAGO/biq2000.xls", sheet = "CADIZ", # header = TRUE, startCol = 1, # startRow = 1, endCol = 2, # endRow =30, useCachedValues=TRUE) # d[is.na(d)] <- 0 # c[is.na(c)] <- 0 # a<-data.frame(cbind(d$L,(d$mil+c$mil)*1000)) # # # b<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/PELAGO/Pelago",i,".xls",sep=""), sheet = "Sheet1", # # header = F, startCol = 10, # # startRow = 5, endCol = 10, # # endRow = 5, useCachedValues=TRUE) # names(a)<-c("length","count") # a$year<-2000 # a$month<-4-3 # a$type<-"pelago_comp_ld" # #a$total<-b[1,1] # a[is.na(a)] <- 0 # # a$count<-a$count*1000 # # mybiglist[[name]] <- a df3<-plyr::ldply(mybiglist,data.frame) #pelago_bio_pre<-cbind(df[7], df[10]*1000, df[8]) #pelago_bio_pre<-unique(na.omit(pelago_bio_pre)) #df3<-df3[-10] #names(df3)<-c(".id", "length", "0", "1", "2", "3", "year", "month", "type") names(df3)<-c(".id", "length", "count", "year", "month", "sampling_type") pelago_comp_ld<-df3[,1:6] pelago_comp_ld$areacell<-"IXa" pelago_comp_ld$species<-'ANE' pelago.ldistnoage<-pelago_comp_ld[,2:8] #aldist_pelago solo 14 15 16 17 18 19 y 20 2013 no porque no tengo la ALK mybiglist <- list() name<-"alPelago2014" d<-readWorksheetFromFile("/home/marga/GADGET/DATOS/PELAGO/PELAGO14_ANE_ALK_WCOAST_sg.xlsx", sheet = "alg", header = TRUE, startCol = 6, startRow = 7, endCol = 9, endRow = 22, useCachedValues=TRUE) c<-readWorksheetFromFile("/home/marga/GADGET/DATOS/PELAGO/PELAGO14_ANE_ALK_WCOAST_sg.xlsx", sheet = "cad", header = TRUE, startCol = 6, startRow = 7, endCol = 9, endRow =22, useCachedValues=TRUE) d[is.na(d)] <- 0 c[is.na(c)] <- 0 a<-data.frame(cbind(c$Row.Labels,0,d$X1+c$X1,c$X2+d$X2,c$X3+d$X3)) names(a)<-c("length", "0","1", "2", "3") a$year<-2014 a$month<-4-3 a$type<-"pelago_comp_ald" a[is.na(a)] <- 0 mybiglist[[name]] <- a name<-"alPelago2015" d<-readWorksheetFromFile("/home/marga/GADGET/DATOS/PELAGO/ANE_PELAGO15_Results_sg.xlsx", 
sheet = "Algarve", header = TRUE, startCol = 17, startRow = 4, endCol = 20, endRow = 31, useCachedValues=TRUE) c<-readWorksheetFromFile("/home/marga/GADGET/DATOS/PELAGO/ANE_PELAGO15_Results_sg.xlsx", sheet = "CADIZ", header = TRUE, startCol = 17, startRow = 4, endCol = 20, endRow =31, useCachedValues=TRUE) d[is.na(d)] <- 0 c[is.na(c)] <- 0 a<-data.frame(cbind(c$Col1,0,d$X1+c$X1,c$X2+d$X2,c$X3+d$X3)) names(a)<-c("length", "0","1", "2", "3") a$year<-2015 a$month<-4-3 a$type<-"pelago_comp_ald" a[is.na(a)] <- 0 mybiglist[[name]] <- a name<-"alPelago2016" a<-readWorksheetFromFile("/home/marga/GADGET/DATOS/PELAGO/PELAGO16_ANE_RESULTS_FINAL_sg.xlsx", sheet = "ALKs_FINAL", header = TRUE, startCol = 8, startRow = 6, endCol = 11, endRow = 28, useCachedValues=TRUE) a<-data.frame(cbind(a$Col1,0,a$X1,a$X2,a$X3)) names(a)<-c("length", "0","1", "2", "3") a$year<-2016 a$month<-4-3 a$type<-"pelago_comp_ald" a[is.na(a)] <- 0 mybiglist[[name]] <- a i<-2017 name<-"alPelago2017" #solo cadiz no hay Algarve a<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/PELAGO/Pelago",i,".xls",sep=""), sheet = "Sheet1", header = TRUE, startCol = 1, startRow = 5, endCol = 4, endRow = 26, useCachedValues=TRUE) a<-data.frame(cbind(a[,1],0,a$X1,a$X2,a$X3)) #a<-data.frame(cbind(a$Col1,0,a$X1,a$X2,a$X3)) names(a)<-c("length", "0","1", "2", "3") a$year<-2017 a$month<-4-3 a$type<-"pelago_comp_ald" a[is.na(a)] <- 0 mybiglist[[name]] <- a i<-2018 name<-"alPelago2018" #solo cadiz no hay Algarve a<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/PELAGO/Pelago",i,".xls",sep=""), sheet = "Sheet1", header = TRUE, startCol = 1, startRow = 5, endCol = 4, endRow = 26, useCachedValues=TRUE) #a<-data.frame(cbind(a$Col1,0,a$X1,a$X2,a$X3)) a<-data.frame(cbind(a[,1],0,a$X1,a$X2,a$X3)) names(a)<-c("length", "0","1", "2", "3") #names(a)<-c("length", "1", "2", "3") a$year<-2018 a$month<-4-3 a$type<-"pelago_comp_ald" a[is.na(a)] <- 0 mybiglist[[name]] <- a i<-2019 name<-"alPelago2019" #solo cadiz no hay Algarve 
a<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/PELAGO/Pelago",i,".xls",sep=""), sheet = "Sheet1", header = TRUE, startCol = 1, startRow = 5, endCol = 4, endRow = 26, useCachedValues=TRUE) a<-data.frame(cbind(a[,1],0,a$X1,a$X2,a$X3)) names(a)<-c("length", "0","1", "2", "3") #names(a)<-c("length", "1", "2", "3") a$year<-2019 a$month<-4-3 a$type<-"pelago_comp_ald" a[is.na(a)] <- 0 mybiglist[[name]] <- a i<-2020 name<-"alPelago2020" #cadiz y Algarve a<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/PELAGO/Pelago",i,".xls",sep=""), sheet = "Sheet1", header = TRUE, startCol = 1, startRow = 5, endCol = 4, endRow = 26, useCachedValues=TRUE) #a<-data.frame(cbind(a$Col1,0,a$X1,a$X2,a$X3)) a<-data.frame(cbind(a[,1],0,a$X1,a$X2,a$X3)) names(a)<-c("length", "0","1", "2", "3") #names(a)<-c("length", "1", "2", "3") a$year<-2020 a$month<-4-3 a$type<-"pelago_comp_ald" a[is.na(a)] <- 0 mybiglist[[name]] <- a i<-2021 name<-"alPelago2021" #cadiz y Algarve a<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/PELAGO/Pelago",i,".xls",sep=""), sheet = "Sheet1", header = TRUE, startCol = 1, startRow = 5, endCol = 4, endRow = 29 , useCachedValues=TRUE) #a<-data.frame(cbind(a$Col1,0,a$X1,a$X2,a$X3)) a<-data.frame(cbind(a[,1],0,a$X1,a$X2,a$X3)) names(a)<-c("length", "0","1", "2", "3") #names(a)<-c("length", "1", "2", "3") a$year<-2021 a$month<-4-3 a$type<-"pelago_comp_ald" a[is.na(a)] <- 0 mybiglist[[name]] <- a # b<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/PELAGO/Pelago",i,".xls",sep=""), sheet = "Sheet1", # header = F, startCol = 10, # startRow = 5, endCol = 10, # endRow = 5, useCachedValues=TRUE) df4<-plyr::ldply(mybiglist,data.frame) #pelago_bio_pre<-cbind(df[7], df[10]*1000, df[8]) #pelago_bio_pre<-unique(na.omit(pelago_bio_pre)) #df3<-df3[-10] names(df4)<-c(".id", "length", "0", "1", "2", "3", "year", "month", "type") pelago_comp_ald<-df4 ######################### #ecocadiz mybiglist <- list() B0<-data.frame(year=c(1,2),total=c(1,2)) fi<-1 for (i in 
c(2004,2006,2007,2009,2010)){ name<-paste("Ecocadiz",i,sep="") a<-readWorksheetFromFile("/home/marga/GADGET/DATOS/Acústica en 9aS_TallasyEdades.xls", sheet = paste(i,"Esp",sep=""), header = TRUE, startCol = 13, startRow = 5, endCol = 17, endRow = 37, useCachedValues=TRUE) bioage0<-readWorksheetFromFile("/home/marga/GADGET/DATOS/Acústica en 9aS_TallasyEdades.xls", sheet = paste(i,"Esp",sep=""), header = F, startCol = 5, startRow = 92, endCol = 5, endRow = 92, useCachedValues=TRUE) B0[fi,]<-c(i,bioage0[1,1]) names(a)<-c("length","0", "1", "2", "3") a$year<-i a$month<-7 a$type<-"ecocadiz_comp" a[is.na(a)] <- 0 mybiglist[[name]] <- a fi<-fi+1 } for (i in c(2013,2014)){ name<-paste("Ecocadiz",i,sep="") a<-readWorksheetFromFile("/home/marga/GADGET/DATOS/Acústica en 9aS_TallasyEdades.xls", sheet = paste(i,"Esp",sep=""), header = TRUE, startCol = 11, startRow = 5, endCol = 15, endRow = 35, useCachedValues=TRUE) bioage0<-readWorksheetFromFile("/home/marga/GADGET/DATOS/Acústica en 9aS_TallasyEdades.xls", sheet = paste(i,"Esp",sep=""), header = F, startCol = 5, startRow = 92, endCol = 5, endRow = 92, useCachedValues=TRUE) B0[fi,]<-c(i,bioage0[1,1]) names(a)<-c("length","0", "1", "2", "3") a$year<-i a$month<-7 a$type<-"ecocadiz_comp" a[is.na(a)] <- 0 mybiglist[[name]] <- a fi<-fi+1 } mybiglist$Ecocadiz2004$month<-6 mybiglist$Ecocadiz2006$month<-6 mybiglist$Ecocadiz2013$month<-8 B<-data.frame(year=c(1,2),total=c(1,2),month=c(1,2)) #ojo a tiene valores ponderados pero que no se van a usar, sin embargo B nos interesa y bioage0 es para restar la biomasa de la edad 0 de la biomasa total, me falta hacerlo en 2010,2013 y 2014 for (i in c(2015:2020)){ name<-paste("Ecocadiz",i,sep="") a<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/ECOCADIZ/ALK_ECOCADIZ_",i,".xls",sep=""), sheet = "ALK_GENERAL_BOQUERON", header = TRUE, startCol = 11, startRow = 7, endCol = 15, endRow = 38, useCachedValues=TRUE) 
b<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/ECOCADIZ/ALK_ECOCADIZ_",i,".xls",sep=""), sheet = "ALK_GENERAL_BOQUERON", header = F, startCol = 9, startRow = 3, endCol = 9, endRow = 3, useCachedValues=T) bioage0<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/ECOCADIZ/ALK_ECOCADIZ_",i,".xls",sep=""), sheet = "ALK_GENERAL_BOQUERON", header = F, startCol = 5, startRow = 103, endCol = 5, endRow = 103, useCachedValues=T) names(a)<-c("length","0", "1", "2", "3") a$year<-i B[(i-2014),]$year<-i a$month<-7 B[(i-2014),]$total<-b[1,1]-bioage0[1,1] B[(i-2014),]$month<-7 a$type<-"ecocadiz_comp" a[is.na(a)] <- 0 mybiglist[[name]] <- a } names(B)<-c("year","count","month") B$areacell<-"IXa" B$species<-'ANE' B$sampling_type<-"ecocadiz_bio" B$vessel<-'1.RSH' df<-plyr::ldply(mybiglist,data.frame) names(df)<-c(".id", "length", "0", "1", "2", "3", "year", "month", "type") ecocadiz_comp<-df pelago_def<-melt(pelago_comp, id=c(".id","length","year", "month", "type" )) names(pelago_def)<-c(".id","length","year", "month", "sampling_type","age","count") ecocadiz_def<-melt(ecocadiz_comp, id=c(".id","length","year", "month", "type" )) names(ecocadiz_def)<-c(".id","length","year", "month", "sampling_type","age","count") pelago_def$areacell<-"IXa" pelago_def$species<-'ANE' pelago_def$count<-signif(pelago_def$count,2) pelago_def$count<-pelago_def$count*1000 #para que quede en individuos pelago.ldist<-pelago_def[,2:9] ecocadiz_def$areacell<-"IXa" ecocadiz_def$species<-'ANE' ecocadiz_def$count<-signif(ecocadiz_def$count,2) ecocadiz_def$count<-ecocadiz_def$count*1000 #para que quede en individuos ecocadiz.ldist<-ecocadiz_def[,2:9] ############################################################### #alk ecocadiz mybiglist <- list() for (i in c(2004,2006,2007,2009,2010,2013,2014,2015)){ name<-paste("Ecocadiz",i,sep="") a<-readWorksheetFromFile("/home/marga/GADGET/DATOS/Acústica en 9aS_TallasyEdades.xls", sheet = paste(i,"Esp",sep=""), header = TRUE, startCol = 1, startRow = 5, endCol = 
5, endRow = 37, useCachedValues=TRUE) names(a)<-c("length","0", "1", "2", "3") a$year<-i a$month<-7-3 a$type<-"ecocadiz_comp_ald" a[is.na(a)] <- 0 mybiglist[[name]] <- a } mybiglist$Ecocadiz2004$month<-6-3 mybiglist$Ecocadiz2006$month<-6-3 mybiglist$Ecocadiz2013$month<-8-3 #B<-data.frame(year=c(1,2),total=c(1,2),month=c(1,2)) i<-2016 name<-paste("Ecocadiz",i,sep="") a<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/ECOCADIZ/ALK_ECOCADIZ_",i,".xls",sep=""), sheet = "ALK_GENERAL_BOQUERON", header = TRUE, startCol = 1, startRow = 6, endCol = 5, endRow = 38, useCachedValues=TRUE) names(a)<-c("length","0", "1", "2", "3") a$year<-i # B[(i-2014),]$year<-i a$month<-7-3 # B[(i-2014),]$total<-b[1,1] #B[(i-2014),]$month<-7 a$type<-"ecocadiz_comp_ald" a[is.na(a)] <- 0 mybiglist[[name]] <- a i<-2017 name<-paste("Ecocadiz",i,sep="") a<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/ECOCADIZ/ALK_ECOCADIZ_",i,".xls",sep=""), sheet = "ALK_GENERAL_BOQUERON", header = TRUE, startCol = 1, startRow = 6, endCol = 5, endRow = 38, useCachedValues=TRUE) names(a)<-c("length","0", "1", "2", "3") a$year<-i # B[(i-2014),]$year<-i a$month<-7-3 # B[(i-2014),]$total<-b[1,1] #B[(i-2014),]$month<-7 a$type<-"ecocadiz_comp_ald" a[is.na(a)] <- 0 mybiglist[[name]] <- a ###################2018 i<-2018 name<-paste("Ecocadiz",i,sep="") a<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/ECOCADIZ/ALK_ECOCADIZ_",i,".xls",sep=""), sheet = "ALK_GENERAL_BOQUERON", header = TRUE, startCol = 1, startRow = 6, endCol = 5, endRow = 38, useCachedValues=TRUE) names(a)<-c("length","0", "1", "2", "3") a$year<-i # B[(i-2014),]$year<-i a$month<-7-3 # B[(i-2014),]$total<-b[1,1] #B[(i-2014),]$month<-7 a$type<-"ecocadiz_comp_ald" a[is.na(a)] <- 0 mybiglist[[name]] <- a #############3 i<-2019 name<-paste("Ecocadiz",i,sep="") a<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/ECOCADIZ/ALK_ECOCADIZ_",i,".xls",sep=""), sheet = "ALK_GENERAL_BOQUERON", header = TRUE, startCol = 1, startRow = 6, endCol = 
5, endRow = 38, useCachedValues=TRUE) names(a)<-c("length","0", "1", "2", "3") a$year<-i # B[(i-2014),]$year<-i a$month<-7-3 # B[(i-2014),]$total<-b[1,1] #B[(i-2014),]$month<-7 a$type<-"ecocadiz_comp_ald" a[is.na(a)] <- 0 mybiglist[[name]] <- a #############3 i<-2020 name<-paste("Ecocadiz",i,sep="") a<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/ECOCADIZ/ALK_ECOCADIZ_",i,".xls",sep=""), sheet = "ALK_GENERAL_BOQUERON", header = TRUE, startCol = 1, startRow = 6, endCol = 5, endRow = 38, useCachedValues=TRUE) names(a)<-c("length","0", "1", "2", "3") a$year<-i # B[(i-2014),]$year<-i a$month<-7-3 # B[(i-2014),]$total<-b[1,1] #B[(i-2014),]$month<-7 a$type<-"ecocadiz_comp_ald" a[is.na(a)] <- 0 mybiglist[[name]] <- a df<-plyr::ldply(mybiglist,data.frame) names(df)<-c(".id", "length", "0", "1", "2", "3", "year", "month", "type") ecocadiz_comp_ald<-df[,1:9] #names(B)<-c("year","count","month") #B$areacell<-"IXa" #B$species<-'ANE' #B$sampling_type<-"ecocadiz_bio" #B$vessel<-'1.RSH' pelago_def_ald<-melt(pelago_comp_ald, id=c(".id","length","year", "month", "type" )) names(pelago_def_ald)<-c(".id","length","year", "month", "sampling_type","age","count") ecocadiz_def_ald<-melt(ecocadiz_comp_ald, id=c(".id","length","year", "month", "type" )) names(ecocadiz_def_ald)<-c(".id","length","year", "month", "sampling_type","age","count") pelago_def_ald$areacell<-"IXa" pelago_def_ald$species<-'ANE' # pelago_def_ald$count<-signif(pelago_def_ald$count,2) # pelago_def_ald$count<-pelago_def$count*1000 #para que quede en individuos pelago.aldist<-pelago_def_ald[,2:9] ecocadiz_def_ald$areacell<-"IXa" ecocadiz_def_ald$species<-'ANE' #ecocadiz_def$count<-signif(ecocadiz_def$count,2) #ecocadiz_def$count<-ecocadiz_def$count*1000 #para que quede en individuos ecocadiz.aldist<-ecocadiz_def_ald[,2:9] ##################3 #length distribution ecocadiz mybiglistII <- list() for (i in c(2004,2006,2007,2009,2010)){ name<-paste("Ecocadiz",i,sep="") 
a<-readWorksheetFromFile("/home/marga/GADGET/DATOS/Acústica en 9aS_TallasyEdades.xls", sheet = paste(i,"Esp",sep=""), header = TRUE, startCol = 10, startRow = 5, endCol = 11, endRow = 37, useCachedValues=TRUE) names(a)<-c("length","count") a$year<-i a$month<-7-3 a$type<-"ecocadiz_comp_ld" a[is.na(a)] <- 0 mybiglistII[[name]] <- a } for (i in c(2013:2015)){ name<-paste("Ecocadiz",i,sep="") a<-readWorksheetFromFile("/home/marga/GADGET/DATOS/Acústica en 9aS_TallasyEdades.xls", sheet = paste(i,"Esp",sep=""), header = TRUE, startCol = 8, startRow = 5, endCol = 9, endRow = 37, useCachedValues=TRUE) names(a)<-c("length","count") a$year<-i a$month<-7-3 a$type<-"ecocadiz_comp_ld" a[is.na(a)] <- 0 mybiglistII[[name]] <- a } mybiglistII$Ecocadiz2004$month<-6-3 mybiglistII$Ecocadiz2006$month<-6-3 mybiglistII$Ecocadiz2013$month<-8-3 i<-2016 name<-paste("Ecocadiz",i,sep="") a<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/ECOCADIZ/ALK_ECOCADIZ_",i,".xls",sep=""), sheet = "ALK_GENERAL_BOQUERON", header = TRUE, startCol = 8, startRow = 6, endCol = 9, endRow = 38, useCachedValues=TRUE) names(a)<-c("length","count") a$year<-i a$month<-7-3 a$type<-"ecocadiz_comp_ld" a[is.na(a)] <- 0 mybiglistII[[name]] <- a i<-2017 name<-paste("Ecocadiz",i,sep="") a<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/ECOCADIZ/ALK_ECOCADIZ_",i,".xls",sep=""), sheet = "ALK_GENERAL_BOQUERON", header = TRUE, startCol = 8, startRow = 6, endCol = 9, endRow = 38, useCachedValues=TRUE) names(a)<-c("length","count") a$year<-i a$month<-7-3 a$type<-"ecocadiz_comp_ld" a[is.na(a)] <- 0 mybiglistII[[name]] <- a #############2018 i<-2018 name<-paste("Ecocadiz",i,sep="") a<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/ECOCADIZ/ALK_ECOCADIZ_",i,".xls",sep=""), sheet = "ALK_GENERAL_BOQUERON", header = TRUE, startCol = 8, startRow = 6, endCol = 9, endRow = 38, useCachedValues=TRUE) names(a)<-c("length","count") a$year<-i a$month<-7-3 a$type<-"ecocadiz_comp_ld" a[is.na(a)] <- 0 mybiglistII[[name]] <- 
a #############2019 i<-2019 name<-paste("Ecocadiz",i,sep="") a<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/ECOCADIZ/ALK_ECOCADIZ_",i,".xls",sep=""), sheet = "ALK_GENERAL_BOQUERON", header = TRUE, startCol = 18, startRow = 6, endCol = 19, endRow = 38, useCachedValues=TRUE) names(a)<-c("length","count") a$year<-i a$month<-7-3 a$type<-"ecocadiz_comp_ld" a[is.na(a)] <- 0 mybiglistII[[name]] <- a #############2020 i<-2020 name<-paste("Ecocadiz",i,sep="") a<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/ECOCADIZ/ALK_ECOCADIZ_",i,".xls",sep=""), sheet = "ALK_GENERAL_BOQUERON", header = TRUE, startCol = 18, startRow = 6, endCol = 19, endRow = 38, useCachedValues=TRUE) names(a)<-c("length","count") a$year<-i a$month<-7-3 a$type<-"ecocadiz_comp_ld" a[is.na(a)] <- 0 mybiglistII[[name]] <- a dfIII<-plyr::ldply(mybiglistII,data.frame) names(dfIII)<-c(".id", "length", "count", "year", "month", "sampling_type") ecocadiz_comp_ld<-dfIII[,1:6] #arsa_def<-melt(arsa_comp, id=c(".id","length","year", "month", "type" )) #names(arsa_def)<-c(".id","length","year", "month", "sampling_type","age","count") ecocadiz_comp_ld$areacell<-"IXa" ecocadiz_comp_ld$species<-'ANE' ecocadiz.ldistnoage<-ecocadiz_comp_ld[,2:8] ####################################################################### #length distribution of the surveys library(ggplot2) hi<-aggregate(count~length+year+sampling_type,ecocadiz.ldist,sum) ho<-aggregate(count~length+year+sampling_type,pelago.ldist,sum) hu<-rbind(hi,ho) #ecocadiz_ld_plot<-qplot(length, count, data = hu, color = year) ecocadiz_ld_plot<-ggplot(hu,aes(length, count)) ecocadiz_ld_plot+facet_grid(sampling_type~year)+geom_line() library(Rgadget) hu %>% ggplot(aes(length,count)) + geom_line()+ facet_wrap(~sampling_type+year,scale='free_y') hi<-aggregate(count~age+year+sampling_type,ecocadiz.ldist,sum) ho<-aggregate(count~age+year+sampling_type,pelago.ldist,sum) hu<-rbind(hi,ho) ecocadiz_ad_plot<-ggplot(hu, aes(age, count)) 
## ---- Acoustic survey biomass indices + commercial catch-at-length input ----
## NOTE(review): this chunk reads absolute paths under /home/marga/GADGET/DATOS
## and mutates objects created earlier in the script (hu, pelago_bio_pre, B0, B,
## sar, mdb_compi, temperature) -- it can only run inside that session.

# Quick exploratory plots of the age-composition data (ages 0-2 only).
ecocadiz_ad_plot+facet_grid(sampling_type~year)+geom_point()
library(Rgadget)
hu<-subset(hu,age==0|age==1|age==2)
hu %>% ggplot(aes(age,count)) + geom_point()+ facet_wrap(~sampling_type+year,scale='free_y')
hu %>% ggplot(aes(year,count)) + geom_point() + facet_wrap(~age+sampling_type,scale='free',ncol=2)
####################################3
# PELAGO (Portuguese acoustic survey) biomass series, one value per year 1999-2014.
pelago_bio<-readWorksheetFromFile("/home/marga/GADGET/DATOS/Acústica en 9aS_TallasyEdades.xls",
                                  sheet = "BiomasaPort", header = F, startCol = 3,
                                  startRow = 35, endCol = 18, endRow = 35, useCachedValues=TRUE)
pelago_bio<-t(pelago_bio)
# NOTE: it must go in kg -- the tonnes have to be multiplied by 1000.
# Third column is the survey month (3 for 1999-2002, 2 in 2003, 4 from 2004 on).
pelago_biom<-data.frame(cbind(1999:2014,pelago_bio*1000,c(rep(3,4),2,rep(4,11))))
names(pelago_biom)<-c("year","count","month")
names(pelago_bio_pre)<-c("year","count","month")
# Keep the spreadsheet series up to 2009 and append the newer series
# (pelago_bio_pre, built earlier in the script) from 2010 on.
pelago_biom<-rbind(pelago_biom%>%filter(year<2010),pelago_bio_pre)
# #pelbiom2018<-readWorksheetFromFile("/home/marga/GADGET/DATOS/PELAGO/results-ANE-PEL18.xlsx", sheet = "algarve+cadiz",
#                                     header = F, startCol = 3,
#                                     startRow = 44, endCol = 3,
#                                     endRow = 44, useCachedValues=TRUE)
# pelago_biom<-add_row(pelago_biom,year=2018,count=pelbiom2018[1,1]*1000,month=4)
# pelbiom2019<-readWorksheetFromFile("/home/marga/GADGET/DATOS/PELAGO/results-ANE-PEL19.xlsx", sheet = "algarve+cadiz",
#                                    header = F, startCol = 3,
#                                    startRow = 44, endCol = 3,
#                                    endRow = 44, useCachedValues=TRUE)
# pelago_biom<-add_row(pelago_biom,year=2019,count=pelbiom2019[1,1]*1000,month=4)
names(pelago_biom)<-c("year","count","month")
# MFDB-style attributes for this likelihood component.
pelago_biom$month<-pelago_biom$month-3
pelago_biom$areacell<-"IXa"
pelago_biom$species<-'ANE'
pelago_biom$sampling_type<-"pelago_bio"
pelago_biom$vessel<-'1.RSH'   # presumably the research-vessel code -- confirm against MFDB vocabulary
pelago_biom<-na.omit(pelago_biom)
# Shift the calendar 3 months back: a non-positive month is mapped to
# month 11 of the previous year (vectorised branch via 0/1 masks).
pelago_biom$year<-pelago_biom$year*(pelago_biom$month>0)+(pelago_biom$year-1)*(pelago_biom$month<=0)
pelago_biom$month<-pelago_biom$month*(pelago_biom$month>0)+11*(pelago_biom$month<=0)
# ECOCADIZ (Spanish acoustic survey) biomass series 2004-2014, same workbook.
ecocadiz_bio<-readWorksheetFromFile("/home/marga/GADGET/DATOS/Acústica en 9aS_TallasyEdades.xls",
                                    sheet = "BiomasaEsp", header = F, startCol = 3,
                                    startRow = 35, endCol = 13, endRow = 35, useCachedValues=TRUE)
ecocadiz_bio<-t(ecocadiz_bio)
ecocadiz_biom<-data.frame(cbind(2004:2014,ecocadiz_bio*1000,c(rep(6,3),rep(7,6),8,7)))
names(ecocadiz_biom)<-c("year","count","month")
# Subtract the age-0 biomass (B0, built earlier) so the index covers ages 1+.
ecoca_age0<-left_join(ecocadiz_biom,B0)%>%mutate(count=count-total)%>%select(year,count,month)
ecocadiz_biom<-ecoca_age0
ecocadiz_biom$areacell<-"IXa"
ecocadiz_biom$species<-'ANE'
ecocadiz_biom$sampling_type<-"ecocadiz_bio"
ecocadiz_biom$vessel<-"1.RSH"
ecocadiz_biom<-rbind(ecocadiz_biom,B)   # B: recent-years block built earlier -- TODO confirm columns match
ecocadiz_biom<-na.omit(ecocadiz_biom)
ecocadiz_biom$month<-ecocadiz_biom$month-3
# Split the SAR survey table by the biomass/numbers indicator column.
g<-factor(sar$numorbio)
#pelagosep<-split(pelago,g)
#ecocadizsep<-split(ecocadiz,g)
sarsep<-split(sar,g)
# ecocadiz0813<-read.xls("/home/marga/GADGET/DATOS/ecocadiz_tableincolumn.xls")
# order_ecocadiz<-matrix(as.numeric(as.character(ecocadiz0813$table_in_column)), nrow=25, ncol=9,byrow=T)
# order_ecocadiz<-cbind(order_ecocadiz,rowSums(order_ecocadiz[,2:6]))
# def_ecocadiz0813<-order_ecocadiz[,c(1,10)]
# order_ecocadiz<-matrix(ecocadiz0813$table_in_column, nrow=25, ncol=9,byrow=T)
# def_ecocadiz0813<-order_ecocadiz[,c(1,8)]
# def_ecocadiz0813<-data.frame(def_ecocadiz0813)
# names(def_ecocadiz0813)<-c("length","count")
# def_ecocadiz0813$year<-2013
# def_ecocadiz0813$month<-8#5#Depending on how time is defined 8 is the real
# def_ecocadiz0813$length<-seq(6,18,0.5)
# mfdb_import_area(mdb, data.frame(
#   id = 1:3,
#   name = c("Tarifa","Algeciras","Morocco"),
#   ## area size is
#   size = 1) )
#area<-data.frame(id = c(1), name=c("IXaS_cadiz"), size =c(13000))
# mfdb_import_temperature(mdb, data.frame(
#   year = rep(c(1998, 1999), each = 12),
#   month = c(1:12, 1:12),
#   areacell = c(rep('45G01', times = 24)),
#   temperature = c(0.5, 1.2, 2.4, 3.5, 4.6, 5.7, 6.1, 7.4, 8.9, 10, 11, 12, 25:36)))
# Register the single area cell (ICES IXa) and the temperature series in MFDB.
mfdb_import_area(mdb_compi, data.frame(id = c(1), name=c("IXa"), size =c(13000)))
#division=c('alg','cad'),
#mfdb_import_division(mdb_comp, list(divA = c("IXa", "IXa_a"), divB = c('IXa')))
#temperature<-temperature[,-1]
mfdb_import_temperature(mdb_compi, temperature)
# Commercial catch-at-length 1988-2013: one workbook per year, Hoja1 = length
# distribution by quarter, Hoja2 kept aside in FOOG. Year 1988 seeds the
# accumulator so later years can copy its column names.
FOO<-list()
FOOG<-list()
i<-1988
filed<-paste("/home/marga/GADGET/DATOS/Juan_data/Datos_",i,"_anchovy.xlsx",sep = "")
FOO[[(i-1987)]]<-readWorksheet(loadWorkbook(filed), sheet="Hoja1")
FOOG[[(i-1987)]]<-readWorksheet(loadWorkbook(filed), sheet="Hoja2")
FOO[[(i-1987)]]<-cbind(year=i, FOO[[(i-1987)]])
names( FOO[[(i-1987)]])<-c(names(FOO[[1]]))
catch_at_length<-FOO[[1]]
for(i in 1989:2013) {
  filed<-paste("/home/marga/GADGET/DATOS/Juan_data/Datos_",i,"_anchovy.xlsx",sep = "")
  FOO[[(i-1987)]]<-readWorksheet(loadWorkbook(filed), sheet="Hoja1")
  FOOG[[(i-1987)]]<-readWorksheet(loadWorkbook(filed), sheet="Hoja2")
  FOO[[(i-1987)]]<-cbind(year=i, FOO[[(i-1987)]])
  names( FOO[[(i-1987)]])<-c(names(FOO[[1]]))
  catch_at_length<-rbind(catch_at_length,FOO[[i-1987]])
}
catch_at_length[is.na(catch_at_length)]<-0
#names(catch_at_length)<-c("year","length","Col2","count","count","count","count","Total")
# First organise year, age, number.
# 2014-2016 catch length distributions come from a different workbook layout.
catchldist14<-readWorksheet(loadWorkbook("/home/marga/GADGET/DATOS/lengthdist1415.xls"), sheet="Sheet2")
catchldist14<-matrix(as.numeric(as.character(catchldist14$X2014)), ncol=6,byrow=T)
catchldist15<-readWorksheet(loadWorkbook("/home/marga/GADGET/DATOS/lengthdist1415.xls"), sheet="Sheet1")
catchldist16<-readWorksheet(loadWorkbook("/home/marga/GADGET/DATOS/lengthdist1415.xls"), sheet="Sheet3")
catchldist15<-matrix(as.numeric(as.character(catchldist15$X2015)), ncol=6,byrow=T)
catchldist16<-matrix(as.numeric(as.character(catchldist16$X2016)), ncol=6,byrow=T)
catchldist15<-cbind(catchldist15,year=2015)
catchldist14<-cbind(catchldist14,year=2014)
catchldist16<-cbind(catchldist16,year=2016)
catchldist15<-data.frame(catchldist15)
catchldist14<-data.frame(catchldist14)
catchldist16<-data.frame(catchldist16)
names(catch_at_length)<-c("year","length","Col2","Q1","Q2","Q3","Q4","Total")
## ---- Harmonise 2014-2016 length distributions and read recent landings ----
# Reorder the 2014-2016 matrices to the same column layout as catch_at_length
# (year, length, Col2 placeholder, Q1-Q4, Total) before binding them on.
names(catchldist14)<-c("length","Q1","Q2","Q3","Q4","Total","year")
names(catchldist15)<-c("length","Q1","Q2","Q3","Q4","Total","year")
names(catchldist16)<-c("length","Q1","Q2","Q3","Q4","Total","year")
catchldist14<-cbind(catchldist14[,7],catchldist14[,1],Col2=NA,catchldist14[,2:6])
catchldist15<-cbind(catchldist15[,7],catchldist15[,1],Col2=NA,catchldist15[,2:6])
catchldist16<-cbind(catchldist16[,7],catchldist16[,1],Col2=NA,catchldist16[,2:6])
names(catchldist14)<-c("year","length","Col2","Q1","Q2","Q3","Q4","Total")
names(catchldist15)<-c("year","length","Col2","Q1","Q2","Q3","Q4","Total")
names(catchldist16)<-c("year","length","Col2","Q1","Q2","Q3","Q4","Total")
catch_at_length<-rbind(catch_at_length, catchldist14, catchldist15,catchldist16)
# Total catches per year and quarter: sums the length distributions by year.
fleet.seinepre<-plyr::ddply(catch_at_length,~year,summarise,Total_catches1=sum(Q1),Total_catches2=sum(Q2), Total_catches3=sum(Q3), Total_catches4=sum(Q4)) # sum over length
# Sums lengths by year.
#fleet.seinepre<-ddply(catch_at_length,~year,summarise,Total_catches1=sum(X1st.Q),Total_catches2=sum(X2nd.Q), Total_catches3=sum(X3rd.Q), Total_catches4=sum(X4th.Q))
#
# Provisional landings for the assessment year. The 2018-2020 reads are kept
# (commented) as a template; only the 2021 reads below are live.
# #catches20181cakg<-readWorksheetFromFile("/home/marga/GADGET/DATOS/Taledas_allfleets_1988_2016/Ane279a_9aS_Prov Landings Jan-June 2018.xlsx", sheet="Hoja2",
#                                          header = F, startCol = 7,
#                                          startRow = 19, endCol = 7,
#                                          endRow = 19, useCachedValues=TRUE)
#
#catches20182cakgpre<-readWorksheetFromFile("/home/marga/GADGET/DATOS/Taledas_allfleets_1988_2016/Ane279a_9aS_Prov Landings Jan-June 2018.xlsx", sheet="Hoja2",
#                                           header = F, startCol = 7,
#                                           startRow = 22, endCol = 7,
#                                           endRow = 22, useCachedValues=TRUE)
# catches20181alton<-readWorksheetFromFile("/home/marga/GADGET/DATOS/Algarve/PNAB_biqueirao_2018.xlsx", sheet="Sheet1",
#                                          header = F, startCol = 5,
#                                          startRow = 7, endCol = 5,
#                                          endRow = 7, useCachedValues=TRUE)
#
# catches20182altonpre<-readWorksheetFromFile("/home/marga/GADGET/DATOS/Algarve/PNAB_biqueirao_vendas-diarias_2018.xls", sheet="Sheet1",
#                                             header = F, startCol = 13,
#                                             startRow = 14, endCol = 13,
#                                             endRow = 14, useCachedValues=TRUE)
#bio2018*Num2017/bio2017
# Algarve in kg plus 1209 as of 23 June; Cadiz.
#ERROR
#Numb20181=(catches20181cakg[1,1]+catches20181alton[1,1]*1000)*(CatchesAllFleetsPTSP$ALL[CatchesAllFleetsPTSP$YEAR==2017 & CatchesAllFleetsPTSP$SEASON==1])/((BIOMTOT$TONPT+BIOMTOT$TONSP)[BIOMTOT$YEAR==2017 & BIOMTOT$SEASON==1])
#Numb20182=(catches20182cakgpre[1,1])*(CatchesAllFleetsPTSP$NUMBERSP[CatchesAllFleetsPTSP$YEAR==2017 & CatchesAllFleetsPTSP$SEASON==2])/((BIOMTOT$TONSP)[BIOMTOT$YEAR==2017 & BIOMTOT$SEASON==2])
# catches20191cakg<-readWorksheetFromFile("/home/marga/GADGET/DATOS/Taledas_allfleets_1988_2016/Ane279a_9aS_Prov Landings Jan-June 2019.xlsx", sheet="Hoja2",
#                                         header = F, startCol = 7,
#                                         startRow = 19, endCol = 7,
#                                         endRow = 19, useCachedValues=TRUE)
# catches20192cakgpre<-readWorksheetFromFile("/home/marga/GADGET/DATOS/Taledas_allfleets_1988_2016/Ane279a_9aS_Prov Landings Jan-June 2019.xlsx", sheet="Hoja2",
#                                            header = F, startCol = 7,
#                                            startRow = 22, endCol = 7,
#                                            endRow = 22, useCachedValues=TRUE)
# catches20191alton<-0 #Catches algarve
# catches20192altonpre<-0
# #bio2018*Num2017/bio2017
# # Algarve in kg plus 1209 as of 23 June; Cadiz.
# #ERROR
# # NOTE: as there is no Algarve figure, better to use Cadiz 2018 numbers and Cadiz 2018 biomass.
# Numb20191=(catches20191cakg[1,1])*(CatchesAllFleetsPTSP$NUMBERSP[CatchesAllFleetsPTSP$YEAR==2018 & CatchesAllFleetsPTSP$SEASON==1])/((BIOMTOT$TONSP)[BIOMTOT$YEAR==2018 & BIOMTOT$SEASON==1]*1000)
#
# Numb20192=(catches20192cakgpre[1,1])*(CatchesAllFleetsPTSP$NUMBERSP[CatchesAllFleetsPTSP$YEAR==2018 & CatchesAllFleetsPTSP$SEASON==2])/((BIOMTOT$TONSP)[BIOMTOT$YEAR==2018 & BIOMTOT$SEASON==2]*1000)
# catches20201cakg<-readWorksheetFromFile("/home/marga/GADGET/DATOS/Taledas_allfleets_1988_2016/Ane279a_9aS_Prov Landings Jan-June 2020.xlsx", sheet="landings2020",
#                                         header = F, startCol = 10,
#                                         startRow = 18, endCol = 10,
#                                         endRow = 18, useCachedValues=TRUE)
# catches20202cakgpre<-readWorksheetFromFile("/home/marga/GADGET/DATOS/Taledas_allfleets_1988_2016/Ane279a_9aS_Prov Landings Jan-June 2020.xlsx", sheet="landings2020",
#                                            header = F, startCol = 10,
#                                            startRow = 19, endCol = 10,
#                                            endRow = 19, useCachedValues=TRUE)
# catches20201alton<-1.6#readWorksheetFromFile("/home/marga/GADGET/DATOS/Algarve/PNAB_biqueirao_2018.xlsx", sheet="Sheet1",
#                       #header = F, startCol = 5,
#                       # startRow = 7, endCol = 5,
#                       #endRow = 7, useCachedValues=TRUE)
#
# catches20202altonpre<-0.2#readWorksheetFromFile("/home/marga/GADGET/DATOS/Algarve/PNAB_biqueirao_vendas-diarias_2018.xls", sheet="Sheet1",
#                          # header = F, startCol = 13,
#                          # startRow = 14, endCol = 13,
#                          # endRow = 14, useCachedValues=TRUE)
# Live 2021 provisional landings: Cadiz half-year totals (kg) from two cells.
catches20211cakg<-readWorksheetFromFile("/home/marga/GADGET/DATOS/Taledas_allfleets_1988_2016/Ane279a_9aS_Prov Landings Jan-June 2021.xls", sheet="Sheet1",
                                        header = F, startCol = 7,
                                        startRow = 20, endCol = 7,
                                        endRow = 20, useCachedValues=TRUE)
catches20212cakgpre<-readWorksheetFromFile("/home/marga/GADGET/DATOS/Taledas_allfleets_1988_2016/Ane279a_9aS_Prov Landings Jan-June 2021.xls", sheet="Sheet1",
                                           header = F, startCol = 8,
                                           startRow = 20, endCol = 8,
                                           endRow = 20, useCachedValues=TRUE)
# Double-check: it was right, but I got mixed up because this one is for Q2.
# Raise the second-semester figure by 37% of the annual total (factor derived
# from the historical catch series, "historico-capturas").
catches20212cakgpre<-catches20212cakgpre+0.37*(catches20211cakg+catches20212cakgpre)# taken from historico-capturas
# Algarve landings (tonnes), hard-coded from the PNAB files.
catches20211alton<-1.669 # see ANE_biqueirão_01012021-30042021 in DATOS/Algarve
#readWorksheetFromFile("/home/marga/GADGET/DATOS/Algarve/PNAB_biqueirao_2018.xlsx", sheet="Sheet1",
#header = F, startCol = 5,
# startRow = 7, endCol = 5,
#endRow = 7, useCachedValues=TRUE)
catches20212altonpre<-0.059#readWorksheetFromFile("/home/marga/GADGET/DATOS/Algarve/PNAB_biqueirao_vendas-diarias_2018.xls", sheet="Sheet1",
#                           # header = F, startCol = 13,
#                           # startRow = 14, endCol = 13,
#                           # endRow = 14, useCachedValues=TRUE)
#bio2018*Num2017/bio2017
# Algarve in kg plus 1209 as of 23 June; Cadiz.
#ERROR
## ---- Raise 2021 catches to numbers, extend BIOMTOT, read discards ----
#Numb20181=(catches20181cakg[1,1]+catches20181alton[1,1]*1000)*(CatchesAllFleetsPTSP$ALL[CatchesAllFleetsPTSP$YEAR==2017 & CatchesAllFleetsPTSP$SEASON==1])/((BIOMTOT$TONPT+BIOMTOT$TONSP)[BIOMTOT$YEAR==2017 & BIOMTOT$SEASON==1])
# Convert 2021 landed weight (kg: Cadiz cell + Algarve tonnes*1000) to numbers
# using the previous year's numbers-per-kg ratio (2020 numbers / 2020 weight).
Numb20211=(catches20211cakg[1,1]+catches20211alton*1000)*(CatchesAllFleetsPTSP$ALL[CatchesAllFleetsPTSP$YEAR==2020 & CatchesAllFleetsPTSP$SEASON==1])/((BIOMTOT$TONPT+BIOMTOT$TONSP)[BIOMTOT$YEAR==2020 & BIOMTOT$SEASON==1]*1000)
#Numb20182=(catches20182cakgpre[1,1])*(CatchesAllFleetsPTSP$NUMBERSP[CatchesAllFleetsPTSP$YEAR==2017 & CatchesAllFleetsPTSP$SEASON==2])/((BIOMTOT$TONSP)[BIOMTOT$YEAR==2017 & BIOMTOT$SEASON==2])
Numb20212=(catches20212cakgpre[1,1]+catches20212altonpre*1000)*(CatchesAllFleetsPTSP$ALL[CatchesAllFleetsPTSP$YEAR==2020 & CatchesAllFleetsPTSP$SEASON==2])/((BIOMTOT$TONPT+BIOMTOT$TONSP)[BIOMTOT$YEAR==2020 & BIOMTOT$SEASON==2]*1000)
# Total landed weight (PT + SP) per year/season; append the 2021 provisional rows.
BIOMTOT$ALL<-BIOMTOT$TONPT+BIOMTOT$TONSP
BIOMTOT<-add_row(BIOMTOT, YEAR=2021, SEASON=1:2, TONPT=c(catches20211alton,catches20212altonpre),TONSP=c(catches20211cakg[1,1]*0.001,catches20212cakgpre[1,1]*0.001),ALL=c(catches20211alton+catches20211cakg[1,1]*0.001,catches20212altonpre+catches20212cakgpre[1,1]*0.001))
# Taken from Ane279a_9aS_Prov Landings Jan-June 2019.xls because the Portuguese catches are zero.
# Aggregate to "newyear" (assessment/management calendar: the first two seasons
# of 1988 stand alone, then 4 seasons per calendar year 1989-2020).
BIOMTOTnewyear<-BIOMTOT %>% mutate(newyear=c(1988,1988,rep(1989:2020,each=4))) %>%group_by(newyear)%>%summarise(catonsur=sum(ALL))
# Careful: the last year here is the year before the assessment year, because
# it follows the management calendar.
# Quarterly discards for subdivision 9aS.
# NOTE(review): startCol 9 to endCol 14 is six columns but only five names are
# assigned below -- verify the sixth column (a total?) is meant to be dropped.
discardsbyquarter<-readWorksheetFromFile("/home/marga/GADGET/DATOS/Taledas_allfleets_1988_2016/Landings and Discards_9aN and 9a S_2014 on.xlsx", sheet="South_9aS",
                                         header = T, startCol = 9,
                                         startRow = 4, endCol = 14,
                                         endRow = 33, useCachedValues=TRUE)
names(discardsbyquarter)<-c("year","1","2","3","4")
## ---- Annual discards, fleet catch tables, ARSA survey ALK/length/biomass ----
# Melt quarterly discards to long form and sum within assessment years
# (1988 has 2 quarters, 1989-2016 have 4, 2017 has 2 observed quarters).
DISCARDS<-melt(discardsbyquarter,id="year")%>%arrange(year)%>%mutate(newyear=c(1988,1988,rep(1989:2016,each=4),2017,2017))%>%group_by(newyear)%>%summarise(newyeardiscardsouth=sum(value))%>%mutate(year=newyear)
DISCARDS<-add_row(DISCARDS, newyear=2018, newyeardiscardsouth=151, year=2018) # taken from Landings and Discards_9aN and 9a S_2014 on_2018.xlsx
DISCARDS<-add_row(DISCARDS, newyear=2019, newyeardiscardsouth=87, year=2019) # taken from Landings and Discards_9aN and 9a S_2014 on_2019.xlsx; the 2018 values were corrected too
DISCARDS$newyeardiscardsouth[DISCARDS$year==2017]<-186
#save(BIOMTOTnewyear,DISCARDS,file="/home/marga/GADGET/Assessment/Assessment2020_pandemia/BIOMTOTnewyeardiscards.Rdata")
save(BIOMTOTnewyear,DISCARDS,file="BIOMTOTnewyeardiscards.Rdata")
# Seine-fleet catches: the four quarterly columns are deliberately all named
# "count" so the rbind below stacks them into one long (year, count) table.
names(fleet.seinepre)<-c("year","count","count","count","count")
fleet.seinepre[c(2,3,4,5)]<-fleet.seinepre[c(2,3,4,5)]*1000
fleet.seine<-rbind(fleet.seinepre[c(1,2)],fleet.seinepre[c(1,3)],fleet.seinepre[c(1,4)],fleet.seinepre[c(1,5)])
# One quarter-end month (3,6,9,12) per stacked block of years.
fleet.seine$month<-rep(seq(3,12,3),times=1, each=nrow(fleet.seinepre))#rep(c(12,3,6,9),times=1, each=26)#rep(seq(3,12,3),times=1, each=26)
fleet.seine$areacell<-"IXa"
fleet.seine$species<-'ANE'
fleet.seine$vessel<-"2.CQT"
fleet.seine$sampling_type<-"SEA"
# All-fleet (PT+SP) catches in numbers by half-year season.
fleet.allptsp<-CatchesAllFleetsPTSP%>%mutate(month=SEASON*3)%>%ungroup()%>%select(year=YEAR,month,count=ALL)
#fleet.allptsp<-add_row(fleet.allptsp,year=2018,month=c(3,6),count=c(Numb20181,Numb20182))
#fleet.allptsp<-add_row(fleet.allptsp,year=2019,month=c(3,6),count=c(Numb20191,Numb20192))
#fleet.allptsp<-add_row(fleet.allptsp,year=2020,month=c(3,6),count=c(Numb20201,Numb20202))
fleet.allptsp<-add_row(fleet.allptsp,year=2021,month=c(3,6),count=c(Numb20211,Numb20212))
fleet.allptsp$areacell<-"IXa"
fleet.allptsp$species<-'ANE'
fleet.allptsp$vessel<-"2.CDA"
fleet.allptsp$sampling_type<-"SEAS2"
#ARSA ALK
# ARSA November survey age-length keys (ages 0-3), one workbook per year.
mybiglistII <- list()
for (i in c(1993,1997:2016)){
  name<-paste("ARSA",i,sep="")
  a<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/ARSA/ARSA_NOV_ALK_",i,".xlsx",sep=""), sheet = "4Q",
                           header = TRUE, startCol = 11,
                           startRow = 5, endCol = 15,
                           endRow = 37, useCachedValues=TRUE)
  names(a)<-c("length","0", "1", "2", "3")
  a$year<-i
  a$month<-11
  a$type<-"arsa_comp"
  a[is.na(a)] <- 0
  mybiglistII[[name]] <- a
}
dfII<-plyr::ldply(mybiglistII,data.frame)
names(dfII)<-c(".id", "length", "0", "1", "2", "3", "year", "month", "type")
arsa_comp<-dfII
# Long format: one row per length/age with its count.
arsa_def<-melt(arsa_comp, id=c(".id","length","year", "month", "type" ))
names(arsa_def)<-c(".id","length","year", "month", "sampling_type","age","count")
arsa_def$areacell<-"IXa"
arsa_def$species<-'ANE'
arsa.ldist<-arsa_def[,2:9]   # drop the .id column
# arsa.ldist without age: plain length distributions from the same workbooks.
mybiglistII <- list()   # NOTE: accumulator reused -- previous contents discarded
for (i in c(1993,1997:2016)){
  name<-paste("ARSALD",i,sep="")
  a<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/ARSA/ARSA_NOV_ALK_",i,".xlsx",sep=""), sheet = "4Q",
                           header = FALSE, startCol = 8,
                           startRow = 6, endCol = 9,
                           endRow = 37, useCachedValues=TRUE)
  names(a)<-c("length","count")
  a$year<-i
  a$month<-11
  a$type<-"arsa_compld"
  a[is.na(a)] <- 0
  mybiglistII[[name]] <- a
}
dfII<-plyr::ldply(mybiglistII,data.frame)
names(dfII)<-c(".id", "length", "count", "year", "month", "sampling_type")
arsa_compld<-dfII
#arsa_def<-melt(arsa_comp, id=c(".id","length","year", "month", "type" ))
#names(arsa_def)<-c(".id","length","year", "month", "sampling_type","age","count")
arsa_compld$areacell<-"IXa"
arsa_compld$species<-'ANE'
arsa.ldistnoage<-arsa_compld[,2:8]
# ARSA absolute biomass index.
arsa_bio<-readWorksheetFromFile("/home/marga/GADGET/DATOS/ARSA/WKPELA2018_ane.27.9a_ES_SURVEY_ARSA-Novseries.xlsx", sheet = "Absolute Indices' estimates",
                                header = F, startCol = 15,
                                startRow = 2, endCol = 15,
                                endRow = 22, useCachedValues=TRUE)
arsa_bio<-arsa_bio*1000 # to kilograms
ARSA_biom<-data.frame(cbind(year=c(1993,1997:2016), count=arsa_bio, month=11))
names(ARSA_biom)<-c("year", "count", "month")
ARSA_biom$areacell<-"IXa"
ARSA_biom$species<-'ANE'
ARSA_biom$sampling_type<-"arsa_bio"
## ---- BOCADEVA DEPM biomass and commercial ALK / length-dist loops ----
ARSA_biom$vessel<-'1.RSH'
#bocadeva
# BOCADEVA DEPM SSB estimates (5 surveys, triennial).
bocadeva_bio<-readWorksheetFromFile("/home/marga/GADGET/DATOS/DEPM/DEPM_SSB_estimates.xlsx", sheet="Sheet1",
                                    header = F, startCol = 1,
                                    startRow = 2, endCol = 2,
                                    endRow = 6, useCachedValues=TRUE)
bocadeva_bio[,2]<-bocadeva_bio[,2]*1000 # to kilograms
bocadeva_biom<-mutate(bocadeva_bio,month=c(6,6,7,7,7))
names(bocadeva_biom)<-c("year", "count", "month")
bocadeva_biom$areacell<-"IXa"
bocadeva_biom$species<-'ANE'
bocadeva_biom$sampling_type<-"bocadeva_bio"
bocadeva_biom$vessel<-'1.RSH'
bocadeva_biom$month<-bocadeva_biom$month-3
#length-age
# Commercial quarterly ALKs (cols 1-5) and length distributions (cols 8-9)
# from BOQUERON_ALK_<year>.xls, sheets "1Q".."4Q".
mybiglistII <- list()   # NOTE: reused again; ARSA contents already copied out above
mybiglistIV <- list()
for (i in 1989:2013){
  #i<-1989
  for (j in 1:4){
    # j<-1
    name<-paste("ALKcatches",i,j,sep="")
    name1<-paste("LDcatches",i,j,sep="")
    a<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/Taledas_allfleets_1988_2016/BOQUERON_ALK_",i,".xls",sep=""), sheet = paste(j,"Q",sep=""),
                             header = T, startCol = 1,
                             startRow = 5, endCol = 5,
                             endRow = 37, useCachedValues=T)
    ldist_cat<- readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/Taledas_allfleets_1988_2016/BOQUERON_ALK_",i,".xls",sep=""), sheet = paste(j,"Q",sep=""),
                                      header = T, startCol = 8,
                                      startRow = 5, endCol = 9,
                                      endRow = 37, useCachedValues=T)
    names(a)<-c("length","0", "1", "2", "3")
    a$year<-i
    a$month<-3*(j-1)+1   # first month of quarter j
    a$type<-"alkcatches_comp"
    names(ldist_cat)<-c("length","count")
    ldist_cat$year<-i
    ldist_cat$month<-3*(j-1)+1
    a[is.na(a)] <- 0
    mybiglistII[[name]] <- a
    ldist_cat[is.na(ldist_cat)]<-0
    mybiglistIV[[name1]] <- ldist_cat
  }}
# Changed 2018 to 2019.
# From 2014 on the workbooks carry two extra length rows (endRow 39 vs 37).
for (i in 2014:2020){
  for (j in 1:4){
    name<-paste("ALKcatches",i,j,sep="")
    name1<-paste("LDcatches",i,j,sep="")
    a<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/Taledas_allfleets_1988_2016/BOQUERON_ALK_",i,".xls",sep=""), sheet = paste(j,"Q",sep=""),
                             header = T, startCol = 1,
                             startRow = 5, endCol = 5,
                             endRow = 39, useCachedValues=T)
    ldist_cat<- readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/Taledas_allfleets_1988_2016/BOQUERON_ALK_",i,".xls",sep=""), sheet = paste(j,"Q",sep=""),
                                      header = T, startCol = 8,
                                      startRow = 5, endCol = 9,
                                      endRow = 39, useCachedValues=T)
    names(ldist_cat)<-c("length","count")
    names(a)<-c("length","0", "1", "2", "3")
    ldist_cat$year<-i
    a$year<-i
    a$month<-3*(j-1)+1
    ldist_cat$month<-3*(j-1)+1
    a$type<-"alkcatches_comp"
    a[is.na(a)] <- 0
    ldist_cat[is.na(ldist_cat)]<-0
    mybiglistII[[name]] <- a
    mybiglistIV[[name1]] <- ldist_cat
  }}
dfIII<-plyr::ldply(mybiglistII,data.frame)
dfIV<-plyr::ldply(mybiglistIV,data.frame)
names(dfIII)<-c(".id", "length", "0", "1", "2", "3", "year", "month", "type")
ALKcatches_comp<-dfIII
# Long format: one row per length/age/quarter.
ALKcatches_def<-melt(ALKcatches_comp, id=c(".id","length","year", "month", "type" ))
names(ALKcatches_def)<-c(".id","length","year", "month", "sampling_type","age","count")
ALKcatches_def$areacell<-"IXa"
ALKcatches_def$species<-'ANE'
ALKcatches.ldist<-ALKcatches_def[,2:9]
# ALKcatches.ldist$age<-as.numeric(as.character(ALKcatches.ldist$age))
# Seine length distributions: rename Q1-Q4 columns all to "count" so the
# quarterly columns can be stacked with rbind (same trick as fleet.seine).
names(catch_at_length)<-c("year","length","Col2","count","count","count","count","Total")
seine.ldist <- rbind(catch_at_length[c(1,2,4)],catch_at_length[c(1:2,5)],catch_at_length[c(1:2,6)],catch_at_length[c(1:2,7)])
seine.ldist[3]<-seine.ldist[3]*1000
#rep(x, times = 1, length.out = NA, each = 1)
# fix this!!!
## ---- Seine length dists by quarter, catch-at-age, length/weight-at-age ----
# One quarter-end month per stacked block; block size = rows per quarter table.
seine.ldist$month<-rep(seq(3,12,3),times=1, each=nrow(catch_at_length))#rep(c(12,3,6,9),times=1, each=988)#rep(seq(3,12,3),times=1, each=988)
# From 2014 on, use the length distributions read from the BOQUERON workbooks (dfIV).
seine.ldist<-rbind(seine.ldist%>%filter(year<2014),dfIV%>%filter(year>=2014)%>% select(year,length,count,month))
# Shifted copy used only for the comparison join below (half-cm offset, month+2).
seine.ldist2<-dfIV%>%mutate(length2=length-0.25,month2=month+2)%>% select(year,length=length2,count2=count,month=month2)
SEINE.ldist<-left_join(seine.ldist,seine.ldist2)
#left_join(seine.ldist%>%filter(year>2014),seine.ldist2%>%filter(year>2014))
#seine.ldist%>%mutate(count2=seine.ldist2$count)
# ggplot(SEINE.ldist,aes(length,count)) +
#   geom_line(aes(length,count2), col='gray') +
#   facet_wrap(~year+month) + theme_bw() + geom_line() +
#
# ldist20171<-seine.ldist%>%filter(year==2017,month<4)
# ldist20172<-seine.ldist%>%filter(year==2017,month==4)
# ldist20171p<-ldist20171%>%mutate(countperc=count/max(ldist20171$count))
# ldist20172p<-ldist20172%>%mutate(countperc=count/max(ldist20172$count))
seine.ldist$areacell<-"IXa"
seine.ldist$species<-'ANE'
seine.ldist$sampling_type<-'SEA'
# ecocadiz0813.ldist<-def_ecocadiz0813
# ecocadiz0813.ldist$areacell<-"IXa"
# ecocadiz0813.ldist$species<-'ANE'
# ecocadiz0813.ldist$sampling_type<-'ecocadiz0813'
# Catch-at-age by quarter.
# FIX(review): the original used `AGE==c(0,1,2,3)`, which recycles the vector
# against the AGE column and only keeps rows where the alignment happens to
# match. The intent (cf. the `age==0|age==1|...` form used everywhere else in
# this script) is membership, i.e. %in%.
catchespre<-subset(catch_age,AGE %in% c(0,1,2,3), select=c(1:6))
# Quarterly columns deliberately all named "count" for the rbind stacking trick.
names(catchespre)<-c("year","age","count","count","count","count")
catches.agedist<-rbind(catchespre[c(1,2,3)],catchespre[c(1,2,4)],catchespre[c(1,2,5)],catchespre[c(1,2,6)])
catches.agedist[3]<-catches.agedist[3]*1000
Nyears<-length(unique(catches.agedist$year)) #number of years
catches.agedist$month<-rep(seq(3,12,3),times=1, each=Nyears*4)#rep(c(12,3,6,9),times=1, each=26*4)#rep(seq(3,12,3),times=1, each=26*4)# 26 years, 4 ages
catches.agedist$species<-'ANE'
catches.agedist$areacell<-"IXa"
catches.agedist$sampling_type<-'SEA'
# comm.ldist <- merge(comm.ldist,
#                     comm.stations[c('synis.id','ar','man','lat','lon')])
# comm.ldist$areacell <- d2sr(comm.ldist$lat,-comm.ldist$lon)
# names(comm.ldist) <- c('sample.id','species','length','count','sex',
#                        'maturity','year','month','lat','lon','areacell')
# comm.ldist$species <- 'COD'
# Mean length at age by quarter.
length_agepre<-subset(length_age,age==0|age==1|age==2|age==3) # dropping the totals row
names(length_agepre)<-c("year","age","length","length","length","length")
length.at.age<-rbind(length_agepre[c(1,2,3)],length_agepre[c(1,2,4)],length_agepre[c(1,2,5)],length_agepre[c(1,2,6)])
# Block size derived from the data instead of the hard-coded 26*4 (identical
# when the series spans 26 years x 4 ages, robust when more years are added).
length.at.age$month<-rep(seq(3,12,3),times=1, each=nrow(length_agepre))#rep(c(12,3,6,9),times=1, each=26*4)#changing the quarter
length.at.age<-na.omit(length.at.age)
length.at.age$sampling_type<-'SEA'
length.at.age$species<-'ANE'
length.at.age$areacell<-"IXa"
# Mean weight at age by quarter (same layout as length-at-age).
weight_agepre<-subset(weight_age,age==0|age==1|age==2|age==3) # dropping the totals row
names(weight_agepre)<-c("year","age","weight","weight","weight","weight")
weight.at.age<-rbind(weight_agepre[c(1,2,3)],weight_agepre[c(1,2,4)],weight_agepre[c(1,2,5)],weight_agepre[c(1,2,6)])
weight.at.age$month<-rep(seq(3,12,3),times=1, each=nrow(weight_agepre))#rep(c(12,3,6,9),times=1, each=26*4)#changing the quarter
weight.at.age<-na.omit(weight.at.age)
weight.at.age$sampling_type<-'SEA'
weight.at.age$species<-'ANE'
weight.at.age$areacell<-"IXa"
# weight_agepre<-subset(weight_age,AGE==0|AGE==1|AGE==2|AGE==3) # dropping the totals row
# names(weight_agepre)<-c("year","age","weight","weight","weight","weight")
# weight.at.age<-rbind(weight_agepre[c(1,2,3)],weight_agepre[c(1,2,4)],weight_agepre[c(1,2,5)],weight_agepre[c(1,2,6)])
# weight.at.age$month<-rep(c(12,3,6,9),times=1, each=26*4)
# #Remove rows with empty spaces
# blank2na <- function(x){
#   z <- gsub("\\s+", "", x) #make sure it's "" and not " " etc
#   x[z==""] <- NA
#   return(x)
# }
#
# #apply that function
# weight.at.age<-data.frame(sapply(weight.at.age, blank2na))
#
# #weight.at.age<-na.omit(weight.at.age)
# weight.at.age$sampling_type<-'SEA'
# weight.at.age$species<-'ANE'
# weight.at.age$areacell<-"IXa"
# pelagonumber.survey<-pelagosep[[2]][c(1,2,5)]
# #permutation of time
#
# (commented-out continuation: pelagonumber.survey / ecocadiz.survey acoustic
#  abundance-in-numbers set-up, superseded by the biomass indices below)

## ---- SAR acoustic biomass index (sarnumber.survey) --------------------------
## sarsep[[1]] = biomass, sarsep[[2]] = numbers (original note:
## "1 para biomasa 2 para numeros").  Columns 3+4 are presumably the Spanish
## and Portuguese strata -- TODO confirm; the last 2012 value comes from the
## ECOCADIZ-RECLUTAS survey.
sarnumber.survey_pre<-sarsep[[1]][c(1,2,4,5)]
sarnumber.survey<-cbind(sarnumber.survey_pre[c(1,2)],
                        rowSums(sarnumber.survey_pre[c(3,4)], na.rm=T))
## Drop incomplete rows rather than treating NA as zero (original note:
## "mejor omitir filas con NA que tratarlas como ceros").
sarnumber.survey<-na.omit(sarnumber.survey)
sarnumber.survey[3]<-sarnumber.survey[3]*1000 # tonnes -> kg
## Row 5 belongs to ECOCADIZ-RECLUTAS, not SAR.  NOTE(review): positional drop
## after na.omit() is fragile if the input gains or loses rows -- confirm.
sarnumber.survey<-sarnumber.survey[-5,]
names(sarnumber.survey)<-c('year','month','count')
sarnumber.survey$species<-'ANE'
sarnumber.survey$areacell<-"IXa"
sarnumber.survey$sampling_type<-'sar_bio'
sarnumber.survey$vessel<-'1.RSH'

######################
## ---- Quarterly catch biomass by subdivision (IXaGoC and IXaAlg) -------------
## Spanish purse seine, Gulf of Cadiz (IXa South): columns 30-34 of the
## "Quarterly Catches" sheet = year + 4 quarterly tonnages.
seine_bio<-readWorksheetFromFile("/home/marga/GADGET/DATOS/ane 27.9a_Catches by Subdivision.xlsx",
                                 sheet = "Quarterly Catches", header = F,
                                 startCol = 30, startRow = 7,
                                 endCol = 34, endRow = 34,
                                 useCachedValues=TRUE)
## Duplicate "count" names let each quarter column stack cleanly via rbind().
names(seine_bio)<-c("year","count","count","count","count")
catches.seine_bio_GoC<-rbind(seine_bio[c(1,2)], seine_bio[c(1,3)],
                             seine_bio[c(1,4)],seine_bio[c(1,5)])
catches.seine_bio_GoC[2]<-catches.seine_bio_GoC[2]*1000 # tonnes to kg
dim(seine_bio) # interactive sanity check
catches.seine_bio_GoC$month<-rep(seq(3,12,3),times=1, each=dim(seine_bio)[1])
catches.seine_bio_GoC$species<-'ANE'
catches.seine_bio_GoC$areacell<-"IXa"
catches.seine_bio_GoC$sampling_type<-'SEA'
catches.seine_bio_GoC$vessel<-"3.CQT"

## Portuguese catches, Algarve subdivision: columns 23-27 of the same sheet.
port_bio<-readWorksheetFromFile("/home/marga/GADGET/DATOS/ane 27.9a_Catches by Subdivision.xlsx",
                                sheet = "Quarterly Catches", header = F,
                                startCol = 23, startRow = 10,
                                endCol = 27, endRow = 34,
                                useCachedValues=TRUE)
names(port_bio)<-c("year","count","count","count","count")
catches.port_bio_Alg<-rbind(port_bio[c(1,2)], port_bio[c(1,3)],
                            port_bio[c(1,4)],port_bio[c(1,5)])
catches.port_bio_Alg[2]<-catches.port_bio_Alg[2]*1000 # tonnes to kg
dim(port_bio) # interactive sanity check
catches.port_bio_Alg$month<-rep(seq(3,12,3),times=1, each=dim(port_bio)[1])
catches.port_bio_Alg$species<-'ANE'
catches.port_bio_Alg$areacell<-"IXa"
catches.port_bio_Alg$sampling_type<-'SEA'
catches.port_bio_Alg$vessel<-"3.CDA"

## ---- ECOCADIZ-RECLUTAS age-length keys, one workbook per survey year --------
mybiglist <- list()
for (i in c(2012,2014:2016)){
  name<-paste("Ecocadiz_rec",i,sep="")
  ## ALK block: length class + counts for ages 0-3.
  a<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/ECOCADIZ-RECLUTAS/ALK_ECO-R_",i,".xls",sep=""),
                           sheet = "ALK_GENERAL_BOQUERON", header = TRUE,
                           startCol = 11, startRow = 7,
                           endCol = 15, endRow = 36,
                           useCachedValues=TRUE)
  ## Single cell holding the survey total (presumably biomass in tonnes --
  ## the *1000 below suggests so; TODO confirm against the workbook).
  b<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/ECOCADIZ-RECLUTAS/ALK_ECO-R_",i,".xls",sep=""),
                           sheet = "ALK_GENERAL_BOQUERON", header = F,
                           startCol = 9, startRow = 3,
                           endCol = 9, endRow = 3,
                           useCachedValues=TRUE)
  names(a)<-c("length","0", "1", "2", "3")
  a$year<-i
  a$month<-10
  a$type<-"ecocadiz_rec_comp"
  a$total<-b[1,1]
  a[is.na(a)] <- 0
  mybiglist[[name]] <- a
}
mybiglist$Ecocadiz_rec2012$month<-11 # 2012 survey ran in November

df<-plyr::ldply(mybiglist,data.frame)
names(df)<-c(".id", "length", "0", "1", "2", "3", "year", "month", "type","total")

## Survey biomass index: one (year, month, total) row per survey.
ecocadiz_rec_bio<-cbind(df[7],df[8],df[10])
names(ecocadiz_rec_bio)<-c("year","month","count")
ecocadiz_rec_bio$areacell<-"IXa"
ecocadiz_rec_bio$species<-'ANE'
ecocadiz_rec_bio$sampling_type<-"ecocadizrec_bio"
ecocadiz_rec_bio$vessel<-"1.RSH"
ecocadiz_rec_bio<-na.omit(ecocadiz_rec_bio)
ecocadiz_rec_bio<-unique(ecocadiz_rec_bio) # one row per survey, not per length class

## Age-length composition in long format.
ecocadiz_rec_comp<-df[-10] # remove total
ecocadiz_rec_def<-melt(ecocadiz_rec_comp, id=c(".id","length","year", "month", "type" ))
names(ecocadiz_rec_def)<-c(".id","length","year", "month", "sampling_type","age","count")
ecocadiz_rec_def$areacell<-"IXa"
ecocadiz_rec_def$species<-'ANE'
ecocadiz_rec_def$count<-signif(ecocadiz_rec_def$count,2)
ecocadiz_rec_def$count<-ecocadiz_rec_def$count*1000 # scale to individuals
ecocadizrec.ldist<-ecocadiz_rec_def[,2:9] # drop the .id column

## ---- Register area and vessels in mfdb --------------------------------------
areadef<-data.frame(name="IXa")
mfdb_import_area(mdb, areadef)
## NOTE(review): only "2.CDA"/"1.RSH" are registered, but the catch frames
## above use vessels "3.CQT"/"3.CDA" -- confirm the taxonomy matches.
vesseldef<-data.frame(name=c("2.CDA","1.RSH"))
mfdb_import_vessel_taxonomy(mdb, vesseldef)

library(tidyverse)
library(gadget3)

## Default mfdb query filters shared by every extraction below.
defaults <- list(
  #area = mfdb::mfdb_group("IXa" = 1),
  timestep = mfdb::mfdb_timestep_quarterly,
  year = 1988:2021,
  species="ANE")
#areas <- structure(
#  seq_along(defaults$area),
#  names = names(defaults$area))
## Timekeeping for the model, i.e. how long we run for.
time_actions <- list(
  g3a_time(start_year = min(defaults$year), end_year = max(defaults$year),
           defaults$timestep),
  list())

##### Configure stocks #########################################################
## Single anchovy stock: 0.5 cm length groups from 3 to 22 cm, ages 0-3.
anch <-
  g3_stock(c(species = 'anch'), lengthgroups = seq(3, 22, 0.5)) %>%
  #g3s_livesonareas(areas[c('IXa')]) %>%
  g3s_age(minage = 0, maxage = 3)
# (commented-out: two-stock ling/mat template from the original example)

## Maximum number of length groups a stock can grow through within a time step
## (maxlengthgroupgrowth).
mlgg <- 5

############ Configure fleets ##################################################
## Commercial purse seine plus the two acoustic "survey fleets".
seine <- g3_fleet('seine')# %>%
#g3s_livesonareas(areas[c('IXa')])
ECO <- g3_fleet('ECO') #%>%
#g3s_livesonareas(areas[c('IXa')])
PEL <- g3_fleet('PEL')# %>%
#g3s_livesonareas(areas[c('IXa')])
# (commented-out: gil/foreign fleets, index_type taxonomy and the original
#  5-entry sampling-type import)

## ---- Register sampling types in the mfdb instance ---------------------------
## NOTE(review): names 'SEAS'/'SEAS2'/'arsa_compld' map to descriptions
## 'seas2'/'seas3'/'arsa_solo_length' -- confirm the label offset is intended.
mfdb_import_sampling_type(mdb_compi, data.frame(
  id = 1:23,
  name = c('SEA', 'pelago','ecocadiz', 'ecocadiz0813','sar','pelago_comp','ecocadiz_comp','pelago_bio','ecocadiz_bio',
           'sar_bio','ecocadizrec_bio','ecocadiz_rec_comp','bocadeva_bio','alkcatches_comp',
           'arsa_comp','arsa_bio','SEAS','SEAS2','arsa_compld','ecocadiz_comp_ld','ecocadiz_comp_ald','pelago_comp_ld','pelago_comp_ald'),
  description = c('Sea sampling', 'pelago','ecocadiz',
                  'ecocadiz0813','sar','pelago_comp','ecocadiz_comp','pelago_bio','ecocadiz_bio','sar_bio','ecocadizrec_bio','ecocadiz_rec_comp','bocadeva_bio','alkcatches_comp',
                  'arsa_comp','arsa_bio','seas2','seas3','arsa_solo_length','ecocadiz_comp_ld','ecocadiz_comp_ald','pelago_comp_ld','pelago_comp_ald')))
## From here on work against the mdb_compi instance.
mdb<-mdb_compi
## Widen the count column so biomass values (kg) fit.  NOTE(review): uses the
## unexported mfdb:::mfdb_send -- fragile across mfdb versions.
mfdb:::mfdb_send(mdb, paste0("ALTER TABLE sample ALTER COLUMN count TYPE NUMERIC"))

## ---- Load the prepared data sets into mfdb ----------------------------------
mfdb_import_survey(mdb,
                   data_source = 'example-anchovy-seine.ldist',
                   seine.ldist)
# (commented-out: ecocadiz0813.ldist, weight.at.age, length.at.age and
#  catches.agedist imports)
mfdb_import_survey(mdb,
                   data_source = 'example-anchovy-ALKcatch.at.dist',
                   ALKcatches.ldist)
#mfdb:::mfdb_send(mdb, paste0("ALTER TABLE ", mdb$schema, ".sample ALTER COLUMN count TYPE NUMERIC"))
mfdb_import_survey(mdb,
                   data_source = 'example-anchovy-allptsp.dist',
                   fleet.allptsp)
# (commented-out: per-fleet biomass imports for catches.port_bio_Alg /
#  catches.seine_bio_GoC, an acoustic survey-index example, and the
#  pelagonumber.survey import)
mfdb_import_survey(mdb,
                   data_source = 'example-anchovy-acoustic-pelago.biomass',
                   pelago_biom)
mfdb_import_survey(mdb,
                   data_source = 'example-anchovy-acoustic-pelago.ldist',
                   pelago.ldist)
mfdb_import_survey(mdb,
                   data_source = 'example-anchovy-sar.numbers',
                   sarnumber.survey)
# (commented-out: ecocadiz.numbers import)
mfdb_import_survey(mdb,
                   data_source = 'example-anchovy-ecocadiz.biomass',
                   ecocadiz_biom)
mfdb_import_survey(mdb,
                   data_source = 'example-anchovy-ecocadiz.ldist',
                   ecocadiz.ldist)
mfdb_import_survey(mdb,
                   data_source = 'example-anchovy-ecocadizrec.biomass',
                   ecocadiz_rec_bio)
mfdb_import_survey(mdb,
                   data_source = 'example-anchovy-ecocadizrec.ldist',
                   ecocadizrec.ldist)
mfdb_import_survey(mdb,
                   data_source = 'example-anchovy-arsa.ldist',
                   arsa.ldist)
mfdb_import_survey(mdb,
                   data_source = 'example-anchovy-arsa.ldistnoage',
                   arsa.ldistnoage)
mfdb_import_survey(mdb,
                   data_source = 'example-anchovy-pelago.ldistnoage',
                   pelago.ldistnoage)
mfdb_import_survey(mdb,
                   data_source = 'example-anchovy-ecocadiz.ldistnoage',
                   ecocadiz.ldistnoage)
mfdb_import_survey(mdb,
                   data_source = 'example-anchovy-ecocadiz.aldist',
                   ecocadiz.aldist)
mfdb_import_survey(mdb,
                   data_source = 'example-anchovy-pelago.aldist',
                   pelago.aldist)
mfdb_import_survey(mdb,
                   data_source = 'example-anchovy-arsa.biomass',
                   ARSA_biom)
mfdb_import_survey(mdb,
                   data_source = 'example-anchovy-bocadeva.biomass',
                   bocadeva_biom)

# (commented-out reference, continues on next chunk: "Para Anchovy73" listing
#  of the gadget2 Aggfiles each likelihood component expects)
# (commented-out continuation: the "Para Anchovy73" Aggfiles listing, old
#  defaults block, areafile write, and penalty / understocking likelihood
#  components from the gadget2 workflow)

## ---- Seine catch length distribution ----------------------------------------
aggdata <- mfdb_sample_count(mdb, c('age', 'length'), c(list(
  sampling_type='SEA',length = mfdb_interval("len", seq(3,22,0.5))),
  defaults))
# (commented-out: gadget2 "ldist.seine" catchdistribution component and its
#  age aggregation file)
save(aggdata, file="demo-anchovy/data/ldist.seine.Rdata")
rm(aggdata)
# (commented-out: ARSA no-age length distributions, split 1993-2000 (1 cm
#  bins) and 2001-2016 (0.5 cm bins), with year+1 / step=1 adjustments)

## ---- PELAGO survey length distribution (no age) -----------------------------
aggdata <- mfdb_sample_count(mdb, c('age','length'), c(list(
  sampling_type='pelago_comp_ld', length = mfdb_interval("len", seq(3,22,0.5))),
  defaults))
# (commented-out: gadget2 "ldist.pelago.noage" component)
save(aggdata, file="demo-anchovy/data/ldist.pelago.Rdata")
rm(aggdata)

## ---- ECOCADIZ survey length distribution (no age) ---------------------------
aggdata <- mfdb_sample_count(mdb, c('age','length'), c(list(
  sampling_type='ecocadiz_comp_ld', length = mfdb_interval("len", seq(3,22,0.5))),
  defaults))
# (commented-out: gadget2 "ldist.ecocadiz.noage" component)
save(aggdata, file="demo-anchovy/data/ldist.ecocadiz.Rdata")
rm(aggdata)

## ---- Commercial age-length distribution (from the catch ALK) ----------------
aggdata <- mfdb_sample_count(mdb, c('age','length'), c(list(
  sampling_type='alkcatches_comp',length = mfdb_interval("len", seq(3.5,19.5,0.5)), age=c(0,1,2,3)),
  defaults))
# (commented-out: gadget2 "ldist.alkseine" component)
save(aggdata, file="demo-anchovy/data/aldist.seine.Rdata")
rm(aggdata)

## ---- PELAGO age-length distribution -----------------------------------------
aggdata <- mfdb_sample_count(mdb, c('age','length'), c(list(
  sampling_type='pelago_comp_ald',length = mfdb_interval("len", seq(3.5,19.5,0.5)), age=c(0,1,2,3)),
  defaults))
# (commented-out: gadget2 "aldist.pelago" component)
save(aggdata, file="demo-anchovy/data/aldist.pelago.Rdata")
rm(aggdata)

## ---- ECOCADIZ age-length distribution ---------------------------------------
## Age-0 fish removed from the ECOCADIZ ALK (original note: "He quitado los
## bichos de la edad cero de la ecocadiz").
aggdata <- mfdb_sample_count(mdb, c('age','length'), c(list(
  sampling_type='ecocadiz_comp_ald',length = mfdb_interval("len", seq(3.5,19.5,0.5)), age=c(1,2,3)),
  defaults))
# (commented-out: the same query including age 0, and the cod-template IGFS
#  aggregation example; continues on next chunk with the commented-out
#  "aldist.ecocadiz" gadget2 component)
gadget_dir_write(gd, gadget_likelihood_component("catchdistribution", # name = "aldist.ecocadiz", # weight = 1, # data = aggdata[[1]], # fleetnames = c("ECO"), # stocknames = c("anch"))) # save(aggdata, file="demo-anchovy/data/aldist.ecocadiz.Rdata") rm(aggdata) ############################################ # aggdata <- mfdb_sample_count(mdb, c('age','length'), c(list( # sampling_type='ecocadiz0813',length = mfdb_interval("len", seq(6,18,0.5))), # defaults)) # aggdata <- mfdb_sample_count(mdb, c('age','length'), c(list( # sampling_type='ecocadiz_comp',length = mfdb_interval("len", seq(3.5,19.5,0.5))), # defaults)) ############################## # aggdata <- mfdb_sample_count(mdb, c('age','length'), c(list( # sampling_type='ecocadiz_comp',length = mfdb_interval("len", seq(3.5,19.5,0.5)), age=c(0,1,2,3)), # defaults)) # aggdata <- mfdb_sample_count(mdb, c('age', 'length'), c(list( # sampling_type = 'IGFS', # species = 'COD', # length = mfdb_interval("len", seq(0, 150, by = 2))), # defaults)) # gadget_dir_write(gd, gadget_likelihood_component("catchdistribution", # name = "ldist.ecocadiz0813", # stocknames = c("anch"), # weight = 1, # fleetnames = c("seine"), # data = aggdata[[1]] # )) # gadget_dir_write(gd, gadget_file("Aggfiles/catchdistribution.ldist.ecocadiz0813.age.agg", list(list(all=c(0,3))))) # #ECO O SEINE De flota # gadget_dir_write(gd, gadget_likelihood_component("catchdistribution", # name = "ldist.ecocadiz", # stocknames = c("anch"), # weight = 1, # fleetnames = c("ECO"), # data = aggdata[[1]] # )) # # gadget_dir_write(gd, gadget_file("Aggfiles/catchdistribution.ldist.ecocadiz0813.age.agg", list(list(all=c(0,3))))) # # # # # rm(aggdata) #################################esto no # aggdata <- mfdb_sample_count(mdb, c('age', 'length'), c(list(sampling_type='SEA',age =c(0,1,2,3)),defaults)) # # # gadget_dir_write(gd, gadget_likelihood_component("catchdistribution", # name = "catches.agedist", # weight = 1, # data = aggdata[[1]], # fleetnames = c("seine"), # 
stocknames = c("anch"))) # # gadget_dir_write(gd, gadget_file("Aggfiles/catchdistribution.catches.agedist.len.agg", list(list(all=c(3,22))))) # rm(aggdata) # # aggdata<-mfdb_sample_count(mdb,c("vessel"),c( # # list(vessel=mfdb_group(seine="3.CQT"),sampling_type='SEA'), # defaults)) # # gadget_dir_write(gd, gadget_fleet_component( # 'numberfleet', # name = 'seine', # suitability = "\n anch function expsuitfuncl50 #constant #l50", # data=aggdata[[1]] # )) # # rm(aggdata) #################################33 #####################################3 ########################### #Para tener toda la base de datos de biomasa junta # aggdata<-mfdb_sample_count(mdb, c("sampling_type","vessel"), # c(list(sampling_type=c('pelago_bio','ecocadiz_bio','ecocadizrec_bio','sar_bio','SEA'),vessel=c("3.CQT","3.CDA","1.RSH")),defaults)) # # Biomass_summary<-aggdata[[1]] # Biomass_summary$number<-Biomass_summary$number/1000 #in tonnes # Biomass_summary$com[Biomass_summary$sampling_type=='SEA']<-1 # Biomass_summary$com[Biomass_summary$sampling_type!='SEA']<-0 # #ggplot(aggdata[[1]],aes(interaction(step,year),number,colour=sampling_type, group=1))+geom_line() # rm(aggdata) # ggplot(Biomass_summary,aes(interaction(step,year),number,colour=sampling_type, group=sampling_type))+geom_line() # #dev.new() # #dev.list() # Biomass_summary_by_year<-aggregate(number~year+area+sampling_type, Biomass_summary, sum) # as<-ggplot(Biomass_summary_by_year,aes(year,number,colour=sampling_type, group=sampling_type))+geom_line()+geom_point()+theme_bw()+theme(axis.text.x = element_text(angle = 90, hjust = 1)) # ggsave(as+scale_x_continuous(breaks=1989:2016,limits=c(1989,2016), labels=1989:2016)+ylab("Biomass (tonnes)")+scale_colour_discrete(name = "",breaks=unique(Biomass_summary_by_year$sampling_type),labels=c( "ecocadiz", "ecocadizrec", "pelago" , "sar" , "Catches IXa South" )),file="biomass_summary_year.pdf") # # asi<-ggplot(Biomass_summary_by_year,aes(year,number,colour=sampling_type, 
group=sampling_type))+geom_point()+theme_bw()+theme(axis.text.x = element_text(angle = 90, hjust = 1)) # asi # asii<-as+scale_x_continuous(breaks=1989:2016,limits=c(1989,2016), labels=1989:2016)+ylab("Biomass (tonnes)")+scale_colour_discrete(name = "",breaks=unique(Biomass_summary_by_year$sampling_type),labels=c( "ecocadiz", "ecocadizrec", "pelago" , "sar" , "Catches IXa South" ))+scale_y_log10(breaks=c(1000,10000,40000),labels=c(1000,10000,40000)) # ggsave(asii,file="biomass_summary_year_log.pdf") ##############################################################3 #Para Gadget con biomasa # aggdata<-mfdb_sample_count(mdb,c("vessel"),c( # # list(vessel=mfdb_group(seine="3.CQT"),sampling_type='SEA'), # defaults)) # aggdata[[1]]$vessel<-"seine" # gadget_dir_write(gd, gadget_fleet_component( # 'totalfleet', # name = 'seine', # suitability = "\n anch function expsuitfuncl50 #constant #l50", # data=aggdata[[1]] # )) # # rm(aggdata) # # # aggdata<-mfdb_sample_count(mdb,c("vessel"),c( # # list(vessel=mfdb_group(seineport="3.CDA"),sampling_type='SEA'), # defaults)) # aggdata[[1]]$vessel<-"PortBIO" # gadget_dir_write(gd, gadget_fleet_component( # 'totalfleet', # name = 'seineport', # suitability = "\n anch function expsuitfuncl50 #constant #l50", # data=aggdata[[1]] # )) # # rm(aggdata) #Para Gadget con número de capturas # aggdata<-mfdb_sample_count(mdb,c("vessel"),c( # # list(vessel=mfdb_group(seine="2.CQT"),sampling_type='SEA'), # defaults)) # aggdata[[1]]$vessel<-"seine" # gadget_dir_write(gd, gadget_fleet_component( # 'numberfleet', # name = 'seine', # suitability = "\n anch function expsuitfuncl50 Initfiles/anchalpha.tvf Initfiles/anchL50.tvf" , # data=aggdata[[1]] # )) #########################################################3 #Para Gadget con all fleets # aggdata<-mfdb_sample_count(mdb,c("sampling_type"),c( # # list(sampling_type='SEAS2'), # defaults)) #aggdata<-mfdb_sample_count(mdb, c("sampling_type"), # c(list(sampling_type=c('SEAS2')),defaults)) 
aggdata<-mfdb_sample_count(mdb,NULL,c( list(vessel=mfdb_group(all="2.CDA")), defaults)) aggdata[[1]]$area<-NULL save(aggdata, file="demo-anchovy/data/seine_landings.Rdata") rm(aggdata) #aggdata[[1]]$vessel<-"seine" # gadget_dir_write(gd, gadget_fleet_component( # 'numberfleet', # name = 'seine', # suitability = "\n anch function expsuitfuncl50 Initfiles/anchalpha.tvf Initfiles/anchL50.tvf" , # data=aggdata[[1]] # )) # aggdata<-mfdb_sample_count(mdb,c("vessel"),c( # # list(vessel=mfdb_group(seine="3.CQT"),sampling_type='SEA'), # defaults)) # aggdata[[1]]$vessel<-"ARSA" # aggdata[[1]]$number<-1 # gadget_dir_write(gd, gadget_fleet_component( # 'totalfleet', # name = 'ARSA', # suitability = "\n anch function expsuitfuncl50 #constantarsa #l50arsa ", # data=aggdata[[1]] # )) aggdata<-mfdb_sample_count(mdb,NULL,c( list(vessel=mfdb_group(seine="1.RSH"),sampling_type='ecocadiz_bio'), defaults)) aggdata[[1]]$area<-NULL aggdata[[1]]$number<-1 save(aggdata, file="demo-anchovy/data/ECO_landings.Rdata") rm(aggdata) #gadget_dir_write(gd, gadget_fleet_component( # 'totalfleet', # name = 'ECO', # suitability = "\n anch function expsuitfuncl50 #constanteco #l50eco ", # data=aggdata[[1]] # )) aggdata<-mfdb_sample_count(mdb,NULL,c( list(vessel=mfdb_group(seine="1.RSH"),sampling_type='pelago_bio'), defaults)) aggdata[[1]]$area<-NULL aggdata[[1]]$number<-1 save(aggdata, file="demo-anchovy/data/PEL_landings.Rdata") rm(aggdata) #################################33 ecorec fleet # # aggdata[[1]]$vessel<-"ECOREC" # # gadget_dir_write(gd, gadget_fleet_component( # # 'totalfleet', # # name = 'ECOREC', # # suitability = "\n anch function expsuitfuncl50 #constantecorec #l50ecorec ", # # data=aggdata[[1]] # # )) # # rm(aggdata) ##############################################3 # agg <- mfdb_survey_index_mean(mdb, cols = c('data_source'), list( # index_type = 'acoustic', #aggdata<- mfdb_sample_count(mdb, c( 'length'), c( #defaults)) #agg <- mfdb_survey_index_mean(mdb, c("survey"), 
c(list(index_type='acoustic'),defaults)) ########################################################3 #anterior pelago number...ahora con biomasa aggdata<-mfdb_sample_count(mdb, c('length'), c(list(sampling_type='pelago_bio'),defaults)) # #gadget_dir_write(gd, gadget_likelihood_component("surveyindices", # name = "pelagonumber.survey", # weight = 1, # data = aggdata[[1]], # fittype = 'fixedslopeloglinearfit', # slope=1, # biomass=1, # stocknames = c("anch"), # surveynames='pelago')) save(aggdata, file="demo-anchovy/data/pelago.bioindex.Rdata") rm(aggdata) # gadget_dir_write(gd, gadget_file("Aggfiles/surveyindices.pelagonumber.survey.age.agg", list(list(all=c(0,3))))) # gadget_dir_write(gd, gadget_file("Aggfiles/surveyindices.pelagonumber.survey.len.agg", list(list(all=c(3,22))))) ######################################################33 # # aggdata <- mfdb_sample_count(mdb, c('age','length'), c(list( # # sampling_type='pelago_comp',length = mfdb_interval("len", seq(3.5,19.5,0.5))), # # defaults)) # # aggdata <- mfdb_sample_count(mdb, c('age','length'), c(list( # sampling_type='pelago_comp',age=c(0,1,2,3),length = mfdb_interval("len", seq(3.5,19.5,0.5))), # defaults)) # # # # aggdata <- mfdb_sample_count(mdb, c('age', 'length'), c(list( # # sampling_type = 'IGFS', # # species = 'COD', # # length = mfdb_interval("len", seq(0, 150, by = 2))), # # defaults)) # # # gadget_dir_write(gd, gadget_likelihood_component("catchdistribution", # # name = "ldist.ecocadiz0813", # # stocknames = c("anch"), # # weight = 1, # # fleetnames = c("seine"), # # data = aggdata[[1]] # # )) # # gadget_dir_write(gd, gadget_file("Aggfiles/catchdistribution.ldist.ecocadiz0813.age.agg", list(list(all=c(0,3))))) # # # #ECO O SEINE De flota # gadget_dir_write(gd, gadget_likelihood_component("catchdistribution", # name = "ldist.pelago", # stocknames = c("anch"), # weight = 1, # fleetnames = c("PEL"), # data = aggdata[[1]] # )) # # gadget_dir_write(gd, 
gadget_file("Aggfiles/catchdistribution.ldist.ecocadiz0813.age.agg", list(list(all=c(0,3))))) # #ecocadiz rec ##############################33 # rm(aggdata) # # # aggdata <- mfdb_sample_count(mdb, c('age','length'), c(list( # sampling_type='ecocadiz_rec_comp',age=c(0,1,2,3),length = mfdb_interval("len", seq(3.5,19.5,0.5))), # defaults)) # # # # aggdata <- mfdb_sample_count(mdb, c('age', 'length'), c(list( # # sampling_type = 'IGFS', # # species = 'COD', # # length = mfdb_interval("len", seq(0, 150, by = 2))), # # defaults)) # # # gadget_dir_write(gd, gadget_likelihood_component("catchdistribution", # # name = "ldist.ecocadiz0813", # # stocknames = c("anch"), # # weight = 1, # # fleetnames = c("seine"), # # data = aggdata[[1]] # # )) # # gadget_dir_write(gd, gadget_file("Aggfiles/catchdistribution.ldist.ecocadiz0813.age.agg", list(list(all=c(0,3))))) # # # #ECO O SEINE De flota # gadget_dir_write(gd, gadget_likelihood_component("catchdistribution", # name = "ldist.ecocadizrec", # stocknames = c("anch"), # weight = 1, # fleetnames = c("ECOREC"), # data = aggdata[[1]] # )) # # gadget_dir_write(gd, gadget_file("Aggfiles/catchdistribution.ldist.ecocadiz0813.age.agg", list(list(all=c(0,3))))) # # # # # rm(aggdata) ##################33este a?n no lo veo claro aggdata<-mfdb_sample_count(mdb, c('length'), c(list(sampling_type='ecocadiz_bio'),defaults)) gadget_dir_write(gd, gadget_likelihood_component("surveyindices", name = "ecocadiz.survey", weight = 1, data = aggdata[[1]], fittype = 'fixedslopeloglinearfit', slope=1, stocknames = c("anch"), biomass=1, surveynames='ecocadiz')) rm(aggdata) gadget_dir_write(gd, gadget_file("Aggfiles/surveyindices.ecocadiz.survey.age.agg", list(list(all=c(0,3))))) gadget_dir_write(gd, gadget_file("Aggfiles/surveyindices.ecocadiz.survey.len.agg", list(list(all=c(3,22))))) ######################################################## # aggdata<-mfdb_sample_count(mdb, c('length'), c(list(sampling_type='ecocadizrec_bio'),defaults)) # # 
gadget_dir_write(gd, gadget_likelihood_component("surveyindices", # name = "ecocadizrec.survey", # weight = 1, # data = aggdata[[1]], # fittype = 'fixedslopeloglinearfit', # slope=1, # stocknames = c("anch"), # biomass=1, # surveynames='ecocadizrec')) # # rm(aggdata) # gadget_dir_write(gd, gadget_file("Aggfiles/surveyindices.ecocadizrec.survey.age.agg", list(list(all=c(0,3))))) # gadget_dir_write(gd, gadget_file("Aggfiles/surveyindices.ecocadizrec.survey.len.agg", list(list(all=c(3,22))))) # # # # aggdata<-mfdb_sample_count(mdb, c('length'), c(list(sampling_type='sar_bio'),defaults)) # # # gadget_dir_write(gd, gadget_likelihood_component("surveyindices", # name = "sarnumber.survey", # weight = 1, # data = aggdata[[1]], # fittype = 'fixedslopeloglinearfit', # slope=1, # biomass=1, # stocknames = c("anch"))) # # # rm(aggdata) # gadget_dir_write(gd, gadget_file("Aggfiles/surveyindices.sarnumber.survey.age.agg", list(list(all=c(0,3))))) # gadget_dir_write(gd, gadget_file("Aggfiles/surveyindices.sarnumber.survey.len.agg", list(list(all=c(3,22))))) #este ya no aggdata<-mfdb_sample_count(mdb, c('length'), c(list(sampling_type='bocadeva_bio'),defaults)) gadget_dir_write(gd, gadget_likelihood_component("surveyindices", name = "bocadeva.survey", weight = 1, data = aggdata[[1]], fittype = 'fixedslopeloglinearfit', slope=1, biomass=1, stocknames = c("anch"))) rm(aggdata) gadget_dir_write(gd, gadget_file("Aggfiles/surveyindices.bocadeva.survey.age.agg", list(list(all=c(0,3))))) gadget_dir_write(gd, gadget_file("Aggfiles/surveyindices.bocadeva.survey.len.agg", list(list(all=c(3,22))))) # aggdata<-mfdb_sample_count(mdb, c('length'), c(list(sampling_type='arsa_bio'),defaults)) # # # gadget_dir_write(gd, gadget_likelihood_component("surveyindices", # name = "arsa.survey", # weight = 1, # data = aggdata[[1]], # fittype = 'fixedslopeloglinearfit', # slope=1, # biomass=1, # stocknames = c("anch"))) # # # rm(aggdata) # gadget_dir_write(gd, 
gadget_file("Aggfiles/surveyindices.arsa.survey.age.agg", list(list(all=c(0,3))))) # gadget_dir_write(gd, gadget_file("Aggfiles/surveyindices.arsa.survey.len.agg", list(list(all=c(3,22))))) # aggdata <- mfdb_sample_count(mdb, c('age','length'), c(list( # sampling_type='arsa_comp',length = mfdb_interval("len", seq(3,20,1)), age=c(0,1,2,3)), # defaults)) # # # # # gadget_dir_write(gd, gadget_likelihood_component("catchdistribution", # name = "ldist.arsa", # stocknames = c("anch"), # weight = 1, # fleetnames = c("ARSA"), # data = aggdata[[1]] # )) # gadget_dir_write(gd, gadget_file("Aggfiles/catchdistribution.ldist.ecocadiz0813.age.agg", list(list(all=c(0,3))))) #rm(aggdata) #este ya no aggdata<-mfdb_sample_meanlength(mdb, c('age'), c(list(sampling_type='SEA',age =c(0,1,2,3)), defaults)) gadget_dir_write(gd, gadget_likelihood_component("catchstatistics", name = "length.at.age", weight = 1, data = na.omit(aggdata[[1]]), fleetnames = c("seine"), stocknames = c("anch"))) ######## #Suitability for the survey alpha<-0.08 l<-seq(0,20,0.5) l50<-50 expsuitl50_pre<-1+exp(-alpha*(l-l50)) expsuitl50<-expsuitl50_pre^-1 plot(l,expsuitl50,xlim=c(0,20)) #Para fleet.seine.data sum(as.numeric(as.character(def_ecocadiz0813$count))) ##################################################################### ## Age IGFS aggdata <- mfdb_sample_count(mdb, c('age', 'length'), c(list(sampling_type = 'IGFS', age = mfdb_step_interval('age',by=1,from=1,to=12), species='COD', length = mfdb_interval("len", seq(0, 150, by = 4))), defaults)) gadget_dir_write(gd, gadget_likelihood_component("catchdistribution", name = "aldist.igfs", weight = 1, data = aggdata[[1]], fleetnames = c("igfs"), stocknames = c("codimm", "codmat"))) rm(aggdata) ## Maturity @3 from IGFS aggdata <- mfdb_sample_count(mdb, c('maturity_stage', 'length'), append(defaults, list(sampling_type='IGFS', age=mfdb_group(age=3), length = mfdb_step_interval('len', by = 2, to = 100), maturity_stage = mfdb_group(codimm = 1, codmat = 2:5)))) 
gadget_dir_write(gd, gadget_likelihood_component("stockdistribution", name = "matp.igfs", weight = 1, data = aggdata[[1]], fleetnames = c("igfs"), stocknames = c("codimm", "codmat"))) ## Query length data to create AUT catchdistribution components aggdata <- mfdb_sample_count(mdb, c('age', 'length'), c(list( sampling_type = 'AUT', species = 'COD', length = mfdb_interval("len", seq(0, 150, by = 2))), defaults)) gadget_dir_write(gd, gadget_likelihood_component("catchdistribution", name = "ldist.aut", weight = 1, data = aggdata[[1]], fleetnames = c("aut"), stocknames = c("codimm", "codmat"))) rm(aggdata) ## Age AUT aggdata <- mfdb_sample_count(mdb, c('age', 'length'), c(list(sampling_type = 'AUT', age = mfdb_step_interval('age',by=1,from=1,to=12), length = mfdb_interval("len", seq(0, 150, by = 4))), defaults)) gadget_dir_write(gd, gadget_likelihood_component("catchdistribution", name = "aldist.aut", weight = 1, data = aggdata[[1]], fleetnames = c("aut"), stocknames = c("codimm", "codmat"))) rm(aggdata) ## Query length data to create COMM catchdistribution components aggdata <- mfdb_sample_count(mdb, c('age', 'length'), c(list( sampling_type = 'SEA', species = 'COD', length = mfdb_interval("len", seq(0, 150, by = 2))), defaults)) gadget_dir_write(gd, gadget_likelihood_component("catchdistribution", name = "ldist.comm", weight = 1, data = aggdata[[1]], fleetnames = c("comm"), stocknames = c("codimm", "codmat"))) rm(aggdata) ## Age AUT aggdata <- mfdb_sample_count(mdb, c('age', 'length'), c(list(sampling_type = 'SEA', age = mfdb_step_interval('age',by=1,from=1,to=12), length = mfdb_interval("len", seq(0, 150, by = 4))), defaults)) gadget_dir_write(gd, gadget_likelihood_component("catchdistribution", name = "aldist.comm", weight = 1, data = aggdata[[1]], fleetnames = c("comm"), stocknames = c("codimm", "codmat"))) rm(aggdata) ## IGFS survey indices igfs.SI1 <- mfdb_sample_count(mdb, c('length'), c(list( sampling_type = 'IGFS', length = mfdb_interval("len", c(4,17))), 
defaults)) igfs.SI2 <- mfdb_sample_count(mdb, c('length'), c(list( sampling_type = 'IGFS', length = mfdb_interval("len", c(17,33))), defaults)) igfs.SI3 <- mfdb_sample_count(mdb, c( 'length'), c(list( sampling_type = 'IGFS', length = mfdb_interval("len", c(33,140))), defaults)) gadget_dir_write(gd, gadget_likelihood_component("surveyindicies", name = "si.gp1", weight = 1, data = igfs.SI1[[1]], fittype = 'fixedslopeloglinearfit', slope=1, stocknames = c("codimm"))) gadget_dir_write(gd, gadget_likelihood_component("surveyindicies", name = "si.gp2", weight = 1, data = igfs.SI2[[1]], fittype = 'fixedslopeloglinearfit', slope=1, stocknames = c("codimm","codmat"))) gadget_dir_write(gd, gadget_likelihood_component("surveyindicies", name = "si.gp3", weight = 1, data = igfs.SI3[[1]], fittype = 'fixedslopeloglinearfit', slope=1, stocknames = c("codimm","codmat"))) ## AUT survey indices igfs.SI1 <- mfdb_sample_count(mdb, c('length'), c(list( sampling_type = 'IGFS', length = mfdb_interval("len", c(16,27))), defaults)) igfs.SI2 <- mfdb_sample_count(mdb, c('length'), c(list( sampling_type = 'IGFS', length = mfdb_interval("len", c(27,39))), defaults)) igfs.SI3 <- mfdb_sample_count(mdb, c( 'length'), c(list( sampling_type = 'IGFS', length = mfdb_interval("len", c(39,140))), defaults)) gadget_dir_write(gd, gadget_likelihood_component("surveyindicies", name = "si.gp1a", weight = 1, data = igfs.SI1[[1]], fittype = 'fixedslopeloglinearfit', slope=1, stocknames = c("codimm"))) gadget_dir_write(gd, gadget_likelihood_component("surveyindicies", name = "si.gp2a", weight = 1, data = igfs.SI2[[1]], fittype = 'fixedslopeloglinearfit', slope=1, stocknames = c("codimm","codmat"))) gadget_dir_write(gd, gadget_likelihood_component("surveyindicies", name = "si.gp3a", weight = 1, data = igfs.SI3[[1]], fittype = 'fixedslopeloglinearfit', slope=1, stocknames = c("codimm","codmat"))) # For plots from Bjarki # ggplot(seine.ldist,aes(length,count))+geom_line() + facet_wrap(~year+month) # > 
ggplot(seine.ldist,aes(length,count))+geom_line() + facet_wrap(~year+month,scale='free_y') # > ggplot(subset(seine.ldist,year>2010),aes(length,count))+geom_line() + facet_wrap(~year+month,scale='free_y') #out$catchdist.fleets %>% filter(name=='catches.agedist') %>% group_by(year,step,age) %>% summarise(o.ml = sum(observed*avg.length)/sum(observed), p.ml=sum(predicted*avg.length)/sum(predicted)) %>% ggplot(aes(age,o.ml)) + geom_point() #Para ver la distribuci?n original #out$catchdist.fleets %>% filter(name=='ldist.pelago') %>% ggplot(aes(age,number.x))+geom_point()+facet_wrap(~year+step)
/Gadget3-demo-anchovy/setupanchovy_fromcesga_mejorado_2020_conpelago_ycatches2020_ysumarryplots_noLDyALK2020_1_2.r
no_license
mmrinconh/Gadget3_anchovy
R
false
false
118,317
r
#Reproduce carpetas básicas de Anchovy79 #lo he despertado el 27 de abril de 2021 para solucionar un error con el fleet, hay que cerrar R e instalar #devtools::install_github("mareframe/mfdb", ref = "3.x") #devtools::install_github("mareframe/mfdb", ref="3.x") #devtools::install_github('hafro/rgadget') #remotes::install_github('mareframe/mfdb', ref = 'd4bbd4e') #Weight in kilos and numbers require to be multiplied by 1000 to be in units and not thousands #Catchdistribution de las surveys y las acústicas solo biomasa #setwd("~/GADGET/Assessment/Assessment2020_pandemia") #setwd("~/GADGET/Assessment/Assessment_2021") library(mfdb) library(gdata) library(XLConnect) # load XLConnect package #library(plyr) library(reshape2) ## Create a gadget directory, define some defaults to use with our queries below exists("mdb_compi") #mdb_comp <- mfdb('Ibera', db_params=list(dbname="mf")) #mfdb('Ibera',destroy_schema = TRUE)#, db_params=list(dbname="mf")) mdb_compi <- mfdb('Ibera.duckdb')#, db_params=list(dbname="mf")) #gd <- gadget_directory("Anchovy2021_withLDandALK2020_1_2_data") #gd <- gadget_directory("Anchovy2021_withLD2018_2019_1_2_andALKpelago2020") area<-data.frame(1,"IXa",13000) colnames(area)<-c("id","name","size") yearsb<-c(1988:2020) month<- rep(1:12, times = length(yearsb)) year<-rep(yearsb, times = 1, length.out = NA, each = 12) areacell<-rep("IXa", times = length(yearsb)*12) id<-rep(1, times = length(yearsb)*12) temperatureb<-rep(5, times = length(yearsb)*12) temperature<-data.frame(id,year,month,areacell,temperatureb) colnames(temperature)<-c("id","year", "month","areacell","temperature") #pelago = read.xls("/home/marga/GADGET/DATOS/pelago_99_14_simp.xls")#estas son las portuguesas #ecocadiz = read.xls("/home/marga/GADGET/DATOS/ecocadiz_04_13_simp.xls") #el 2013 debe coincidir con el total de ecocadiz0813 estas son las espa??olas sar = read.xls("/home/marga/GADGET/DATOS/sar_98_07.xls") catch_age=read.xls("/home/marga/GADGET/DATOS/catch_at_age_numbers88_13.xls") 
length_age=read.xls("/home/marga/GADGET/DATOS/catch_length_at_age_88_13_3.xls") #weight_age=read.csv("/home/marga/GADGET/DATOS/catches_weight_at_age_kg88_13_3.csv",strip.white = TRUE, sep=",", na.strings= c("999", "NA", " ", ""),stringsAsFactors=FALSE) #weight_age=read.csv("/home/marga/GADGET/DATOS/catches_weight_at_age_kg88_13_2.csv") weight_age=read.xls("/home/marga/GADGET/DATOS/catches_weight_at_age_kg88_13_5.xls") Catches_Ptg<-read.xls("/home/marga/GADGET/DATOS/Algarve/ANE_1989_2016 WGPELA_ENVIO_CORRIGIDO.xls") require(dplyr) require(tidyr) Catches_Algarve<-Catches_Ptg %>% filter(AREATYPE=="27.9.a.s.a") %>% group_by(YEAR,SEASON)%>% summarise(totalton=sum(CATON))%>% ungroup() %>% complete(YEAR, SEASON, fill = list(totalton = 0)) #Cambiar aquí totalton #T1 - 10079.6 Kg #T2 - 1953,5 Kg #T3 - 12871.0 Kg #T4 - 1206.6 Kg Catches_Algarve<-add_row(Catches_Algarve, YEAR=2017, SEASON=1:4, totalton=c(10079.6,1953.5,12871,1206.6)*0.001) #porque estaba en kg se multiplica por 0.001 para que quede en tons CREO 2019 hay que verificar # Trim1 1431,3 Kg sacados de LandingsPortugal2018 Folha1 y Sul es lo que me interesa # # Trim2 11785,3 Kg # # Trim3 52035,7 Kg # # Trim4 83,6 Kg Catches_Algarve<-add_row(Catches_Algarve, YEAR=2018, SEASON=1:4, totalton=c(1431.3,11785.3,52035.7,83.6)*0.001) #porque estaba en kg se multiplica por 0.001 para que quede en tons CREO 2019 hay que verificar Catches_Algarve<-add_row(Catches_Algarve, YEAR=2019, SEASON=1:4, totalton=c(0,0,4,109)) #porque estaba en ya en tons ver /home/marga/GADGET/DATOS/Algarve/Landings_Metier_9aS_alg_2019.xlsx Catches_Algarve<-add_row(Catches_Algarve, YEAR=2020, SEASON=1:4, totalton=c(1.6059,67.2002, 73.5084,12.23)) #estaba en Kg pero en el excel lo he multiplicado por 0.001 #require(tidyr) # #Year4bio2num uno menos que el año en curso #Catches Algarve in biomass to number using BOQUERON_ALK cadiz y suma de números Cdaiz y Algarve en CatchesAllFleetsPTSP #Year4bio2num<-2018 Year4bio2num<-2020 
NUMPT<-data.frame(YEAR=rep(1989:Year4bio2num,each=4), SEASON=rep(1:4,each=length(1989:Year4bio2num)),NUMBERPT=rep(0,each=length(1989:Year4bio2num)*4),NUMBERSP=rep(0,each=length(1989:Year4bio2num)*4)) BIOMTOT<-data.frame(YEAR=rep(1989:Year4bio2num,each=4), SEASON=rep(1:4,each=length(1989:Year4bio2num)),TONPT=rep(0,each=length(1989:Year4bio2num)*4),TONSP=rep(0,each=length(1989:Year4bio2num)*4)) a<-1 for (i in 1989:2013){ for (j in 1:4){ bioq<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/Taledas_allfleets_1988_2016/BOQUERON_ALK_",i,".xls",sep=""), sheet = paste(j,"Q",sep=""), header = F, startCol = 9, startRow = 2, endCol = 9, endRow = 2, useCachedValues=T)*0.001 #Para que quede en toneladas (está en kilos) numberq<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/Taledas_allfleets_1988_2016/BOQUERON_ALK_",i,".xls",sep=""), sheet = paste(j,"Q",sep=""), header = F, startCol = 9, startRow = 38, endCol = 9, endRow = 38, useCachedValues=T) biopt<-Catches_Algarve %>% filter(YEAR==i,SEASON==j)%>%select(totalton) numpt<-biopt$totalton*numberq/bioq NUMPT[a,]<-c(i,j,numpt,numberq) BIOMTOT[a,]<-c(i,j,biopt,bioq) a<-a+1 } } ##actualizar loop hasta for (i in 2014:Year4bio2num){ for (j in 1:4){ bioq<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/Taledas_allfleets_1988_2016/BOQUERON_ALK_",i,".xls",sep=""), sheet = paste(j,"Q",sep=""), header = F, startCol = 9, startRow = 2, endCol = 9, endRow = 2, useCachedValues=T)*0.001 #Para que quede en toneladas numberq<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/Taledas_allfleets_1988_2016/BOQUERON_ALK_",i,".xls",sep=""), sheet = paste(j,"Q",sep=""), header = F, startCol = 9, startRow = 40, endCol = 9, endRow = 40, useCachedValues=T) biopt<-Catches_Algarve %>% filter(YEAR==i,SEASON==j)%>%select(totalton) numpt<-biopt$totalton*numberq/bioq BIOMTOT[a,]<-c(i,j,biopt,bioq) #en toneladas NUMPT[a,]<-c(i,j,numpt,numberq) a<-a+1 } } CatchesAllFleetsPTSP<-NUMPT%>%mutate(ALL=NUMBERPT+NUMBERSP) #Trato de leer todas las 
tablas en un loop #pelagos #La del 2013 y 2014 es la del IEO porque IPIMAR sale la cosa rara, esto no es alk, esto está ponderado mybiglist <- list() for (i in c(1999,2001:2003,2005:2009)){ name<-paste("Pelago",i,sep="") a<-readWorksheetFromFile("/home/marga/GADGET/DATOS/Acústica en 9aS_TallasyEdades.xls", sheet = paste(i,"Port",sep=""), header = TRUE, startCol = 13, startRow = 5, endCol = 17, endRow = 35, useCachedValues=TRUE) names(a)<-c("length","0", "1", "2", "3") a$year<-i a$month<-4 a$type<-"pelago_comp" a[is.na(a)] <- 0 mybiglist[[name]] <- a } for (i in c(2010,2013,2014)){ name<-paste("Pelago",i,sep="") a<-readWorksheetFromFile("/home/marga/GADGET/DATOS/Acústica en 9aS_TallasyEdades.xls", sheet = paste(i,"Port",sep=""), header = TRUE, startCol = 11, startRow = 5, endCol = 15, endRow = 35, useCachedValues=TRUE) b<-readWorksheetFromFile("/home/marga/GADGET/DATOS/Acústica en 9aS_TallasyEdades.xls", sheet = paste(i,"Port",sep=""), header = F, startCol = 9, startRow = 2, endCol = 9, endRow = 2, useCachedValues=TRUE) names(a)<-c("length","0", "1", "2", "3") a$total<-b[1,1]*0.001 #pasarlo a tons que luego se multiplica por 1000 para que quede en Kg a$year<-i a$month<-4 a$type<-"pelago_comp" a[is.na(a)] <- 0 mybiglist[[name]] <- a } mybiglist$Pelago1999$month<-3 mybiglist$Pelago2001$month<-3 mybiglist$Pelago2002$month<-3 mybiglist$Pelago2003$month<-2 #para 2017 no hay datos de ALK para Algarve así que pongo sólo lo de Cádiz, esto no va aquí esto está ponderado for (i in c(2015:2021)){ name<-paste("Pelago",i,sep="") a<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/PELAGO/Pelago",i,".xls",sep=""), sheet = "Sheet1", header = TRUE, startCol = 1, startRow = 6, endCol = 5, endRow = 26, useCachedValues=TRUE) b<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/PELAGO/Pelago",i,".xls",sep=""), sheet = "Sheet1", header = F, startCol = 10, startRow = 5, endCol = 10, endRow = 5, useCachedValues=TRUE) names(a)<-c("length","0", "1", "2", "3") a$year<-i a$month<-4 
a$type<-"pelago_comp" a$total<-b[1,1] a[is.na(a)] <- 0 mybiglist[[name]] <- a } df<-plyr::ldply(mybiglist,data.frame) pelago_bio_pre<-cbind(df[7], df[10]*1000, df[8]) pelago_bio_pre<-unique(na.omit(pelago_bio_pre)) df<-df[-10] names(df)<-c(".id", "length", "0", "1", "2", "3", "year", "month", "type") pelago_comp<-df #only_Length_distribution_pelago mybiglist <- list() for (i in c(1999,2001:2003,2005:2009)){ name<-paste("Pelago",i,sep="") a<-readWorksheetFromFile("/home/marga/GADGET/DATOS/Acústica en 9aS_TallasyEdades.xls", sheet = paste(i,"Port",sep=""), header = TRUE, startCol = 10, startRow = 5, endCol = 11, endRow = 35, useCachedValues=TRUE) names(a)<-c("length","count") a$year<-i a$month<-4-3 a$type<-"pelago_comp_ld" a[is.na(a)] <- 0 mybiglist[[name]] <- a } for (i in c(2010,2013,2014)){ name<-paste("Pelago",i,sep="") a<-readWorksheetFromFile("/home/marga/GADGET/DATOS/Acústica en 9aS_TallasyEdades.xls", sheet = paste(i,"Port",sep=""), header = TRUE, startCol = 8, startRow = 5, endCol = 9, endRow = 35, useCachedValues=TRUE) names(a)<-c("length","count") a$year<-i a$month<-4-3 a$type<-"pelago_comp_ld" a[is.na(a)] <- 0 mybiglist[[name]] <- a } mybiglist$Pelago1999$month<-11 #3+3 mybiglist$Pelago1999$year<-1998 mybiglist$Pelago2001$month<-11 #3+3 mybiglist$Pelago2001$year<-2000 mybiglist$Pelago2002$month<-11#3+3 mybiglist$Pelago2002$year<-2001 mybiglist$Pelago2003$month<-10 #2+3 mybiglist$Pelago2003$year <-2002 name<-"Pelago2015" #en millones a<-readWorksheetFromFile("/home/marga/GADGET/DATOS/PELAGO/ANE_PELAGO15_Results.xls", sheet = "9a S", header = TRUE, startCol = 13, startRow = 2, endCol = 14, endRow = 33, useCachedValues=TRUE) i<-2015 # b<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/PELAGO/Pelago",i,".xls",sep=""), sheet = "Sheet1", # header = F, startCol = 10, # startRow = 5, endCol = 10, # endRow = 5, useCachedValues=TRUE) names(a)<-c("length","count") a$year<-2015 a$month<-4-3 a$type<-"pelago_comp_ld" #a$total<-b[1,1] a[is.na(a)] <- 0 
a$count<-a$count*1000000 mybiglist[[name]] <- a name<-"Pelago2016" #en miles a<-readWorksheetFromFile("/home/marga/GADGET/DATOS/PELAGO/PELAGO16_ANE_RESULTS_FINAL.xls", sheet = "AbundanceBiomassANE_FINAL", header = TRUE, startCol = 1, startRow = 66, endCol = 2, endRow = 86, useCachedValues=TRUE) i<-2016 # b<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/PELAGO/Pelago",i,".xls",sep=""), sheet = "Sheet1", # header = F, startCol = 10, # startRow = 5, endCol = 10, # endRow = 5, useCachedValues=TRUE) names(a)<-c("length","count") a$year<-2016 a$month<-4-3 a$type<-"pelago_comp_ld" #a$total<-b[1,1] a[is.na(a)] <- 0 a$count<-a$count*1000 mybiglist[[name]] <- a #######Agregar pelago 2017 ld name<-"Pelago2017" #en miles a<-readWorksheetFromFile("/home/marga/GADGET/DATOS/PELAGO/results-ane-pel17_fv.xlsx", sheet = "CAD", header = TRUE, startCol = 1, startRow = 49, endCol = 2, endRow = 73, useCachedValues=TRUE) i<-2017 # b<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/PELAGO/Pelago",i,".xls",sep=""), sheet = "Sheet1", # header = F, startCol = 10, # startRow = 5, endCol = 10, # endRow = 5, useCachedValues=TRUE) names(a)<-c("length","count") a$year<-2017 a$month<-4-3 a$type<-"pelago_comp_ld" #a$total<-b[1,1] a[is.na(a)] <- 0 a$count<-a$count*1000 mybiglist[[name]] <- a #######Agregar pelago 2018 ld name<-"Pelago2018" #en miles a<-readWorksheetFromFile("/home/marga/GADGET/DATOS/PELAGO/results-ANE-PEL18.xlsx", sheet = "algarve+cadiz", header = TRUE, startCol = 1, startRow = 1, endCol = 2, endRow = 30, useCachedValues=TRUE) i<-2018 # b<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/PELAGO/Pelago",i,".xls",sep=""), sheet = "Sheet1", # header = F, startCol = 10, # startRow = 5, endCol = 10, # endRow = 5, useCachedValues=TRUE) names(a)<-c("length","count") a$year<-2018 a$month<-4-3 a$type<-"pelago_comp_ld" #a$total<-b[1,1] a[is.na(a)] <- 0 a$count<-a$count*1000 mybiglist[[name]] <- a #######Agregar pelago 2019 ld name<-"Pelago2019" #en miles 
a<-readWorksheetFromFile("/home/marga/GADGET/DATOS/PELAGO/results-ANE-PEL19.xlsx", sheet = "algarve+cadiz", header = TRUE, startCol = 1, startRow = 1, endCol = 2, endRow = 30, useCachedValues=TRUE) i<-2019 # b<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/PELAGO/Pelago",i,".xls",sep=""), sheet = "Sheet1", # header = F, startCol = 10, # startRow = 5, endCol = 10, # endRow = 5, useCachedValues=TRUE) names(a)<-c("length","count") a$year<-2019 a$month<-4-3 a$type<-"pelago_comp_ld" #a$total<-b[1,1] a[is.na(a)] <- 0 a$count<-a$count*1000 mybiglist[[name]] <- a #######Agregar pelago 2020 ld name<-"Pelago2020" #en miles a<-readWorksheetFromFile("/home/marga/GADGET/DATOS/PELAGO/results-ANE-PEL20.xlsx", sheet = "algarve+cadiz", header = TRUE, startCol = 1, startRow = 1, endCol = 2, endRow = 30, useCachedValues=TRUE) i<-2020 # b<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/PELAGO/Pelago",i,".xls",sep=""), sheet = "Sheet1", # header = F, startCol = 10, # startRow = 5, endCol = 10, # endRow = 5, useCachedValues=TRUE) names(a)<-c("length","count") a$year<-2020 a$month<-4-3 a$type<-"pelago_comp_ld" #a$total<-b[1,1] a[is.na(a)] <- 0 a$count<-a$count*1000 mybiglist[[name]] <- a #######Agregar pelago 2021 ld name<-"Pelago2021" #en miles a<-readWorksheetFromFile("/home/marga/GADGET/DATOS/PELAGO/results-ANE-PEL21.xlsx", sheet = "algarve+cadiz", header = TRUE, startCol = 1, startRow = 1, endCol = 2, endRow = 30, useCachedValues=TRUE) i<-2021 # b<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/PELAGO/Pelago",i,".xls",sep=""), sheet = "Sheet1", # header = F, startCol = 10, # startRow = 5, endCol = 10, # endRow = 5, useCachedValues=TRUE) names(a)<-c("length","count") a$year<-2021 a$month<-4-3 a$type<-"pelago_comp_ld" #a$total<-b[1,1] a[is.na(a)] <- 0 a$count<-a$count*1000 mybiglist[[name]] <- a # name<-"Pelago2000" #en miles # d<-readWorksheetFromFile("/home/marga/GADGET/DATOS/PELAGO/biq2000.xls", sheet = "ALgarve", # header = TRUE, startCol = 1, # startRow = 1, 
endCol = 2, # endRow = 30, useCachedValues=TRUE) # c<-readWorksheetFromFile("/home/marga/GADGET/DATOS/PELAGO/biq2000.xls", sheet = "CADIZ", # header = TRUE, startCol = 1, # startRow = 1, endCol = 2, # endRow =30, useCachedValues=TRUE) # d[is.na(d)] <- 0 # c[is.na(c)] <- 0 # a<-data.frame(cbind(d$L,(d$mil+c$mil)*1000)) # # # b<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/PELAGO/Pelago",i,".xls",sep=""), sheet = "Sheet1", # # header = F, startCol = 10, # # startRow = 5, endCol = 10, # # endRow = 5, useCachedValues=TRUE) # names(a)<-c("length","count") # a$year<-2000 # a$month<-4-3 # a$type<-"pelago_comp_ld" # #a$total<-b[1,1] # a[is.na(a)] <- 0 # # a$count<-a$count*1000 # # mybiglist[[name]] <- a df3<-plyr::ldply(mybiglist,data.frame) #pelago_bio_pre<-cbind(df[7], df[10]*1000, df[8]) #pelago_bio_pre<-unique(na.omit(pelago_bio_pre)) #df3<-df3[-10] #names(df3)<-c(".id", "length", "0", "1", "2", "3", "year", "month", "type") names(df3)<-c(".id", "length", "count", "year", "month", "sampling_type") pelago_comp_ld<-df3[,1:6] pelago_comp_ld$areacell<-"IXa" pelago_comp_ld$species<-'ANE' pelago.ldistnoage<-pelago_comp_ld[,2:8] #aldist_pelago solo 14 15 16 17 18 19 y 20 2013 no porque no tengo la ALK mybiglist <- list() name<-"alPelago2014" d<-readWorksheetFromFile("/home/marga/GADGET/DATOS/PELAGO/PELAGO14_ANE_ALK_WCOAST_sg.xlsx", sheet = "alg", header = TRUE, startCol = 6, startRow = 7, endCol = 9, endRow = 22, useCachedValues=TRUE) c<-readWorksheetFromFile("/home/marga/GADGET/DATOS/PELAGO/PELAGO14_ANE_ALK_WCOAST_sg.xlsx", sheet = "cad", header = TRUE, startCol = 6, startRow = 7, endCol = 9, endRow =22, useCachedValues=TRUE) d[is.na(d)] <- 0 c[is.na(c)] <- 0 a<-data.frame(cbind(c$Row.Labels,0,d$X1+c$X1,c$X2+d$X2,c$X3+d$X3)) names(a)<-c("length", "0","1", "2", "3") a$year<-2014 a$month<-4-3 a$type<-"pelago_comp_ald" a[is.na(a)] <- 0 mybiglist[[name]] <- a name<-"alPelago2015" d<-readWorksheetFromFile("/home/marga/GADGET/DATOS/PELAGO/ANE_PELAGO15_Results_sg.xlsx", 
sheet = "Algarve", header = TRUE, startCol = 17, startRow = 4, endCol = 20, endRow = 31, useCachedValues=TRUE) c<-readWorksheetFromFile("/home/marga/GADGET/DATOS/PELAGO/ANE_PELAGO15_Results_sg.xlsx", sheet = "CADIZ", header = TRUE, startCol = 17, startRow = 4, endCol = 20, endRow =31, useCachedValues=TRUE) d[is.na(d)] <- 0 c[is.na(c)] <- 0 a<-data.frame(cbind(c$Col1,0,d$X1+c$X1,c$X2+d$X2,c$X3+d$X3)) names(a)<-c("length", "0","1", "2", "3") a$year<-2015 a$month<-4-3 a$type<-"pelago_comp_ald" a[is.na(a)] <- 0 mybiglist[[name]] <- a name<-"alPelago2016" a<-readWorksheetFromFile("/home/marga/GADGET/DATOS/PELAGO/PELAGO16_ANE_RESULTS_FINAL_sg.xlsx", sheet = "ALKs_FINAL", header = TRUE, startCol = 8, startRow = 6, endCol = 11, endRow = 28, useCachedValues=TRUE) a<-data.frame(cbind(a$Col1,0,a$X1,a$X2,a$X3)) names(a)<-c("length", "0","1", "2", "3") a$year<-2016 a$month<-4-3 a$type<-"pelago_comp_ald" a[is.na(a)] <- 0 mybiglist[[name]] <- a i<-2017 name<-"alPelago2017" #solo cadiz no hay Algarve a<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/PELAGO/Pelago",i,".xls",sep=""), sheet = "Sheet1", header = TRUE, startCol = 1, startRow = 5, endCol = 4, endRow = 26, useCachedValues=TRUE) a<-data.frame(cbind(a[,1],0,a$X1,a$X2,a$X3)) #a<-data.frame(cbind(a$Col1,0,a$X1,a$X2,a$X3)) names(a)<-c("length", "0","1", "2", "3") a$year<-2017 a$month<-4-3 a$type<-"pelago_comp_ald" a[is.na(a)] <- 0 mybiglist[[name]] <- a i<-2018 name<-"alPelago2018" #solo cadiz no hay Algarve a<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/PELAGO/Pelago",i,".xls",sep=""), sheet = "Sheet1", header = TRUE, startCol = 1, startRow = 5, endCol = 4, endRow = 26, useCachedValues=TRUE) #a<-data.frame(cbind(a$Col1,0,a$X1,a$X2,a$X3)) a<-data.frame(cbind(a[,1],0,a$X1,a$X2,a$X3)) names(a)<-c("length", "0","1", "2", "3") #names(a)<-c("length", "1", "2", "3") a$year<-2018 a$month<-4-3 a$type<-"pelago_comp_ald" a[is.na(a)] <- 0 mybiglist[[name]] <- a i<-2019 name<-"alPelago2019" #solo cadiz no hay Algarve 
a<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/PELAGO/Pelago",i,".xls",sep=""), sheet = "Sheet1", header = TRUE, startCol = 1, startRow = 5, endCol = 4, endRow = 26, useCachedValues=TRUE) a<-data.frame(cbind(a[,1],0,a$X1,a$X2,a$X3)) names(a)<-c("length", "0","1", "2", "3") #names(a)<-c("length", "1", "2", "3") a$year<-2019 a$month<-4-3 a$type<-"pelago_comp_ald" a[is.na(a)] <- 0 mybiglist[[name]] <- a i<-2020 name<-"alPelago2020" #cadiz y Algarve a<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/PELAGO/Pelago",i,".xls",sep=""), sheet = "Sheet1", header = TRUE, startCol = 1, startRow = 5, endCol = 4, endRow = 26, useCachedValues=TRUE) #a<-data.frame(cbind(a$Col1,0,a$X1,a$X2,a$X3)) a<-data.frame(cbind(a[,1],0,a$X1,a$X2,a$X3)) names(a)<-c("length", "0","1", "2", "3") #names(a)<-c("length", "1", "2", "3") a$year<-2020 a$month<-4-3 a$type<-"pelago_comp_ald" a[is.na(a)] <- 0 mybiglist[[name]] <- a i<-2021 name<-"alPelago2021" #cadiz y Algarve a<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/PELAGO/Pelago",i,".xls",sep=""), sheet = "Sheet1", header = TRUE, startCol = 1, startRow = 5, endCol = 4, endRow = 29 , useCachedValues=TRUE) #a<-data.frame(cbind(a$Col1,0,a$X1,a$X2,a$X3)) a<-data.frame(cbind(a[,1],0,a$X1,a$X2,a$X3)) names(a)<-c("length", "0","1", "2", "3") #names(a)<-c("length", "1", "2", "3") a$year<-2021 a$month<-4-3 a$type<-"pelago_comp_ald" a[is.na(a)] <- 0 mybiglist[[name]] <- a # b<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/PELAGO/Pelago",i,".xls",sep=""), sheet = "Sheet1", # header = F, startCol = 10, # startRow = 5, endCol = 10, # endRow = 5, useCachedValues=TRUE) df4<-plyr::ldply(mybiglist,data.frame) #pelago_bio_pre<-cbind(df[7], df[10]*1000, df[8]) #pelago_bio_pre<-unique(na.omit(pelago_bio_pre)) #df3<-df3[-10] names(df4)<-c(".id", "length", "0", "1", "2", "3", "year", "month", "type") pelago_comp_ald<-df4 ######################### #ecocadiz mybiglist <- list() B0<-data.frame(year=c(1,2),total=c(1,2)) fi<-1 for (i in 
c(2004,2006,2007,2009,2010)){ name<-paste("Ecocadiz",i,sep="") a<-readWorksheetFromFile("/home/marga/GADGET/DATOS/Acústica en 9aS_TallasyEdades.xls", sheet = paste(i,"Esp",sep=""), header = TRUE, startCol = 13, startRow = 5, endCol = 17, endRow = 37, useCachedValues=TRUE) bioage0<-readWorksheetFromFile("/home/marga/GADGET/DATOS/Acústica en 9aS_TallasyEdades.xls", sheet = paste(i,"Esp",sep=""), header = F, startCol = 5, startRow = 92, endCol = 5, endRow = 92, useCachedValues=TRUE) B0[fi,]<-c(i,bioage0[1,1]) names(a)<-c("length","0", "1", "2", "3") a$year<-i a$month<-7 a$type<-"ecocadiz_comp" a[is.na(a)] <- 0 mybiglist[[name]] <- a fi<-fi+1 } for (i in c(2013,2014)){ name<-paste("Ecocadiz",i,sep="") a<-readWorksheetFromFile("/home/marga/GADGET/DATOS/Acústica en 9aS_TallasyEdades.xls", sheet = paste(i,"Esp",sep=""), header = TRUE, startCol = 11, startRow = 5, endCol = 15, endRow = 35, useCachedValues=TRUE) bioage0<-readWorksheetFromFile("/home/marga/GADGET/DATOS/Acústica en 9aS_TallasyEdades.xls", sheet = paste(i,"Esp",sep=""), header = F, startCol = 5, startRow = 92, endCol = 5, endRow = 92, useCachedValues=TRUE) B0[fi,]<-c(i,bioage0[1,1]) names(a)<-c("length","0", "1", "2", "3") a$year<-i a$month<-7 a$type<-"ecocadiz_comp" a[is.na(a)] <- 0 mybiglist[[name]] <- a fi<-fi+1 } mybiglist$Ecocadiz2004$month<-6 mybiglist$Ecocadiz2006$month<-6 mybiglist$Ecocadiz2013$month<-8 B<-data.frame(year=c(1,2),total=c(1,2),month=c(1,2)) #ojo a tiene valores ponderados pero que no se van a usar, sin embargo B nos interesa y bioage0 es para restar la biomasa de la edad 0 de la biomasa total, me falta hacerlo en 2010,2013 y 2014 for (i in c(2015:2020)){ name<-paste("Ecocadiz",i,sep="") a<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/ECOCADIZ/ALK_ECOCADIZ_",i,".xls",sep=""), sheet = "ALK_GENERAL_BOQUERON", header = TRUE, startCol = 11, startRow = 7, endCol = 15, endRow = 38, useCachedValues=TRUE) 
b<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/ECOCADIZ/ALK_ECOCADIZ_",i,".xls",sep=""), sheet = "ALK_GENERAL_BOQUERON", header = F, startCol = 9, startRow = 3, endCol = 9, endRow = 3, useCachedValues=T) bioage0<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/ECOCADIZ/ALK_ECOCADIZ_",i,".xls",sep=""), sheet = "ALK_GENERAL_BOQUERON", header = F, startCol = 5, startRow = 103, endCol = 5, endRow = 103, useCachedValues=T) names(a)<-c("length","0", "1", "2", "3") a$year<-i B[(i-2014),]$year<-i a$month<-7 B[(i-2014),]$total<-b[1,1]-bioage0[1,1] B[(i-2014),]$month<-7 a$type<-"ecocadiz_comp" a[is.na(a)] <- 0 mybiglist[[name]] <- a } names(B)<-c("year","count","month") B$areacell<-"IXa" B$species<-'ANE' B$sampling_type<-"ecocadiz_bio" B$vessel<-'1.RSH' df<-plyr::ldply(mybiglist,data.frame) names(df)<-c(".id", "length", "0", "1", "2", "3", "year", "month", "type") ecocadiz_comp<-df pelago_def<-melt(pelago_comp, id=c(".id","length","year", "month", "type" )) names(pelago_def)<-c(".id","length","year", "month", "sampling_type","age","count") ecocadiz_def<-melt(ecocadiz_comp, id=c(".id","length","year", "month", "type" )) names(ecocadiz_def)<-c(".id","length","year", "month", "sampling_type","age","count") pelago_def$areacell<-"IXa" pelago_def$species<-'ANE' pelago_def$count<-signif(pelago_def$count,2) pelago_def$count<-pelago_def$count*1000 #para que quede en individuos pelago.ldist<-pelago_def[,2:9] ecocadiz_def$areacell<-"IXa" ecocadiz_def$species<-'ANE' ecocadiz_def$count<-signif(ecocadiz_def$count,2) ecocadiz_def$count<-ecocadiz_def$count*1000 #para que quede en individuos ecocadiz.ldist<-ecocadiz_def[,2:9] ############################################################### #alk ecocadiz mybiglist <- list() for (i in c(2004,2006,2007,2009,2010,2013,2014,2015)){ name<-paste("Ecocadiz",i,sep="") a<-readWorksheetFromFile("/home/marga/GADGET/DATOS/Acústica en 9aS_TallasyEdades.xls", sheet = paste(i,"Esp",sep=""), header = TRUE, startCol = 1, startRow = 5, endCol = 
5, endRow = 37, useCachedValues=TRUE) names(a)<-c("length","0", "1", "2", "3") a$year<-i a$month<-7-3 a$type<-"ecocadiz_comp_ald" a[is.na(a)] <- 0 mybiglist[[name]] <- a } mybiglist$Ecocadiz2004$month<-6-3 mybiglist$Ecocadiz2006$month<-6-3 mybiglist$Ecocadiz2013$month<-8-3 #B<-data.frame(year=c(1,2),total=c(1,2),month=c(1,2)) i<-2016 name<-paste("Ecocadiz",i,sep="") a<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/ECOCADIZ/ALK_ECOCADIZ_",i,".xls",sep=""), sheet = "ALK_GENERAL_BOQUERON", header = TRUE, startCol = 1, startRow = 6, endCol = 5, endRow = 38, useCachedValues=TRUE) names(a)<-c("length","0", "1", "2", "3") a$year<-i # B[(i-2014),]$year<-i a$month<-7-3 # B[(i-2014),]$total<-b[1,1] #B[(i-2014),]$month<-7 a$type<-"ecocadiz_comp_ald" a[is.na(a)] <- 0 mybiglist[[name]] <- a i<-2017 name<-paste("Ecocadiz",i,sep="") a<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/ECOCADIZ/ALK_ECOCADIZ_",i,".xls",sep=""), sheet = "ALK_GENERAL_BOQUERON", header = TRUE, startCol = 1, startRow = 6, endCol = 5, endRow = 38, useCachedValues=TRUE) names(a)<-c("length","0", "1", "2", "3") a$year<-i # B[(i-2014),]$year<-i a$month<-7-3 # B[(i-2014),]$total<-b[1,1] #B[(i-2014),]$month<-7 a$type<-"ecocadiz_comp_ald" a[is.na(a)] <- 0 mybiglist[[name]] <- a ###################2018 i<-2018 name<-paste("Ecocadiz",i,sep="") a<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/ECOCADIZ/ALK_ECOCADIZ_",i,".xls",sep=""), sheet = "ALK_GENERAL_BOQUERON", header = TRUE, startCol = 1, startRow = 6, endCol = 5, endRow = 38, useCachedValues=TRUE) names(a)<-c("length","0", "1", "2", "3") a$year<-i # B[(i-2014),]$year<-i a$month<-7-3 # B[(i-2014),]$total<-b[1,1] #B[(i-2014),]$month<-7 a$type<-"ecocadiz_comp_ald" a[is.na(a)] <- 0 mybiglist[[name]] <- a #############3 i<-2019 name<-paste("Ecocadiz",i,sep="") a<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/ECOCADIZ/ALK_ECOCADIZ_",i,".xls",sep=""), sheet = "ALK_GENERAL_BOQUERON", header = TRUE, startCol = 1, startRow = 6, endCol = 
5, endRow = 38, useCachedValues=TRUE) names(a)<-c("length","0", "1", "2", "3") a$year<-i # B[(i-2014),]$year<-i a$month<-7-3 # B[(i-2014),]$total<-b[1,1] #B[(i-2014),]$month<-7 a$type<-"ecocadiz_comp_ald" a[is.na(a)] <- 0 mybiglist[[name]] <- a #############3 i<-2020 name<-paste("Ecocadiz",i,sep="") a<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/ECOCADIZ/ALK_ECOCADIZ_",i,".xls",sep=""), sheet = "ALK_GENERAL_BOQUERON", header = TRUE, startCol = 1, startRow = 6, endCol = 5, endRow = 38, useCachedValues=TRUE) names(a)<-c("length","0", "1", "2", "3") a$year<-i # B[(i-2014),]$year<-i a$month<-7-3 # B[(i-2014),]$total<-b[1,1] #B[(i-2014),]$month<-7 a$type<-"ecocadiz_comp_ald" a[is.na(a)] <- 0 mybiglist[[name]] <- a df<-plyr::ldply(mybiglist,data.frame) names(df)<-c(".id", "length", "0", "1", "2", "3", "year", "month", "type") ecocadiz_comp_ald<-df[,1:9] #names(B)<-c("year","count","month") #B$areacell<-"IXa" #B$species<-'ANE' #B$sampling_type<-"ecocadiz_bio" #B$vessel<-'1.RSH' pelago_def_ald<-melt(pelago_comp_ald, id=c(".id","length","year", "month", "type" )) names(pelago_def_ald)<-c(".id","length","year", "month", "sampling_type","age","count") ecocadiz_def_ald<-melt(ecocadiz_comp_ald, id=c(".id","length","year", "month", "type" )) names(ecocadiz_def_ald)<-c(".id","length","year", "month", "sampling_type","age","count") pelago_def_ald$areacell<-"IXa" pelago_def_ald$species<-'ANE' # pelago_def_ald$count<-signif(pelago_def_ald$count,2) # pelago_def_ald$count<-pelago_def$count*1000 #para que quede en individuos pelago.aldist<-pelago_def_ald[,2:9] ecocadiz_def_ald$areacell<-"IXa" ecocadiz_def_ald$species<-'ANE' #ecocadiz_def$count<-signif(ecocadiz_def$count,2) #ecocadiz_def$count<-ecocadiz_def$count*1000 #para que quede en individuos ecocadiz.aldist<-ecocadiz_def_ald[,2:9] ##################3 #length distribution ecocadiz mybiglistII <- list() for (i in c(2004,2006,2007,2009,2010)){ name<-paste("Ecocadiz",i,sep="") 
a<-readWorksheetFromFile("/home/marga/GADGET/DATOS/Acústica en 9aS_TallasyEdades.xls", sheet = paste(i,"Esp",sep=""),
                         header = TRUE, startCol = 10, startRow = 5, endCol = 11, endRow = 37, useCachedValues=TRUE)
names(a)<-c("length","count")
a$year<-i
a$month<-7-3                     # July survey, shifted back one quarter
a$type<-"ecocadiz_comp_ld"
a[is.na(a)] <- 0
mybiglistII[[name]] <- a
}
# 2013-2015: same workbook but the length/count pair sits in cols 8-9.
for (i in c(2013:2015)){
name<-paste("Ecocadiz",i,sep="")
a<-readWorksheetFromFile("/home/marga/GADGET/DATOS/Acústica en 9aS_TallasyEdades.xls", sheet = paste(i,"Esp",sep=""),
                         header = TRUE, startCol = 8, startRow = 5, endCol = 9, endRow = 37, useCachedValues=TRUE)
names(a)<-c("length","count")
a$year<-i
a$month<-7-3
a$type<-"ecocadiz_comp_ld"
a[is.na(a)] <- 0
mybiglistII[[name]] <- a
}
# Surveys that did not run in July: correct their (already shifted) month.
mybiglistII$Ecocadiz2004$month<-6-3
mybiglistII$Ecocadiz2006$month<-6-3
mybiglistII$Ecocadiz2013$month<-8-3

# 2016-2020 come from the per-year ALK workbooks; 2016-2018 hold the
# length/count pair in cols 8-9, 2019-2020 in cols 18-19. One loop with a
# column lookup replaces five copy-pasted year blocks.
for (i in 2016:2020) {
  first_col <- if (i >= 2019) 18 else 8
  name <- paste("Ecocadiz", i, sep = "")
  a <- readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/ECOCADIZ/ALK_ECOCADIZ_", i, ".xls", sep = ""),
                             sheet = "ALK_GENERAL_BOQUERON", header = TRUE,
                             startCol = first_col, startRow = 6,
                             endCol = first_col + 1, endRow = 38,
                             useCachedValues = TRUE)
  names(a) <- c("length", "count")
  a$year <- i
  a$month <- 7 - 3
  a$type <- "ecocadiz_comp_ld"
  a[is.na(a)] <- 0
  mybiglistII[[name]] <- a
}

# Collapse the per-year list into one data frame.
dfIII<-plyr::ldply(mybiglistII,data.frame)
names(dfIII)<-c(".id", "length", "count", "year", "month", "sampling_type")
ecocadiz_comp_ld<-dfIII[,1:6]
ecocadiz_comp_ld$areacell<-"IXa"
ecocadiz_comp_ld$species<-'ANE'
ecocadiz.ldistnoage<-ecocadiz_comp_ld[,2:8]

#######################################################################
# Length distribution plots of the surveys.
library(ggplot2)
hi<-aggregate(count~length+year+sampling_type,ecocadiz.ldist,sum)
ho<-aggregate(count~length+year+sampling_type,pelago.ldist,sum)
hu<-rbind(hi,ho)
ecocadiz_ld_plot<-ggplot(hu,aes(length, count))
ecocadiz_ld_plot+facet_grid(sampling_type~year)+geom_line()
library(Rgadget)
hu %>% ggplot(aes(length,count)) + geom_line()+ facet_wrap(~sampling_type+year,scale='free_y')
# Age-disaggregated version of the same comparison.
hi<-aggregate(count~age+year+sampling_type,ecocadiz.ldist,sum)
ho<-aggregate(count~age+year+sampling_type,pelago.ldist,sum)
hu<-rbind(hi,ho)
ecocadiz_ad_plot<-ggplot(hu, aes(age, count))
ecocadiz_ad_plot+facet_grid(sampling_type~year)+geom_point()
library(Rgadget)
hu<-subset(hu,age==0|age==1|age==2)   # keep ages 0-2 only
hu %>% ggplot(aes(age,count)) + geom_point()+ facet_wrap(~sampling_type+year,scale='free_y')
hu %>% ggplot(aes(year,count)) + geom_point() + facet_wrap(~age+sampling_type,scale='free',ncol=2)
####################################3
# PELAGO survey biomass (Portuguese sheet): one row of yearly totals.
pelago_bio<-readWorksheetFromFile("/home/marga/GADGET/DATOS/Acústica en 9aS_TallasyEdades.xls", sheet = "BiomasaPort",
                                  header = F, startCol = 3,
                                  startRow = 35, endCol = 18,
                                  endRow = 35, useCachedValues=TRUE)
pelago_bio<-t(pelago_bio)
# Careful: target unit is kg, so the tonnes must be multiplied by 1000.
pelago_biom<-data.frame(cbind(1999:2014,pelago_bio*1000,c(rep(3,4),2,rep(4,11))))
names(pelago_biom)<-c("year","count","month")
# pelago_bio_pre is built earlier in the file — TODO confirm it covers 2010 on.
names(pelago_bio_pre)<-c("year","count","month")
pelago_biom<-rbind(pelago_biom%>%filter(year<2010),pelago_bio_pre)
# (superseded 2018/2019 reads, kept for reference)
# pelbiom2018<-readWorksheetFromFile(".../PELAGO/results-ANE-PEL18.xlsx", sheet="algarve+cadiz", header=F, startCol=3, startRow=44, endCol=3, endRow=44, useCachedValues=TRUE)
# pelago_biom<-add_row(pelago_biom,year=2018,count=pelbiom2018[1,1]*1000,month=4)
# pelbiom2019<-readWorksheetFromFile(".../PELAGO/results-ANE-PEL19.xlsx", sheet="algarve+cadiz", header=F, startCol=3, startRow=44, endCol=3, endRow=44, useCachedValues=TRUE)
# pelago_biom<-add_row(pelago_biom,year=2019,count=pelbiom2019[1,1]*1000,month=4)
names(pelago_biom)<-c("year","count","month")
pelago_biom$month<-pelago_biom$month-3   # shift survey month back one quarter
pelago_biom$areacell<-"IXa"
pelago_biom$species<-'ANE'
pelago_biom$sampling_type<-"pelago_bio"
pelago_biom$vessel<-'1.RSH'
pelago_biom<-na.omit(pelago_biom)
# A non-positive shifted month belongs to month 11 of the previous year.
pelago_biom$year<-pelago_biom$year*(pelago_biom$month>0)+(pelago_biom$year-1)*(pelago_biom$month<=0)
pelago_biom$month<-pelago_biom$month*(pelago_biom$month>0)+11*(pelago_biom$month<=0)
# ECOCADIZ survey biomass (Spanish sheet).
ecocadiz_bio<-readWorksheetFromFile("/home/marga/GADGET/DATOS/Acústica en 9aS_TallasyEdades.xls",
                                    sheet = "BiomasaEsp", header = F, startCol = 3,
                                    startRow = 35, endCol = 13,
                                    endRow = 35, useCachedValues=TRUE)
ecocadiz_bio<-t(ecocadiz_bio)
# tonnes -> kg; third column is the survey month of each year.
ecocadiz_biom<-data.frame(cbind(2004:2014,ecocadiz_bio*1000,c(rep(6,3),rep(7,6),8,7)))
names(ecocadiz_biom)<-c("year","count","month")
# Subtract age-0 biomass (B0, built earlier in the file) from the total.
ecoca_age0<-left_join(ecocadiz_biom,B0)%>%mutate(count=count-total)%>%select(year,count,month)
ecocadiz_biom<-ecoca_age0
ecocadiz_biom$areacell<-"IXa"
ecocadiz_biom$species<-'ANE'
ecocadiz_biom$sampling_type<-"ecocadiz_bio"
ecocadiz_biom$vessel<-"1.RSH"
ecocadiz_biom<-rbind(ecocadiz_biom,B)   # append recent years (B, built earlier)
ecocadiz_biom<-na.omit(ecocadiz_biom)
ecocadiz_biom$month<-ecocadiz_biom$month-3
# Split the SAR table on its numbers/biomass flag.
g<-factor(sar$numorbio)
#pelagosep<-split(pelago,g)
#ecocadizsep<-split(ecocadiz,g)
sarsep<-split(sar,g)
# (legacy ECOCADIZ 2013 import, kept for reference)
# ecocadiz0813<-read.xls("/home/marga/GADGET/DATOS/ecocadiz_tableincolumn.xls")
# order_ecocadiz<-matrix(as.numeric(as.character(ecocadiz0813$table_in_column)), nrow=25, ncol=9,byrow=T)
# order_ecocadiz<-cbind(order_ecocadiz,rowSums(order_ecocadiz[,2:6]))
# def_ecocadiz0813<-order_ecocadiz[,c(1,10)]
# order_ecocadiz<-matrix(ecocadiz0813$table_in_column, nrow=25, ncol=9,byrow=T)
# def_ecocadiz0813<-order_ecocadiz[,c(1,8)]
# def_ecocadiz0813<-data.frame(def_ecocadiz0813)
# names(def_ecocadiz0813)<-c("length","count")
# def_ecocadiz0813$year<-2013
# def_ecocadiz0813$month<-8#5#Depending on how time is defined 8 is the real
# def_ecocadiz0813$length<-seq(6,18,0.5)
# (mfdb example calls, kept for reference)
# mfdb_import_area(mdb, data.frame(id = 1:3, name = c("Tarifa","Algeciras","Morocco"), size = 1))
#area<-data.frame(id = c(1), name=c("IXaS_cadiz"), size =c(13000))
# mfdb_import_temperature(mdb, data.frame(year = rep(c(1998, 1999), each = 12), month = c(1:12, 1:12),
#   areacell = c(rep('45G01', times = 24)),
#   temperature = c(0.5, 1.2, 2.4, 3.5, 4.6, 5.7, 6.1, 7.4, 8.9, 10, 11, 12, 25:36)))
# Register the single assessment area and the temperature series with mfdb.
mfdb_import_area(mdb_compi, data.frame(id = c(1), name=c("IXa"), size =c(13000)))
#division=c('alg','cad'),
#mfdb_import_division(mdb_comp, list(divA = c("IXa", "IXa_a"), divB = c('IXa')))
#temperature<-temperature[,-1]
mfdb_import_temperature(mdb_compi, temperature)
# Commercial catch at length from the yearly workbooks (Hoja1 = catch at
# length; Hoja2 is stored in FOOG but not used below).
FOO<-list()
FOOG<-list()
i<-1988
filed<-paste("/home/marga/GADGET/DATOS/Juan_data/Datos_",i,"_anchovy.xlsx",sep = "")
FOO[[(i-1987)]]<-readWorksheet(loadWorkbook(filed), sheet="Hoja1")
FOOG[[(i-1987)]]<-readWorksheet(loadWorkbook(filed), sheet="Hoja2")
FOO[[(i-1987)]]<-cbind(year=i, FOO[[(i-1987)]])
names( FOO[[(i-1987)]])<-c(names(FOO[[1]]))
catch_at_length<-FOO[[1]]
# Remaining years appended row-wise to the 1988 seed.
for(i in 1989:2013)
{
filed<-paste("/home/marga/GADGET/DATOS/Juan_data/Datos_",i,"_anchovy.xlsx",sep = "")
FOO[[(i-1987)]]<-readWorksheet(loadWorkbook(filed), sheet="Hoja1")
FOOG[[(i-1987)]]<-readWorksheet(loadWorkbook(filed), sheet="Hoja2")
FOO[[(i-1987)]]<-cbind(year=i, FOO[[(i-1987)]])
names( FOO[[(i-1987)]])<-c(names(FOO[[1]]))
catch_at_length<-rbind(catch_at_length,FOO[[i-1987]])
}
catch_at_length[is.na(catch_at_length)]<-0
#names(catch_at_length)<-c("year","length","Col2","count","count","count","count","Total")
# First organise year, age, number.
# 2014-2016 length distributions come from a separate workbook (one sheet each).
catchldist14<-readWorksheet(loadWorkbook("/home/marga/GADGET/DATOS/lengthdist1415.xls"), sheet="Sheet2")
catchldist14<-matrix(as.numeric(as.character(catchldist14$X2014)), ncol=6,byrow=T)
catchldist15<-readWorksheet(loadWorkbook("/home/marga/GADGET/DATOS/lengthdist1415.xls"), sheet="Sheet1")
catchldist16<-readWorksheet(loadWorkbook("/home/marga/GADGET/DATOS/lengthdist1415.xls"), sheet="Sheet3")
catchldist15<-matrix(as.numeric(as.character(catchldist15$X2015)), ncol=6,byrow=T)
catchldist16<-matrix(as.numeric(as.character(catchldist16$X2016)), ncol=6,byrow=T)
catchldist15<-cbind(catchldist15,year=2015)
catchldist14<-cbind(catchldist14,year=2014)
catchldist16<-cbind(catchldist16,year=2016)
catchldist15<-data.frame(catchldist15)
catchldist14<-data.frame(catchldist14)
catchldist16<-data.frame(catchldist16)
names(catch_at_length)<-c("year","length","Col2","Q1","Q2","Q3","Q4","Total")
# Harmonise the 2014-2016 tables with catch_at_length's column layout
# (year, length, Col2 placeholder, quarterly counts, total).
names(catchldist14)<-c("length","Q1","Q2","Q3","Q4","Total","year")
names(catchldist15)<-c("length","Q1","Q2","Q3","Q4","Total","year")
names(catchldist16)<-c("length","Q1","Q2","Q3","Q4","Total","year")
catchldist14<-cbind(catchldist14[,7],catchldist14[,1],Col2=NA,catchldist14[,2:6])
catchldist15<-cbind(catchldist15[,7],catchldist15[,1],Col2=NA,catchldist15[,2:6])
catchldist16<-cbind(catchldist16[,7],catchldist16[,1],Col2=NA,catchldist16[,2:6])
names(catchldist14)<-c("year","length","Col2","Q1","Q2","Q3","Q4","Total")
names(catchldist15)<-c("year","length","Col2","Q1","Q2","Q3","Q4","Total")
names(catchldist16)<-c("year","length","Col2","Q1","Q2","Q3","Q4","Total")
catch_at_length<-rbind(catch_at_length, catchldist14, catchldist15,catchldist16)
# Total landed numbers per year and quarter (sum over the length classes).
fleet.seinepre<-plyr::ddply(catch_at_length,~year,summarise,Total_catches1=sum(Q1),Total_catches2=sum(Q2), Total_catches3=sum(Q3), Total_catches4=sum(Q4))
#fleet.seinepre<-ddply(catch_at_length,~year,summarise,Total_catches1=sum(X1st.Q),Total_catches2=sum(X2nd.Q), Total_catches3=sum(X3rd.Q), Total_catches4=sum(X4th.Q))
# (superseded 2018-2020 provisional-landings reads, kept for reference)
# catches20181cakg / catches20182cakgpre: Cadiz 2018 (".../Ane279a_9aS_Prov Landings Jan-June 2018.xlsx", Hoja2, col 7, rows 19/22)
# catches20181alton: Algarve 2018 (".../Algarve/PNAB_biqueirao_2018.xlsx", Sheet1, col 5, row 7)
# catches20182altonpre: Algarve 2018 daily sales (".../PNAB_biqueirao_vendas-diarias_2018.xls", Sheet1, col 13, row 14)
# catches20191cakg / catches20192cakgpre: Cadiz 2019 (same layout); Algarve 2019 = 0
# Numb20191 = catches20191cakg[1,1]*NUMBERSP(2018,S1)/TONSP(2018,S1)*1000 (no Algarve data, Cadiz only)
# Numb20192 = analogous for season 2
# catches20201cakg / catches20202cakgpre: Cadiz 2020 ("landings2020" sheet, col 10, rows 18/19)
# catches20201alton<-1.6 ; catches20202altonpre<-0.2 (Algarve 2020, hard-coded)
# Provisional 2021 landings, Cadiz (kg): first- and second-semester columns.
catches20211cakg<-readWorksheetFromFile("/home/marga/GADGET/DATOS/Taledas_allfleets_1988_2016/Ane279a_9aS_Prov Landings Jan-June 2021.xls", sheet="Sheet1",
                                        header = F, startCol = 7,
                                        startRow = 20, endCol = 7,
                                        endRow = 20, useCachedValues=TRUE)
catches20212cakgpre<-readWorksheetFromFile("/home/marga/GADGET/DATOS/Taledas_allfleets_1988_2016/Ane279a_9aS_Prov Landings Jan-June 2021.xls", sheet="Sheet1",
                                           header = F, startCol = 8,
                                           startRow = 20, endCol = 8,
                                           endRow = 20, useCachedValues=TRUE)
# Re-check carefully: it was right but got confused because this is the
# second-semester one. Raise the second-semester figure by 37% of the annual
# total (factor taken from historico-capturas).
catches20212cakgpre<-catches20212cakgpre+0.37*(catches20211cakg+catches20212cakgpre)
# Algarve 2021, tonnes (see ANE_biqueirão_01012021-30042021 in DATOS/Algarve).
catches20211alton<-1.669
#readWorksheetFromFile("/home/marga/GADGET/DATOS/Algarve/PNAB_biqueirao_2018.xlsx", sheet="Sheet1", header=F, startCol=5, startRow=7, endCol=5, endRow=7, useCachedValues=TRUE)
catches20212altonpre<-0.059
#readWorksheetFromFile("/home/marga/GADGET/DATOS/Algarve/PNAB_biqueirao_vendas-diarias_2018.xls", sheet="Sheet1", header=F, startCol=13, startRow=14, endCol=14, useCachedValues=TRUE)
# bio2018*Num2017/bio2017
# Algarve in kg plus 1209 as of 23 June; Cadiz
# ERROR (original author's warning about the raising formula)
# Raise the 2021 landed biomass to numbers using the 2020 numbers/biomass
# ratio from the combined PT+SP series (BIOMTOT is in tonnes, hence *1000).
#Numb20181=(catches20181cakg[1,1]+catches20181alton[1,1]*1000)*(CatchesAllFleetsPTSP$ALL[CatchesAllFleetsPTSP$YEAR==2017 & CatchesAllFleetsPTSP$SEASON==1])/((BIOMTOT$TONPT+BIOMTOT$TONSP)[BIOMTOT$YEAR==2017 & BIOMTOT$SEASON==1])
Numb20211=(catches20211cakg[1,1]+catches20211alton*1000)*(CatchesAllFleetsPTSP$ALL[CatchesAllFleetsPTSP$YEAR==2020 & CatchesAllFleetsPTSP$SEASON==1])/((BIOMTOT$TONPT+BIOMTOT$TONSP)[BIOMTOT$YEAR==2020 & BIOMTOT$SEASON==1]*1000)
#Numb20182=(catches20182cakgpre[1,1])*(CatchesAllFleetsPTSP$NUMBERSP[CatchesAllFleetsPTSP$YEAR==2017 & CatchesAllFleetsPTSP$SEASON==2])/((BIOMTOT$TONSP)[BIOMTOT$YEAR==2017 & BIOMTOT$SEASON==2])
Numb20212=(catches20212cakgpre[1,1]+catches20212altonpre*1000)*(CatchesAllFleetsPTSP$ALL[CatchesAllFleetsPTSP$YEAR==2020 & CatchesAllFleetsPTSP$SEASON==2])/((BIOMTOT$TONPT+BIOMTOT$TONSP)[BIOMTOT$YEAR==2020 & BIOMTOT$SEASON==2]*1000)
BIOMTOT$ALL<-BIOMTOT$TONPT+BIOMTOT$TONSP
# Append the provisional 2021 rows (tonnes; kg figures divided by 1000).
BIOMTOT<-add_row(BIOMTOT, YEAR=2021, SEASON=1:2, TONPT=c(catches20211alton,catches20212altonpre),TONSP=c(catches20211cakg[1,1]*0.001,catches20212cakgpre[1,1]*0.001),ALL=c(catches20211alton+catches20211cakg[1,1]*0.001,catches20212altonpre+catches20212cakgpre[1,1]*0.001))
# Taken from "Ane279a_9aS_Prov Landings Jan-June 2019.xls" because the
# Portuguese catches there are zero.
# Re-map seasons onto management-calendar years; careful: the last year here
# is the year before the assessment.
# NOTE(review): the hard-coded newyear vector (2 + 32*4 = 130 entries) must
# equal nrow(BIOMTOT) after the add_row above — verify the row count.
BIOMTOTnewyear<-BIOMTOT %>% mutate(newyear=c(1988,1988,rep(1989:2020,each=4))) %>%group_by(newyear)%>%summarise(catonsur=sum(ALL))
# Discards by quarter, southern 9a.
discardsbyquarter<-readWorksheetFromFile("/home/marga/GADGET/DATOS/Taledas_allfleets_1988_2016/Landings and Discards_9aN and 9a S_2014 on.xlsx", sheet="South_9aS",
                                         header = T, startCol = 9,
                                         startRow = 4, endCol = 14,
                                         endRow = 33, useCachedValues=TRUE)
names(discardsbyquarter)<-c("year","1","2","3","4")
# Annual discards on the management calendar.
DISCARDS<-melt(discardsbyquarter,id="year")%>%arrange(year)%>%mutate(newyear=c(1988,1988,rep(1989:2016,each=4),2017,2017))%>%group_by(newyear)%>%summarise(newyeardiscardsouth=sum(value))%>%mutate(year=newyear)
# Taken from "Landings and Discards_9aN and 9a S_2014 on_2018.xlsx".
DISCARDS<-add_row(DISCARDS, newyear=2018, newyeardiscardsouth=151, year=2018)
# Taken from the ..._2019.xlsx file; the 2018 values were also corrected there.
DISCARDS<-add_row(DISCARDS, newyear=2019, newyeardiscardsouth=87, year=2019)
DISCARDS$newyeardiscardsouth[DISCARDS$year==2017]<-186
#save(BIOMTOTnewyear,DISCARDS,file="/home/marga/GADGET/Assessment/Assessment2020_pandemia/BIOMTOTnewyeardiscards.Rdata")
save(BIOMTOTnewyear,DISCARDS,file="BIOMTOTnewyeardiscards.Rdata")
# Seine fleet: quarterly totals (thousands -> individuals), long format.
names(fleet.seinepre)<-c("year","count","count","count","count")
fleet.seinepre[c(2,3,4,5)]<-fleet.seinepre[c(2,3,4,5)]*1000
fleet.seine<-rbind(fleet.seinepre[c(1,2)],fleet.seinepre[c(1,3)],fleet.seinepre[c(1,4)],fleet.seinepre[c(1,5)])
fleet.seine$month<-rep(seq(3,12,3),times=1, each=nrow(fleet.seinepre))#rep(c(12,3,6,9),times=1, each=26)#rep(seq(3,12,3),times=1, each=26)
fleet.seine$areacell<-"IXa"
fleet.seine$species<-'ANE'
fleet.seine$vessel<-"2.CQT"
fleet.seine$sampling_type<-"SEA"
# Combined PT+SP fleet numbers by season; month = SEASON*3.
fleet.allptsp<-CatchesAllFleetsPTSP%>%mutate(month=SEASON*3)%>%ungroup()%>%select(year=YEAR,month,count=ALL)
#fleet.allptsp<-add_row(fleet.allptsp,year=2018,month=c(3,6),count=c(Numb20181,Numb20182))
#fleet.allptsp<-add_row(fleet.allptsp,year=2019,month=c(3,6),count=c(Numb20191,Numb20192))
#fleet.allptsp<-add_row(fleet.allptsp,year=2020,month=c(3,6),count=c(Numb20201,Numb20202))
fleet.allptsp<-add_row(fleet.allptsp,year=2021,month=c(3,6),count=c(Numb20211,Numb20212))
fleet.allptsp$areacell<-"IXa"
fleet.allptsp$species<-'ANE'
fleet.allptsp$vessel<-"2.CDA"
fleet.allptsp$sampling_type<-"SEAS2"
# ARSA survey ALK (November), one sheet per year; the loop body continues
# on the next line of the file.
mybiglistII <- list()
for (i in c(1993,1997:2016)){
name<-paste("ARSA",i,sep="")
a<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/ARSA/ARSA_NOV_ALK_",i,".xlsx",sep=""), sheet = "4Q",
                         header = TRUE, startCol = 11,
                         startRow = 5, endCol = 15,
                         endRow = 37, useCachedValues=TRUE)
# --- body of the ARSA ALK loop opened on the previous line of the file ---
names(a)<-c("length","0", "1", "2", "3")   # length class plus ages 0-3
a$year<-i
a$month<-11            # ARSA is a November survey
a$type<-"arsa_comp"
a[is.na(a)] <- 0       # empty ALK cells mean zero fish observed
mybiglistII[[name]] <- a
}
# Collapse to one data frame and un-pivot the age columns.
dfII<-plyr::ldply(mybiglistII,data.frame)
names(dfII)<-c(".id", "length", "0", "1", "2", "3", "year", "month", "type")
arsa_comp<-dfII
arsa_def<-melt(arsa_comp, id=c(".id","length","year", "month", "type" ))
names(arsa_def)<-c(".id","length","year", "month", "sampling_type","age","count")
arsa_def$areacell<-"IXa"
arsa_def$species<-'ANE'
arsa.ldist<-arsa_def[,2:9]
# arsa.ldist without age (length/count pair in cols 8-9 of the same sheets).
mybiglistII <- list()
for (i in c(1993,1997:2016)){
name<-paste("ARSALD",i,sep="")
a<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/ARSA/ARSA_NOV_ALK_",i,".xlsx",sep=""), sheet = "4Q",
                         header = FALSE, startCol = 8,
                         startRow = 6, endCol = 9,
                         endRow = 37, useCachedValues=TRUE)
names(a)<-c("length","count")
a$year<-i
a$month<-11
a$type<-"arsa_compld"
a[is.na(a)] <- 0
mybiglistII[[name]] <- a
}
dfII<-plyr::ldply(mybiglistII,data.frame)
names(dfII)<-c(".id", "length", "count", "year", "month", "sampling_type")
arsa_compld<-dfII
#arsa_def<-melt(arsa_comp, id=c(".id","length","year", "month", "type" ))
#names(arsa_def)<-c(".id","length","year", "month", "sampling_type","age","count")
arsa_compld$areacell<-"IXa"
arsa_compld$species<-'ANE'
arsa.ldistnoage<-arsa_compld[,2:8]
# ARSA absolute biomass index.
arsa_bio<-readWorksheetFromFile("/home/marga/GADGET/DATOS/ARSA/WKPELA2018_ane.27.9a_ES_SURVEY_ARSA-Novseries.xlsx", sheet = "Absolute Indices' estimates",
                                header = F, startCol = 15,
                                startRow = 2, endCol = 15,
                                endRow = 22, useCachedValues=TRUE)
arsa_bio<-arsa_bio*1000 # to kilograms
ARSA_biom<-data.frame(cbind(year=c(1993,1997:2016), count=arsa_bio, month=11))
names(ARSA_biom)<-c("year", "count", "month")
ARSA_biom$areacell<-"IXa"
ARSA_biom$species<-'ANE'
ARSA_biom$sampling_type<-"arsa_bio"
ARSA_biom$vessel<-'1.RSH'
# BOCADEVA (DEPM) SSB estimates.
bocadeva_bio<-readWorksheetFromFile("/home/marga/GADGET/DATOS/DEPM/DEPM_SSB_estimates.xlsx", sheet="Sheet1",
                                    header = F, startCol = 1,
                                    startRow = 2, endCol = 2,
                                    endRow = 6, useCachedValues=TRUE)
bocadeva_bio[,2]<-bocadeva_bio[,2]*1000 # to kilograms
bocadeva_biom<-mutate(bocadeva_bio,month=c(6,6,7,7,7))  # survey month per estimate
names(bocadeva_biom)<-c("year", "count", "month")
bocadeva_biom$areacell<-"IXa"
bocadeva_biom$species<-'ANE'
bocadeva_biom$sampling_type<-"bocadeva_bio"
bocadeva_biom$vessel<-'1.RSH'
bocadeva_biom$month<-bocadeva_biom$month-3  # shift back one quarter
# Commercial-catch ALK and length distributions by year and quarter
# (one sheet per quarter: "1Q".."4Q").
mybiglistII <- list()
mybiglistIV <- list()
for (i in 1989:2013){
#i<-1989
for (j in 1:4){
# j<-1
name<-paste("ALKcatches",i,j,sep="")
name1<-paste("LDcatches",i,j,sep="")
a<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/Taledas_allfleets_1988_2016/BOQUERON_ALK_",i,".xls",sep=""), sheet = paste(j,"Q",sep=""),
                         header = T, startCol = 1,
                         startRow = 5, endCol = 5,
                         endRow = 37, useCachedValues=T)
ldist_cat<- readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/Taledas_allfleets_1988_2016/BOQUERON_ALK_",i,".xls",sep=""), sheet = paste(j,"Q",sep=""),
                                  header = T, startCol = 8,
                                  startRow = 5, endCol = 9,
                                  endRow = 37, useCachedValues=T)
names(a)<-c("length","0", "1", "2", "3")
a$year<-i
a$month<-3*(j-1)+1     # first month of quarter j
a$type<-"alkcatches_comp"
names(ldist_cat)<-c("length","count")
ldist_cat$year<-i
ldist_cat$month<-3*(j-1)+1
a[is.na(a)] <- 0
mybiglistII[[name]] <- a
ldist_cat[is.na(ldist_cat)]<-0
mybiglistIV[[name1]] <- ldist_cat
}}
# Changed 2018 to 2019. The 2014+ workbooks carry two extra rows (endRow=39).
for (i in 2014:2020){
for (j in 1:4){
name<-paste("ALKcatches",i,j,sep="")
name1<-paste("LDcatches",i,j,sep="")
a<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/Taledas_allfleets_1988_2016/BOQUERON_ALK_",i,".xls",sep=""), sheet = paste(j,"Q",sep=""),
                         header = T, startCol = 1,
                         startRow = 5, endCol = 5,
                         endRow = 39, useCachedValues=T)
ldist_cat<-
readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/Taledas_allfleets_1988_2016/BOQUERON_ALK_",i,".xls",sep=""), sheet = paste(j,"Q",sep=""),
                      header = T, startCol = 8,
                      startRow = 5, endCol = 9,
                      endRow = 39, useCachedValues=T)
names(ldist_cat)<-c("length","count")
names(a)<-c("length","0", "1", "2", "3")
ldist_cat$year<-i
a$year<-i
a$month<-3*(j-1)+1
ldist_cat$month<-3*(j-1)+1
a$type<-"alkcatches_comp"
a[is.na(a)] <- 0
ldist_cat[is.na(ldist_cat)]<-0
mybiglistII[[name]] <- a
mybiglistIV[[name1]] <- ldist_cat
}}
dfIII<-plyr::ldply(mybiglistII,data.frame)
dfIV<-plyr::ldply(mybiglistIV,data.frame)
names(dfIII)<-c(".id", "length", "0", "1", "2", "3", "year", "month", "type")
ALKcatches_comp<-dfIII
# Un-pivot the age columns so each row is (length, age, count).
ALKcatches_def<-melt(ALKcatches_comp, id=c(".id","length","year", "month", "type" ))
names(ALKcatches_def)<-c(".id","length","year", "month", "sampling_type","age","count")
ALKcatches_def$areacell<-"IXa"
ALKcatches_def$species<-'ANE'
ALKcatches.ldist<-ALKcatches_def[,2:9]
# ALKcatches.ldist$age<-as.numeric(as.character(ALKcatches.ldist$age))
# Seine catch length distribution: quarterly count columns stacked long;
# counts are in thousands -> individuals.
names(catch_at_length)<-c("year","length","Col2","count","count","count","count","Total")
seine.ldist <- rbind(catch_at_length[c(1,2,4)],catch_at_length[c(1:2,5)],catch_at_length[c(1:2,6)],catch_at_length[c(1:2,7)])
seine.ldist[3]<-seine.ldist[3]*1000
#rep(x, times = 1, length.out = NA, each = 1)
# TODO: fix!!! (original note: "arreglar!!!")
seine.ldist$month<-rep(seq(3,12,3),times=1, each=nrow(catch_at_length))#rep(c(12,3,6,9),times=1, each=988)#rep(seq(3,12,3),times=1, each=988) seine.ldist<-rbind(seine.ldist%>%filter(year<2014),dfIV%>%filter(year>=2014)%>% select(year,length,count,month)) seine.ldist2<-dfIV%>%mutate(length2=length-0.25,month2=month+2)%>% select(year,length=length2,count2=count,month=month2) SEINE.ldist<-left_join(seine.ldist,seine.ldist2) #left_join(seine.ldist%>%filter(year>2014),seine.ldist2%>%filter(year>2014)) #seine.ldist%>%mutate(count2=seine.ldist2$count) # ggplot(SEINE.ldist,aes(length,count)) + # geom_line(aes(length,count2), col='gray') + # facet_wrap(~year+month) + theme_bw() + geom_line() + # # ldist20171<-seine.ldist%>%filter(year==2017,month<4) # ldist20172<-seine.ldist%>%filter(year==2017,month==4) # ldist20171p<-ldist20171%>%mutate(countperc=count/max(ldist20171$count)) # ldist20172p<-ldist20172%>%mutate(countperc=count/max(ldist20172$count)) seine.ldist$areacell<-"IXa" seine.ldist$species<-'ANE' seine.ldist$sampling_type<-'SEA' # ecocadiz0813.ldist<-def_ecocadiz0813 # ecocadiz0813.ldist$areacell<-"IXa" # ecocadiz0813.ldist$species<-'ANE' # ecocadiz0813.ldist$sampling_type<-'ecocadiz0813' catchespre<-subset(catch_age,AGE==c(0,1,2,3), select=c(1:6)) names(catchespre)<-c("year","age","count","count","count","count") catches.agedist<-rbind(catchespre[c(1,2,3)],catchespre[c(1,2,4)],catchespre[c(1,2,5)],catchespre[c(1,2,6)]) catches.agedist[3]<-catches.agedist[3]*1000 Nyears<-length(unique(catches.agedist$year)) #number of years catches.agedist$month<-rep(seq(3,12,3),times=1, each=Nyears*4)#rep(c(12,3,6,9),times=1, each=26*4)#rep(seq(3,12,3),times=1, each=26*4)#26 años, 4 edades catches.agedist$species<-'ANE' catches.agedist$areacell<-"IXa" catches.agedist$sampling_type<-'SEA' # comm.ldist <- merge(comm.ldist, # comm.stations[c('synis.id','ar','man','lat','lon')]) # comm.ldist$areacell <- d2sr(comm.ldist$lat,-comm.ldist$lon) # names(comm.ldist) <- 
c('sample.id','species','length','count','sex', # 'maturity','year','month','lat','lon','areacell') # comm.ldist$species <- 'COD' length_agepre<-subset(length_age,age==0|age==1|age==2|age==3) #quitando l????nea de totales names(length_agepre)<-c("year","age","length","length","length","length") length.at.age<-rbind(length_agepre[c(1,2,3)],length_agepre[c(1,2,4)],length_agepre[c(1,2,5)],length_agepre[c(1,2,6)]) length.at.age$month<-rep(seq(3,12,3),times=1, each=26*4)#rep(c(12,3,6,9),times=1, each=26*4)#changing the quarter length.at.age<-na.omit(length.at.age) length.at.age$sampling_type<-'SEA' length.at.age$species<-'ANE' length.at.age$areacell<-"IXa" weight_agepre<-subset(weight_age,age==0|age==1|age==2|age==3) #quitando l????nea de totales names(weight_agepre)<-c("year","age","weight","weight","weight","weight") weight.at.age<-rbind(weight_agepre[c(1,2,3)],weight_agepre[c(1,2,4)],weight_agepre[c(1,2,5)],weight_agepre[c(1,2,6)]) weight.at.age$month<-rep(seq(3,12,3),times=1, each=26*4)#rep(c(12,3,6,9),times=1, each=26*4)#changing the quarter weight.at.age<-na.omit(weight.at.age) weight.at.age$sampling_type<-'SEA' weight.at.age$species<-'ANE' weight.at.age$areacell<-"IXa" # weight_agepre<-subset(weight_age,AGE==0|AGE==1|AGE==2|AGE==3) #quitando l????nea de totales # names(weight_agepre)<-c("year","age","weight","weight","weight","weight") # weight.at.age<-rbind(weight_agepre[c(1,2,3)],weight_agepre[c(1,2,4)],weight_agepre[c(1,2,5)],weight_agepre[c(1,2,6)]) # weight.at.age$month<-rep(c(12,3,6,9),times=1, each=26*4) # #Remove rows with empty spaces # blank2na <- function(x){ # z <- gsub("\\s+", "", x) #make sure it's "" and not " " etc # x[z==""] <- NA # return(x) # } # # #apply that function # weight.at.age<-data.frame(sapply(weight.at.age, blank2na)) # # #weight.at.age<-na.omit(weight.at.age) # weight.at.age$sampling_type<-'SEA' # weight.at.age$species<-'ANE' # weight.at.age$areacell<-"IXa" # pelagonumber.survey<-pelagosep[[2]][c(1,2,5)] # #permutation of time # 
#pelagonumber.survey[2]<-(pelagonumber.survey[2]+9)%%12
# #pelagonumber.survey$month[pelagonumber.survey$month==0]<-12
# pelagonumber.survey[3]<-pelagonumber.survey[3]*1000000 # units in millions
# #pelagonumber.survey[is.na(pelagonumber.survey)]<-0  # better to drop NA rows than treat them as zeros
# pelagonumber.survey<-na.omit(pelagonumber.survey)
# names(pelagonumber.survey)<-c('year','month','count')
# pelagonumber.survey$species<-'ANE'
# pelagonumber.survey$areacell<-"IXa"
# pelagonumber.survey$sampling_type = 'pelago'
# #pelagonumber.survey$survey = 'pelago'
#
# ecocadiz.survey<-ecocadizsep[[2]][c(1,2,5)]
# #ecocadiz.survey[2]<-(ecocadiz.survey[2]+9)%%12
# #ecocadiz.survey$month[ecocadiz.survey$month==0]<-12
# ecocadiz.survey[3]<-ecocadiz.survey[3]*1000000
# #ecocadiz.survey[is.na(ecocadiz.survey)]<-0
# ecocadiz.survey<-na.omit(ecocadiz.survey)
# names(ecocadiz.survey)<-c('year','month','count')
# ecocadiz.survey$species<-'ANE'
# ecocadiz.survey$areacell<-"IXa"
# ecocadiz.survey$sampling_type = 'ecocadiz'

## ---- SAR survey biomass index --------------------------------------------
## sarsep: [[1]] = biomass, [[2]] = numbers
#sarnumber.survey<-sarsep[[2]][c(1,2,5)]
#sarnumber.survey<-sarsep[[1]][c(1,2,5)] # BIOMASS
## Column 4 adds Portugal; the last 2012 value comes from ECOCADIZ-RECLUTAS.
sarnumber.survey_pre<-sarsep[[1]][c(1,2,4,5)] # BIOMASS
sarnumber.survey<-cbind(sarnumber.survey_pre[c(1,2)],rowSums(sarnumber.survey_pre[c(3,4)], na.rm=T))
#sarnumber.survey[2]<-(sarnumber.survey[2]+9)%%12
#sarnumber.survey$month[sarnumber.survey$month==0]<-12
sarnumber.survey<-na.omit(sarnumber.survey)
#sarnumber.survey[is.na(sarnumber.survey)]<-0
#sarnumber.survey[3]<-sarnumber.survey[3]*1000000 # millions -> individuals
sarnumber.survey[3]<-sarnumber.survey[3]*1000 # tonnes -> kg
## NOTE(review): positional row drop -- row 5 is the ECOCADIZ-RECLUTAS
## observation; fragile if the input ordering ever changes.
sarnumber.survey<-sarnumber.survey[-5,] # row 5 is from ecocadiz reclutas
names(sarnumber.survey)<-c('year','month','count')
sarnumber.survey$species<-'ANE'
sarnumber.survey$areacell<-"IXa"
sarnumber.survey$sampling_type<-'sar_bio'
sarnumber.survey$vessel<-'1.RSH'

######################
## ---- Quarterly catches in biomass: IXa GoC (Spain) and IXa Alg (Portugal)
## Spreadsheet cell ranges are hard-coded -- TODO confirm when the workbook
## is updated with new years.
# IXaGoC
seine_bio<-readWorksheetFromFile("/home/marga/GADGET/DATOS/ane 27.9a_Catches by Subdivision.xlsx",
                                 sheet = "Quarterly Catches", header = F,
                                 startCol = 30, startRow = 7, endCol = 34, endRow = 34,
                                 useCachedValues=TRUE)
## Duplicate "count" names: columns 2-5 are the four quarters, stacked below.
names(seine_bio)<-c("year","count","count","count","count")
catches.seine_bio_GoC<-rbind(seine_bio[c(1,2)], seine_bio[c(1,3)],seine_bio[c(1,4)],seine_bio[c(1,5)])
#catches.agedist<-rbind(catchespre[c(1,2,3)],catchespre[c(1,2,4)],catchespre[c(1,2,5)],catchespre[c(1,2,6)])
catches.seine_bio_GoC[2]<-catches.seine_bio_GoC[2]*1000 # tonnes to kg
dim(seine_bio)
catches.seine_bio_GoC$month<-rep(seq(3,12,3),times=1, each=dim(seine_bio)[1])
catches.seine_bio_GoC$species<-'ANE'
catches.seine_bio_GoC$areacell<-"IXa"
catches.seine_bio_GoC$sampling_type<-'SEA'
catches.seine_bio_GoC$vessel<-"3.CQT"

# IXaAlg (Portugal)
port_bio<-readWorksheetFromFile("/home/marga/GADGET/DATOS/ane 27.9a_Catches by Subdivision.xlsx",
                                sheet = "Quarterly Catches", header = F,
                                startCol = 23, startRow = 10, endCol = 27, endRow = 34,
                                useCachedValues=TRUE)
names(port_bio)<-c("year","count","count","count","count")
catches.port_bio_Alg<-rbind(port_bio[c(1,2)], port_bio[c(1,3)],port_bio[c(1,4)],port_bio[c(1,5)])
#catches.agedist<-rbind(catchespre[c(1,2,3)],catchespre[c(1,2,4)],catchespre[c(1,2,5)],catchespre[c(1,2,6)])
catches.port_bio_Alg[2]<-catches.port_bio_Alg[2]*1000 # tonnes to kg
dim(port_bio)
catches.port_bio_Alg$month<-rep(seq(3,12,3),times=1, each=dim(port_bio)[1])
catches.port_bio_Alg$species<-'ANE'
catches.port_bio_Alg$areacell<-"IXa"
catches.port_bio_Alg$sampling_type<-'SEA'
catches.port_bio_Alg$vessel<-"3.CDA"

## ---- ECOCADIZ-RECLUTAS age-length keys, one workbook per survey year -----
## 2013 is missing (no survey); 2012's survey ran in November, fixed below.
mybiglist <- list()
for (i in c(2012,2014:2016)){
  name<-paste("Ecocadiz_rec",i,sep="")
  ## ALK block: length rows x ages 0-3.
  a<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/ECOCADIZ-RECLUTAS/ALK_ECO-R_",i,".xls",sep=""),
                           sheet = "ALK_GENERAL_BOQUERON", header = TRUE,
                           startCol = 11, startRow = 7, endCol = 15, endRow = 36,
                           useCachedValues=TRUE)
  ## Single cell holding the survey total (biomass) for that year.
  b<-readWorksheetFromFile(paste("/home/marga/GADGET/DATOS/ECOCADIZ-RECLUTAS/ALK_ECO-R_",i,".xls",sep=""),
                           sheet = "ALK_GENERAL_BOQUERON", header = F,
                           startCol = 9, startRow = 3, endCol = 9, endRow = 3,
                           useCachedValues=TRUE)
  names(a)<-c("length","0", "1", "2", "3")
  a$year<-i
  a$month<-10
  a$type<-"ecocadiz_rec_comp"
  a$total<-b[1,1]
  a[is.na(a)] <- 0
  mybiglist[[name]] <- a
}
mybiglist$Ecocadiz_rec2012$month<-11  # the 2012 survey took place in November
df<-plyr::ldply(mybiglist,data.frame)
names(df)<-c(".id", "length", "0", "1", "2", "3", "year", "month", "type","total")

## Biomass index: one (year, month, total) row per survey.
ecocadiz_rec_bio<-cbind(df[7],df[8],df[10])
names(ecocadiz_rec_bio)<-c("year","month","count")
ecocadiz_rec_bio$areacell<-"IXa"
ecocadiz_rec_bio$species<-'ANE'
ecocadiz_rec_bio$sampling_type<-"ecocadizrec_bio"
ecocadiz_rec_bio$vessel<-"1.RSH"
ecocadiz_rec_bio<-na.omit(ecocadiz_rec_bio)
ecocadiz_rec_bio<-unique(ecocadiz_rec_bio)

## Age-length composition in long format.
ecocadiz_rec_comp<-df[-10] #remove total
ecocadiz_rec_def<-melt(ecocadiz_rec_comp, id=c(".id","length","year", "month", "type" ))
names(ecocadiz_rec_def)<-c(".id","length","year", "month", "sampling_type","age","count")
ecocadiz_rec_def$areacell<-"IXa"
ecocadiz_rec_def$species<-'ANE'
ecocadiz_rec_def$count<-signif(ecocadiz_rec_def$count,2)
ecocadiz_rec_def$count<-ecocadiz_rec_def$count*1000 # so counts are in individuals
ecocadizrec.ldist<-ecocadiz_rec_def[,2:9]

## ---- MFDB taxonomy imports -----------------------------------------------
areadef<-data.frame(name="IXa")
mfdb_import_area(mdb, areadef)
## NOTE(review): only "2.CDA" and "1.RSH" are registered here, but vessels
## "3.CQT" and "3.CDA" are assigned above -- confirm the vessel taxonomy is
## complete before importing the fleet data.
vesseldef<-data.frame(name=c("2.CDA","1.RSH"))
mfdb_import_vessel_taxonomy(mdb, vesseldef)

library(tidyverse)
library(gadget3)

## Default query filters reused by every mfdb_sample_* call below.
defaults <- list(
  #area = mfdb::mfdb_group("IXa" = 1),
  timestep = mfdb::mfdb_timestep_quarterly,
  year = 1988:2021,
  species="ANE")
#areas <- structure(
#  seq_along(defaults$area),
#  names = names(defaults$area))
#
# Timekeeping for the model, i.e. how long we run for
time_actions <- list(
  g3a_time(start_year = min(defaults$year),
           end_year = max(defaults$year),
           defaults$timestep),
  list())

##### Configure stocks #########################################################
## Single anchovy stock: 3-22 cm in 0.5 cm bins, ages 0-3, no area structure.
anch <-
  g3_stock(c(species = 'anch'), lengthgroups = seq(3, 22, 0.5)) %>%
  #g3s_livesonareas(areas[c('IXa')]) %>%
  g3s_age(minage = 0, maxage = 3)

# ling_mat <-
#   g3_stock(c(species = 'ling', 'mat'), lengthgroups = seq(20, 156, 4)) %>%
#   g3s_livesonareas(areas[c('1')]) %>%
#   g3s_age(minage = 5, maxage = 15)

## Maximum number of length groups a stock can group within a time step (maxlengthgroupgrowth)
mlgg <- 5

############ Configure fleets ##################################################
## Survey(s)
seine <- g3_fleet('seine')# %>%
  #g3s_livesonareas(areas[c('IXa')])
## Commercial
ECO <- g3_fleet('ECO') #%>%
  #g3s_livesonareas(areas[c('IXa')])
PEL <- g3_fleet('PEL')# %>%
  #g3s_livesonareas(areas[c('IXa')])
# gil <-
#   g3_fleet('gil') %>%
#   g3s_livesonareas(areas[c('1')])
#
# foreign <-
#   g3_fleet('foreign') %>%
#   g3s_livesonareas(areas[c('1')])

# mfdb_import_cs_taxonomy(mdb, 'index_type', data.frame(name = c(
#   "acoustic",
#   "effort",
#   NULL)))
# mfdb_import_sampling_type(mdb, data.frame(
#   id = 1:5,
#   name = c('SEA', 'pelago','ecocadiz', 'ecocadiz0813','sar'),
#   description = c('Sea sampling', 'pelago','ecocadiz', 'ecocadiz0813','sar')))
#
## Register every sampling type used by the imports below in the compiled DB.
mfdb_import_sampling_type(mdb_compi, data.frame(
  id = 1:23,
  name = c('SEA', 'pelago','ecocadiz', 'ecocadiz0813','sar','pelago_comp','ecocadiz_comp','pelago_bio','ecocadiz_bio',
           'sar_bio','ecocadizrec_bio','ecocadiz_rec_comp','bocadeva_bio','alkcatches_comp',
           'arsa_comp','arsa_bio','SEAS','SEAS2','arsa_compld','ecocadiz_comp_ld','ecocadiz_comp_ald','pelago_comp_ld','pelago_comp_ald'),
  description = c('Sea sampling', 'pelago','ecocadiz',
                  'ecocadiz0813','sar','pelago_comp','ecocadiz_comp','pelago_bio','ecocadiz_bio','sar_bio','ecocadizrec_bio','ecocadiz_rec_comp','bocadeva_bio','alkcatches_comp',
                  'arsa_comp','arsa_bio','seas2','seas3','arsa_solo_length','ecocadiz_comp_ld','ecocadiz_comp_ald','pelago_comp_ld','pelago_comp_ald')))
mdb<-mdb_compi

## Widen the sample.count column so biomass values (kg) fit; uses an
## internal mfdb function -- NOTE(review): ::: access may break across
## mfdb versions.
mfdb:::mfdb_send(mdb, paste0("ALTER TABLE sample ALTER COLUMN count TYPE NUMERIC"))

## ---- Import the prepared data sets (built above / upstream in this file) --
mfdb_import_survey(mdb,
                   data_source = 'example-anchovy-seine.ldist',
                   seine.ldist)
#mfdb_import_survey(mdb,
#                   data_source = 'example-anchovy-ecocadiz0813.ldist',
#                   ecocadiz0813.ldist)
# mfdb_import_survey(mdb,
#                    data_source = 'example-anchovy-weight.at.age',
#                    weight.at.age)
# mfdb_import_survey(mdb,
#                    data_source = 'example-anchovy-length.at.age',
#                    length.at.age)
#
# mfdb_import_survey(mdb,
#                    data_source = 'example-anchovy-catch.at.agedist',
#                    catches.agedist)
mfdb_import_survey(mdb,
                   data_source = 'example-anchovy-ALKcatch.at.dist',
                   ALKcatches.ldist)
#mfdb:::mfdb_send(mdb, paste0("ALTER TABLE ", mdb$schema, ".sample ALTER COLUMN count TYPE NUMERIC"))
mfdb_import_survey(mdb,
                   data_source = 'example-anchovy-allptsp.dist',
                   fleet.allptsp)
# mfdb_import_survey(mdb,
#                    data_source = 'example-anchovy-fleet-Port',
#                    catches.port_bio_Alg)
# mfdb_import_survey(mdb,
#                    data_source = 'example-anchovy-fleet-Esp',
#                    catches.seine_bio_GoC)
# mfdb_import_survey_index(mdb, data_source = 'acoustic_index1', data.frame(
#   index_type = 'acoustic',
#   year = '1998',
#   month = 1:12,
#   areacell = '45G01',
#   #        -----Q1-----  -----Q2-----  -----Q3-----  -----Q4-----
#   value = c(12, 11, 10,   9,  8,  7,    6,  5,  4,    3,  2,  1 )))
# mfdb_import_survey(mdb,
#                    data_source = 'example-anchovy-acoustic-pelago.numbers',
#                    pelagonumber.survey)
mfdb_import_survey(mdb,
                   data_source = 'example-anchovy-acoustic-pelago.biomass',
                   pelago_biom)
mfdb_import_survey(mdb,
                   data_source = 'example-anchovy-acoustic-pelago.ldist',
                   pelago.ldist)
mfdb_import_survey(mdb,
                   data_source = 'example-anchovy-sar.numbers',
                   sarnumber.survey)
#
# mfdb_import_survey(mdb,
#                    data_source = 'example-anchovy-ecocadiz.numbers',
#                    ecocadiz.survey)

## ---- Remaining survey imports (objects built upstream in this file) ------
mfdb_import_survey(mdb,
                   data_source = 'example-anchovy-ecocadiz.biomass',
                   ecocadiz_biom)
mfdb_import_survey(mdb,
                   data_source = 'example-anchovy-ecocadiz.ldist',
                   ecocadiz.ldist)
mfdb_import_survey(mdb,
                   data_source = 'example-anchovy-ecocadizrec.biomass',
                   ecocadiz_rec_bio)
mfdb_import_survey(mdb,
                   data_source = 'example-anchovy-ecocadizrec.ldist',
                   ecocadizrec.ldist)
mfdb_import_survey(mdb,
                   data_source = 'example-anchovy-arsa.ldist',
                   arsa.ldist)
mfdb_import_survey(mdb,
                   data_source = 'example-anchovy-arsa.ldistnoage',
                   arsa.ldistnoage)
mfdb_import_survey(mdb,
                   data_source = 'example-anchovy-pelago.ldistnoage',
                   pelago.ldistnoage)
mfdb_import_survey(mdb,
                   data_source = 'example-anchovy-ecocadiz.ldistnoage',
                   ecocadiz.ldistnoage)
mfdb_import_survey(mdb,
                   data_source = 'example-anchovy-ecocadiz.aldist',
                   ecocadiz.aldist)
mfdb_import_survey(mdb,
                   data_source = 'example-anchovy-pelago.aldist',
                   pelago.aldist)
mfdb_import_survey(mdb,
                   data_source = 'example-anchovy-arsa.biomass',
                   ARSA_biom)
mfdb_import_survey(mdb,
                   data_source = 'example-anchovy-bocadeva.biomass',
                   bocadeva_biom)

## Aggfile layout used by the old gadget2 model (Anchovy73), kept as notes:
# bounds understocking
# ldist.seine:
#   areaaggfile Aggfiles/catchdistribution.ldist.seine.area.agg
#   ageaggfile  Aggfiles/catchdistribution.ldist.seine.age.agg
#   lenaggfile  Aggfiles/catchdistribution.ldist.seine.len.agg
# ldist.ecocadiz:
#   areaaggfile Aggfiles/catchdistribution.ldist.ecocadiz.area.agg
#   ageaggfile  Aggfiles/catchdistribution.ldist.ecocadiz.age.agg
#   lenaggfile  Aggfiles/catchdistribution.ldist.ecocadiz.len.agg
# catches.agedist:
#   areaaggfile Aggfiles/catchdistribution.catches.agedist.area.agg
#   ageaggfile  Aggfiles/catchdistribution.catches.agedist.age.agg
#   lenaggfile  Aggfiles/catchdistribution.catches.agedist.len.agg
# pelagonumber.survey:
#   areaaggfile Aggfiles/surveyindices.pelagonumber.survey.area.agg
#   lenaggfile  Aggfiles/surveyindices.pelagonumber.survey.len.agg
# ldist.pelago:
#   areaaggfile Aggfiles/catchdistribution.ldist.pelago.area.agg
#   ageaggfile  Aggfiles/catchdistribution.ldist.pelago.age.agg
#   lenaggfile  Aggfiles/catchdistribution.ldist.pelago.len.agg
# ecocadiz.survey:
#   areaaggfile Aggfiles/surveyindices.ecocadiz.survey.area.agg
#   lenaggfile  Aggfiles/surveyindices.ecocadiz.survey.len.agg
# sarnumber.survey:
#   areaaggfile Aggfiles/surveyindices.sarnumber.survey.area.agg
#   lenaggfile  Aggfiles/surveyindices.sarnumber.survey.len.agg
# length.at.age:
#   areaaggfile Aggfiles/catchstatistics.length.at.age.area.agg
#   ageaggfile  Aggfiles/catchstatistics.length.at.age.age.agg
#
# defaults <- list(
#   timestep = mfdb_timestep_quarterly,
#   year = 1984:2012,
#   species = 'COD')

## Write out areafile and update mainfile with areafile location
# gadget_dir_write(gd, gadget_areafile(
#   size = mfdb_area_size(mdb, defaults)[[1]],
#   temperature = mfdb_temperature(mdb, defaults)[[1]]))
#
## Write a penalty component to the likelihood file
# gadget_dir_write(gd, gadget_likelihood_component("penalty",
#                                                  name = "bounds",
#                                                  weight = "0.5",
#                                                  data = data.frame(
#                                                    switch = c("default"),
#                                                    power = c(2),
#                                                    lowerW=10000,
#                                                    upperW=10000,
#                                                    stringsAsFactors = FALSE)))
#
# gadget_dir_write(gd, gadget_likelihood_component("understocking", name ="understocking",weight = "1e8"))

## ---- Seine length distribution for the gadget3 likelihood ----------------
aggdata <- mfdb_sample_count(mdb, c('age', 'length'), c(list(
  sampling_type='SEA',length = mfdb_interval("len", seq(3,22,0.5))),
  defaults))
# aggdata <- mfdb_sample_count(mdb, c('age', 'length'), c(list(
#   sampling_type = 'IGFS',
#   species = 'COD',
#   length = mfdb_interval("len", seq(0, 150, by = 2))),
#   defaults))
# gadget_dir_write(gd, gadget_likelihood_component("catchdistribution",
#                                                  name = "ldist.seine",
#                                                  weight = 1,
#                                                  data = aggdata[[1]],
#                                                  fleetnames = c("seine"),
#                                                  stocknames = c("anch")))
# gadget_dir_write(gd, gadget_file("Aggfiles/catchdistribution.ldist.seine.age.agg", list(list(all=c(0,3)))))
save(aggdata, file="demo-anchovy/data/ldist.seine.Rdata")
rm(aggdata)
# aggdata <-
#   mfdb_sample_count(mdb, c('age','length'), c(list(
#     sampling_type='arsa_compld', year=1993:2000,length = mfdb_interval("len", seq(3,20,1))),
#     defaults))
# aggdata[[1]]$year<-aggdata[[1]]$year+1
# aggdata[[1]]$step<-1
#
#
# gadget_dir_write(gd, gadget_likelihood_component("catchdistribution",
#                                                  name = "ldist.arsa.noage1",
#                                                  stocknames = c("anch"),
#                                                  weight = 1,
#                                                  fleetnames = c("ARSA"),
#                                                  data = aggdata[[1]]
#                                                  ))
# gadget_dir_write(gd, gadget_file("Aggfiles/catchdistribution.ldist.arsa.noage1.age.agg", list(list(all=c(0,3)))))
#
# rm(aggdata)
#
# aggdata <- mfdb_sample_count(mdb, c('age','length'), c(list(
#   sampling_type='arsa_compld', year=2001:2016,length = mfdb_interval("len", seq(3,22,0.5))),
#   defaults))
# aggdata[[1]]$year<-aggdata[[1]]$year+1
# aggdata[[1]]$step<-1
#
# gadget_dir_write(gd, gadget_likelihood_component("catchdistribution",
#                                                  name = "ldist.arsa.noage2",
#                                                  stocknames = c("anch"),
#                                                  weight = 1,
#                                                  fleetnames = c("ARSA"),
#                                                  data = aggdata[[1]]
#                                                  ))
# gadget_dir_write(gd, gadget_file("Aggfiles/catchdistribution.ldist.arsa.noage2.age.agg", list(list(all=c(0,3)))))
# #rm(aggdata)

## ---- PELAGO length distribution (no age) ---------------------------------
aggdata <- mfdb_sample_count(mdb, c('age','length'), c(list(
  sampling_type='pelago_comp_ld', length = mfdb_interval("len", seq(3,22,0.5))),
  defaults))
# gadget_dir_write(gd, gadget_likelihood_component("catchdistribution",
#                                                  name = "ldist.pelago.noage",
#                                                  stocknames = c("anch"),
#                                                  weight = 1,
#                                                  fleetnames = c("PEL"),
#                                                  data = aggdata[[1]]
#                                                  ))
# gadget_dir_write(gd, gadget_file("Aggfiles/catchdistribution.ldist.pelago.noage.age.agg", list(list(all=c(0,3)))))
save(aggdata, file="demo-anchovy/data/ldist.pelago.Rdata")
rm(aggdata)

## ---- ECOCADIZ length distribution (no age) -------------------------------
aggdata <- mfdb_sample_count(mdb, c('age','length'), c(list(
  sampling_type='ecocadiz_comp_ld', length = mfdb_interval("len", seq(3,22,0.5))),
  defaults))
# gadget_dir_write(gd, gadget_likelihood_component("catchdistribution",
#                                                  name = "ldist.ecocadiz.noage",
#                                                  stocknames = c("anch"),
#                                                  weight = 1,
#                                                  fleetnames = c("ECO"),
#                                                  data = aggdata[[1]]
#                                                  ))
# gadget_dir_write(gd, gadget_file("Aggfiles/catchdistribution.ldist.ecocadiz.noage.age.agg", list(list(all=c(0,3)))))
save(aggdata, file="demo-anchovy/data/ldist.ecocadiz.Rdata")
rm(aggdata)

## ---- Seine catch age-length distribution (from the catch ALK) ------------
aggdata <- mfdb_sample_count(mdb, c('age','length'), c(list(
  sampling_type='alkcatches_comp',length = mfdb_interval("len", seq(3.5,19.5,0.5)), age=c(0,1,2,3)),
  defaults))
# aggdata <- mfdb_sample_count(mdb, c('age', 'length'), c(list(
#   sampling_type = 'IGFS',
#   species = 'COD',
#   length = mfdb_interval("len", seq(0, 150, by = 2))),
#   defaults))
# gadget_dir_write(gd, gadget_likelihood_component("catchdistribution",
#                                                  name = "ldist.alkseine",
#                                                  weight = 1,
#                                                  data = aggdata[[1]],
#                                                  fleetnames = c("seine"),
#                                                  stocknames = c("anch")))
save(aggdata, file="demo-anchovy/data/aldist.seine.Rdata")
rm(aggdata)

## ---- PELAGO age-length distribution --------------------------------------
aggdata <- mfdb_sample_count(mdb, c('age','length'), c(list(
  sampling_type='pelago_comp_ald',length = mfdb_interval("len", seq(3.5,19.5,0.5)), age=c(0,1,2,3)),
  defaults))
# aggdata <- mfdb_sample_count(mdb, c('age', 'length'), c(list(
#   sampling_type = 'IGFS',
#   species = 'COD',
#   length = mfdb_interval("len", seq(0, 150, by = 2))),
#   defaults))
# gadget_dir_write(gd, gadget_likelihood_component("catchdistribution",
#                                                  name = "aldist.pelago",
#                                                  weight = 1,
#                                                  data = aggdata[[1]],
#                                                  fleetnames = c("PEL"),
#                                                  stocknames = c("anch")))
save(aggdata, file="demo-anchovy/data/aldist.pelago.Rdata")
rm(aggdata)

## ---- ECOCADIZ age-length distribution ------------------------------------
## Age-0 fish were removed from the ECOCADIZ composition (ages 1-3 only).
aggdata <- mfdb_sample_count(mdb, c('age','length'), c(list(
  sampling_type='ecocadiz_comp_ald',length = mfdb_interval("len", seq(3.5,19.5,0.5)), age=c(1,2,3)),
  defaults))
# aggdata <- mfdb_sample_count(mdb, c('age','length'), c(list(
#   sampling_type='ecocadiz_comp_ald',length = mfdb_interval("len", seq(3.5,19.5,0.5)), age=c(0,1,2,3)),
#   defaults))
#
# aggdata <- mfdb_sample_count(mdb, c('age', 'length'), c(list(
#   sampling_type = 'IGFS',
#   species = 'COD',
#   length = mfdb_interval("len", seq(0, 150, by = 2))),
#   defaults))
#
# gadget_dir_write(gd, gadget_likelihood_component("catchdistribution",
#                                                  name = "aldist.ecocadiz",
#                                                  weight = 1,
#                                                  data = aggdata[[1]],
#                                                  fleetnames = c("ECO"),
#                                                  stocknames = c("anch")))
#
## NOTE(review): the commented/active status of this save() was ambiguous in
## the mangled source; kept active for consistency with the other sections.
save(aggdata, file="demo-anchovy/data/aldist.ecocadiz.Rdata")
rm(aggdata)

############################################
## Commented-out exploratory variants (ecocadiz0813 / ecocadiz_comp):
# aggdata <- mfdb_sample_count(mdb, c('age','length'), c(list(
#   sampling_type='ecocadiz0813',length = mfdb_interval("len", seq(6,18,0.5))),
#   defaults))
# aggdata <- mfdb_sample_count(mdb, c('age','length'), c(list(
#   sampling_type='ecocadiz_comp',length = mfdb_interval("len", seq(3.5,19.5,0.5))),
#   defaults))
##############################
# aggdata <- mfdb_sample_count(mdb, c('age','length'), c(list(
#   sampling_type='ecocadiz_comp',length = mfdb_interval("len", seq(3.5,19.5,0.5)), age=c(0,1,2,3)),
#   defaults))
# aggdata <- mfdb_sample_count(mdb, c('age', 'length'), c(list(
#   sampling_type = 'IGFS',
#   species = 'COD',
#   length = mfdb_interval("len", seq(0, 150, by = 2))),
#   defaults))
# gadget_dir_write(gd, gadget_likelihood_component("catchdistribution",
#                                                  name = "ldist.ecocadiz0813",
#                                                  stocknames = c("anch"),
#                                                  weight = 1,
#                                                  fleetnames = c("seine"),
#                                                  data = aggdata[[1]]
#                                                  ))
# gadget_dir_write(gd, gadget_file("Aggfiles/catchdistribution.ldist.ecocadiz0813.age.agg", list(list(all=c(0,3)))))
#
# # ECO or SEINE as the fleet
# gadget_dir_write(gd, gadget_likelihood_component("catchdistribution",
#                                                  name = "ldist.ecocadiz",
#                                                  stocknames = c("anch"),
#                                                  weight = 1,
#                                                  fleetnames = c("ECO"),
#                                                  data = aggdata[[1]]
#                                                  ))
# gadget_dir_write(gd, gadget_file("Aggfiles/catchdistribution.ldist.ecocadiz0813.age.agg", list(list(all=c(0,3)))))
#
# rm(aggdata)

################################# not this one
# aggdata <- mfdb_sample_count(mdb, c('age', 'length'), c(list(sampling_type='SEA',age =c(0,1,2,3)),defaults))
#
# gadget_dir_write(gd, gadget_likelihood_component("catchdistribution",
#                                                  name = "catches.agedist",
#                                                  weight = 1,
#                                                  data = aggdata[[1]],
#                                                  fleetnames = c("seine"),
#                                                  stocknames = c("anch")))
#
# gadget_dir_write(gd, gadget_file("Aggfiles/catchdistribution.catches.agedist.len.agg", list(list(all=c(3,22)))))
# rm(aggdata)
#
# aggdata<-mfdb_sample_count(mdb,c("vessel"),c(
#   list(vessel=mfdb_group(seine="3.CQT"),sampling_type='SEA'),
#   defaults))
# gadget_dir_write(gd, gadget_fleet_component(
#   'numberfleet',
#   name = 'seine',
#   suitability = "\n anch function expsuitfuncl50 #constant #l50",
#   data=aggdata[[1]]
# ))
# rm(aggdata)
#################################33
#####################################3
###########################
## Exploratory biomass summary across all surveys and catches (plots only):
# aggdata<-mfdb_sample_count(mdb, c("sampling_type","vessel"),
#                            c(list(sampling_type=c('pelago_bio','ecocadiz_bio','ecocadizrec_bio','sar_bio','SEA'),vessel=c("3.CQT","3.CDA","1.RSH")),defaults))
#
# Biomass_summary<-aggdata[[1]]
# Biomass_summary$number<-Biomass_summary$number/1000 # in tonnes
# Biomass_summary$com[Biomass_summary$sampling_type=='SEA']<-1
# Biomass_summary$com[Biomass_summary$sampling_type!='SEA']<-0
# #ggplot(aggdata[[1]],aes(interaction(step,year),number,colour=sampling_type, group=1))+geom_line()
# rm(aggdata)
# ggplot(Biomass_summary,aes(interaction(step,year),number,colour=sampling_type, group=sampling_type))+geom_line()
# #dev.new()
# #dev.list()
# Biomass_summary_by_year<-aggregate(number~year+area+sampling_type, Biomass_summary, sum)
# as<-ggplot(Biomass_summary_by_year,aes(year,number,colour=sampling_type, group=sampling_type))+geom_line()+geom_point()+theme_bw()+theme(axis.text.x = element_text(angle = 90, hjust = 1))
# ggsave(as+scale_x_continuous(breaks=1989:2016,limits=c(1989,2016), labels=1989:2016)+ylab("Biomass (tonnes)")+scale_colour_discrete(name = "",breaks=unique(Biomass_summary_by_year$sampling_type),labels=c( "ecocadiz", "ecocadizrec", "pelago" , "sar" , "Catches IXa South" )),file="biomass_summary_year.pdf")
#
# asi<-ggplot(Biomass_summary_by_year,aes(year,number,colour=sampling_type, group=sampling_type))+geom_point()+theme_bw()+theme(axis.text.x = element_text(angle = 90, hjust = 1))
# asi
# asii<-as+scale_x_continuous(breaks=1989:2016,limits=c(1989,2016), labels=1989:2016)+ylab("Biomass (tonnes)")+scale_colour_discrete(name = "",breaks=unique(Biomass_summary_by_year$sampling_type),labels=c( "ecocadiz", "ecocadizrec", "pelago" , "sar" , "Catches IXa South" ))+scale_y_log10(breaks=c(1000,10000,40000),labels=c(1000,10000,40000))
# ggsave(asii,file="biomass_summary_year_log.pdf")
##############################################################3
## For Gadget with biomass:
# aggdata<-mfdb_sample_count(mdb,c("vessel"),c(
#   list(vessel=mfdb_group(seine="3.CQT"),sampling_type='SEA'),
#   defaults))
# aggdata[[1]]$vessel<-"seine"
# gadget_dir_write(gd, gadget_fleet_component(
#   'totalfleet',
#   name = 'seine',
#   suitability = "\n anch function expsuitfuncl50 #constant #l50",
#   data=aggdata[[1]]
# ))
# rm(aggdata)
#
# aggdata<-mfdb_sample_count(mdb,c("vessel"),c(
#   list(vessel=mfdb_group(seineport="3.CDA"),sampling_type='SEA'),
#   defaults))
# aggdata[[1]]$vessel<-"PortBIO"
# gadget_dir_write(gd, gadget_fleet_component(
#   'totalfleet',
#   name = 'seineport',
#   suitability = "\n anch function expsuitfuncl50 #constant #l50",
#   data=aggdata[[1]]
# ))
# rm(aggdata)
## For Gadget with catch numbers:
# aggdata<-mfdb_sample_count(mdb,c("vessel"),c(
#   list(vessel=mfdb_group(seine="2.CQT"),sampling_type='SEA'),
#   defaults))
# aggdata[[1]]$vessel<-"seine"
# gadget_dir_write(gd, gadget_fleet_component(
#   'numberfleet',
#   name = 'seine',
#   suitability = "\n anch function expsuitfuncl50 Initfiles/anchalpha.tvf Initfiles/anchL50.tvf" ,
#   data=aggdata[[1]]
# ))
#########################################################3
## For Gadget with all fleets:
# aggdata<-mfdb_sample_count(mdb,c("sampling_type"),c(
#   list(sampling_type='SEAS2'),
#   defaults))
#aggdata<-mfdb_sample_count(mdb, c("sampling_type"),
#                           c(list(sampling_type=c('SEAS2')),defaults))
## ---- Seine fleet landings (biomass, vessel 2.CDA) for gadget3 ------------
aggdata<-mfdb_sample_count(mdb,NULL,c(
  list(vessel=mfdb_group(all="2.CDA")),
  defaults))
aggdata[[1]]$area<-NULL
save(aggdata, file="demo-anchovy/data/seine_landings.Rdata")
rm(aggdata)
#aggdata[[1]]$vessel<-"seine"
# gadget_dir_write(gd, gadget_fleet_component(
#   'numberfleet',
#   name = 'seine',
#   suitability = "\n anch function expsuitfuncl50 Initfiles/anchalpha.tvf Initfiles/anchL50.tvf" ,
#   data=aggdata[[1]]
# ))
# aggdata<-mfdb_sample_count(mdb,c("vessel"),c(
#   list(vessel=mfdb_group(seine="3.CQT"),sampling_type='SEA'),
#   defaults))
# aggdata[[1]]$vessel<-"ARSA"
# aggdata[[1]]$number<-1
# gadget_dir_write(gd, gadget_fleet_component(
#   'totalfleet',
#   name = 'ARSA',
#   suitability = "\n anch function expsuitfuncl50 #constantarsa #l50arsa ",
#   data=aggdata[[1]]
# ))

## ---- ECOCADIZ "fleet" (survey removals; number forced to 1) --------------
## NOTE(review): number<-1 makes this a presence/timing series rather than a
## quantity -- confirm this matches how the ECO fleet consumes it.
aggdata<-mfdb_sample_count(mdb,NULL,c(
  list(vessel=mfdb_group(seine="1.RSH"),sampling_type='ecocadiz_bio'),
  defaults))
aggdata[[1]]$area<-NULL
aggdata[[1]]$number<-1
save(aggdata, file="demo-anchovy/data/ECO_landings.Rdata")
rm(aggdata)
#gadget_dir_write(gd, gadget_fleet_component(
#  'totalfleet',
#  name = 'ECO',
#  suitability = "\n anch function expsuitfuncl50 #constanteco #l50eco ",
#  data=aggdata[[1]]
# ))

## ---- PELAGO "fleet" (survey removals; number forced to 1) ----------------
aggdata<-mfdb_sample_count(mdb,NULL,c(
  list(vessel=mfdb_group(seine="1.RSH"),sampling_type='pelago_bio'),
  defaults))
aggdata[[1]]$area<-NULL
aggdata[[1]]$number<-1
save(aggdata, file="demo-anchovy/data/PEL_landings.Rdata")
rm(aggdata)
#################################33 ecorec fleet
# # aggdata[[1]]$vessel<-"ECOREC"
# # gadget_dir_write(gd, gadget_fleet_component(
# #   'totalfleet',
# #   name = 'ECOREC',
# #   suitability = "\n anch function expsuitfuncl50 #constantecorec #l50ecorec ",
# #   data=aggdata[[1]]
# # ))
# # rm(aggdata)
##############################################3
# agg <- mfdb_survey_index_mean(mdb, cols = c('data_source'), list(
#   index_type = 'acoustic',
#aggdata<- mfdb_sample_count(mdb, c( 'length'), c(
#defaults))
#agg <- mfdb_survey_index_mean(mdb, c("survey"),
#              c(list(index_type='acoustic'),defaults))
########################################################3
## ---- PELAGO biomass index (replaces the earlier numbers index) -----------
aggdata<-mfdb_sample_count(mdb, c('length'), c(list(sampling_type='pelago_bio'),defaults))
#
#gadget_dir_write(gd, gadget_likelihood_component("surveyindices",
#                                                 name = "pelagonumber.survey",
#                                                 weight = 1,
#                                                 data = aggdata[[1]],
#                                                 fittype = 'fixedslopeloglinearfit',
#                                                 slope=1,
#                                                 biomass=1,
#                                                 stocknames = c("anch"),
#                                                 surveynames='pelago'))
save(aggdata, file="demo-anchovy/data/pelago.bioindex.Rdata")
rm(aggdata)
# gadget_dir_write(gd, gadget_file("Aggfiles/surveyindices.pelagonumber.survey.age.agg", list(list(all=c(0,3)))))
# gadget_dir_write(gd, gadget_file("Aggfiles/surveyindices.pelagonumber.survey.len.agg", list(list(all=c(3,22)))))
######################################################33
# # aggdata <- mfdb_sample_count(mdb, c('age','length'), c(list(
# #   sampling_type='pelago_comp',length = mfdb_interval("len", seq(3.5,19.5,0.5))),
# #   defaults))
#
# aggdata <- mfdb_sample_count(mdb, c('age','length'), c(list(
#   sampling_type='pelago_comp',age=c(0,1,2,3),length = mfdb_interval("len", seq(3.5,19.5,0.5))),
#   defaults))
#
# # aggdata <- mfdb_sample_count(mdb, c('age', 'length'), c(list(
# #   sampling_type = 'IGFS',
# #   species = 'COD',
# #   length = mfdb_interval("len", seq(0, 150, by = 2))),
# #   defaults))
#
# # gadget_dir_write(gd, gadget_likelihood_component("catchdistribution",
# #                                                  name = "ldist.ecocadiz0813",
# #                                                  stocknames = c("anch"),
# #                                                  weight = 1,
# #                                                  fleetnames = c("seine"),
# #                                                  data = aggdata[[1]]
# #                                                  ))
# # gadget_dir_write(gd, gadget_file("Aggfiles/catchdistribution.ldist.ecocadiz0813.age.agg", list(list(all=c(0,3)))))
#
# # ECO or SEINE as the fleet
# gadget_dir_write(gd, gadget_likelihood_component("catchdistribution",
#                                                  name = "ldist.pelago",
#                                                  stocknames = c("anch"),
#                                                  weight = 1,
#                                                  fleetnames = c("PEL"),
#                                                  data = aggdata[[1]]
#                                                  ))
#
# gadget_dir_write(gd, gadget_file("Aggfiles/catchdistribution.ldist.ecocadiz0813.age.agg", list(list(all=c(0,3)))))
#
## ecocadiz rec ##############################33
# rm(aggdata)
#
# aggdata <- mfdb_sample_count(mdb, c('age','length'), c(list(
#   sampling_type='ecocadiz_rec_comp',age=c(0,1,2,3),length = mfdb_interval("len", seq(3.5,19.5,0.5))),
#   defaults))
#
# # aggdata <- mfdb_sample_count(mdb, c('age', 'length'), c(list(
# #   sampling_type = 'IGFS',
# #   species = 'COD',
# #   length = mfdb_interval("len", seq(0, 150, by = 2))),
# #   defaults))
#
# # gadget_dir_write(gd, gadget_likelihood_component("catchdistribution",
# #                                                  name = "ldist.ecocadiz0813",
# #                                                  stocknames = c("anch"),
# #                                                  weight = 1,
# #                                                  fleetnames = c("seine"),
# #                                                  data = aggdata[[1]]
# #                                                  ))
# # gadget_dir_write(gd, gadget_file("Aggfiles/catchdistribution.ldist.ecocadiz0813.age.agg", list(list(all=c(0,3)))))
#
# # ECO or SEINE as the fleet
# gadget_dir_write(gd, gadget_likelihood_component("catchdistribution",
#                                                  name = "ldist.ecocadizrec",
#                                                  stocknames = c("anch"),
#                                                  weight = 1,
#                                                  fleetnames = c("ECOREC"),
#                                                  data = aggdata[[1]]
#                                                  ))
# gadget_dir_write(gd, gadget_file("Aggfiles/catchdistribution.ldist.ecocadiz0813.age.agg", list(list(all=c(0,3)))))
#
# rm(aggdata)

##################33 this one is still unclear (original note: "este aun no lo veo claro")
## ---- ECOCADIZ biomass survey index (legacy gadget2 component) ------------
aggdata<-mfdb_sample_count(mdb, c('length'), c(list(sampling_type='ecocadiz_bio'),defaults))
gadget_dir_write(gd, gadget_likelihood_component("surveyindices",
                                                 name = "ecocadiz.survey",
                                                 weight = 1,
                                                 data = aggdata[[1]],
                                                 fittype = 'fixedslopeloglinearfit',
                                                 slope=1,
                                                 stocknames = c("anch"),
                                                 biomass=1,
                                                 surveynames='ecocadiz'))
rm(aggdata)
gadget_dir_write(gd, gadget_file("Aggfiles/surveyindices.ecocadiz.survey.age.agg", list(list(all=c(0,3)))))
gadget_dir_write(gd, gadget_file("Aggfiles/surveyindices.ecocadiz.survey.len.agg", list(list(all=c(3,22)))))
########################################################
# aggdata<-mfdb_sample_count(mdb, c('length'), c(list(sampling_type='ecocadizrec_bio'),defaults))
#
#
# gadget_dir_write(gd, gadget_likelihood_component("surveyindices",
#                                                  name = "ecocadizrec.survey",
#                                                  weight = 1,
#                                                  data = aggdata[[1]],
#                                                  fittype = 'fixedslopeloglinearfit',
#                                                  slope=1,
#                                                  stocknames = c("anch"),
#                                                  biomass=1,
#                                                  surveynames='ecocadizrec'))
#
# rm(aggdata)
# gadget_dir_write(gd, gadget_file("Aggfiles/surveyindices.ecocadizrec.survey.age.agg", list(list(all=c(0,3)))))
# gadget_dir_write(gd, gadget_file("Aggfiles/surveyindices.ecocadizrec.survey.len.agg", list(list(all=c(3,22)))))
#
#
# aggdata<-mfdb_sample_count(mdb, c('length'), c(list(sampling_type='sar_bio'),defaults))
#
# gadget_dir_write(gd, gadget_likelihood_component("surveyindices",
#                                                  name = "sarnumber.survey",
#                                                  weight = 1,
#                                                  data = aggdata[[1]],
#                                                  fittype = 'fixedslopeloglinearfit',
#                                                  slope=1,
#                                                  biomass=1,
#                                                  stocknames = c("anch")))
#
# rm(aggdata)
# gadget_dir_write(gd, gadget_file("Aggfiles/surveyindices.sarnumber.survey.age.agg", list(list(all=c(0,3)))))
# gadget_dir_write(gd, gadget_file("Aggfiles/surveyindices.sarnumber.survey.len.agg", list(list(all=c(3,22)))))

## ---- BOCADEVA (egg survey) biomass index, legacy gadget2 component -------
## Original note "este ya no" ("not this one any more") -- NOTE(review):
## the code is still active despite the note; confirm whether this component
## should remain in the model.
aggdata<-mfdb_sample_count(mdb, c('length'), c(list(sampling_type='bocadeva_bio'),defaults))
gadget_dir_write(gd, gadget_likelihood_component("surveyindices",
                                                 name = "bocadeva.survey",
                                                 weight = 1,
                                                 data = aggdata[[1]],
                                                 fittype = 'fixedslopeloglinearfit',
                                                 slope=1,
                                                 biomass=1,
                                                 stocknames = c("anch")))
rm(aggdata)
gadget_dir_write(gd, gadget_file("Aggfiles/surveyindices.bocadeva.survey.age.agg", list(list(all=c(0,3)))))
gadget_dir_write(gd, gadget_file("Aggfiles/surveyindices.bocadeva.survey.len.agg", list(list(all=c(3,22)))))

# aggdata<-mfdb_sample_count(mdb, c('length'), c(list(sampling_type='arsa_bio'),defaults))
#
# gadget_dir_write(gd, gadget_likelihood_component("surveyindices",
#                                                  name = "arsa.survey",
#                                                  weight = 1,
#                                                  data = aggdata[[1]],
#                                                  fittype = 'fixedslopeloglinearfit',
#                                                  slope=1,
#                                                  biomass=1,
#                                                  stocknames = c("anch")))
#
# rm(aggdata)
# gadget_dir_write(gd,
#                  gadget_file("Aggfiles/surveyindices.arsa.survey.age.agg", list(list(all=c(0,3)))))
# gadget_dir_write(gd, gadget_file("Aggfiles/surveyindices.arsa.survey.len.agg", list(list(all=c(3,22)))))
# aggdata <- mfdb_sample_count(mdb, c('age','length'), c(list(
#   sampling_type='arsa_comp',length = mfdb_interval("len", seq(3,20,1)), age=c(0,1,2,3)),
#   defaults))
#
# gadget_dir_write(gd, gadget_likelihood_component("catchdistribution",
#                                                  name = "ldist.arsa",
#                                                  stocknames = c("anch"),
#                                                  weight = 1,
#                                                  fleetnames = c("ARSA"),
#                                                  data = aggdata[[1]]
#                                                  ))
# gadget_dir_write(gd, gadget_file("Aggfiles/catchdistribution.ldist.ecocadiz0813.age.agg", list(list(all=c(0,3)))))
#rm(aggdata)

## ---- Mean length at age likelihood (legacy gadget2 component) ------------
## Original note "este ya no" ("not this one any more") -- NOTE(review):
## still active; confirm whether it belongs in the current model.
aggdata<-mfdb_sample_meanlength(mdb, c('age'),
                                c(list(sampling_type='SEA',age =c(0,1,2,3)), defaults))
gadget_dir_write(gd, gadget_likelihood_component("catchstatistics",
                                                 name = "length.at.age",
                                                 weight = 1,
                                                 data = na.omit(aggdata[[1]]),
                                                 fleetnames = c("seine"),
                                                 stocknames = c("anch")))
########
## Suitability for the survey: exponential L50 curve, plotted for inspection.
## NOTE(review): l50=50 is far above the 3-22 cm length range, so the curve
## is essentially flat over the plotted lengths -- confirm intended values.
alpha<-0.08
l<-seq(0,20,0.5)
l50<-50
expsuitl50_pre<-1+exp(-alpha*(l-l50))
expsuitl50<-expsuitl50_pre^-1
plot(l,expsuitl50,xlim=c(0,20))

## For fleet.seine.data: quick total of the ecocadiz0813 counts.
sum(as.numeric(as.character(def_ecocadiz0813$count)))

#####################################################################
## NOTE(review): everything from here on is the cod (COD) example template
## (IGFS/AUT surveys, codimm/codmat stocks) -- it does not belong to the
## anchovy model and references fleets/stocks not defined in this script.
## Age IGFS
aggdata <- mfdb_sample_count(mdb, c('age', 'length'),
                             c(list(sampling_type = 'IGFS',
                                    age = mfdb_step_interval('age',by=1,from=1,to=12),
                                    species='COD',
                                    length = mfdb_interval("len", seq(0, 150, by = 4))),
                               defaults))
gadget_dir_write(gd, gadget_likelihood_component("catchdistribution",
                                                 name = "aldist.igfs",
                                                 weight = 1,
                                                 data = aggdata[[1]],
                                                 fleetnames = c("igfs"),
                                                 stocknames = c("codimm", "codmat")))
rm(aggdata)
## Maturity @3 from IGFS
aggdata <- mfdb_sample_count(mdb, c('maturity_stage', 'length'),
                             append(defaults,
                                    list(sampling_type='IGFS',
                                         age=mfdb_group(age=3),
                                         length = mfdb_step_interval('len', by = 2, to = 100),
                                         maturity_stage = mfdb_group(codimm = 1, codmat = 2:5))))
gadget_dir_write(gd, gadget_likelihood_component("stockdistribution", name = "matp.igfs", weight = 1, data = aggdata[[1]], fleetnames = c("igfs"), stocknames = c("codimm", "codmat"))) ## Query length data to create AUT catchdistribution components aggdata <- mfdb_sample_count(mdb, c('age', 'length'), c(list( sampling_type = 'AUT', species = 'COD', length = mfdb_interval("len", seq(0, 150, by = 2))), defaults)) gadget_dir_write(gd, gadget_likelihood_component("catchdistribution", name = "ldist.aut", weight = 1, data = aggdata[[1]], fleetnames = c("aut"), stocknames = c("codimm", "codmat"))) rm(aggdata) ## Age AUT aggdata <- mfdb_sample_count(mdb, c('age', 'length'), c(list(sampling_type = 'AUT', age = mfdb_step_interval('age',by=1,from=1,to=12), length = mfdb_interval("len", seq(0, 150, by = 4))), defaults)) gadget_dir_write(gd, gadget_likelihood_component("catchdistribution", name = "aldist.aut", weight = 1, data = aggdata[[1]], fleetnames = c("aut"), stocknames = c("codimm", "codmat"))) rm(aggdata) ## Query length data to create COMM catchdistribution components aggdata <- mfdb_sample_count(mdb, c('age', 'length'), c(list( sampling_type = 'SEA', species = 'COD', length = mfdb_interval("len", seq(0, 150, by = 2))), defaults)) gadget_dir_write(gd, gadget_likelihood_component("catchdistribution", name = "ldist.comm", weight = 1, data = aggdata[[1]], fleetnames = c("comm"), stocknames = c("codimm", "codmat"))) rm(aggdata) ## Age AUT aggdata <- mfdb_sample_count(mdb, c('age', 'length'), c(list(sampling_type = 'SEA', age = mfdb_step_interval('age',by=1,from=1,to=12), length = mfdb_interval("len", seq(0, 150, by = 4))), defaults)) gadget_dir_write(gd, gadget_likelihood_component("catchdistribution", name = "aldist.comm", weight = 1, data = aggdata[[1]], fleetnames = c("comm"), stocknames = c("codimm", "codmat"))) rm(aggdata) ## IGFS survey indices igfs.SI1 <- mfdb_sample_count(mdb, c('length'), c(list( sampling_type = 'IGFS', length = mfdb_interval("len", c(4,17))), 
defaults)) igfs.SI2 <- mfdb_sample_count(mdb, c('length'), c(list( sampling_type = 'IGFS', length = mfdb_interval("len", c(17,33))), defaults)) igfs.SI3 <- mfdb_sample_count(mdb, c( 'length'), c(list( sampling_type = 'IGFS', length = mfdb_interval("len", c(33,140))), defaults)) gadget_dir_write(gd, gadget_likelihood_component("surveyindicies", name = "si.gp1", weight = 1, data = igfs.SI1[[1]], fittype = 'fixedslopeloglinearfit', slope=1, stocknames = c("codimm"))) gadget_dir_write(gd, gadget_likelihood_component("surveyindicies", name = "si.gp2", weight = 1, data = igfs.SI2[[1]], fittype = 'fixedslopeloglinearfit', slope=1, stocknames = c("codimm","codmat"))) gadget_dir_write(gd, gadget_likelihood_component("surveyindicies", name = "si.gp3", weight = 1, data = igfs.SI3[[1]], fittype = 'fixedslopeloglinearfit', slope=1, stocknames = c("codimm","codmat"))) ## AUT survey indices igfs.SI1 <- mfdb_sample_count(mdb, c('length'), c(list( sampling_type = 'IGFS', length = mfdb_interval("len", c(16,27))), defaults)) igfs.SI2 <- mfdb_sample_count(mdb, c('length'), c(list( sampling_type = 'IGFS', length = mfdb_interval("len", c(27,39))), defaults)) igfs.SI3 <- mfdb_sample_count(mdb, c( 'length'), c(list( sampling_type = 'IGFS', length = mfdb_interval("len", c(39,140))), defaults)) gadget_dir_write(gd, gadget_likelihood_component("surveyindicies", name = "si.gp1a", weight = 1, data = igfs.SI1[[1]], fittype = 'fixedslopeloglinearfit', slope=1, stocknames = c("codimm"))) gadget_dir_write(gd, gadget_likelihood_component("surveyindicies", name = "si.gp2a", weight = 1, data = igfs.SI2[[1]], fittype = 'fixedslopeloglinearfit', slope=1, stocknames = c("codimm","codmat"))) gadget_dir_write(gd, gadget_likelihood_component("surveyindicies", name = "si.gp3a", weight = 1, data = igfs.SI3[[1]], fittype = 'fixedslopeloglinearfit', slope=1, stocknames = c("codimm","codmat"))) # For plots from Bjarki # ggplot(seine.ldist,aes(length,count))+geom_line() + facet_wrap(~year+month) # > 
ggplot(seine.ldist,aes(length,count))+geom_line() + facet_wrap(~year+month,scale='free_y') # > ggplot(subset(seine.ldist,year>2010),aes(length,count))+geom_line() + facet_wrap(~year+month,scale='free_y') #out$catchdist.fleets %>% filter(name=='catches.agedist') %>% group_by(year,step,age) %>% summarise(o.ml = sum(observed*avg.length)/sum(observed), p.ml=sum(predicted*avg.length)/sum(predicted)) %>% ggplot(aes(age,o.ml)) + geom_point() #Para ver la distribuci?n original #out$catchdist.fleets %>% filter(name=='ldist.pelago') %>% ggplot(aes(age,number.x))+geom_point()+facet_wrap(~year+step)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/child.R
\docType{data}
\name{child}
\alias{child}
\title{child}
\format{
observations of 8 variables for 50 states
}
\source{
Meier, K.J. and Keiser, L.R. (1996). Public Administration as a Science of
the Artificial: A Method for Prescription, Public Administration Review 56,
459-466.
}
\usage{
child
}
\description{
Child Support Collection Policies from 50 states from 1982-1991. See page 166
}
\details{
The variables included in the dataset are:
\itemize{
\item\code{SCCOLL} Change in Child Support collections
\item\code{ACES} Chapters per Population
\item\code{INSTABIL} Policy Instability
\item\code{AAMBIG} Policy Ambiguity
\item\code{CSTAFF} Change in Agency Staffing
\item\code{ARD} State Divorce Rate
\item\code{ASLACK} Organizational Slack
\item\code{AEXPEND} State Level Expenditures
}
}
/man/child.Rd
no_license
cran/BaM
R
false
true
881
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/child.R \docType{data} \name{child} \alias{child} \title{child} \format{ observations of 8 variables for 50 states } \source{ Meier, K.J. and Keisler, L.R. (1996). Public Administration as a Science of the Artificial: A Method for Prescription, Public Administration Review 56, 459-466. } \usage{ child } \description{ Child Support Collection Policies from 50 states from 1982-1991. See page 166 } \details{ The variables included in the dataset are: \itemize{ \item\code{SCCOLL} Change in Child Support collections \item\code{ACES} Chapters per Population \item\code{INSTABIL} Policy Instability \item\code{AAMBIG} Policy Ambiguity \item\code{CSTAFF} Change in Agency Staffing \item\code{ARD} State Divorce Rate \item\code{ASLACK} Organizational Slack \item\code{AEXPEND} State Level Expenditures } }
#' Filter a DESeqDataSet by per-gene count range
#'
#' Keeps only the rows (genes) whose minimum count across samples exceeds
#' `low` and whose maximum count stays below `high`.
#'
#' @param dds  A DESeqDataSet (anything `counts()` accepts) — assumed to
#'   carry a count matrix with genes in rows; TODO confirm against callers.
#' @param low  Lower bound on the per-gene minimum count. If `NULL` or not
#'   supplied, defaults to `max(10, 10th percentile of all counts)`.
#' @param high Upper bound on the per-gene maximum count. If `NULL` or not
#'   supplied, defaults to `min(1e4, 99th percentile of all counts)`.
#' @return The input object subset to the rows passing both bounds.
dds_filter <- function(dds, low = 10, high = 1e4) {
  cnts <- counts(dds)
  # Per-gene extremes across samples.
  rmin <- apply(cnts, 1, min)
  rmax <- apply(cnts, 1, max)
  # Derive data-driven bounds when the caller did not pin them down.
  if (is.null(low) || missing(low)) {
    low <- max(10, quantile(cnts, probs = 0.10))
  }
  if (is.null(high) || missing(high)) {
    high <- min(1e4, quantile(cnts, probs = 0.99))
  }
  # BUG FIX: the original compared against the literal 10 here, so the
  # `low` argument (and the quantile-derived default above) was silently
  # ignored. Use `low`, as the signature promises.
  chosen <- rmin > low & rmax < high
  dds_chosen <- dds[chosen, ]
  return(dds_chosen)
}
# dds2 <- dds_filter(dds)
# length(dds2) / length(dds)
# 0.5644346
/scripts/dds_filter.R
no_license
fengwangjiang/ngs_banana_disease
R
false
false
551
r
dds_filter <- function(dds, low=10, high=1e4) { cnts <- counts(dds) rmin <- apply(cnts, 1, min) rmax <- apply(cnts, 1, max) if (is.null(low) || missing(low)) { low = max(10, quantile(cnts, probs=0.10)) } if (is.null(high) || missing(high)) { high <- min(1e4, quantile(cnts, probs=0.99)) } chosen <- rmin > 10 & rmax < high dds_chosen <- dds[chosen, ] return(dds_chosen) } # dds2 <- dds_filter(dds) # length(dds2) / length(dds) # 0.5644346
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/kinesis_operations.R \name{kinesis_deregister_stream_consumer} \alias{kinesis_deregister_stream_consumer} \title{To deregister a consumer, provide its ARN} \usage{ kinesis_deregister_stream_consumer( StreamARN = NULL, ConsumerName = NULL, ConsumerARN = NULL ) } \arguments{ \item{StreamARN}{The ARN of the Kinesis data stream that the consumer is registered with. For more information, see \href{https://docs.aws.amazon.com/IAM/latest/UserGuide/reference-arns.html#arn-syntax-kinesis-streams}{Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces}.} \item{ConsumerName}{The name that you gave to the consumer.} \item{ConsumerARN}{The ARN returned by Kinesis Data Streams when you registered the consumer. If you don't know the ARN of the consumer that you want to deregister, you can use the ListStreamConsumers operation to get a list of the descriptions of all the consumers that are currently registered with a given data stream. The description of a consumer contains its ARN.} } \description{ To deregister a consumer, provide its ARN. Alternatively, you can provide the ARN of the data stream and the name you gave the consumer when you registered it. You may also provide all three parameters, as long as they don't conflict with each other. If you don't know the name or ARN of the consumer that you want to deregister, you can use the \code{\link[=kinesis_list_stream_consumers]{list_stream_consumers}} operation to get a list of the descriptions of all the consumers that are currently registered with a given data stream. The description of a consumer contains its name and ARN. See \url{https://www.paws-r-sdk.com/docs/kinesis_deregister_stream_consumer/} for full documentation. } \keyword{internal}
/cran/paws.analytics/man/kinesis_deregister_stream_consumer.Rd
permissive
paws-r/paws
R
false
true
1,818
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/kinesis_operations.R \name{kinesis_deregister_stream_consumer} \alias{kinesis_deregister_stream_consumer} \title{To deregister a consumer, provide its ARN} \usage{ kinesis_deregister_stream_consumer( StreamARN = NULL, ConsumerName = NULL, ConsumerARN = NULL ) } \arguments{ \item{StreamARN}{The ARN of the Kinesis data stream that the consumer is registered with. For more information, see \href{https://docs.aws.amazon.com/IAM/latest/UserGuide/reference-arns.html#arn-syntax-kinesis-streams}{Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces}.} \item{ConsumerName}{The name that you gave to the consumer.} \item{ConsumerARN}{The ARN returned by Kinesis Data Streams when you registered the consumer. If you don't know the ARN of the consumer that you want to deregister, you can use the ListStreamConsumers operation to get a list of the descriptions of all the consumers that are currently registered with a given data stream. The description of a consumer contains its ARN.} } \description{ To deregister a consumer, provide its ARN. Alternatively, you can provide the ARN of the data stream and the name you gave the consumer when you registered it. You may also provide all three parameters, as long as they don't conflict with each other. If you don't know the name or ARN of the consumer that you want to deregister, you can use the \code{\link[=kinesis_list_stream_consumers]{list_stream_consumers}} operation to get a list of the descriptions of all the consumers that are currently registered with a given data stream. The description of a consumer contains its name and ARN. See \url{https://www.paws-r-sdk.com/docs/kinesis_deregister_stream_consumer/} for full documentation. } \keyword{internal}
## Project 1
## Set Up

# Read the raw power-consumption data; "?" marks missing values.
dataraw <- read.table("household_power_consumption.txt", sep = ";",
                      header = TRUE, na.strings = "?")

# Combine Date and Time into a single date-time column.
# FIX: convert to POSIXct immediately. `strptime()` returns POSIXlt, which
# is a list-like class and a known footgun as a data.frame column; POSIXct
# is the recommended date-time class for data frames. This also removes the
# redundant `as.POSIXlt()` re-coercion the original did before plotting.
dataraw$DateTime <- as.POSIXct(strptime(paste(dataraw$Date, dataraw$Time),
                                        format = "%d/%m/%Y %H:%M:%S"))

# Keep only the two days of interest (2007-02-01 and 2007-02-02) and the
# columns used in the plots.
seldata <- subset(dataraw,
                  DateTime >= "2007-02-01 00:00:00" &
                  DateTime <= "2007-02-02 23:59:59",
                  select = c(DateTime, Global_active_power,
                             Global_reactive_power, Voltage,
                             Global_intensity, Sub_metering_1,
                             Sub_metering_2, Sub_metering_3))

## Plot 4

png(file = "plot4.png")   ## Open PNG device

omfrow <- par("mfrow")    # make a copy of default parameters
par(mfrow = c(2, 2))      # 2 x 2 panel layout

# Panel 1: Global active power over time.
plot(seldata$DateTime, seldata$Global_active_power, xlab = "",
     ylab = "Global Active Power", type = "n")
lines(seldata$DateTime, seldata$Global_active_power, type = "l")

# Panel 2: Voltage over time.
plot(seldata$DateTime, seldata$Voltage, xlab = "datetime",
     ylab = "Voltage", type = "n")
lines(seldata$DateTime, seldata$Voltage, type = "l")

# Panel 3: the three sub-metering series overlaid, with a legend.
plot(seldata$DateTime, seldata$Sub_metering_1, xlab = "",
     ylab = "Energy sub metering", type = "n")
with(seldata, {
  lines(DateTime, Sub_metering_1)
  lines(DateTime, Sub_metering_2, col = "red")
  lines(DateTime, Sub_metering_3, col = "blue")
})
legend("topright",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c("black", "red", "blue"), lwd = 1, bty = "n", cex = 0.9)

# Panel 4: Global reactive power over time.
plot(seldata$DateTime, seldata$Global_reactive_power, xlab = "datetime",
     ylab = "Global_reactive_power", type = "n")
lines(seldata$DateTime, seldata$Global_reactive_power, type = "l")

par(mfrow = omfrow)   # restored old parameters back

dev.off()   ## Close the PNG file device and save the file
/plot4.R
no_license
hmenag1/ExData_Plotting1
R
false
false
1,836
r
## Project 1 ## Set Up dataraw <- read.table("household_power_consumption.txt", sep=";", header=TRUE, na.strings = "?") dataraw$DateTime <- strptime(paste(dataraw$Date, dataraw$Time), format="%d/%m/%Y %H:%M:%S") seldata <- subset(dataraw, DateTime>="2007-02-01 00:00:00" & DateTime <="2007-02-02 23:59:59", select= c(DateTime, Global_active_power, Global_reactive_power, Voltage, Global_intensity, Sub_metering_1, Sub_metering_2, Sub_metering_3)) ## Plot 4 # Convert the DateTime field to a Posix type seldata$DateTime <- as.POSIXlt(seldata$DateTime) png(file = "plot4.png") ## Open PNG device omfrow <- par("mfrow") # make a copy of default parameters par(mfrow=c(2,2)) # Change parameters plot(seldata$DateTime, seldata$Global_active_power, xlab="", ylab="Global Active Power", type="n") lines(seldata$DateTime, seldata$Global_active_power,type="l") plot(seldata$DateTime, seldata$Voltage, xlab="datetime", ylab="Voltage", type="n") lines(seldata$DateTime, seldata$Voltage, type="l") plot(seldata$DateTime, seldata$Sub_metering_1, xlab="", ylab="Energy sub metering", type="n") with(seldata, { lines(DateTime, Sub_metering_1) lines(DateTime, Sub_metering_2, col="red") lines(DateTime, Sub_metering_3, col="blue") }) legend("topright", legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3" ), col=c("black","red","blue"), lwd=1, bty="n", cex=0.9) plot(seldata$DateTime, seldata$Global_reactive_power, xlab="datetime", ylab="Global_reactive_power", type="n") lines(seldata$DateTime, seldata$Global_reactive_power, type="l") par(mfrow=omfrow) # restored old parameters back dev.off() ## Close the PNG file device and save the file
# Scotland in Context: helper functions.
#
# These helpers build Lexis-surface plots: lattice `levelplot`s (shaded
# surfaces), optionally overlaid with `contourplot`s via latticeExtra's
# `+` operator on trellis objects. They expect long-format mortality data
# with columns country, year, age, sex, death_count, population_count
# (smoothed/overall inputs carry precomputed cmr / lg_cmr), and rely on
# dplyr/tidyr verbs, plyr::mapvalues, RColorBrewer palettes, and a
# file-local smooth_var() helper that kernel-smooths a column before
# contouring. (Dependency assumptions inferred from usage — confirm
# against the project's load script.)

# Comparative level plot (CLP) lattice: for each country, the difference
# between its log10 mortality rate and the overall (pooled) log10 rate,
# faceted by country and sex.
#
# DTA          country-level counts (death_count, population_count).
# DTA_overall  pooled data with a cmr column.
# CODES        named vector mapping country codes to display names.
# ADD_CONTOURS when TRUE, overlay a smoothed zero-difference contour.
# AT           breakpoints for the colour key.
make_clp_lattice <- function(DTA, DTA_overall, CODES,
                             ASPECT = "iso",
                             YEAR_RANGE = c(1900, 2010),
                             AGE_RANGE = c(0, 90),
                             COL.REGIONS = colorRampPalette(rev(brewer.pal(6, "RdBu")))(64),
                             ADD_CONTOURS = F,
                             AT = seq(from = -1.2, to = 1.2, by = 0.2)
                             ) {
  # Country-specific crude mortality rates.
  tmp1 <- DTA %>%
    mutate(cmr = death_count / population_count) %>%
    select(country, year, age, sex, cmr)
  # Pooled rates to compare against.
  tmp2 <- DTA_overall %>%
    select(year, age, sex, overall_cmr = cmr)
  tmp3 <- tmp1 %>% left_join(tmp2)

  # Difference of log10 rates: positive means worse than overall.
  dif_dta <- tmp3 %>%
    filter(!is.na(cmr) & !is.na(overall_cmr)) %>%
    mutate(dif_lg_cmr = log(cmr, base = 10) - log(overall_cmr, base = 10))
  rm(tmp1, tmp2, tmp3)

  lev_part <- dif_dta %>%
    filter(
      sex != "total" &
        age >= AGE_RANGE[1] & age <= AGE_RANGE[2] &
        year >= YEAR_RANGE[1] & year <= YEAR_RANGE[2]
    ) %>%
    mutate(
      country = mapvalues(country, from = CODES, to = names(CODES))
    ) %>%
    levelplot(
      dif_lg_cmr ~ year * age | country + sex,
      data = ., region = T,
      ylab = "Age in years", xlab = "Year",
      at = AT,
      col.regions = COL.REGIONS,
      scales = list(alternating = 3),
      main = NULL,
      aspect = ASPECT,
      xlim = YEAR_RANGE,
      par.settings = list(strip.background = list(col = "lightgrey"))
    )

  if (ADD_CONTOURS) {
    # Smooth before contouring so the zero line is not jagged.
    # (Moved inside this branch: the original smoothed unconditionally,
    # doing the work even when no contours were requested.)
    dif_dta_blurred <- dif_dta %>%
      smooth_var(
        dta = .,
        group_vars = c("country", "sex"),
        smooth_var = "dif_lg_cmr",
        smooth_par = 1.4
      )
    zero_part <- dif_dta_blurred %>%
      filter(
        sex != "total" &
          age >= AGE_RANGE[1] & age <= AGE_RANGE[2] &
          year >= YEAR_RANGE[1] & year <= YEAR_RANGE[2]
      ) %>%
      mutate(
        country = mapvalues(country, from = CODES, to = names(CODES))
      ) %>%
      contourplot(
        dif_lg_cmr ~ year + age | country + sex,
        data = ., region = F,
        ylab = "", xlab = "",
        scales = list(NULL),
        at = 0, lwd = 1, labels = F,
        aspect = ASPECT,
        xlim = YEAR_RANGE,
        main = NULL
      )
    output <- lev_part + zero_part
  } else {
    output <- lev_part
  }
  return(output)
}

# CLP for one country (SELECTION) against the overall rates, faceted by
# sex. Optionally overlays smoothed contours at 0, +/-0.25 and +/-0.5
# log10 units (increasing line widths).
make_single_clp <- function(DTA, DTA_overall, SELECTION,
                            ASPECT = "iso",
                            AGE_RANGE = c(0, 90),
                            YEAR_RANGE = c(1900, 2010),
                            ADD_CONTOURS = F,
                            AT = seq(from = -1.2, to = 1.2, by = 0.2)
                            ) {
  tmp1 <- DTA %>%
    filter(country == SELECTION) %>%
    mutate(cmr = death_count / population_count) %>%
    mutate(country = "specific") %>%
    select(country, year, age, sex, cmr)
  tmp2 <- DTA_overall %>%
    mutate(country = "overall") %>%
    select(country, year, age, sex, cmr)
  tmp3 <- bind_rows(tmp1, tmp2)
  rm(tmp1, tmp2)

  # One column per group, then the log10 difference (specific - overall).
  dif_to_overall <- tmp3 %>%
    mutate(lg_cmr = log(cmr, base = 10)) %>%
    select(-cmr) %>%
    spread(key = country, value = lg_cmr) %>%
    filter(!is.na(specific)) %>%
    mutate(dif = specific - overall) %>%
    select(year, age, sex, dif)

  lev_part <- dif_to_overall %>%
    filter(
      sex != "total" &
        age >= AGE_RANGE[1] & age <= AGE_RANGE[2] &
        year >= YEAR_RANGE[1] & year <= YEAR_RANGE[2]
    ) %>%
    levelplot(
      dif ~ year * age | sex,
      data = ., region = T,
      xlim = YEAR_RANGE,
      ylab = "Age in years", xlab = "Year",
      aspect = ASPECT,
      at = AT,
      col.regions = colorRampPalette(rev(brewer.pal(6, "RdBu")))(64),
      scales = list(alternating = 3),
      main = NULL,
      par.settings = list(strip.background = list(col = "lightgrey"))
    )

  if (ADD_CONTOURS) {
    dif_blurred <- dif_to_overall %>%
      smooth_var(
        dta = .,
        group_vars = "sex",
        smooth_var = "dif",
        smooth_par = 1.4
      )
    # Build one contour layer at the given levels/line width. Extracted
    # because the original repeated this block three times verbatim.
    contour_at <- function(at, lwd) {
      dif_blurred %>%
        filter(
          sex != "total" &
            age >= AGE_RANGE[1] & age <= AGE_RANGE[2] &
            year >= YEAR_RANGE[1] & year <= YEAR_RANGE[2]
        ) %>%
        contourplot(
          dif ~ year + age | sex,
          data = ., region = F,
          ylab = "", xlab = "",
          scales = list(NULL),
          at = at, lwd = lwd, labels = F,
          aspect = ASPECT,
          xlim = YEAR_RANGE,
          main = NULL
        )
    }
    zero_part    <- contour_at(at = 0, lwd = 1)
    quarter_part <- contour_at(at = c(-0.25, 0.25), lwd = 1.5)
    half_part    <- contour_at(at = c(-0.5, 0.5), lwd = 2.0)
    output <- lev_part + zero_part + quarter_part + half_part
  } else {
    output <- lev_part
  }
  return(output)
}

# Shaded contour plot (SCP) lattice: log10 mortality surfaces per country
# and sex — shading from DTA (raw counts), contours from DTA_smoothed.
make_scp_lattice <- function(DTA, DTA_smoothed, CODES,
                             ASPECT = "iso",
                             AGE_RANGE = c(0, 90),
                             YEAR_RANGE = c(1900, 2010),
                             COL.REGIONS = colorRampPalette(brewer.pal(6, "Reds"))(200)
                             ) {
  shade_part <- DTA %>%
    filter(
      sex != "total" &
        age >= AGE_RANGE[1] & age <= AGE_RANGE[2] &
        year >= YEAR_RANGE[1] & year <= YEAR_RANGE[2]
    ) %>%
    mutate(
      cmr = death_count / population_count,
      lg_cmr = log(cmr, base = 10),
      country = mapvalues(country, from = CODES, to = names(CODES))
    ) %>%
    levelplot(
      lg_cmr ~ year * age | country + sex,
      data = ., region = T,
      par.strip.text = list(cex = 1.4, fontface = "bold"),
      ylab = list(label = "Age in years", cex = 1.4),
      xlab = list(label = "Year", cex = 1.4),
      cex = 1.4,
      col.regions = COL.REGIONS,
      main = NULL,
      xlim = YEAR_RANGE,
      aspect = ASPECT,
      scales = list(
        x = list(cex = 1.4),
        y = list(cex = 1.4),
        alternating = 3
      ),
      par.settings = list(strip.background = list(col = "lightgrey"))
    )

  contour_part <- DTA_smoothed %>%
    filter(
      sex != "total" &
        age >= AGE_RANGE[1] & age <= AGE_RANGE[2] &
        year >= YEAR_RANGE[1] & year <= YEAR_RANGE[2]
    ) %>%
    mutate(
      country = mapvalues(country, from = CODES, to = names(CODES))
    ) %>%
    contourplot(
      lg_cmr ~ year + age | country + sex,
      data = ., region = F,
      ylab = "", xlab = "",
      scales = list(NULL),
      cuts = 25,
      xlim = YEAR_RANGE,
      aspect = ASPECT,
      col = "black",
      labels = list(cex = 1.2),
      main = NULL
    )

  output <- shade_part + contour_part
  return(output)
}

# SCP for the pooled (overall) data, faceted by sex only.
#
# BUG FIX: the original ignored AGE_RANGE and YEAR_RANGE — it hard-coded
# year 1900-2010 / age <= 90 in the shade layer, and an inconsistent
# year <= 2008 with no sex filter in the contour layer. Both layers now
# honour the arguments and apply the same filter, consistent with the
# other helpers; the defaults reproduce the old shading extent. An
# explicit return() is also added (the original returned the value of the
# final assignment invisibly).
make_scp_overall <- function(DTA_unsmoothed, DTA_smoothed,
                             ASPECT = "iso",
                             AGE_RANGE = c(0, 90),
                             YEAR_RANGE = c(1900, 2010),
                             COL.REGIONS = colorRampPalette(brewer.pal(6, "Reds"))(200)
                             ) {
  shade_part <- DTA_unsmoothed %>%
    filter(
      sex != "total" &
        age >= AGE_RANGE[1] & age <= AGE_RANGE[2] &
        year >= YEAR_RANGE[1] & year <= YEAR_RANGE[2]
    ) %>%
    mutate(
      cmr = death_count / population_count,
      lg_cmr = log(cmr, base = 10)
    ) %>%
    levelplot(
      lg_cmr ~ year * age | sex,
      data = ., region = T,
      par.strip.text = list(cex = 1.4, fontface = "bold"),
      ylab = list(label = "Age in years", cex = 1.4),
      xlab = list(label = "Year", cex = 1.4),
      cex = 1.4,
      col.regions = COL.REGIONS,
      main = NULL,
      aspect = ASPECT,
      xlim = YEAR_RANGE,
      scales = list(
        x = list(cex = 1.4),
        y = list(cex = 1.4),
        alternating = 3
      ),
      par.settings = list(strip.background = list(col = "lightgrey"))
    )

  contour_part <- DTA_smoothed %>%
    filter(
      sex != "total" &
        age >= AGE_RANGE[1] & age <= AGE_RANGE[2] &
        year >= YEAR_RANGE[1] & year <= YEAR_RANGE[2]
    ) %>%
    contourplot(
      lg_cmr ~ year + age | sex,
      data = ., region = F,
      ylab = "", xlab = "",
      aspect = ASPECT,
      xlim = YEAR_RANGE,
      scales = list(NULL),
      cuts = 25,
      col = "black",
      labels = list(cex = 1.2),
      main = NULL
    )

  output <- shade_part + contour_part
  return(output)
}

# SCP for a single country (COUNTRY), faceted by sex.
make_scp <- function(DTA_unsmoothed, DTA_smoothed, COUNTRY,
                     ASPECT = "iso",
                     AGE_RANGE = c(0, 90),
                     YEAR_RANGE = c(1900, 2010),
                     COL.REGIONS = colorRampPalette(brewer.pal(6, "Reds"))(200)
                     ) {
  shade_part <- DTA_unsmoothed %>%
    filter(
      country == COUNTRY &
        year >= YEAR_RANGE[1] & year <= YEAR_RANGE[2] &
        age >= AGE_RANGE[1] & age <= AGE_RANGE[2] &
        sex != "total"
    ) %>%
    mutate(
      cmr = death_count / population_count,
      lg_cmr = log(cmr, base = 10)
    ) %>%
    levelplot(
      lg_cmr ~ year * age | sex,
      data = ., region = T,
      par.strip.text = list(cex = 1.4, fontface = "bold"),
      ylab = list(label = "Age in years", cex = 1.4),
      xlab = list(label = "Year", cex = 1.4),
      cex = 1.4,
      aspect = ASPECT,
      col.regions = COL.REGIONS,
      main = NULL,
      xlim = YEAR_RANGE,
      scales = list(
        x = list(cex = 1.4),
        y = list(cex = 1.4),
        alternating = 3
      ),
      par.settings = list(strip.background = list(col = "lightgrey"))
    )

  contour_part <- DTA_smoothed %>%
    filter(
      country == COUNTRY &
        year >= YEAR_RANGE[1] & year <= YEAR_RANGE[2] &
        age >= AGE_RANGE[1] & age <= AGE_RANGE[2] &
        sex != "total"
    ) %>%
    contourplot(
      lg_cmr ~ year + age | sex,
      data = ., region = F,
      ylab = "", xlab = "",
      xlim = YEAR_RANGE,
      scales = list(NULL),
      cuts = 25,
      aspect = ASPECT,
      col = "black",
      labels = list(cex = 1.2),
      main = NULL
    )

  # Explicit return; the original relied on the invisible value of the
  # final assignment.
  output <- shade_part + contour_part
  return(output)
}

# Collapse counts over country: totals by age, sex and year, plus crude
# mortality rate and its log10.
grouper <- function(DTA) {
  output <- DTA %>%
    group_by(age, sex, year) %>%
    summarise(
      death_count = sum(death_count),
      population_count = sum(population_count)
    ) %>%
    mutate(
      cmr = death_count / population_count,
      lg_cmr = log(cmr, base = 10)
    )
  return(output)
}

# CLP comparing two countries drawn from the same dataset: plots
# log10(rate of GROUP_B) - log10(rate of GROUP_A), faceted by sex.
# SMOOTH_PAR controls the smoothing applied before the zero contour.
make_two_country_clp <- function(DTA, GROUP_A, GROUP_B,
                                 YEAR_RANGE = c(1900, 2010),
                                 AGE_RANGE = c(0, 90),
                                 ASPECT = "iso",
                                 SMOOTH_PAR = 1.4,
                                 COL.REGIONS = colorRampPalette(rev(brewer.pal(6, "RdBu")))(64),
                                 ADD_CONTOURS = F,
                                 AT = seq(from = -1.2, to = 1.2, by = 0.2)
                                 ) {
  tmp1 <- DTA %>%
    filter(country == GROUP_A) %>%
    mutate(cmr = death_count / population_count) %>%
    mutate(country = "group_a") %>%
    select(country, year, age, sex, cmr)
  tmp2 <- DTA %>%
    filter(country == GROUP_B) %>%
    mutate(cmr = death_count / population_count) %>%
    mutate(country = "group_b") %>%
    select(country, year, age, sex, cmr)
  tmp3 <- bind_rows(tmp1, tmp2)
  rm(tmp1, tmp2)

  # One column per group, then the log10 difference (B relative to A).
  dif_b_to_a <- tmp3 %>%
    mutate(lg_cmr = log(cmr, base = 10)) %>%
    select(-cmr) %>%
    spread(key = country, value = lg_cmr) %>%
    filter(!is.na(group_a) & !is.na(group_b)) %>%
    mutate(dif = group_b - group_a) %>%
    select(year, age, sex, dif)

  lev_part <- dif_b_to_a %>%
    filter(
      sex != "total" &
        age >= AGE_RANGE[1] & age <= AGE_RANGE[2] &
        year >= YEAR_RANGE[1] & year <= YEAR_RANGE[2]
    ) %>%
    levelplot(
      dif ~ year * age | sex,
      data = ., region = T,
      xlim = YEAR_RANGE,
      ylab = "Age in years", xlab = "Year",
      aspect = ASPECT,
      at = AT,
      col.regions = COL.REGIONS,
      scales = list(alternating = 3),
      main = NULL,
      par.settings = list(strip.background = list(col = "lightgrey"))
    )

  if (ADD_CONTOURS) {
    dif_blurred <- dif_b_to_a %>%
      smooth_var(
        dta = .,
        group_vars = "sex",
        smooth_var = "dif",
        smooth_par = SMOOTH_PAR
      )
    zero_part <- dif_blurred %>%
      filter(
        sex != "total" &
          age >= AGE_RANGE[1] & age <= AGE_RANGE[2] &
          year >= YEAR_RANGE[1] & year <= YEAR_RANGE[2]
      ) %>%
      contourplot(
        dif ~ year + age | sex,
        data = ., region = F,
        ylab = "", xlab = "",
        scales = list(NULL),
        at = 0, lwd = 1, labels = F,
        xlim = YEAR_RANGE,
        aspect = ASPECT,
        main = NULL
      )
    output <- lev_part + zero_part
  } else {
    output <- lev_part
  }
  return(output)
}
/scripts/scotland_in_context__helper_functions.R
no_license
rickwahs/Lexis_Surface_Difference_Maps
R
false
false
13,558
r
# Scotland in Context Helper functions make_clp_lattice <- function(DTA, DTA_overall, CODES, ASPECT = "iso", YEAR_RANGE = c(1900, 2010), AGE_RANGE = c(0, 90), COL.REGIONS = colorRampPalette(rev(brewer.pal(6, "RdBu")))(64), ADD_CONTOURS = F, AT = seq(from= -1.2, to = 1.2, by=0.2) ){ tmp1 <- DTA %>% mutate(cmr = death_count/ population_count) %>% select(country, year, age, sex, cmr) tmp2 <- DTA_overall %>% select(year, age, sex, overall_cmr = cmr) tmp3 <- tmp1 %>% left_join(tmp2) dif_dta <- tmp3 %>% filter(!is.na(cmr) &!is.na(overall_cmr)) %>% mutate(dif_lg_cmr = log(cmr, base = 10) - log(overall_cmr, base = 10)) rm(tmp1, tmp2, tmp3) dif_dta_blurred <- dif_dta %>% smooth_var( dta=., group_vars= c("country", "sex"), smooth_var="dif_lg_cmr", smooth_par=1.4 ) lev_part <- dif_dta %>% filter( sex!="total" & age >= AGE_RANGE[1] & age <= AGE_RANGE[2] & year >= YEAR_RANGE[1] & year <= YEAR_RANGE[2] ) %>% mutate( country = mapvalues( country, from=CODES, to=names(CODES) ) ) %>% levelplot( dif_lg_cmr ~ year * age | country + sex, data=., region=T, ylab="Age in years", xlab="Year", at = AT, col.regions = COL.REGIONS, scales=list(alternating=3), main=NULL, aspect= ASPECT, xlim=YEAR_RANGE, par.settings=list(strip.background=list(col="lightgrey")) ) if (ADD_CONTOURS){ zero_part <- dif_dta_blurred %>% filter(sex!="total" & age >= AGE_RANGE[1] & age <= AGE_RANGE[2] & year >= YEAR_RANGE[1] & year <= YEAR_RANGE[2] ) %>% mutate( country = mapvalues( country, from=CODES, to=names(CODES) ) ) %>% contourplot( dif_lg_cmr ~ year + age | country + sex, data=., region=F, ylab="", xlab="", scales=list(NULL), at=0, lwd=1, labels = F, aspect = ASPECT, xlim = YEAR_RANGE, main = NULL ) output <- lev_part + zero_part } else {output <- lev_part} return(output) } make_single_clp <- function(DTA, DTA_overall, SELECTION, ASPECT = "iso", AGE_RANGE = c(0, 90), YEAR_RANGE = c(1900, 2010), ADD_CONTOURS = F, AT = seq(from= -1.2, to = 1.2, by=0.2) ){ tmp1 <- DTA %>% filter(country == SELECTION) %>% 
mutate(cmr = death_count/ population_count) %>% mutate(country = "specific") %>% select(country, year, age, sex, cmr) tmp2 <- DTA_overall %>% mutate(country = "overall" ) %>% select(country, year, age, sex, cmr) tmp3 <- bind_rows(tmp1, tmp2) rm(tmp1, tmp2) dif_to_overall <- tmp3 %>% mutate(lg_cmr = log(cmr, base=10)) %>% select(-cmr) %>% spread(key=country, value=lg_cmr) %>% filter(!is.na(specific)) %>% mutate(dif = specific - overall) %>% select(year, age, sex, dif) lev_part <- dif_to_overall %>% filter( sex!="total" & age >= AGE_RANGE[1] & age <= AGE_RANGE[2] & year >= YEAR_RANGE[1] & year <= YEAR_RANGE[2] ) %>% levelplot( dif ~ year * age | sex, data=., region=T, xlim=YEAR_RANGE, ylab="Age in years", xlab="Year", aspect = ASPECT, at = AT, col.regions = colorRampPalette(rev(brewer.pal(6, "RdBu")))(64), scales=list(alternating=3), main=NULL, par.settings=list(strip.background=list(col="lightgrey")) ) if (ADD_CONTOURS){ dif_blurred <- dif_to_overall %>% smooth_var( dta=., group_vars= "sex", smooth_var="dif", smooth_par=1.4 ) zero_part <- dif_blurred %>% filter(sex!="total" & age >= AGE_RANGE[1] & age <= AGE_RANGE[2] & year >= YEAR_RANGE[1] & year <= YEAR_RANGE[2] ) %>% contourplot( dif ~ year + age | sex, data=., region=F, ylab="", xlab="", scales=list(NULL), at=0, lwd=1, labels=F, aspect = ASPECT, xlim=YEAR_RANGE, main=NULL ) quarter_part <- dif_blurred %>% filter(sex!="total" & age >= AGE_RANGE[1] & age <= AGE_RANGE[2] & year >= YEAR_RANGE[1] & year <= YEAR_RANGE[2] ) %>% contourplot( dif ~ year + age | sex, data = . , region = F, ylab = "", xlab = "", scales = list (NULL), at = c(-0.25, 0.25), lwd = 1.5, labels = F, aspect = ASPECT, xlim = YEAR_RANGE, main = NULL ) half_part <- dif_blurred %>% filter(sex!="total" & age >= AGE_RANGE[1] & age <= AGE_RANGE[2] & year >= YEAR_RANGE[1] & year <= YEAR_RANGE[2] ) %>% contourplot( dif ~ year + age | sex, data = . 
, region = F, ylab = "", xlab = "",
  scales = list(NULL), at = c(-0.5, 0.5), lwd = 2.0, labels = F,
  aspect = ASPECT, xlim = YEAR_RANGE,
  main = NULL
  )
    output <- lev_part + zero_part + quarter_part + half_part
  } else {output <- lev_part}
  return(output)
}
# NOTE(review): the fragment above completes a contour-overlay call whose
# opening lines are outside this chunk; it is kept verbatim.

# Shaded level plot of log10 central mortality rates with a contour overlay,
# facetted by country and sex (lattice).
#
# DTA          : unsmoothed counts (death_count, population_count, age, sex,
#                year, country)
# DTA_smoothed : smoothed data carrying a lg_cmr column, used for contours
# CODES        : named vector; values are country codes in the data, names
#                are display labels (applied via plyr::mapvalues)
# Returns a combined lattice object (levelplot + contourplot).
make_scp_lattice <- function(DTA, DTA_smoothed, CODES,
                             ASPECT = "iso",
                             AGE_RANGE = c(0, 90),
                             YEAR_RANGE = c(1900, 2010),
                             COL.REGIONS = colorRampPalette(brewer.pal(6, "Reds"))(200)
                             ){
  shade_part <- DTA %>%
    filter(sex != "total" &
             age >= AGE_RANGE[1] & age <= AGE_RANGE[2] &
             year >= YEAR_RANGE[1] & year <= YEAR_RANGE[2]) %>%
    mutate(
      cmr = death_count / population_count,
      lg_cmr = log(cmr, base = 10),  # log10 central mortality rate
      country = mapvalues(country, from = CODES, to = names(CODES))
    ) %>%
    levelplot(
      lg_cmr ~ year * age | country + sex,
      data = ., region = T,
      par.strip.text = list(cex = 1.4, fontface = "bold"),
      ylab = list(label = "Age in years", cex = 1.4),
      xlab = list(label = "Year", cex = 1.4),
      cex = 1.4,
      col.regions = COL.REGIONS,
      main = NULL,
      xlim = YEAR_RANGE,
      aspect = ASPECT,
      scales = list(
        x = list(cex = 1.4),
        y = list(cex = 1.4),
        alternating = 3
      ),
      par.settings = list(strip.background = list(col = "lightgrey"))
    )
  contour_part <- DTA_smoothed %>%
    filter(sex != "total" &
             age >= AGE_RANGE[1] & age <= AGE_RANGE[2] &
             year >= YEAR_RANGE[1] & year <= YEAR_RANGE[2]) %>%
    mutate(
      country = mapvalues(country, from = CODES, to = names(CODES))
    ) %>%
    contourplot(
      lg_cmr ~ year + age | country + sex,
      data = ., region = F,
      ylab = "", xlab = "",
      scales = list(NULL),
      cuts = 25,
      xlim = YEAR_RANGE,
      aspect = ASPECT,
      col = "black",
      labels = list(cex = 1.2),
      main = NULL
    )
  output <- shade_part + contour_part
  return(output)
}

# As make_scp_lattice, but pooled over all countries: one shaded panel per
# sex with a smoothed contour overlay.
make_scp_overall <- function(DTA_unsmoothed, DTA_smoothed,
                             ASPECT = "iso",
                             AGE_RANGE = c(0, 90),
                             YEAR_RANGE = c(1900, 2010),
                             COL.REGIONS = colorRampPalette(brewer.pal(6, "Reds"))(200)
                             ){
  # FIX: the original hard-coded year >= 1900 & year <= 2010 & age <= 90
  # here, silently ignoring the AGE_RANGE/YEAR_RANGE arguments. The
  # parameters are used instead; defaults reproduce the old behaviour.
  shade_part <- DTA_unsmoothed %>%
    filter(year >= YEAR_RANGE[1] & year <= YEAR_RANGE[2] &
             age >= AGE_RANGE[1] & age <= AGE_RANGE[2] &
             sex != "total") %>%
    mutate(
      cmr = death_count / population_count,
      lg_cmr = log(cmr, base = 10)
    ) %>%
    levelplot(
      lg_cmr ~ year * age | sex,
      data = ., region = T,
      par.strip.text = list(cex = 1.4, fontface = "bold"),
      ylab = list(label = "Age in years", cex = 1.4),
      xlab = list(label = "Year", cex = 1.4),
      cex = 1.4,
      col.regions = COL.REGIONS,
      main = NULL,
      aspect = ASPECT,
      xlim = YEAR_RANGE,
      scales = list(
        x = list(cex = 1.4),
        y = list(cex = 1.4),
        alternating = 3
      ),
      par.settings = list(strip.background = list(col = "lightgrey"))
    )
  # NOTE(review): the contour layer keeps the original hard-coded
  # 1900-2008 / age<=90 window (no sex != "total" filter either) —
  # presumably the smoothed series ends in 2008; confirm before
  # generalising this filter too.
  contour_part <- DTA_smoothed %>%
    filter(year >= 1900 & year <= 2008 & age <= 90) %>%
    contourplot(
      lg_cmr ~ year + age | sex,
      data = ., region = F,
      ylab = "", xlab = "",
      aspect = ASPECT,
      xlim = YEAR_RANGE,
      scales = list(NULL),
      cuts = 25,
      col = "black",
      labels = list(cex = 1.2),
      main = NULL
    )
  output <- shade_part + contour_part
  # FIX: explicit return for consistency with the sibling functions; the
  # original ended on the assignment and returned its value invisibly.
  return(output)
}

# Single-country shaded-contour plot: one panel per sex for COUNTRY.
make_scp <- function(DTA_unsmoothed, DTA_smoothed, COUNTRY,
                     ASPECT = "iso",
                     AGE_RANGE = c(0, 90),
                     YEAR_RANGE = c(1900, 2010),
                     COL.REGIONS = colorRampPalette(brewer.pal(6, "Reds"))(200)
                     ){
  shade_part <- DTA_unsmoothed %>%
    filter(country == COUNTRY &
             year >= YEAR_RANGE[1] & year <= YEAR_RANGE[2] &
             age >= AGE_RANGE[1] & age <= AGE_RANGE[2] &
             sex != "total") %>%
    mutate(
      cmr = death_count / population_count,
      lg_cmr = log(cmr, base = 10)
    ) %>%
    levelplot(
      lg_cmr ~ year * age | sex,
      data = ., region = T,
      par.strip.text = list(cex = 1.4, fontface = "bold"),
      ylab = list(label = "Age in years", cex = 1.4),
      xlab = list(label = "Year", cex = 1.4),
      cex = 1.4,
      aspect = ASPECT,
      col.regions = COL.REGIONS,
      main = NULL,
      xlim = YEAR_RANGE,
      scales = list(
        x = list(cex = 1.4),
        y = list(cex = 1.4),
        alternating = 3
      ),
      par.settings = list(strip.background = list(col = "lightgrey"))
    )
  contour_part <- DTA_smoothed %>%
    filter(country == COUNTRY &
             year >= YEAR_RANGE[1] & year <= YEAR_RANGE[2] &
             age >= AGE_RANGE[1] & age <= AGE_RANGE[2] &
             sex != "total") %>%
    contourplot(
      lg_cmr ~ year + age | sex,
      data = ., region = F,
      ylab = "", xlab = "",
      xlim = YEAR_RANGE,
      scales = list(NULL),
      cuts = 25,
      aspect = ASPECT,
      col = "black",
      labels = list(cex = 1.2),
      main = NULL
    )
  output <- shade_part + contour_part
  # FIX: explicit return (original relied on the invisible assignment value).
  return(output)
}

# Collapse counts across countries to totals by age, sex and year, then
# recompute the (log10) central mortality rate on the pooled counts.
grouper <- function(DTA){
  output <- DTA %>%
    group_by(age, sex, year) %>%
    summarise(
      death_count = sum(death_count),
      population_count = sum(population_count)
    ) %>%
    mutate(
      cmr = death_count / population_count,
      lg_cmr = log(cmr, base = 10)
    )
  return(output)
}

# Comparative level plot: difference in log10 mortality between two
# countries (GROUP_B minus GROUP_A), one panel per sex, optionally with a
# zero-difference contour drawn on a smoothed copy of the surface.
make_two_country_clp <- function(DTA, GROUP_A, GROUP_B,
                                 YEAR_RANGE = c(1900, 2010),
                                 AGE_RANGE = c(0, 90),
                                 ASPECT = "iso",
                                 SMOOTH_PAR = 1.4,
                                 COL.REGIONS = colorRampPalette(rev(brewer.pal(6, "RdBu")))(64),
                                 ADD_CONTOURS = F,
                                 AT = seq(from = -1.2, to = 1.2, by = 0.2)
                                 ){
  tmp1 <- DTA %>%
    filter(country == GROUP_A) %>%
    mutate(cmr = death_count / population_count) %>%
    mutate(country = "group_a") %>%
    select(country, year, age, sex, cmr)
  tmp2 <- DTA %>%
    filter(country == GROUP_B) %>%
    mutate(cmr = death_count / population_count) %>%
    mutate(country = "group_b") %>%
    select(country, year, age, sex, cmr)
  tmp3 <- bind_rows(tmp1, tmp2)
  rm(tmp1, tmp2)
  # Wide format: one column per group, keeping only cells observed in both,
  # then take the log-rate difference B - A.
  dif_b_to_a <- tmp3 %>%
    mutate(lg_cmr = log(cmr, base = 10)) %>%
    select(-cmr) %>%
    spread(key = country, value = lg_cmr) %>%
    filter(!is.na(group_a) & !is.na(group_b)) %>%
    mutate(dif = group_b - group_a) %>%
    select(year, age, sex, dif)
  lev_part <- dif_b_to_a %>%
    filter(sex != "total" &
             age >= AGE_RANGE[1] & age <= AGE_RANGE[2] &
             year >= YEAR_RANGE[1] & year <= YEAR_RANGE[2]) %>%
    levelplot(
      dif ~ year * age | sex,
      data = ., region = T,
      xlim = YEAR_RANGE,
      ylab = "Age in years",
      xlab = "Year",
      aspect = ASPECT,
      at = AT,
      col.regions = COL.REGIONS,
      scales = list(alternating = 3),
      main = NULL,
      par.settings = list(strip.background = list(col = "lightgrey"))
    )
  if(ADD_CONTOURS){
    # Blur the difference surface before contouring so the zero line is
    # legible; smooth_var is a project helper — its contract is assumed
    # here (grouped kernel smoothing of "dif"); confirm against its source.
    dif_blurred <- dif_b_to_a %>%
      smooth_var(
        dta = .,
        group_vars = "sex",
        smooth_var = "dif",
        smooth_par = SMOOTH_PAR
      )
    zero_part <- dif_blurred %>%
      filter(sex != "total" &
               age >= AGE_RANGE[1] & age <= AGE_RANGE[2] &
               year >= YEAR_RANGE[1] & year <= YEAR_RANGE[2]) %>%
      contourplot(
        dif ~ year + age | sex,
        data = ., region = F,
        ylab = "", xlab = "",
        scales = list(NULL),
        at = 0,     # only the zero-difference contour
        lwd = 1,
        labels = F,
        xlim = YEAR_RANGE,
        aspect = ASPECT,
        main = NULL
      )
    output <- lev_part + zero_part
  } else {output <- lev_part}
  return(output)
}
# Python script to load data ----------------------------------------------
# Here it is provided as a reference
# import pandas as pd
# import wbdata as wd
#
# # define a period of time
# start_year = 2013
# end_year = 2017
#
# # list of countries under inflation targeting monetary policy regime
# countries = ['AM', 'AU', 'AT', 'BE', 'BG', 'BR', 'CA', 'CH', 'CL', 'CO',
#              'CY', 'CZ', 'DE', 'DK', 'XC', 'ES', 'EE', 'FI', 'FR', 'GB',
#              'GR', 'HU', 'IN', 'IE', 'IS', 'IL', 'IT', 'JM', 'JP', 'KR',
#              'LK', 'LT', 'LU', 'LV', 'MA', 'MD', 'MX', 'MT', 'MY', 'NL',
#              'NO', 'NZ', 'PK', 'PE', 'PH', 'PL', 'PT', 'RO', 'RU', 'SG',
#              'SK', 'SI', 'SE', 'TH', 'TR', 'US', 'ZA']
#
# # set dictionary for wbdata
# inflation = {'FP.CPI.TOTL.ZG': 'CPI_annual', 'NY.GDP.MKTP.KD.ZG': 'GDP_annual'}
#
# # download wb data
# df = wd.get_dataframe(inflation, country = countries,
#                       data_date = (pd.datetime(start_year, 1, 1),
#                                    pd.datetime(end_year, 1, 1)))
#
# # print(df.head())
# df.to_csv('WB_data.csv', index = False)

# Libraries ---------------------------------------------------------------
library('data.table')
library('ggplot2')
library('grid')
library('gridExtra')
library('forcats')

# Prepare data ------------------------------------------------------------
dt_cpi <- fread('data/WB_data.csv')
setkey(dt_cpi, "country")

# FIX: `dt_cpi_s <- dt_cpi` only creates a second reference to the SAME
# data.table, so the by-reference `:=` updates below would also rewrite
# `dt_cpi`. Take an explicit copy so the original stays intact.
dt_cpi_s <- copy(dt_cpi)

# Wrap long country names so the facet strip labels stay narrow
dt_cpi_s[country == 'Czech Republic', country := "Czech\nRepublic"]
dt_cpi_s[country == 'Korea, Rep.', country := "Republic\nof Korea"]
dt_cpi_s[country == 'New Zealand', country := "New\nZealand"]
dt_cpi_s[country == 'Russian Federation', country := "Russian\nFederation"]
dt_cpi_s[country == 'South Africa', country := "South\nAfrica"]
dt_cpi_s[country == 'Slovak Republic', country := "Slovak\nRepublic"]
dt_cpi_s[country == 'United Kingdom', country := "United\nKingdom"]
dt_cpi_s[country == 'United States', country := "United\nStates"]

# Long format: one row per country/date/indicator (CPI_annual, GDP_annual)
dt_cpi_melt <- melt.data.table(dt_cpi_s, id.vars = c("country", "date"))

# Data for additional labels inside the plot
# ------------------------------
# Color encoding on the first small multiple instead of a legend:
# blank label everywhere except one "CPI"/"GDP" tag per series on the
# Armenia and Jamaica facets (at the last data point, 2017).
dt_cpi_melt[, lbl := ""]
dt_cpi_melt[(country %in% c("Armenia", "Jamaica")) & (date == 2017) &
              (variable == "CPI_annual"), lbl := "CPI"]
dt_cpi_melt[(country %in% c("Armenia", "Jamaica")) & (date == 2017) &
              (variable == "GDP_annual"), lbl := "GDP"]

# Position of new labels (hand-tuned y offsets so tags clear the lines)
dt_cpi_melt[, y_pos := c(0)]
dt_cpi_melt[(country == "Armenia") & (date == 2017) &
              (variable == "CPI_annual"), y_pos := -9]
dt_cpi_melt[(country == "Armenia") & (date == 2017) &
              (variable == "GDP_annual"), y_pos := 8]
dt_cpi_melt[(country == "Jamaica") & (date == 2017) &
              (variable == "CPI_annual"), y_pos := 6]
dt_cpi_melt[(country == "Jamaica") & (date == 2017) &
              (variable == "GDP_annual"), y_pos := -8.5]

# Outlier labels: print exact values next to the extreme 2015 observations
dt_cpi_melt[, y_pos_out := c(0)]
dt_cpi_melt[(country == "Russian\nFederation") & (date == 2015) &
              (variable == "CPI_annual"), y_pos_out := 15.9]
dt_cpi_melt[(country == "Russian\nFederation") & (date == 2015) &
              (variable == "GDP_annual"), y_pos_out := 2.5]
dt_cpi_melt[(country == "Ireland") & (date == 2015) &
              (variable == "GDP_annual"), y_pos_out := 20]

# Base plot ---------------------------------------------------------------
p <- ggplot() +
  # Pseudo grid lines for finer control than the theme grid allows
  geom_hline(yintercept = -10, color = "#BFBFBF", size = 0.2) +
  geom_hline(yintercept = -5, color = "#BFBFBF", size = 0.1) +
  geom_hline(yintercept = 0, color = "#707070", size = 0.3) +
  geom_hline(yintercept = 5, color = "#BFBFBF", size = 0.1) +
  geom_hline(yintercept = 10, color = "#BFBFBF", size = 0.2) +
  geom_hline(yintercept = 15, color = "#BFBFBF", size = 0.1) +
  geom_hline(yintercept = 20, color = "#BFBFBF", size = 0.2) +
  # Data
  geom_line(data = dt_cpi_melt,
            aes(x = date, y = value, color = variable)) +
  geom_point(data = dt_cpi_melt,
             aes(x = date, y = value, color = variable),
             size = 0.5, shape = 20, show.legend = FALSE) +
  # In-panel series tags (replace the legend)
  geom_text(data = dt_cpi_melt,
            aes(x = date, y = y_pos, color = variable, label = lbl),
            show.legend = FALSE, family = "Inter UI", size = 3.5,
            hjust = 0.9, vjust = 0) +
  # Outlier labels
  geom_text(data = dt_cpi_melt[(country == "Russian\nFederation") &
                                 (date == 2015), ],
            aes(x = date, y = y_pos_out, color = variable,
                label = round(value, 2)),
            show.legend = FALSE, family = "Inter UI", size = 3.5,
            hjust = 0.5, vjust = 0) +
  geom_text(data = dt_cpi_melt[(country == "Ireland") & (date == 2015) &
                                 (variable == "GDP_annual"), ],
            aes(x = 2015.2, y = y_pos_out, color = variable,
                label = round(value, 2)),
            show.legend = FALSE, family = "Inter UI",
            hjust = 0, vjust = 0) +
  # Facets setup
  facet_wrap(~country, ncol = 6) +
  # Scales
  scale_x_continuous(labels = c("2013", "14", "15", "16", "17")) +
  scale_y_continuous(sec.axis = dup_axis(name = "")) +
  # FIX: the argument is `labels`, not `label` — the original relied on
  # partial argument matching.
  scale_color_manual(labels = c("CPI", "GDP"),
                     values = c("#aa8c3b", "#003831")) +
  # Labs
  labs(x = "",
       y = "Annual\nGDP growth rate\nand CPI, %",
       # subtitle = "Period: 2013-2017",
       title = "Inflation (CPI) and GDP Growth Rate\nin Inflation Targeting Countries") +
  # Theme
  theme_minimal() +
  theme(panel.spacing = unit(1.05, "lines"),
        panel.grid.minor = element_blank(),
        panel.grid.major = element_blank(),
        plot.margin = margin(t = 15, b = 50, l = 0, r = 30, unit = "pt"),
        # Axes
        axis.text.x = element_text(family = "Roboto Condensed", size = 9,
                                   color = "#3B3B3B"),
        axis.text.y = element_text(family = "Roboto Condensed", size = 10,
                                   color = "#3B3B3B"),
        axis.title.y = element_text(family = "Inter UI", size = 12,
                                    color = "#3B3B3B", hjust = 1, angle = 0,
                                    # debug = TRUE,
                                    margin = margin(r = 10, unit = "pt")),
        # Legend
        # FIX: "" is not a documented legend.position value; "none" is the
        # documented way to suppress the legend.
        legend.position = "none",
        # Titles
        plot.title = element_text(family = "Inter UI Black", size = 18,
                                  color = "#3B3B3B"),
        strip.text = element_text(family = "Roboto Condensed", size = 12.5,
                                  color = "#3B3B3B")
  )

# Helper plot to extract the x axis with 6 labels -------------------------
# Same facet geometry as `p`, restricted to the first six countries, so a
# one-row x axis grob can be cut out of it and re-inserted mid-grid below.
clist <- c("Armenia", "Australia", "Austria", "Belgium", "Brazil", "Bulgaria")

p6 <- ggplot() +
  # Data
  geom_line(data = dt_cpi_melt[country %in% clist],
            aes(x = date, y = value, color = variable)) +
  geom_point(data = dt_cpi_melt[country %in% clist],
             aes(x = date, y = value, color = variable),
             size = 0.5, shape = 20, show.legend = FALSE) +
  facet_wrap(~country, ncol = 6) +
  # Scales
  scale_x_continuous(labels = c("2013", "14", "15", "16", "17")) +
  scale_y_continuous(sec.axis = dup_axis(name = "")) +
  scale_color_manual(labels = c("CPI", "GDP"),
                     values = c("#aa8c3b", "#003831")) +
  # Theme
  theme_minimal() +
  theme(panel.spacing = unit(1.05, "lines"),
        panel.grid.minor = element_blank(),
        panel.grid.major = element_blank(),
        plot.margin = margin(t = 15, b = 10, l = 0, r = 30, unit = "pt"),
        # Axes
        axis.text.x = element_text(family = "Roboto Condensed", size = 9,
                                   color = "#3B3B3B"),
        axis.text.y = element_text(family = "Roboto Condensed", size = 10,
                                   color = "#3B3B3B"),
        axis.title.y = element_text(family = "Inter UI", size = 12,
                                    color = "#3B3B3B", hjust = 1, angle = 0,
                                    # debug = TRUE,
                                    margin = margin(r = 10, unit = "pt")),
        legend.position = "none",
        # Titles
        strip.text = element_text(family = "Roboto Condensed", size = 12.5,
                                  color = "#3B3B3B")
  )

# Convert plots to grobs to add an x axis in the middle of the facets -----
# NOTE(review): gtable:::rbind_gtable is an unexported internal, and the
# hard-coded row indices below (9, 27:29, 26, 32) are tied to this exact
# gtable layout / ggplot2 version — re-check after any ggplot2 upgrade.
g <- ggplotGrob(p)
p6 <- ggplotGrob(p6)

# Locate the data panels
panels <- grep("panel", g$layout$name)
top <- unique(g$layout$t[panels])

# X axis object
xax <- p6[9, ]

# Add it to the first row
first_row <- gtable:::rbind_gtable(g[seq.int(min(top)), ], xax, "first")

# Add it to the row starting from Jamaica
jam_row <- gtable:::rbind_gtable(g[27:29, ], xax, "first")

# Merge it with the rest of the data between the 1st row
# and the row starting from Jamaica
first_to_jam <- gtable:::rbind_gtable(first_row, g[(min(top) + 1):26, ], "first")

# Merge the row starting from Jamaica with the rest of the rows
jam_to_end <- gtable:::rbind_gtable(jam_row, g[32:(max(top) + 3), ], "first")

# Collect everything in one plot with caption and y axis title ------------
all <- arrangeGrob(
  # Combine y axis title and the rest of the plot
  arrangeGrob(textGrob("Annual\nGDP growth rate\nand CPI, %",
                       gp = gpar(fontsize = 12, family = "Inter UI",
                                 col = "#3B3B3B"),
                       x = unit(1, "npc"), y = unit(0.93, "npc"),
                       just = c("left", "top")),
              gtable:::rbind_gtable(first_to_jam, jam_to_end, "first"),
              ncol = 2, widths = c(0.01, 0.99)),
  # Add annotation
  textGrob("Data: Worldbank::FP.CPI.TOTL.ZG & Worldbank::NY.GDP.MKTP.KD.ZG Indicators.",
           gp = gpar(fontsize = 10, family = "Inter UI", col = "#3B3B3B"),
           x = unit(0.18, "npc"), just = c("left", "bottom")),
  nrow = 2, heights = c(0.99, 0.01)
)

# Save result --------------------------------------------------------------
ggsave("wb_gdp_cpi.svg", all, width = 12, height = 18, scale = 0.8)
ggsave("wb_gdp_cpi.png", all, width = 12, height = 18, scale = 0.8,
       dpi = 600, type = "cairo-png")
/script.R
no_license
zhitkovk/medium_articles
R
false
false
10,142
r
# Python script to load data ---------------------------------------------- # Here it is provided as a reference # import pandas as pd # import wbdata as wd # # # define a period of time # start_year = 2013 # end_year = 2017 # # # list of countries under inflation targeting monetary policy regime # countries = ['AM', 'AU', 'AT', 'BE', 'BG', 'BR', 'CA', 'CH', 'CL', 'CO', 'CY', 'CZ', 'DE', 'DK', 'XC', 'ES', 'EE', 'FI', 'FR', 'GB', 'GR', 'HU', 'IN', 'IE', 'IS', 'IL', 'IT', 'JM', 'JP', 'KR', 'LK', 'LT', 'LU', 'LV', 'MA', 'MD', 'MX', 'MT', 'MY', 'NL', 'NO', 'NZ', 'PK', 'PE', 'PH', 'PL', 'PT', 'RO', 'RU', 'SG', 'SK', 'SI', 'SE', 'TH', 'TR', 'US', 'ZA'] # # # set dictionary for wbdata # inflation = {'FP.CPI.TOTL.ZG': 'CPI_annual', 'NY.GDP.MKTP.KD.ZG': 'GDP_annual'} # # # download wb data # df = wd.get_dataframe(inflation, country = countries, data_date = (pd.datetime(start_year, 1, 1), pd.datetime(end_year, 1, 1))) # # # print(df.head()) # df.to_csv('WB_data.csv', index = False) # Libraries --------------------------------------------------------------- library('data.table') library('ggplot2') library('grid') library('gridExtra') library('forcats') # Prepare data ------------------------------------------------------------ dt_cpi <- fread('data/WB_data.csv') setkey(dt_cpi, "country") dt_cpi_s <- dt_cpi dt_cpi_s[country == 'Czech Republic', country := "Czech\nRepublic"] dt_cpi_s[country == 'Korea, Rep.', country := "Republic\nof Korea"] dt_cpi_s[country == 'New Zealand', country := "New\nZealand"] dt_cpi_s[country == 'Russian Federation', country := "Russian\nFederation"] dt_cpi_s[country == 'South Africa', country := "South\nAfrica"] dt_cpi_s[country == 'Slovak Republic', country := "Slovak\nRepublic"] dt_cpi_s[country == 'United Kingdom', country := "United\nKingdom"] dt_cpi_s[country == 'United States', country := "United\nStates"] dt_cpi_melt <- melt.data.table(dt_cpi_s, id.vars = c("country", "date")) # Data for additional labels inside the plot 
------------------------------ # Color encoding on the first small multiple instead of a legend dt_cpi_melt[, lbl := ""] dt_cpi_melt[(country %in% c("Armenia", "Jamaica")) & (date == 2017) & (variable == "CPI_annual"), lbl := "CPI"] dt_cpi_melt[(country %in% c("Armenia", "Jamaica")) & (date == 2017) & (variable == "GDP_annual"), lbl := "GDP"] # Position of new labels dt_cpi_melt[, y_pos := c(0)] dt_cpi_melt[(country == "Armenia") & (date == 2017) & (variable == "CPI_annual"), y_pos := -9] dt_cpi_melt[(country == "Armenia") & (date == 2017) & (variable == "GDP_annual"), y_pos := 8] dt_cpi_melt[(country == "Jamaica") & (date == 2017) & (variable == "CPI_annual"), y_pos := 6] dt_cpi_melt[(country == "Jamaica") & (date == 2017) & (variable == "GDP_annual"), y_pos := -8.5] # Outlier labels dt_cpi_melt[, y_pos_out := c(0)] dt_cpi_melt[(country == "Russian\nFederation") & (date == 2015) & (variable == "CPI_annual"), y_pos_out := 15.9] dt_cpi_melt[(country == "Russian\nFederation") & (date == 2015) & (variable == "GDP_annual"), y_pos_out := 2.5] dt_cpi_melt[(country == "Ireland") & (date == 2015) & (variable == "GDP_annual"), y_pos_out := 20] # Base plot --------------------------------------------------------------- p <- ggplot() + # Pseudo grid line for finer control geom_hline(yintercept = -10, color = "#BFBFBF", size = 0.2) + geom_hline(yintercept = -5, color = "#BFBFBF", size = 0.1) + geom_hline(yintercept = 0, color = "#707070", size = 0.3) + geom_hline(yintercept = 5, color = "#BFBFBF", size = 0.1) + geom_hline(yintercept = 10, color = "#BFBFBF", size = 0.2) + geom_hline(yintercept = 15, color = "#BFBFBF", size = 0.1) + geom_hline(yintercept = 20, color = "#BFBFBF", size = 0.2) + # Data geom_line(data = dt_cpi_melt, aes(x = date, y = value, color = variable)) + geom_point(data = dt_cpi_melt, aes(x = date, y = value, color = variable), size = 0.5, shape = 20, show.legend = FALSE) + geom_text(data = dt_cpi_melt, aes(x = date, y = y_pos, color = variable, label = lbl), 
show.legend = FALSE, family = "Inter UI", size = 3.5, hjust = 0.9, vjust = 0) + # Outlier labels geom_text(data = dt_cpi_melt[(country == "Russian\nFederation") & (date == 2015), ], aes(x = date, y = y_pos_out, color = variable, label = round(value, 2)), show.legend = FALSE, family = "Inter UI", size = 3.5, hjust = 0.5, vjust = 0) + geom_text(data = dt_cpi_melt[(country == "Ireland") & (date == 2015) & (variable == "GDP_annual"), ], aes(x = 2015.2, y = y_pos_out, color = variable, label = round(value, 2)), show.legend = FALSE, family = "Inter UI", hjust = 0, vjust = 0) + # Facets setup facet_wrap(~country, ncol = 6) + # Scales scale_x_continuous(labels = c("2013", "14", "15", "16", "17")) + scale_y_continuous(sec.axis = dup_axis(name = "")) + scale_color_manual(label = c("CPI", "GDP"), values = c("#aa8c3b", "#003831")) + # Labs labs(x = "", y = "Annual\nGDP growth rate\nand CPI, %", # subtitle = "Period: 2013-2017", title = "Inflation (CPI) and GDP Growth Rate\nin Inflation Targeting Countries") + # Theme theme_minimal() + theme(panel.spacing = unit(1.05, "lines"), panel.grid.minor = element_blank(), panel.grid.major = element_blank(), plot.margin = margin(t = 15, b = 50, l = 0, r = 30, unit = "pt"), # Axes axis.text.x = element_text(family = "Roboto Condensed", size = 9, color = "#3B3B3B"), axis.text.y = element_text(family = "Roboto Condensed", size = 10, color = "#3B3B3B"), axis.title.y = element_text(family = "Inter UI", size = 12, color = "#3B3B3B", hjust = 1, angle = 0, # debug = TRUE, margin = margin(r = 10, unit = "pt")), # Legend legend.position = "", # Titles plot.title = element_text(family = "Inter UI Black", size = 18, color = "#3B3B3B"), strip.text = element_text(family = "Roboto Condensed", size = 12.5, color = "#3B3B3B") ) # Helper plot to extract the x axis with 6 labels ------------------------- clist <- c("Armenia", "Australia", "Austria", "Belgium", "Brazil", "Bulgaria") p6 <- ggplot() + # Data geom_line(data = dt_cpi_melt[country %in% clist], 
aes(x = date, y = value, color = variable)) + geom_point(data = dt_cpi_melt[country %in% clist], aes(x = date, y = value, color = variable), size = 0.5, shape = 20, show.legend = FALSE) + facet_wrap(~country, ncol = 6) + # Scales scale_x_continuous(labels = c("2013", "14", "15", "16", "17")) + scale_y_continuous(sec.axis = dup_axis(name = "")) + scale_color_manual(label = c("CPI", "GDP"), values = c("#aa8c3b", "#003831")) + # Theme theme_minimal() + theme(panel.spacing = unit(1.05, "lines"), panel.grid.minor = element_blank(), panel.grid.major = element_blank(), plot.margin = margin(t = 15, b = 10, l = 0, r = 30, unit = "pt"), # Axes axis.text.x = element_text(family = "Roboto Condensed", size = 9, color = "#3B3B3B"), axis.text.y = element_text(family = "Roboto Condensed", size = 10, color = "#3B3B3B"), axis.title.y = element_text(family = "Inter UI", size = 12, color = "#3B3B3B", hjust = 1, angle = 0, # debug = TRUE, margin = margin(r = 10, unit = "pt")), legend.position = "", # Titles strip.text = element_text(family = "Roboto Condensed", size = 12.5, color = "#3B3B3B") ) # Convert plots to grobs to add xaxis on the center of facets ------------- g <- ggplotGrob(p) p6 <- ggplotGrob(p6) # Locate the data panels panels <- grep("panel", g$layout$name) top <- unique(g$layout$t[panels]) # X axis object xax <- p6[9, ] # Add it to the first row first_row <- gtable:::rbind_gtable(g[seq.int(min(top)), ], xax, "first" ) # Add it to row starting from Jamaica jam_row <- gtable:::rbind_gtable(g[27:29, ], xax, "first" ) # Merge it with the rest of the data between 1st row # and row starting from Jamaica first_to_jam <- gtable:::rbind_gtable(first_row, g[(min(top) + 1):26,], "first") # Merge row starting from Jamaica with the rest of the rows jam_to_end <- gtable:::rbind_gtable(jam_row, g[32:(max(top) + 3),], "first") # Collect everything in one plot with caption and y axis title ------------ all <- arrangeGrob( # Combine y axis title and the rest of the plot 
arrangeGrob(textGrob("Annual\nGDP growth rate\nand CPI, %", gp = gpar(fontsize = 12, family = "Inter UI", col = "#3B3B3B"), x = unit(1, "npc"), y = unit(0.93, "npc"), just = c("left", "top")), gtable:::rbind_gtable(first_to_jam, jam_to_end, "first"), ncol = 2, widths = c(0.01, 0.99)), # Add annotation textGrob("Data: Worldbank::FP.CPI.TOTL.ZG & Worldbank::NY.GDP.MKTP.KD.ZG Indicators.", gp = gpar(fontsize = 10, family = "Inter UI", col = "#3B3B3B"), x = unit(0.18, "npc"), just = c("left", "bottom")), nrow = 2, heights = c(0.99, 0.01) ) # Save result ------------------------------------------------------------- ggsave("wb_gdp_cpi.svg", all, width = 12, height = 18, scale = 0.8) ggsave("wb_gdp_cpi.png", all, width = 12, height = 18, scale = 0.8, dpi = 600, type = "cairo-png")
# Weighted standard deviation of `vec` with weights `weight`.
# Population form: sqrt(E[x^2] - E[x]^2), NAs removed inside each mean.
# Returns NA for a single observation, where an SD is undefined.
weighted.sd <- function(vec, weight) {
  if (length(vec) == 1) {
    return(NA)
  }
  m1 <- weighted.mean(vec, weight, na.rm = TRUE)
  m2 <- weighted.mean(vec * vec, weight, na.rm = TRUE)
  # FIX: floating-point cancellation can make (m2 - m1^2) marginally
  # negative, turning sqrt() into NaN with a warning; clamp at zero.
  # max() still propagates NA when the means themselves are NA.
  sqrt(max(m2 - m1 * m1, 0))
}

# UD treebank identifiers covered by the analysis
languages <- c('Hebrew', 'Romanian', 'Finnish', 'Danish', 'Old_Church_Slavonic',
               'Galician-TreeGal', 'Swedish-LinES', 'Marathi', 'Greek',
               'Latin-PROIEL', 'Polish', 'Spanish-AnCora', 'Finnish-FTB',
               'Kazakh', 'Arabic', 'Japanese', 'Slovenian',
               'Ancient_Greek-PROIEL', 'Latvian', 'Swedish_Sign_Language',
               'Coptic', 'Turkish', 'Ancient_Greek', 'Ukrainian', 'Hungarian',
               'Russian-SynTagRus', 'Italian-ParTUT', 'Chinese',
               'Dutch-LassySmall', 'Italian', 'Bulgarian', 'Irish',
               'Romanian-Nonstandard', 'Norwegian-Nynorsk', 'Indonesian',
               'Latin-ITTB', 'Tamil', 'French-Sequoia', 'Belarusian',
               'Lithuanian', 'Afrikaans', 'Persian', 'Portuguese-BR',
               'Croatian', 'Russian', 'English-ParTUT', 'Arabic-NYUAD',
               'Estonian', 'Gothic', 'Telugu', 'Czech-CLTT', 'Catalan',
               'Dutch', 'French-FTB', 'Spanish', 'English', 'French',
               'Galician', 'Slovenian-SST', 'Korean', 'Portuguese', 'Basque',
               'German', 'Urdu', 'Hindi', 'Slovak', 'Czech-CAC',
               'Italian-PoSTWITA', 'Latin', 'Swedish', 'Vietnamese',
               'French-ParTUT', 'Czech', 'Norwegian-Bokmaal', 'North_Sami',
               'English-LinES', 'Serbian', 'Czech-FicTree')

library(dplyr)
library(tidyr)
library(ggplot2)

options(width = 130)

# Automatic grammar runs (per-language summaries)
auto <- read.csv("CS_SCR/deps/manual_output/auto-summary.tsv", sep = "\t")# %>% rename(Quality=AverageLength)
auto$Direction <- NA
auto$FileName <- as.character(auto$FileName)

# Manual/English runs; Perplexity renamed so both frames share a column
data <- read.csv("CS_SCR/deps/manual_output/results.tsv", sep = "\t") %>%
  rename(AverageLoss = Perplexity)
# Pad the columns missing from this file with NA so bind_rows lines up
# (NOTE: Counter stays NA for every row from results.tsv — the
# Counter > 10 filter applied later drops all of these rows; confirm
# that is intended).
data$DH_Weight <- NA
data$Counter <- NA
data$Language <- "English"
#data$Objective = NA
data$FileName <- as.character(data$FileName)
#data$ObjectiveName = NA
data$EntropyWeight <- NA
data$LR_POLICY <- NA
data$Lagrange_Lambda <- NA
data$Lagrange_B <- NA
data$L2_Weight <- NA

data <- bind_rows(data, auto)

#aggregate(data["Counter"], by=c(data["Language"], data["FileName"], data["ModelName"], data["ObjectiveName"]), NROW)
# important that Counter is never NA
# NOTE(review): filter() also drops rows where Counter is NA — which is
# every row that came from results.tsv (Counter was set to NA above).
# Confirm that dropping those rows here is intended.
data <- data %>% filter(Counter > 10)

################################
# Groups of analysis scripts: each vector lists the regression scripts
# whose output rows get the same RegType tag below.
prefixless_note <- NULL  # (names share a long common prefix; kept verbatim)

forAll <- c("readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorpora.py",
            "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopes.py",
            "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaLogVarRateUID.py",
            "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaLogVar.py",
            "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaLogVarDepLBugfix.py",
            "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaLogVarRateUIDDepLBugfix.py",
            "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaLogVarRateUIDLogDepL.py",
            # NOTE(review): duplicate of the ...LogVarDepLBugfix.py entry
            # above; harmless for %in% membership tests.
            "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaLogVarDepLBugfix.py",
            "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaLogVarRateUIDDepLBugfix_UIDs.py",
            "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUID.py",
            "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaLogVarRateUIDDepLBugfix_Tight.py",
            "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaLogVarDepLBugfix_Tight_Guide.py",
            "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaLogVarDepLBugfix_Tight.py",
            "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaLogVarRateUIDLogDepL_Tight.py",
            "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaLogVarRateUIDDepLBugfix_LogExp.py",
            "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaLogVarDepLBugfix_LogExp.py")

forSurp <- c("readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaSurp.py",
             "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaSurp_LogVar.py",
             "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaSurp_LogVar_Tight.py",
             "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaSurp_LogVar_Tight_Guide.py",
             "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaSurp_LogExpVar.py",
             "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaSurp_LogExpVar_NoScaling.py",
             "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaSurp_LogVar_NoScaling.py",
             "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaSurp_LogExpVar_NoScaling_Eval.py")

forDepL <- c("readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaDepL.py",
             "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaDepLOnly_Bugfix.py",
             "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaDepLOnly_Bugfix_LogVar.py",
             "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaDepLOnly_Bugfix_LogVar_TightVariancePrior.py",
             "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaDepLOnly_Bugfix_LogExpVar.py",
             "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaDepLOnly_Bugfix_LogExpVar_NoScaling.py")

forSurpWord <- c("readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaSurpWord_LogVar.py",
                 "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaSurpWord_LogVar_Tight.py",
                 "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaSurpWord_LogExpVar.py",
                 "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaSurpWord_LogExpVar_NoScaling.py")

forBackSurp <- c("readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaSurp_LogExpVar_NoScaling_Eval_Backwards.py")

forSurpDepL <- c("readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaLogVarDepLBugfix_LogExp_OnlySurpDepL_NoScaling_Eval.py",
                 "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaLogVarDepLBugfix_LogExp_OnlySurpDepL.py",
                 "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaLogVarDepLBugfix_LogExp_OnlySurpDepL_NoScaling_Eval_MultiVar.py")

# Tag every run with its regression type, first match wins.
# FIX(idiom): replaces a 7-deep nested ifelse() with case_when(); the
# %in% conditions are never NA so the branch logic is identical (an NA
# ModelName would now fall through to "NONE" rather than NA RegType).
data <- data %>% mutate(RegType = case_when(
  ModelName %in% forSurp ~ "Surp",
  ModelName %in% forDepL ~ "DepL",
  ModelName %in% forAll ~ "DL+Surp+ID",
  ModelName %in% forSurpDepL ~ "DL+Surp",
  ModelName %in% forSurpWord ~ "SurpWord",
  ModelName %in% forBackSurp ~ "BackSurp",
  ModelName == "readDataRegressionDepLengthAndSurprisalRandomEffectsVariational.py" ~ "All_NoSlope",
  TRUE ~ "NONE"
))

cat("\nAssigned RegType\n")
# Report any scripts that were not classified above
cat(unique((data %>% filter(RegType == "NONE"))$ModelName), sep = "\n")

# Runs from these scripts are known-bad and are excluded downstream.
badModels <- c("readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorpora.py",
               "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopes.py",
               "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaLogVarRateUID.py",
               "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaLogVar.py",
               "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUID.py",
               "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaDepL.py")
# Drop the runs produced by known-bad scripts (badModels defined above).
data = data %>% filter(!(ModelName %in% badModels))

cat("Take into account (1) log variances, (2) UID rate vs UID")

# The following chain coalesces each variance / sigma column from its
# alternative parameterisations, in order of preference:
#   raw value  ->  exp(log-variance)  ->  softplus log(1+exp(log-exp-variance)).
# Each ifelse fills a column only where it is still NA, so the statements
# are ORDER-DEPENDENT and must not be reordered or merged.
data = data %>% mutate(Var_Slope_Surp_POS = ifelse(is.na(Var_Slope_Surp_POS), exp(LogVar_Slope_Surp_POS), Var_Slope_Surp_POS))
data = data %>% mutate(Var_Slope_Surp_Word = ifelse(is.na(Var_Slope_Surp_Word), exp(LogVar_Slope_Surp_Word), Var_Slope_Surp_Word))
# Var_Slope_UIDRate does not exist in the bound data; create it as NA so
# the next statement fills it entirely from the log parameterisation.
data$Var_Slope_UIDRate = NA
data = data %>% mutate(Var_Slope_UIDRate = ifelse(is.na(Var_Slope_UIDRate), exp(LogVar_Slope_UIDRate), Var_Slope_UIDRate))
data = data %>% mutate(Var_Slope_UID = ifelse(is.na(Var_Slope_UID), exp(LogVar_Slope_UID), Var_Slope_UID))
data = data %>% mutate(DH_Sigma = ifelse(is.na(DH_Sigma), exp(DH_LogSigma), DH_Sigma))
# NOTE(review): exp() of a slope fitted on log dependency length is not
# the same quantity as a slope on raw length — presumably intentional as a
# rough comparison, but confirm against the fitting scripts.
data = data %>% mutate(Mean_Slope_DepLength = ifelse(is.na(Mean_Slope_DepLength), exp(Mean_Slope_DepLogLength), Mean_Slope_DepLength))
data = data %>% mutate(Distance_Sigma = ifelse(is.na(Distance_Sigma), exp(Distance_LogSigma), Distance_Sigma))
# Var_Slope_DepLength is filled in two passes: first from the log-length
# parameterisation, then from the plain-length one.
data = data %>% mutate(Var_Slope_DepLength = ifelse(is.na(Var_Slope_DepLength), exp(LogVar_Slope_DepLogLength), Var_Slope_DepLength))
data = data %>% mutate(Var_Slope_DepLength = ifelse(is.na(Var_Slope_DepLength), exp(LogVar_Slope_DepLength), Var_Slope_DepLength))

# Softplus-parameterised runs: variance was stored as x with
# Var = log(1 + exp(x)); applied only where the column is still NA.
data = data %>% mutate(Var_Slope_Surp_POS = ifelse(is.na( Var_Slope_Surp_POS ), log(1+exp(LogExpVar_Slope_Surp_POS )), Var_Slope_Surp_POS ))
data = data %>% mutate(DH_Sigma = ifelse(is.na( DH_Sigma ), log(1+exp(DH_LogExpSigma )), DH_Sigma ))
data = data %>% mutate(Var_Slope_DepLength = ifelse(is.na( Var_Slope_DepLength ), log(1+exp(LogExpVar_Slope_DepLength )), Var_Slope_DepLength ))
data = data %>% mutate(Var_Slope_UID = ifelse(is.na( Var_Slope_UID ), log(1+exp(LogExpVar_Slope_UID )), Var_Slope_UID ))
data = data %>% mutate(Var_Slope_Surp_Word = ifelse(is.na( Var_Slope_Surp_Word ), log(1+exp(LogExpVar_Slope_Surp_Word )), Var_Slope_Surp_Word ))
data = data %>% mutate(Var_Slope_UIDRate = ifelse(is.na( Var_Slope_UIDRate ), log(1+exp( LogExpVar_Slope_UIDRate )), Var_Slope_UIDRate ))
data = data %>% mutate(Distance_Sigma = ifelse(is.na(Distance_Sigma ), log(1+exp( Distance_LogExpSigma )), Distance_Sigma ))

cat(" TODO do same with other variances and Sigmas")
cat("ALSO take into account depLogLength")
cat("Where is Norwegian surprisal data???")

# Shared dodge position for the ggplot calls that follow.
dodge = position_dodge(.9)
/writeup/readData.r
no_license
m-hahn/dependency-ordering
R
false
false
10,749
r
weighted.sd = function(vec, weight) { if(length(vec) == 1) { return(NA) } return(sqrt(weighted.mean(vec*vec, weight, na.rm=TRUE) - weighted.mean(vec, weight, na.rm=TRUE)**2)) } languages = c('Hebrew', 'Romanian', 'Finnish', 'Danish', 'Old_Church_Slavonic', 'Galician-TreeGal', 'Swedish-LinES', 'Marathi', 'Greek', 'Latin-PROIEL', 'Polish', 'Spanish-AnCora', 'Finnish-FTB', 'Kazakh', 'Arabic', 'Japanese', 'Slovenian', 'Ancient_Greek-PROIEL', 'Latvian', 'Swedish_Sign_Language', 'Coptic', 'Turkish', 'Ancient_Greek', 'Ukrainian', 'Hungarian', 'Russian-SynTagRus', 'Italian-ParTUT', 'Chinese', 'Dutch-LassySmall', 'Italian', 'Bulgarian', 'Irish', 'Romanian-Nonstandard', 'Norwegian-Nynorsk', 'Indonesian', 'Latin-ITTB', 'Tamil', 'French-Sequoia', 'Belarusian', 'Lithuanian', 'Afrikaans', 'Persian', 'Portuguese-BR', 'Croatian', 'Russian', 'English-ParTUT', 'Arabic-NYUAD', 'Estonian', 'Gothic', 'Telugu', 'Czech-CLTT', 'Catalan', 'Dutch', 'French-FTB', 'Spanish', 'English', 'French', 'Galician', 'Slovenian-SST', 'Korean', 'Portuguese', 'Basque', 'German', 'Urdu', 'Hindi', 'Slovak', 'Czech-CAC', 'Italian-PoSTWITA', 'Latin', 'Swedish', 'Vietnamese', 'French-ParTUT', 'Czech', 'Norwegian-Bokmaal', 'North_Sami', 'English-LinES', 'Serbian', 'Czech-FicTree') library(dplyr) library(tidyr) library(ggplot2) options(width=130) auto = read.csv("CS_SCR/deps/manual_output/auto-summary.tsv", sep="\t")# %>% rename(Quality=AverageLength) auto$Direction = NA auto$FileName = as.character(auto$FileName) data = read.csv("CS_SCR/deps/manual_output/results.tsv", sep="\t") %>% rename(AverageLoss=Perplexity) data$DH_Weight = NA data$Counter = NA data$Language = "English" #data$Objective = NA data$FileName = as.character(data$FileName) #data$ObjectiveName = NA data$EntropyWeight = NA data$LR_POLICY=NA data$Lagrange_Lambda=NA data$Lagrange_B=NA data$L2_Weight=NA data = bind_rows(data, auto) #aggregate(data["Counter"], by=c(data["Language"], data["FileName"], data["ModelName"], data["ObjectiveName"]), NROW) 
# important that Counter is never NA data = data %>% filter(Counter > 10) ################################ forAll = c("readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorpora.py", "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopes.py", "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaLogVarRateUID.py", "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaLogVar.py", "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaLogVarDepLBugfix.py", "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaLogVarRateUIDDepLBugfix.py", "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaLogVarRateUIDLogDepL.py", "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaLogVarDepLBugfix.py", "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaLogVarRateUIDDepLBugfix_UIDs.py", "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUID.py", "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaLogVarRateUIDDepLBugfix_Tight.py", "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaLogVarDepLBugfix_Tight_Guide.py", "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaLogVarDepLBugfix_Tight.py", "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaLogVarRateUIDLogDepL_Tight.py", "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaLogVarRateUIDDepLBugfix_LogExp.py", "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaLogVarDepLBugfix_LogExp.py") forSurp 
= c("readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaSurp.py", "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaSurp_LogVar.py", "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaSurp_LogVar_Tight.py", "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaSurp_LogVar_Tight_Guide.py", "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaSurp_LogExpVar.py", "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaSurp_LogExpVar_NoScaling.py", "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaSurp_LogVar_NoScaling.py", "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaSurp_LogExpVar_NoScaling_Eval.py") forDepL = c("readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaDepL.py", "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaDepLOnly_Bugfix.py", "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaDepLOnly_Bugfix_LogVar.py" , "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaDepLOnly_Bugfix_LogVar_TightVariancePrior.py", "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaDepLOnly_Bugfix_LogExpVar.py", "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaDepLOnly_Bugfix_LogExpVar_NoScaling.py") forSurpWord = c("readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaSurpWord_LogVar.py", "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaSurpWord_LogVar_Tight.py", 
"readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaSurpWord_LogExpVar.py", "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaSurpWord_LogExpVar_NoScaling.py") forBackSurp = c("readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaSurp_LogExpVar_NoScaling_Eval_Backwards.py") forSurpDepL = c("readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaLogVarDepLBugfix_LogExp_OnlySurpDepL_NoScaling_Eval.py","readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaLogVarDepLBugfix_LogExp_OnlySurpDepL.py","readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaLogVarDepLBugfix_LogExp_OnlySurpDepL_NoScaling_Eval_MultiVar.py") data = data %>% mutate(RegType = ifelse(ModelName %in% forSurp, "Surp", ifelse(ModelName %in% forDepL, "DepL", ifelse(ModelName %in% forAll, "DL+Surp+ID", ifelse(ModelName %in% forSurpDepL, "DL+Surp", ifelse(ModelName %in% forSurpWord, "SurpWord", ifelse(ModelName %in% forBackSurp, "BackSurp", ifelse(ModelName == "readDataRegressionDepLengthAndSurprisalRandomEffectsVariational.py", "All_NoSlope", "NONE")))))))) cat("\nAssigned RegType\n") cat(unique((data %>% filter(RegType == "NONE"))$ModelName), sep="\n") badModels = c("readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorpora.py", "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopes.py", "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaLogVarRateUID.py", "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaLogVar.py", "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUID.py", "readDataRegressionDepLengthAndSurprisalRandomEffectsVariationalUIDRandInterceptSlopesAllCorporaDepL.py" ) 
data = data %>% filter(!(ModelName %in% badModels)) cat("Take into account (1) log variances, (2) UID rate vs UID") data = data %>% mutate(Var_Slope_Surp_POS = ifelse(is.na(Var_Slope_Surp_POS), exp(LogVar_Slope_Surp_POS), Var_Slope_Surp_POS)) data = data %>% mutate(Var_Slope_Surp_Word = ifelse(is.na(Var_Slope_Surp_Word), exp(LogVar_Slope_Surp_Word), Var_Slope_Surp_Word)) data$Var_Slope_UIDRate = NA data = data %>% mutate(Var_Slope_UIDRate = ifelse(is.na(Var_Slope_UIDRate), exp(LogVar_Slope_UIDRate), Var_Slope_UIDRate)) data = data %>% mutate(Var_Slope_UID = ifelse(is.na(Var_Slope_UID), exp(LogVar_Slope_UID), Var_Slope_UID)) data = data %>% mutate(DH_Sigma = ifelse(is.na(DH_Sigma), exp(DH_LogSigma), DH_Sigma)) data = data %>% mutate(Mean_Slope_DepLength = ifelse(is.na(Mean_Slope_DepLength), exp(Mean_Slope_DepLogLength), Mean_Slope_DepLength)) data = data %>% mutate(Distance_Sigma = ifelse(is.na(Distance_Sigma), exp(Distance_LogSigma), Distance_Sigma)) data = data %>% mutate(Var_Slope_DepLength = ifelse(is.na(Var_Slope_DepLength), exp(LogVar_Slope_DepLogLength), Var_Slope_DepLength)) data = data %>% mutate(Var_Slope_DepLength = ifelse(is.na(Var_Slope_DepLength), exp(LogVar_Slope_DepLength), Var_Slope_DepLength)) data = data %>% mutate(Var_Slope_Surp_POS = ifelse(is.na( Var_Slope_Surp_POS ), log(1+exp(LogExpVar_Slope_Surp_POS )), Var_Slope_Surp_POS )) data = data %>% mutate(DH_Sigma = ifelse(is.na( DH_Sigma ), log(1+exp(DH_LogExpSigma )), DH_Sigma )) data = data %>% mutate(Var_Slope_DepLength = ifelse(is.na( Var_Slope_DepLength ), log(1+exp(LogExpVar_Slope_DepLength )), Var_Slope_DepLength )) data = data %>% mutate(Var_Slope_UID = ifelse(is.na( Var_Slope_UID ), log(1+exp(LogExpVar_Slope_UID )), Var_Slope_UID )) data = data %>% mutate(Var_Slope_Surp_Word = ifelse(is.na( Var_Slope_Surp_Word ), log(1+exp(LogExpVar_Slope_Surp_Word )), Var_Slope_Surp_Word )) data = data %>% mutate(Var_Slope_UIDRate = ifelse(is.na( Var_Slope_UIDRate ), log(1+exp( LogExpVar_Slope_UIDRate )), 
Var_Slope_UIDRate )) data = data %>% mutate(Distance_Sigma = ifelse(is.na(Distance_Sigma ), log(1+exp( Distance_LogExpSigma )), Distance_Sigma )) cat(" TODO do same with other variances and Sigmas") cat("ALSO take into account depLogLength") cat("Where is Norwegian surprisal data???") dodge = position_dodge(.9)
# PSYC 502 homework: effect sizes (Cohen's d, Hedges' g, eta^2, omega^2)
# computed both via packages (effsize) and "by hand" from ANOVA tables,
# then repeated on two textbook data sets.
# NOTE(review): assumes the script runs inside RStudio (rstudioapi) and that
# data/*.xls exist relative to the script's own directory — confirm.
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
if (!require("pacman")) install.packages("pacman"); library(pacman)
p_load(stats, dplyr, psych, rstatix, readxl, effsize)

# ---- Ratings data: two-condition between-subjects design ----
df <- read_excel('data/ratings.xls')
df['Condition'] <- as.factor(df$Condition)

# Package-computed d and g, printed for comparison with the hand
# calculations below.
effsize::cohen.d(Rating~Condition, data = df)
effsize::cohen.d(Rating~Condition, data = df, hedges.correction = TRUE)

# One-way ANOVA; eta^2 = SS_effect / SS_total.
res.aov <- aov(Rating~Condition, data = df)
aov.sum <- anova(res.aov)
ssq_total <- aov.sum$`Sum Sq`[1] + aov.sum$`Sum Sq`[2]
ssq_total
eta_s <- aov.sum$`Sum Sq`[1] / ssq_total
eta_s

#ω2 = (SSeffect – (dfeffect)(MSerror)) / MSerror + SStotal
mse <- aov.sum$`Mean Sq`[2]
omega.sq <- (aov.sum$`Sum Sq`[1]-aov.sum$Df[1]*mse)/(mse + ssq_total)
omega.sq

#hedges' g = (M1-M2)/sqrt(MSE)
#cohen's d = g * sqrt(N/(N-2))
# Mean difference taken from a pooled-variance two-sample t-test.
m.diff <- t.test(df$Rating~df$Condition, var.equal = TRUE)
m.diff <- m.diff$estimate[1] - m.diff$estimate[2]
m.diff <- unname(m.diff)
m.diff
hedges <- m.diff/sqrt(mse)
hedges
cohen <- hedges *sqrt(nrow(df)/(nrow(df)-2))
cohen
#cohens_d(df, Rating~Condition, hedges.correction=FALSE)

#### Textbook example: Cohens d and Hedges g ####
# Same hand computation of g on the animals data set.
animal <- read_excel("data/animals.xls")
animal['GENDER'] <- as.factor(animal$GENDER)
aov.sum <- anova(aov(WRONG~GENDER, animal))
aov.sum
mse <- aov.sum$`Mean Sq`[2]
m.diff <- t.test(animal$WRONG~animal$GENDER, var.equal = TRUE)
m.diff <- m.diff$estimate[1] - m.diff$estimate[2]
m.diff <- unname(m.diff)
m.diff
g <- m.diff/sqrt(mse)

#### Textbook example ####
# eta^2 / omega^2 repeated on the leniency data set.
lncy <- read_excel('data/leniency.xls')
lncy['smile'] <- as.factor(lncy$smile)
res.aov <- aov(leniency~smile, data = lncy)
aov.sum <- anova(res.aov)
ssq_total <- aov.sum$`Sum Sq`[1] + aov.sum$`Sum Sq`[2]
ssq_total
eta_s <- aov.sum$`Sum Sq`[1] / ssq_total
eta_s
#ω2 = (SSeffect – (dfeffect)(MSerror)) / MSerror + SStotal
mse <- aov.sum$`Mean Sq`[2]
omega.sq <- (aov.sum$`Sum Sq`[1]-aov.sum$Df[1]*mse)/(mse + ssq_total)
omega.sq
/hw_20.R
no_license
silverer/PSYC-502-Code
R
false
false
1,926
r
setwd(dirname(rstudioapi::getActiveDocumentContext()$path)) if (!require("pacman")) install.packages("pacman"); library(pacman) p_load(stats, dplyr, psych, rstatix, readxl, effsize) df <- read_excel('data/ratings.xls') df['Condition'] <- as.factor(df$Condition) effsize::cohen.d(Rating~Condition, data = df) effsize::cohen.d(Rating~Condition, data = df, hedges.correction = TRUE) res.aov <- aov(Rating~Condition, data = df) aov.sum <- anova(res.aov) ssq_total <- aov.sum$`Sum Sq`[1] + aov.sum$`Sum Sq`[2] ssq_total eta_s <- aov.sum$`Sum Sq`[1] / ssq_total eta_s #ω2 = (SSeffect – (dfeffect)(MSerror)) / MSerror + SStotal mse <- aov.sum$`Mean Sq`[2] omega.sq <- (aov.sum$`Sum Sq`[1]-aov.sum$Df[1]*mse)/(mse + ssq_total) omega.sq #hedges' g = (M1-M2)/sqrt(MSE) #cohen's d = g * sqrt(N/(N-2)) m.diff <- t.test(df$Rating~df$Condition, var.equal = TRUE) m.diff <- m.diff$estimate[1] - m.diff$estimate[2] m.diff <- unname(m.diff) m.diff hedges <- m.diff/sqrt(mse) hedges cohen <- hedges *sqrt(nrow(df)/(nrow(df)-2)) cohen #cohens_d(df, Rating~Condition, hedges.correction=FALSE) #### Textbook example: Cohens d and Hedges g #### animal <- read_excel("data/animals.xls") animal['GENDER'] <- as.factor(animal$GENDER) aov.sum <- anova(aov(WRONG~GENDER, animal)) aov.sum mse <- aov.sum$`Mean Sq`[2] m.diff <- t.test(animal$WRONG~animal$GENDER, var.equal = TRUE) m.diff <- m.diff$estimate[1] - m.diff$estimate[2] m.diff <- unname(m.diff) m.diff g <- m.diff/sqrt(mse) #### Textbook example #### lncy <- read_excel('data/leniency.xls') lncy['smile'] <- as.factor(lncy$smile) res.aov <- aov(leniency~smile, data = lncy) aov.sum <- anova(res.aov) ssq_total <- aov.sum$`Sum Sq`[1] + aov.sum$`Sum Sq`[2] ssq_total eta_s <- aov.sum$`Sum Sq`[1] / ssq_total eta_s #ω2 = (SSeffect – (dfeffect)(MSerror)) / MSerror + SStotal mse <- aov.sum$`Mean Sq`[2] omega.sq <- (aov.sum$`Sum Sq`[1]-aov.sum$Df[1]*mse)/(mse + ssq_total) omega.sq
# Author: Jeff Hemsley jhemsley at uw dot edu
# Created: Nov 20 2012
#
# Generates a random graph with random edge dates and renders one PNG per
# time step, fading edges/vertices in over a fixed look-back window so the
# frames can be assembled into an animation.

# Load igraph
library(igraph)

# Seed a tiny Erdos-Renyi graph, then grow it to total.nodes with
# preferential attachment (2 out-edges per new node).
start.nodes <- 2
total.nodes <- 500
g <- erdos.renyi.game(start.nodes, 1/2, directed=T)
g <- barabasi.game(total.nodes, start.graph=g, out.pref=T, directed=T,
                   out.seq=rep(2, total.nodes - start.nodes))

# Fix a layout once and store the coordinates on the vertices so every
# frame draws the graph in the same position.
l <- layout.fruchterman.reingold(g)
V(g)$x <- l[,1]
V(g)$y <- l[,2]

# Random hourly timestamps over one week stand in for real event dates.
start.date <- as.POSIXct(strptime('2012-01-01 07:00:00', '%Y-%m-%d %H:%M:%S'))
end.date <- as.POSIXct(strptime('2012-01-07 07:00:00', '%Y-%m-%d %H:%M:%S'))
possible.dates <- seq.POSIXt(start.date, end.date, by="hour")
num.time.steps <- length(possible.dates) # we use later for the loop.

# Assign a date to every edge, sampled with replacement.
num.edges <- ecount(g)
E(g)$date <- sample(possible.dates, num.edges, replace=T)
E(g)$width <- 2

# Initial edge/vertex colour components (0-255) with alpha 0, i.e. fully
# transparent until an element's date comes up in the animation.
E(g)$red <- 255
E(g)$green <- 255
E(g)$blue <- 255
E(g)$alpha <- 0
# Default colour just so the attribute exists before first use.
E(g)$color <- "black"

V(g)$red <- 45
V(g)$green <- 198
V(g)$blue <- 214
V(g)$alpha <- 0
V(g)$color <- "black"

# season to taste
V(g)$size <- 5
V(g)$frame.color <- NA
V(g)$label <- ""

# Fade window: over look.back.default frames an element's alpha ramps from
# 0 (transparent) to 255 (opaque); the newest element is always opaque.
look.back.default <- 12
alpha.vec.default <- round(seq(0, 255, length=look.back.default + 1),0)
alpha.vec.length <- length(alpha.vec.default)

# workhorse loop
for (time.step in 1:num.time.steps) {
  # Clamp the window start during the early frames.
  # BUG FIX: the original tested `look.back < 0`, so at
  # time.step == look.back.default look.back stayed 0 and the fade index
  # became 0:time.step — R indexing with 0 silently selects nothing and
  # misaligns the alpha ramp by one slot. Clamp at 1 instead.
  look.back <- time.step - look.back.default
  if (look.back < 1) {
    look.back <- 1
  }
  date.fade.index <- look.back:time.step
  date.fade.index.length <- length(date.fade.index)

  # We always set the newest edge/vertex alpha last, fully opaque. If the
  # look-back window exceeds the available time steps, drop the oldest
  # (most transparent) entries of the ramp to match.
  alpha.vec <- alpha.vec.default
  if ((alpha.vec.length - date.fade.index.length) > 0) {
    alpha.vec <- alpha.vec[-(1:(alpha.vec.length - date.fade.index.length))]
  }

  # For each look-back step set alpha on the edges/vertices stamped with
  # that date. Some steps have no elements, some have many. Oldest first,
  # so elements appearing more than once in the window keep their newest
  # (most opaque) alpha.
  for (j in seq_along(date.fade.index)) {
    active.edges <- which(E(g)$date == possible.dates[date.fade.index[j]])
    if (length(active.edges) > 0) {
      E(g)[active.edges]$alpha <- alpha.vec[j]
      V(g)[from(E(g)[active.edges])]$alpha <- alpha.vec[j]
      V(g)[to(E(g)[active.edges])]$alpha <- alpha.vec[j]
    }
  }

  # Re-derive the rgb() colours from the updated alphas.
  E(g)$color <- rgb(red=E(g)$red, green=E(g)$green , blue=E(g)$blue,
                    maxColorValue=255, alpha=E(g)$alpha)
  V(g)$color <- rgb(V(g)$red, V(g)$green, V(g)$blue, V(g)$alpha,
                    maxColorValue=255)

  # Zero-padded frame numbers keep files sorted for the encoder.
  # many other R-animation authors use ffmpeg
  out.file.name <- paste("img/frames/graph_",
                         stringr::str_pad(time.step, width = 3, pad = "0"),
                         ".png", sep="")
  png(out.file.name, width=640, height=480)
  par(bg=NA)
  plot.igraph(g, edge.arrow.size=0, edge.arrow.width=0, edge.curved = .5,
              main="")
  dev.off()
}

# Make gif ----------------------------------------------------------------
# (brew install webp)
# loop: 0 Loop infinitely
# d: ms between images
# lossless vs. lossy
system("img2webp -loop 0 -d 500 -lossless *.png -o out.webp")
/R/plot_graph_network.R
no_license
TUHHStartupEngineers/mledays-slides
R
false
false
4,155
r
# Author: Jeff Hemsley jhemsley at uw dot edu # Created: Nov 20 2012 # # File generates random graph with random dates and creates set of pngs for animation #Load igraph library(igraph) # igraph has many nifty ways to generate random graphs. :-) start.nodes <- 2 total.nodes <- 500 g <- erdos.renyi.game(start.nodes, 1/2, directed=T) g <- barabasi.game(total.nodes, start.graph=g, out.pref=T, directed=T, out.seq=rep(2, total.nodes - start.nodes)) # make a layout and set the x & y attributes of the graph vertices l <- layout.fruchterman.reingold(g) V(g)$x <- l[,1] V(g)$y <- l[,2] # since I'm using a random graph, i want to use random dates for this example start.date <- as.POSIXct(strptime('2012-01-01 07:00:00', '%Y-%m-%d %H:%M:%S')) end.date <- as.POSIXct(strptime('2012-01-07 07:00:00', '%Y-%m-%d %H:%M:%S')) possible.dates <- seq.POSIXt(start.date, end.date, by="hour") num.time.steps <- length(possible.dates) # we use later for the loop. # now we need to associate dates with links and I use sample with replace=T num.edges <- ecount(g) E(g)$date <- sample(possible.dates, num.edges, replace=T) E(g)$width <- 2 # these are some initial edge and vertex settings. # Note that I set a value for red, green and blue between 0 and 255, # an alpha, or transparency value of 0 making all objects transparent E(g)$red <- 255 E(g)$green <- 255 E(g)$blue <- 255 E(g)$alpha <- 0 # then I give a default color just so the attribute exists for later. E(g)$color <- "black" V(g)$red <- 45 V(g)$green <- 198 V(g)$blue <- 214 V(g)$alpha <- 0 V(g)$color <- "black" # season to taste V(g)$size <- 5 V(g)$frame.color <- NA V(g)$label <- "" # in this example I am using a look back of 12 frames for the fade out # so, over 12 movie frames the links and vertices get more and more # transparent. 
look.back.default <- 12 alpha.vec.default <- round(seq(0, 255, length=look.back.default + 1),0) alpha.vec.length <- length(alpha.vec.default) # workhorse loop for (time.step in 1:num.time.steps) { # look.back needs to be altered at the early part of the animation look.back <- time.step - look.back.default if (look.back < 0) { look.back <- 1 } date.fade.index <- look.back:time.step date.fade.index.length <- length(date.fade.index) # we always want to set the newest edge/vertex alpha last and we # we always want it to be opaque. But if look.back is greater than # available time steps we need to shorten the alpha vector alpha.vec <- alpha.vec.default if ((alpha.vec.length - date.fade.index.length) > 0) { alpha.vec <- alpha.vec[-(1:(alpha.vec.length - date.fade.index.length))] } # for each look.back time step we set alpha for edges/vertices # with those time stamps. Some time steps may have no links or # vertices, some have many. Do the newest last so that they don't # get down-graded if they show up more than once in the fade out # period for (j in 1:length(date.fade.index)) { active.edges <- which(E(g)$date == possible.dates[date.fade.index[j]]) if (length(active.edges) > 0) { E(g)[active.edges]$alpha <- alpha.vec[j] V(g)[from(E(g)[active.edges])]$alpha <- alpha.vec[j] V(g)[to(E(g)[active.edges])]$alpha <- alpha.vec[j] } } # now make sure all edge/vertext colors are set with whatever their alphas are E(g)$color <- rgb(red=E(g)$red, green=E(g)$green , blue=E(g)$blue, maxColorValue=255, alpha=E(g)$alpha) V(g)$color <- rgb(V(g)$red, V(g)$green, V(g)$blue, V(g)$alpha, maxColorValue=255) # file names should be the same except with an incremented number. 
# many other R-animation authors use ffmpeg out.file.name <- paste("img/frames/graph_", stringr::str_pad(time.step, width = 3, pad = "0"), ".png", sep="") png(out.file.name, width=640, height=480) par(bg=NA) plot.igraph(g, edge.arrow.size=0, edge.arrow.width=0, edge.curved = .5, main="") dev.off() } # Make gif ---------------------------------------------------------------- # (brew install webp) # loop: 0 Loop infinitely # d: ms between images # lossless vs. lossy system("img2webp -loop 0 -d 500 -lossless *.png -o out.webp")
# Tests for pull() and its internal column-locator helper find_var().
context("pull")

# pull() with no column argument should return the *last* column.
test_that("default extracts last var from data frame", {
  df <- data_frame(x = 1:10, y = 1:10)
  expect_equal(pull(df), 1:10)
})

# Same default behaviour against a database-backed (memdb) table.
test_that("default extracts last var from remote source", {
  mf <- memdb_frame(x = 1:10, y = 1:10)
  expect_equal(pull(mf), 1:10)
})

# find_var ----------------------------------------------------------------

# find_var() must reject non-scalar selectors, unknown names, and
# positions outside 1..length(vars).
test_that("errors for bad inputs", {
  expect_error(find_var(letters, letters), "of length 1")
  expect_error(find_var(quote(a), letters), "of length 1")
  expect_error(find_var("aa", letters), "Unknown variable")
  expect_error(find_var(0, letters), "must take a value")
  expect_error(find_var(100, letters), "must take a value")
  # -Inf additionally triggers a coercion warning on the way to the error.
  expect_warning(regexp = "NAs introduced by coercion",
    expect_error(find_var(-Inf, letters), "must take a value")
  )
  expect_error(find_var(NA_integer_, letters), "must take a value")
})
/tests/testthat/test-pull.R
permissive
sunnycd/dplyr
R
false
false
888
r
context("pull") test_that("default extracts last var from data frame", { df <- data_frame(x = 1:10, y = 1:10) expect_equal(pull(df), 1:10) }) test_that("default extracts last var from remote source", { mf <- memdb_frame(x = 1:10, y = 1:10) expect_equal(pull(mf), 1:10) }) # find_var ---------------------------------------------------------------- test_that("errors for bad inputs", { expect_error(find_var(letters, letters), "of length 1") expect_error(find_var(quote(a), letters), "of length 1") expect_error(find_var("aa", letters), "Unknown variable") expect_error(find_var(0, letters), "must take a value") expect_error(find_var(100, letters), "must take a value") expect_warning(regexp = "NAs introduced by coercion", expect_error(find_var(-Inf, letters), "must take a value") ) expect_error(find_var(NA_integer_, letters), "must take a value") })
# Demean the `age` column of a tab-separated demographics file.
#
# Usage: Rscript demean_age.R <demographics.tsv>
# Reads the file named by the first command-line argument, sanity-checks
# that the mean age is plausible, and writes the demeaned ages (one value
# per line) to age_demeaned.tsv in the working directory.
args <- commandArgs(trailingOnly = TRUE)

# FIX: the original passed `head=TRUE`, relying on partial matching of
# read.csv's `header` argument — spell it out.
demographics <- read.csv(file=args[1], header=TRUE, sep="\t")

age <- demographics$age
mean_age <- mean(age)

# Assertion tests: the mean age must lie between 10 and 100,
# otherwise abort before writing any output.
stopifnot(mean_age < 100)
stopifnot(mean_age > 10)

# Carry on if the tests pass :)
# Reuse mean_age instead of recomputing sum(age)/length(age).
demean_age <- age - mean_age

write.table(demean_age, file="age_demeaned.tsv",
            row.names=FALSE, col.names=FALSE, sep="\t")

print("done!")
/demean_age.R
permissive
KirstieJane/NHW16-testing
R
false
false
478
r
args <- commandArgs(trailingOnly = TRUE) demographics <- read.csv(file=args[1], head=TRUE, sep="\t") age <- demographics$age mean_age <- sum(age)/length(age) # Add in the assertion tests # to check that the mean age is # between 100 and 10 stopifnot(mean_age < 100) stopifnot(mean_age > 10) # Carry on if the tests pass :) demean_age <- age - sum(age)/length(age) write.table(demean_age, file="age_demeaned.tsv", row.names=FALSE, col.names=FALSE, sep="\t") print("done!")
# Package test runner: invoked by R CMD check to execute every testthat
# test under tests/testthat/ for the aviationgeoms package.
library(testthat)
library(aviationgeoms)

test_check("aviationgeoms")
/tests/testthat.R
permissive
mitre/aviationgeoms
R
false
false
70
r
library(testthat) library(aviationgeoms) test_check("aviationgeoms")
#***************Reading the DATA*********************************
# "a" must hold the path to the household power consumption file
# (semicolon-separated, "?" marks missing values) before sourcing this.
data<-read.table(a,sep =";",stringsAsFactors=FALSE,header=TRUE,na.strings="?") #Where "a" is the path file

# Keep only the two days of interest and build a POSIXlt timestamp from
# the Date (col 1) and Time (col 2) columns; store it as column "fecha".
data2<-subset(data,Date=="1/2/2007" | Date=="2/2/2007")
data2[,10]<-paste(data2[,1],data2[,2])
fecha<-data2[,10]
fecha<-strptime(fecha,"%d/%m/%Y %H:%M:%S")
data2<-data2[,1:9]
data2<-cbind(data2,fecha)

#***************Ploting the DATA*********************************
png(filename="plot1.png") #This save the image in your Working Directory
hist(data2$Global_active_power,col="red",xlab="Global Active Power (kilowatts)",main="Global Active Power")
dev.off()

# *********** Hard Way ******************************************
# BUG FIX: console output had been pasted into the script as bare text,
# which made the whole file unparseable. It is preserved below as comments.
table(cut(data2$Global_active_power, breaks = seq(0,6,0.5)))
# (0,0.5] (0.5,1] (1,1.5] (1.5,2] (2,2.5] (2.5,3] (3,3.5] (3.5,4] (4,4.5]
#    1234     181     633     276     160     212      69      69      20
# (4.5,5] (5,5.5] (5.5,6]
#      16       6       2

# FIX: axis label typo "Frequecy" -> "Frequency".
barplot(table(cut(data2$Global_active_power, breaks = seq(0,6,0.5)) ),ylab="Frequency",space=0,col="red")
/Project 1/plot1.R
no_license
andraderdz/Exploratory-Data-Analysis
R
false
false
1,040
r
#***************Reading the DATA********************************* data<-read.table(a,sep =";",stringsAsFactors=F,header=T,na.strings="?") #Where "a" is the path file data2<-subset(data,Date=="1/2/2007" | Date=="2/2/2007") data2[,10]<-paste(data2[,1],data2[,2]) fecha<-data2[,10] fecha<-strptime(fecha,"%d/%m/%Y %H:%M:%S") data2<-data2[,1:9] data2<-cbind(data2,fecha) #***************Ploting the DATA********************************* png(filename="plot1.png") #This save the image in your Working Directory hist(data2$Global_active_power,col="red",xlab="Global Active Power (kilowatts)",main="Global Active Power") dev.off() ***********HArd Way ***************************** table(cut(data2$Global_active_power, breaks = seq(0,6,0.5))) (0,0.5] (0.5,1] (1,1.5] (1.5,2] (2,2.5] (2.5,3] (3,3.5] (3.5,4] (4,4.5] (4.5,5] (5,5.5] (5.5,6] 1234 181 633 276 160 212 69 69 20 16 6 2 barplot(table(cut(data2$Global_active_power, breaks = seq(0,6,0.5)) ),ylab="Frequecy",space=0,col="red")
#' Unpack an archive into `target` and return the path of the top-level
#' directory it creates.
#'
#' The format is chosen from the file extension: .zip, .tar, .tar.gz/.tgz,
#' or .tar.bz2/.tbz. Any other extension raises an error. The archive is
#' listed a second time (after extraction) to discover the name of its
#' first entry, whose directory part is taken as the output directory.
decompress <- function(src, target = tempdir()) {
  stopifnot(file.exists(src))

  if (grepl("\\.zip$", src)) {
    unzip(src, exdir = target, unzip = getOption("unzip"))
    first_entry <- as.character(unzip(src, list = TRUE)$Name[1])
    return(file.path(target, getdir(first_entry)))
  }

  if (grepl("\\.tar$", src)) {
    untar(src, exdir = target)
    return(file.path(target, getdir(untar(src, list = TRUE)[1])))
  }

  if (grepl("\\.(tar\\.gz|tgz)$", src)) {
    untar(src, exdir = target, compressed = "gzip")
    return(file.path(target, getdir(untar(src, compressed = "gzip", list = TRUE)[1])))
  }

  if (grepl("\\.(tar\\.bz2|tbz)$", src)) {
    untar(src, exdir = target, compressed = "bzip2")
    return(file.path(target, getdir(untar(src, compressed = "bzip2", list = TRUE)[1])))
  }

  ext <- gsub("^[^.]*\\.", "", src)
  stop("Don't know how to decompress files with extension ", ext,
       call. = FALSE)
}

#' Directory part of a path: everything before the last slash.
#' getdir("path/to/file") returns "path/to";
#' getdir("path/to/dir/") returns "path/to/dir".
getdir <- function(path) {
  sub("/[^/]*$", "", path)
}
/R/decompress.r
no_license
praveer13/devtools
R
false
false
1,079
r
decompress <- function(src, target = tempdir()) { stopifnot(file.exists(src)) if (grepl("\\.zip$", src)) { unzip(src, exdir = target, unzip = getOption("unzip")) outdir <- getdir(as.character(unzip(src, list = TRUE)$Name[1])) } else if (grepl("\\.tar$", src)) { untar(src, exdir = target) outdir <- getdir(untar(src, list = TRUE)[1]) } else if (grepl("\\.(tar\\.gz|tgz)$", src)) { untar(src, exdir = target, compressed = "gzip") outdir <- getdir(untar(src, compressed = "gzip", list = TRUE)[1]) } else if (grepl("\\.(tar\\.bz2|tbz)$", src)) { untar(src, exdir = target, compressed = "bzip2") outdir <- getdir(untar(src, compressed = "bzip2", list = TRUE)[1]) } else { ext <- gsub("^[^.]*\\.", "", src) stop("Don't know how to decompress files with extension ", ext, call. = FALSE) } file.path(target, outdir) } # Returns everything before the last slash in a filename # getdir("path/to/file") returns "path/to" # getdir("path/to/dir/") returns "path/to/dir" getdir <- function(path) sub("/[^/]*$", "", path)
#!/usr/bin/env Rscript
# Normalize a raw single-cell count matrix with SCnorm and produce the
# count-depth diagnostic plots plus a saved workspace.
library(SCnorm)

# Raw counts: genes in rows (row names in column 1), cells in columns.
counts <- read.table("rawcount.txt",row.names = 1, stringsAsFactors = FALSE)
counts <- as.matrix(counts)
ExampleSimSCData1 <- SingleCellExperiment::SingleCellExperiment(assays = list('counts' = counts))

# One condition label per cell, in the same order as the count columns.
Conditions <- read.csv("condition.csv",header=FALSE)$V1

# Run SCnorm; its per-K evaluation plots go into this PDF (2x2 panels).
pdf("MyNormalizedData_k_evaluation.pdf")
par(mfrow=c(2,2))
DataNorm <- SCnorm(Data=ExampleSimSCData1,Conditions=Conditions,PrintProgressPlots = TRUE,FilterCellNum = 10,NCores=3, useZerosToScale=TRUE)
dev.off()

GenesNotNormalized <- results(DataNorm, type="GenesFilteredOut")
ScaleFactors <- results(DataNorm, type="ScaleFactors")
# BUG FIX: `NormalizedData` was passed to plotCountDepth() below without
# ever being defined, aborting the script. Extract the normalized matrix
# from the SCnorm result object first.
NormalizedData <- results(DataNorm, type="NormalizedData")

# Count-depth relationship of the normalized data (should be ~flat).
pdf("check_exampleData_count-depth_evaluation3.pdf", height=5, width=7)
countDeptEst.SCNORM <- plotCountDepth(Data = ExampleSimSCData1,
                                      NormalizedData = NormalizedData,
                                      Conditions = Conditions,
                                      FilterCellProportion = .1, NCores=3)
dev.off()

save.image("scnorm.RData")
/code/SCnorm.R
permissive
lijing28101/yeast_supplementary
R
false
false
969
r
#!/usr/bin/env Rscript library(SCnorm) counts <- read.table("rawcount.txt",row.names = 1, stringsAsFactors = FALSE) counts <- as.matrix(counts) ExampleSimSCData1 <- SingleCellExperiment::SingleCellExperiment(assays = list('counts' = counts)) Conditions <- read.csv("condition.csv",header=F)$V1 pdf("MyNormalizedData_k_evaluation.pdf") par(mfrow=c(2,2)) DataNorm <- SCnorm(Data=ExampleSimSCData1,Conditions=Conditions,PrintProgressPlots = TRUE,FilterCellNum = 10,NCores=3, useZerosToScale=TRUE) dev.off() GenesNotNormalized <- results(DataNorm, type="GenesFilteredOut") ScaleFactors <- results(DataNorm, type="ScaleFactors") pdf("check_exampleData_count-depth_evaluation3.pdf", height=5, width=7) countDeptEst.SCNORM <- plotCountDepth(Data = ExampleSimSCData1, NormalizedData = NormalizedData, Conditions = Conditions, FilterCellProportion = .1, NCores=3) dev.off() save.image("scnorm.RData")
#' Plot circles
#'
#' Plots any numbers of circles using base R graphics, starting by plotting the
#' first circle on the bottom and going up.
#'
#' draw_circles uses \code{\link[graphics]{polygon}} to draw the circles,
#' which means that they are technically just circle-shaped polygons. The
#' function is vectorized and can handle any number of circles.
#'
#' @param x x coordinates for the circles' centers
#' @param y y coordinates for the circles' centers
#' @param r Radii of the circles
#' @param vertices The number of vertices used for drawing the circumference of
#'   each circle.
#' @param polygon_args List of arguments to pass to
#'   \code{\link[graphics]{polygon}} to adjust borders, fill, shadings, etc.
#' @param add If \code{FALSE} initializes a new plotting frame with
#'   \code{\link[graphics]{plot}}.
#' @param asp Controls the aspect ratio of the plot. Will not produce circles
#'   if set to anything other than 1.
#' @param \dots Arguments to pass to \code{\link[graphics]{plot}}
#' @return Plots circles either in the current plotting window or on a new
#'   plotting window.
#' @seealso \code{\link[graphics]{polygon}}, \code{\link[graphics]{plot}}
#' @examples
#' x <- runif(3)
#' y <- runif(3)
#' r <- runif(3, 0.2, 0.6)
#'
#' draw_circles(x, y, r, polygon_args = list(border = "transparent",
#'                                           col = c("goldenrod1",
#'                                                   "steelblue1",
#'                                                   "black")))
#' @export
#' @import assertthat
draw_circles <- function(x, y, r, polygon_args = list(), vertices = 500,
                         add = FALSE, asp = 1, ...)
{
  # Coordinates and radii must be equal-length numeric vectors with
  # non-negative radii; vertices a single count; add a single flag.
  assert_that(
    is.numeric(x),
    is.numeric(y),
    is.numeric(r),
    is.count(vertices),
    length(vertices) == 1,
    is.flag(add),
    length(asp) == 1,
    is.numeric(asp),
    all(r >= 0),
    is.list(polygon_args),
    length(x) == length(y),
    length(r) == length(x),
    length(r) == length(y)
  )

  # Angles around the unit circle.
  # FIX: the original wrote seq(..., length = vertices), relying on partial
  # matching of `length.out`; spell the argument out.
  g <- seq(0, 2 * pi, length.out = vertices)

  # One outline per circle, each terminated by NA so polygon() treats the
  # runs as separate polygons. FIX: collect pieces in preallocated lists
  # instead of growing vectors with c() in the loop (was O(n^2) copying).
  x_pieces <- vector("list", length(x))
  y_pieces <- vector("list", length(x))
  for (i in seq_along(x)) {
    x_pieces[[i]] <- c(r[i] * cos(g) + x[i], NA)
    y_pieces[[i]] <- c(r[i] * sin(g) + y[i], NA)
  }
  # as.double() keeps the zero-circle case a double(0) like the original.
  x_coords <- as.double(unlist(x_pieces, use.names = FALSE))
  y_coords <- as.double(unlist(y_pieces, use.names = FALSE))

  # Open a fresh plot sized to the circles unless drawing on an existing one.
  if (!add) {
    graphics::plot(NULL,
                   xlim = range(x_coords, na.rm = TRUE),
                   ylim = range(y_coords, na.rm = TRUE),
                   asp = asp,
                   ...)
  }

  polygon_args[["x"]] <- x_coords
  polygon_args[["y"]] <- y_coords

  do.call(graphics::polygon, polygon_args)
}
/R/plotting.R
permissive
jolars/euclidr
R
false
false
2,536
r
#' Plot circles #' #' Plots any numbers of circles using base R graphics, starting by plotting the #' first circle on the bottom and going up. #' #' draw_circles uses \code{\link[graphics]{polygon}} to draw the circles, #' which means that they are technically just circle-shaped polygons. The #' function is vectorized and can handle any number of circles. #' #' @param x x Coordinates for the circles' centers #' @param y y coordinates for the circles' centers #' @param r Radiuses of the circles #' @param vertices The number of vertices used for drawing the circumference of #' each circle. #' @param polygon_args List of arguments to pass to #' \code{\link[graphics]{polygon}} to adjust borders, fill, shadings, etc. #' @param add If \code{FALSE} initializes a new plotting frame with #' \code{\link[graphics]{plot}}. #' @param asp Controls the aspect ratio of the plot. Will not produce circles #' if it ses to anything other than 1. #' @param \dots Arguments to pass to \code{\link[graphics]{plot}} #' @return Plots circles either in the current plotting window or on a new #' plotting window. #' @seealso \code{\link[graphics]{polygon}}, \code{\link[graphics]{plot}} #' @examples #' x <- runif(3) #' y <- runif(3) #' r <- runif(3, 0.2, 0.6) #' #' draw_circles(x, y, r, polygon_args = list(border = "transparent", #' col = c("goldenrod1", #' "steelblue1", #' "black"))) #' @export #' @import assertthat draw_circles <- function(x, y, r, polygon_args = list(), vertices = 500, add = FALSE, asp = 1, ...) 
{ assert_that( is.numeric(x), is.numeric(y), is.numeric(r), is.count(vertices), length(vertices) == 1, is.flag(add), length(asp) == 1, is.numeric(asp), all(r >= 0), is.list(polygon_args), length(x) == length(y), length(r) == length(x), length(r) == length(y) ) x_coords <- double(0L) y_coords <- double(0L) g <- seq(0L, 2L * pi, length = vertices) for (i in seq_along(x)) { x_coords <- c(x_coords, r[i] * cos(g) + x[i], NA) y_coords <- c(y_coords, r[i] * sin(g) + y[i], NA) } if (!add) { graphics::plot(NULL, xlim = range(x_coords, na.rm = TRUE), ylim = range(y_coords, na.rm = TRUE), asp = asp, ...) } polygon_args[["x"]] <- x_coords polygon_args[["y"]] <- y_coords do.call(graphics::polygon, polygon_args) }
# Auto-generated SEMOSS temp script. Evaluates inside the environment
# `a9491003e06314c47b265c63a98d610b9` (created elsewhere by the SEMOSS
# runtime — TODO confirm), sets ROOT to the app's version directory, then
# clears the variables visible to ls() in that evaluation frame.
# NOTE(review): rm(list=ls()) removes ROOT immediately after assigning it —
# presumably intentional cleanup of the temp frame; verify.
with(a9491003e06314c47b265c63a98d610b9, {ROOT <- 'C:/tools/111719/SEMOSS_v4.0.0_x64/semosshome/db/Atadata2__3b3e4a3b-d382-4e98-9950-9b4e8b308c1c/version/80bb2a25-ac5d-47d0-abfc-b3f3811f0936';rm(list=ls())});
/80bb2a25-ac5d-47d0-abfc-b3f3811f0936/R/Temp/awm4VyaTNkKLy.R
no_license
ayanmanna8/test
R
false
false
207
r
with(a9491003e06314c47b265c63a98d610b9, {ROOT <- 'C:/tools/111719/SEMOSS_v4.0.0_x64/semosshome/db/Atadata2__3b3e4a3b-d382-4e98-9950-9b4e8b308c1c/version/80bb2a25-ac5d-47d0-abfc-b3f3811f0936';rm(list=ls())});
# Extracted example code for ashr::ashci()
### Name: ashci
### Title: Credible Interval Computation for the ash object
### Aliases: ashci

library(ashr)

### ** Examples

# Simulate 40 effects: 20 true nulls followed by 20 standard-normal effects,
# each observed with its own (random) standard error.
beta = c(rep(0,20),rnorm(20))
sebetahat = abs(rnorm(40,0,1))
betahat = rnorm(40,beta,sebetahat)

# Fit adaptive shrinkage, then compute 95% credible intervals three ways:
# for all effects, for a chosen subset of indices, and only for effects
# whose lfsr falls below a threshold.
beta.ash = ash(betahat, sebetahat)

CImatrix=ashci(beta.ash,level=0.95)

CImatrix1=ashci(beta.ash,level=0.95,betaindex=c(1,2,5))
CImatrix2=ashci(beta.ash,level=0.95,lfsr_threshold=0.1)
/data/genthat_extracted_code/ashr/examples/ashci.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
413
r
library(ashr) ### Name: ashci ### Title: Credible Interval Computation for the ash object ### Aliases: ashci ### ** Examples beta = c(rep(0,20),rnorm(20)) sebetahat = abs(rnorm(40,0,1)) betahat = rnorm(40,beta,sebetahat) beta.ash = ash(betahat, sebetahat) CImatrix=ashci(beta.ash,level=0.95) CImatrix1=ashci(beta.ash,level=0.95,betaindex=c(1,2,5)) CImatrix2=ashci(beta.ash,level=0.95,lfsr_threshold=0.1)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/is_wholenumber.R
\name{is_wholenumber}
\alias{is_wholenumber}
\title{Logical to test if a number is a whole number (out to machine precision)}
\usage{
is_wholenumber(x, tol = .Machine$double.eps^0.5)
}
\arguments{
\item{x}{a number}

\item{tol}{numeric tolerance for the comparison; defaults to the square root of the machine epsilon}
}
\description{
Warning: the result is logical, not numeric, so do not do any further numeric calculations with it
}
\examples{
is_wholenumber(7)
is_wholenumber(7.00)
is_wholenumber(7.001)
is_wholenumber(7.000000001)
}
/man/is_wholenumber.Rd
no_license
ckhead/HeadR
R
false
true
504
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/is_wholenumber.R \name{is_wholenumber} \alias{is_wholenumber} \title{Logical to test if a number is a whole number (out to machine precision)} \usage{ is_wholenumber(x, tol = .Machine$double.eps^0.5) } \arguments{ \item{x}{a number} } \description{ Warning the result is not numeric anymore, so do not do any calculations } \examples{ is_wholenumber(7) is_wholenumber(7.00) is_wholenumber(7.001) is_wholenumber(7.000000001) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/helper-functions.R \name{check_alternate} \alias{check_alternate} \title{check if alternative edge exists} \usage{ check_alternate(sub_df, all_df) } \arguments{ \item{sub_df}{dataframe} \item{all_df}{dataframe} } \value{ Dataframe with assigned edges } \description{ check if alternative edge exists }
/man/check_alternate.Rd
no_license
kzintas/TreeSE
R
false
true
381
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/helper-functions.R \name{check_alternate} \alias{check_alternate} \title{check if alternative edge exists} \usage{ check_alternate(sub_df, all_df) } \arguments{ \item{sub_df}{dataframe} \item{all_df}{dataframe} } \value{ Dataframe with assigned edges } \description{ check if alternative edge exists }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/DBI.R \docType{methods} \name{dbDisconnect,DatabaseConnectorConnection-method} \alias{dbDisconnect,DatabaseConnectorConnection-method} \title{Disconnect (close) a connection} \usage{ \S4method{dbDisconnect}{DatabaseConnectorConnection}(conn) } \arguments{ \item{conn}{A \linkS4class{DBIConnection} object, as returned by \code{\link[=dbConnect]{dbConnect()}}.} } \value{ \code{dbDisconnect()} returns \code{TRUE}, invisibly. } \description{ This closes the connection, discards all pending work, and frees resources (e.g., memory, sockets). \Sexpr[results=rd,stage=render]{DBI:::methods_as_rd("dbDisconnect")} } \seealso{ Other DBIConnection generics: \code{\link{DBIConnection-class}}, \code{\link{dbAppendTable}}, \code{\link{dbCreateTable}}, \code{\link{dbDataType}}, \code{\link{dbExecute}}, \code{\link{dbExistsTable}}, \code{\link{dbGetException}}, \code{\link{dbGetInfo}}, \code{\link{dbGetQuery}}, \code{\link{dbIsReadOnly}}, \code{\link{dbIsValid}}, \code{\link{dbListFields}}, \code{\link{dbListObjects}}, \code{\link{dbListResults}}, \code{\link{dbListTables}}, \code{\link{dbReadTable}}, \code{\link{dbRemoveTable}}, \code{\link{dbSendQuery}}, \code{\link{dbSendStatement}}, \code{\link{dbWriteTable}} }
/man/dbDisconnect-DatabaseConnectorConnection-method.Rd
permissive
InfoClinika/DatabaseConnector
R
false
true
1,315
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/DBI.R \docType{methods} \name{dbDisconnect,DatabaseConnectorConnection-method} \alias{dbDisconnect,DatabaseConnectorConnection-method} \title{Disconnect (close) a connection} \usage{ \S4method{dbDisconnect}{DatabaseConnectorConnection}(conn) } \arguments{ \item{conn}{A \linkS4class{DBIConnection} object, as returned by \code{\link[=dbConnect]{dbConnect()}}.} } \value{ \code{dbDisconnect()} returns \code{TRUE}, invisibly. } \description{ This closes the connection, discards all pending work, and frees resources (e.g., memory, sockets). \Sexpr[results=rd,stage=render]{DBI:::methods_as_rd("dbDisconnect")} } \seealso{ Other DBIConnection generics: \code{\link{DBIConnection-class}}, \code{\link{dbAppendTable}}, \code{\link{dbCreateTable}}, \code{\link{dbDataType}}, \code{\link{dbExecute}}, \code{\link{dbExistsTable}}, \code{\link{dbGetException}}, \code{\link{dbGetInfo}}, \code{\link{dbGetQuery}}, \code{\link{dbIsReadOnly}}, \code{\link{dbIsValid}}, \code{\link{dbListFields}}, \code{\link{dbListObjects}}, \code{\link{dbListResults}}, \code{\link{dbListTables}}, \code{\link{dbReadTable}}, \code{\link{dbRemoveTable}}, \code{\link{dbSendQuery}}, \code{\link{dbSendStatement}}, \code{\link{dbWriteTable}} }
#' Add Circulation Entry
#'
#' Appends a single circulation record (borrow/return or write-off) to the
#' \code{circulation} table, timestamped with the current system time.
#'
#' @template db
#'
#' @param op_type 1: Borrow and Return. 2: Write-off and other quantity changes
#'
#' @family circulation
#'
#' @export
db_add_circulation <- function(db, user_id, subtype_id, quantity, op_type) {
  # Reject anything other than the two documented operation types.
  stopifnot(op_type %in% 1:2)
  # time is stored as text; borrow_summary() below takes max() over these
  # strings, which works because the format sorts chronologically.
  entry <- tibble::tibble(
    user_id = user_id,
    subtype_id = subtype_id,
    quantity = quantity,
    time = as.character(Sys.time()),
    op_type = op_type
  )
  DBI::dbAppendTable(
    db,
    "circulation",
    entry
  )
}

#' Get Circulation Table
#'
#' Returns the full circulation log joined with user, subtype and type
#' metadata (names, \code{removed} flags and the type's rowid), one row per
#' circulation entry.
#'
#' @template db
#'
#' @family circulation
#'
#' @export
db_get_circulation_table <- function(db) {
  DBI::dbGetQuery(
    db,
    " SELECT circulation.rowid AS rowid, circulation.user_id AS user_id, circulation.subtype_id AS subtype_id, circulation.quantity AS quantity, circulation.time AS time, circulation.op_type AS op_type, user.removed AS user_removed, user.name AS user_name, subtype.subtype_name AS subtype_name, subtype.removed AS subtype_removed, type.type_name AS type_name, type.removed AS type_removed, type.rowid AS type_id FROM circulation INNER JOIN user ON circulation.user_id = user.rowid INNER JOIN subtype ON circulation.subtype_id = subtype.rowid INNER JOIN type ON subtype.type_id = type.rowid "
  )
}

#' Get Borrowed Quantity
#'
#' Sums the currently borrowed quantity (op_type 1) for each given subtype id.
#' An NA sum (no matching rows) is mapped to 0.
#'
#' @template db
#'
#' @family circulation
#'
#' @export
db_get_borrowed_quantity <- function(db, subtype_id) {
  # NOTE(review): subtype_id can be a vector (db_get_available_summary passes
  # subtype_ids through here); params = list(vector) presumably makes DBI run
  # the statement once per element, giving one SUM row per id -- confirm
  # against the DBI backend in use. Hence the vectorised ifelse() rather than
  # a scalar if().
  borrowed <- DBI::dbGetQuery(
    db,
    "SELECT SUM(quantity) AS borrowed FROM circulation WHERE subtype_id = ?
    AND op_type = 1",
    params = list(subtype_id)
  )$borrowed
  ifelse(is.na(borrowed), 0, borrowed)
}

#' Get Available Quantity
#'
#' Available quantity per subtype: the subtype's maximum quantity minus what
#' is currently borrowed. Vectorised over \code{subtype_id} like its helpers.
#'
#' @template db
#'
#' @family circulation
#'
#' @export
db_get_available_quantity <- function(db, subtype_id) {
  max_quantity <- db_get_subtype_max_quantity(db, subtype_id)
  borrowed_quantity <- db_get_borrowed_quantity(db, subtype_id)
  max_quantity - borrowed_quantity
}

#' Get Borrowed Quantity From User
#'
#' Quantity of one subtype currently borrowed by one user (NA sum -> 0).
#'
#' @template db
#'
#' @family circulation
#'
#' @export
db_get_borrowed_quantity_by_user_id <- function(db, user_id, subtype_id) {
  borrowed <- DBI::dbGetQuery(
    db,
    "SELECT SUM(quantity) AS borrowed FROM circulation WHERE user_id = ? AND subtype_id = ? AND op_type = 1",
    params = list(user_id, subtype_id)
  )$borrowed
  # Kept as ifelse() for symmetry with db_get_borrowed_quantity; also safe if
  # callers ever pass vectors here.
  ifelse(is.na(borrowed), 0, borrowed)
}

#' Get Total Borrowed Quantity From user
#'
#' Total quantity (across all subtypes) currently borrowed by one user.
#'
#' @template db
#'
#' @family circulation
#'
#' @export
db_get_total_borrowed_quantity_by_user_id <- function(db, user_id) {
  borrowed <- DBI::dbGetQuery(
    db,
    "SELECT SUM(quantity) AS borrowed FROM circulation WHERE user_id = ? AND op_type = 1",
    params = list(user_id)
  )$borrowed
  # Scalar if/else here: a single user_id yields a single SUM row.
  if (is.na(borrowed)) 0 else borrowed
}

#' Get Borrow Summary
#'
#' Net borrowed quantity and most recent activity time per subtype, across
#' all users (op_type 1 rows only; see borrow_summary()).
#'
#' @template db
#'
#' @family circulation
#'
#' @export
db_get_borrow_summary <- function(db) {
  DBI::dbGetQuery(
    db,
    "SELECT subtype_id, quantity, time FROM circulation WHERE op_type = 1"
  ) %>%
    borrow_summary(sym("subtype_id"))
}

#' Get Borrow Summary By User ID
#'
#' Per-subtype borrow summary restricted to a single user.
#'
#' @template db
#'
#' @family circulation
#'
#' @export
db_get_borrow_summary_by_user_id <- function(db, user_id) {
  DBI::dbGetQuery(
    db,
    "SELECT subtype_id, quantity, time FROM circulation WHERE user_id = ? AND op_type = 1",
    params = list(user_id)
  ) %>%
    borrow_summary(sym("subtype_id"))
}

#' Get Borrow Summary By Subtype ID
#'
#' Per-user borrow summary restricted to a single subtype.
#'
#' @template db
#'
#' @family circulation
#'
#' @export
db_get_borrow_summary_by_subtype_id <- function(db, subtype_id) {
  DBI::dbGetQuery(
    db,
    "SELECT user_id, quantity, time FROM circulation WHERE subtype_id = ?
    AND op_type = 1",
    params = list(subtype_id)
  ) %>%
    borrow_summary(sym("user_id"))
}

# Shared aggregation for the db_get_borrow_summary_* functions: groups the
# raw op_type-1 rows by the given column (a symbol, spliced with !!), sums
# the quantities, keeps the latest time string (suppressWarnings guards
# max() when a group's times are empty/all NA), and drops groups whose net
# quantity is not positive (fully returned).
borrow_summary <- function(tbl, group_by) {
  tbl %>%
    dplyr::group_by(!!group_by) %>%
    dplyr::summarise(
      quantity = sum(quantity),
      time = suppressWarnings(max(time, na.rm = TRUE)),
      .groups = "drop"
    ) %>%
    dplyr::filter(quantity > 0)
}

#' Get Available Summary
#'
#' One-row-per-subtype availability overview for a type: available, maximum
#' and critical quantities. All reads run inside a single transaction so the
#' figures are mutually consistent.
#'
#' @template db
#'
#' @family circulation
#'
#' @export
db_get_available_summary <- function(db, type_id) {
  DBI::dbWithTransaction(
    db,
    {
      subtype_ids <- db_get_subtypes_by_type_id(db, type_id)
      # The db_get_* helpers are vectorised over subtype ids (see
      # db_get_borrowed_quantity), so each column is built in one call.
      tibble::tibble(
        subtype_id = subtype_ids,
        quantity = db_get_available_quantity(db, subtype_ids),
        max_quantity = db_get_subtype_max_quantity(db, subtype_ids),
        critical_quantity = db_get_critical_quantity(db, subtype_ids)
      )
    }
  )
}
/db/func/circulation.R
no_license
PFA-WebApp/App
R
false
false
4,619
r
#' Add Circulation Entry #' #' @template db #' #' @param op_type 1: Borrow and Return. 2: Write-off and other quantity changes #' #' @family circulation #' #' @export db_add_circulation <- function(db, user_id, subtype_id, quantity, op_type) { stopifnot(op_type %in% 1:2) entry <- tibble::tibble( user_id = user_id, subtype_id = subtype_id, quantity = quantity, time = as.character(Sys.time()), op_type = op_type ) DBI::dbAppendTable( db, "circulation", entry ) } #' Get Circulation Table #' #' @template db #' #' @family circulation #' #' @export db_get_circulation_table <- function(db) { DBI::dbGetQuery( db, " SELECT circulation.rowid AS rowid, circulation.user_id AS user_id, circulation.subtype_id AS subtype_id, circulation.quantity AS quantity, circulation.time AS time, circulation.op_type AS op_type, user.removed AS user_removed, user.name AS user_name, subtype.subtype_name AS subtype_name, subtype.removed AS subtype_removed, type.type_name AS type_name, type.removed AS type_removed, type.rowid AS type_id FROM circulation INNER JOIN user ON circulation.user_id = user.rowid INNER JOIN subtype ON circulation.subtype_id = subtype.rowid INNER JOIN type ON subtype.type_id = type.rowid " ) } #' Get Borrowed Quantity #' #' @template db #' #' @family circulation #' #' @export db_get_borrowed_quantity <- function(db, subtype_id) { borrowed <- DBI::dbGetQuery( db, "SELECT SUM(quantity) AS borrowed FROM circulation WHERE subtype_id = ? 
AND op_type = 1", params = list(subtype_id) )$borrowed ifelse(is.na(borrowed), 0, borrowed) } #' Get Available Quantity #' #' @template db #' #' @family circulation #' #' @export db_get_available_quantity <- function(db, subtype_id) { max_quantity <- db_get_subtype_max_quantity(db, subtype_id) borrowed_quantity <- db_get_borrowed_quantity(db, subtype_id) max_quantity - borrowed_quantity } #' Get Borrowed Quantity From User #' #' @template db #' #' @family circulation #' #' @export db_get_borrowed_quantity_by_user_id <- function(db, user_id, subtype_id) { borrowed <- DBI::dbGetQuery( db, "SELECT SUM(quantity) AS borrowed FROM circulation WHERE user_id = ? AND subtype_id = ? AND op_type = 1", params = list(user_id, subtype_id) )$borrowed ifelse(is.na(borrowed), 0, borrowed) } #' Get Total Borrowed Quantity From user #' #' @template db #' #' @family circulation #' #' @export db_get_total_borrowed_quantity_by_user_id <- function(db, user_id) { borrowed <- DBI::dbGetQuery( db, "SELECT SUM(quantity) AS borrowed FROM circulation WHERE user_id = ? AND op_type = 1", params = list(user_id) )$borrowed if (is.na(borrowed)) 0 else borrowed } #' Get Borrow Summary #' #' @template db #' #' @family circulation #' #' @export db_get_borrow_summary <- function(db) { DBI::dbGetQuery( db, "SELECT subtype_id, quantity, time FROM circulation WHERE op_type = 1" ) %>% borrow_summary(sym("subtype_id")) } #' Get Borrow Summary By User ID #' #' @template db #' #' @family circulation #' #' @export db_get_borrow_summary_by_user_id <- function(db, user_id) { DBI::dbGetQuery( db, "SELECT subtype_id, quantity, time FROM circulation WHERE user_id = ? AND op_type = 1", params = list(user_id) ) %>% borrow_summary(sym("subtype_id")) } #' Get Borrow Summary By Subtype ID #' #' @template db #' #' @family circulation #' #' @export db_get_borrow_summary_by_subtype_id <- function(db, subtype_id) { DBI::dbGetQuery( db, "SELECT user_id, quantity, time FROM circulation WHERE subtype_id = ? 
AND op_type = 1", params = list(subtype_id) ) %>% borrow_summary(sym("user_id")) } borrow_summary <- function(tbl, group_by) { tbl %>% dplyr::group_by(!!group_by) %>% dplyr::summarise( quantity = sum(quantity), time = suppressWarnings(max(time, na.rm = TRUE)), .groups = "drop" ) %>% dplyr::filter(quantity > 0) } #' Get Available Summary #' #' @template db #' #' @family circulation #' #' @export db_get_available_summary <- function(db, type_id) { DBI::dbWithTransaction( db, { subtype_ids <- db_get_subtypes_by_type_id(db, type_id) tibble::tibble( subtype_id = subtype_ids, quantity = db_get_available_quantity(db, subtype_ids), max_quantity = db_get_subtype_max_quantity(db, subtype_ids), critical_quantity = db_get_critical_quantity(db, subtype_ids) ) } ) }
\name{genesbymicroarrpagetbrucei} \alias{GenesByMicroArrPaGETbrucei} \title{Identify <i>T.brucei</i> genes based on fold change expression difference between a "comparison" lifecycle stage and a reference lifecycle.} \description{Find genes based on fold change expression difference between a "comparison" lifecycle stage and a reference lifecycle. <br><br>Whole genome comparison of RNA levels for both protein coding genes and structural RNAs in five different life cycle stages: in vivo slender bloodstream form, in vivo stumpy bloodstream form, cultured bloodstream form, log-phase procyclic culture form and stationary-phase procyclic culture form. RNA from three independent biological replicates from five different life cycle stages were hybridized to Nimblegen arrays (Madison,WI USA) that contained 8 probes per open reading frame and 3 probes per structural RNA spotted three times per array. <b>For this experiment, in the cases where the probe set mapped to near-identical genes, data was assigned to a single representative gene. (see Jensen et al. PMID: 19840382.) </b>} \arguments{ \item{life_cyc_brucei_ref}{Choose a life cycle stage} \item{life_cyc_brucei_cmp}{Choose a life cycle stage} \item{fold_change}{Enter a non-negative number. NOTE: Fold change is reported in the summary as positive numbers for up-regulated genes and negative numbers for down-regulated genes} \item{regulated_dir}{For ConditionA vs. ConditionB, select up-regulated for genes where ConditionA > ConditionB and select down-regulated for genes where ConditionB > ConditionA.} \item{confidence}{PaGE reports 1 - False Discovery Rate (FDR), which is called the "confidence" for the predictions. The FDR of a set of predictions is the expected percent of false predictions in the set of predictions. 
For example, if the algorithm returns 100 genes with a false discovery rate of .3 (confidence=0.7) then we should expect 70 of them to be correct.}
  \item{protein_coding_only}{Should only protein coding genes be returned?}
  \item{o-fields}{Single valued attributes of the feature. Provide one or more values. Use comma as a delimiter.}
  \item{o-tables}{Multi-valued attributes of the feature. Provide one or more values. Use comma as a delimiter.}
  \item{.convert}{a logical value or a function that controls how the result of the method is returned. If this is a function, the character string or raw vector is passed to this function and it converts it appropriately. If this is a logical value and \code{TRUE}, then we attempt to convert the result based on its Content-Type returned by the Web server. If this is \code{FALSE}, the value from the Web server is returned as is.}
  \item{.url}{the URL for the Web request. This defaults to the correct value, but can be specified by the caller if the method is available at a different URL, e.g. locally or in a mirror server.}
  \item{.json}{a logical value controlling whether to use the JSON or the XML version of the method}}
\value{text/xml text/plain}
\author{}
/man/genesbymicroarrpagetbrucei.Rd
no_license
duncantl/REuPathDB
R
false
false
3,007
rd
\name{genesbymicroarrpagetbrucei} \alias{GenesByMicroArrPaGETbrucei} \title{Identify <i>T.brucei</i> genes based on fold change expression difference between a "comparison" lifecycle stage and a reference lifecycle.} \description{Find genes based on fold change expression difference between a "comparison" lifecycle stage and a reference lifecycle. <br><br>Whole genome comparison of RNA levels for both protein coding genes and structural RNAs in five different life cycle stages: in vivo slender bloodstream form, in vivo stumpy bloodstream form, cultured bloodstream form, log-phase procyclic culture form and stationary-phase procyclic culture form. RNA from three independent biological replicates from five different life cycle stages were hybridized to Nimblegen arrays (Madison,WI USA) that contained 8 probes per open reading frame and 3 probes per structural RNA spotted three times per array. <b>For this experiment, in the cases where the probe set mapped to near-identical genes, data was assigned to a single representative gene. (see Jensen et al. PMID: 19840382.) </b>} \arguments{ \item{life_cyc_brucei_ref}{Choose a life cycle stage} \item{life_cyc_brucei_cmp}{Choose a life cycle stage} \item{fold_change}{Enter a non-negative number. NOTE: Fold change is reported in the summary as positive numbers for up-regulated genes and negative numbers for down-regulated genes} \item{regulated_dir}{For ConditionA vs. ConditionB, select up-regulated for genes where ConditionA > ConditionB and select down-regulated for genes where ConditionB > ConditionA.} \item{confidence}{PaGE reports 1 - False Discovery Rate (FDR), which is called the "confidence" for the predictions. The FDR of a set of predictions is the expected percent of false predictions in the set of predictions. 
For example if the algorithm returns 100 genes with a false discovery rate of .3 (confidence=0.7) then we should expect 70 of them to be correct.} \item{protein_coding_only}{Should only protein coding genes be returned?} \item{o-fields}{Single valued attributes of the feature. Provide one or more values. Use comma as a delimter.} \item{o-tables}{Multi-valued attributes of the feature. Provide one or more values. Use comma as a delimter.} \item{.convert}{a logical value or a function that controls how the result of the method is returned. If this is a function, the character string or raw vector is passed to this function and it converts it appropriately. If this is a logical value and \code{TRUE}, then we attempt to convert the result based on its Content-Type returned by the Web server. If this is \code{FALSE}, the value from the Web server is returned as is.} \item{.url}{the URL for the Web request. This defaults to the correct value, but can be specified by the caller if the method is available at a different URL, e.g. locally or in a mirror server.} \item{.json}{a logical value controlling whether to use the JSON or the XML version of the method}} \value{text/xml text/plain} \author{}
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/getWordNetFiles.R \name{getWordNetFiles} \alias{getWordNetFiles} \title{Download WordNet database files} \usage{ getWordNetFiles(wordNetUrl = "http://wordnetcode.princeton.edu/wn3.1.dict.tar.gz", dest = getwd(), verbose = TRUE) } \arguments{ \item{wordNetUrl}{URL for the desired version of the WordNet database files. Defaults to version 3.1.} \item{dest}{Destination path for WordNet files.} \item{verbose}{Should the function provide progress details? Defaults to true.} } \description{ \code{getWordNetFiles.R} downloads and processes WordNet index and data files for later load into Neo4j }
/man/getWordNetFiles.Rd
permissive
jamcheung/neoWordNet
R
false
false
690
rd
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/getWordNetFiles.R \name{getWordNetFiles} \alias{getWordNetFiles} \title{Download WordNet database files} \usage{ getWordNetFiles(wordNetUrl = "http://wordnetcode.princeton.edu/wn3.1.dict.tar.gz", dest = getwd(), verbose = TRUE) } \arguments{ \item{wordNetUrl}{URL for the desired version of the WordNet database files. Defaults to version 3.1.} \item{dest}{Destination path for WordNet files.} \item{verbose}{Should the function provide progress details? Defaults to true.} } \description{ \code{getWordNetFiles.R} downloads and processes WordNet index and data files for later load into Neo4j }
############################################################
### COURSE PROJECT: Getting and Cleaning Data            ###
### AUTHOR: Asish Mahapatra                              ###
### GITHUB: kluge-iitk                                   ###
############################################################
### Please refer to the README.md for instructions on how
### to utilize this script
### NOTE: This script can take upto 20 seconds to run.
### DEPENDENCIES: This script requires the dplyr package to run

library(dplyr)

## SETUP ----
# Main_URL is the path to the data set folder; the other variables are the
# paths of the individual files relative to that folder.
Main_URL <- "./GettingCleaningData/UCI HAR Dataset"

x_testurl <- "test/X_test.txt"
y_testurl <- "test/y_test.txt"
x_trainurl <- "train/X_train.txt"
y_trainurl <- "train/y_train.txt"
subtesturl <- "test/subject_test.txt"
subtrainurl <- "train/subject_train.txt"
featuresurl <- "features.txt"
activityurl <- "activity_labels.txt"

## STEP 1: Merge the training and the test sets to create one data set ----
# file.path() replaces paste(..., sep = "/") for portable path construction.
x_test_data <- read.table(file.path(Main_URL, x_testurl), colClasses = "numeric")
x_train_data <- read.table(file.path(Main_URL, x_trainurl), colClasses = "numeric")
merged_data <- rbind(x_train_data, x_test_data)

# Read the feature names.
features <- read.table(file.path(Main_URL, featuresurl))

## STEP 2: Extract only the mean and standard deviation measurements ----
# Keep only feature names containing "mean()" or "std()".
acceptedvars <- grepl("mean\\(\\)|std\\(\\)", features[[2]])
merged_data <- merged_data[, acceptedvars]

# Load the subject data, merge train then test (same row order as the
# measurements above) and attach it to the merged data set.
sub_test_data <- read.table(file.path(Main_URL, subtesturl), colClasses = "integer")
sub_train_data <- read.table(file.path(Main_URL, subtrainurl), colClasses = "integer")
sub_data <- rbind(sub_train_data, sub_test_data)
merged_data$subject <- sub_data$V1

## STEP 3: Use descriptive activity names for the activities ----
y_test_data <- read.table(file.path(Main_URL, y_testurl), colClasses = "integer")
y_train_data <- read.table(file.path(Main_URL, y_trainurl), colClasses = "integer")
activitylabel <- read.table(file.path(Main_URL, activityurl))
activities <- rbind(y_train_data, y_test_data)

# Convert numeric activity codes to their label strings (vectorised lookup
# into the activity_labels table).
num_to_label_activity <- function(n) {
  as.character(activitylabel[n, 2])
}
merged_data$activity <- num_to_label_activity(activities$V1)

## STEP 4: Label the data set with descriptive variable names ----
# Make the names syntactic: '-' -> '.', remove '()', and collapse the
# duplicated "BodyBody" prefix to "Body".
names(merged_data) <- c(as.character(features[[2]][acceptedvars]),
                        "subject", "activity")
names(merged_data) <- gsub("BodyBody", "Body",
                           gsub("\\(\\)", "",
                                gsub("-", ".", names(merged_data))))

## STEP 5: Tidy data set with the average of each variable per activity ----
## and subject.
# summarise_each()/funs() are deprecated in current dplyr; across() is the
# modern equivalent and computes the mean of every non-grouping column.
grouped_data <- group_by(merged_data, activity, subject)
final_data <- summarise(grouped_data, across(everything(), mean),
                        .groups = "drop")

# View() raises an error in non-interactive sessions (e.g. Rscript), so only
# open the viewer when running interactively.
if (interactive()) {
  View(final_data)
}
/run_analysis.R
no_license
asishm/GettingCleaningDataProject
R
false
false
5,356
r
############################################################ ### COURSE PROJECT: Getting and Cleaning Data ### ### AUTHOR: Asish Mahapatra ### ### GITHUB: kluge-iitk ### ############################################################ ### Please refer to the README.md for instructions on how ### to utilize this script ### NOTE: This script can take upto 20 seconds to run. ### DEPENDENCIES: This script requires the dplyr package to run ## SETUP ## ################################################################################## # This section establishes variables for the paths to various files # # Main_URL is the path to the data set folder # # the other variables are the relative paths with respect to the data set folder # ################################################################################## Main_URL <- "./GettingCleaningData/UCI HAR Dataset" x_testurl <- "test/X_test.txt" y_testurl <- "test/y_test.txt" x_trainurl <- "train/X_train.txt" y_trainurl <- "train/y_train.txt" subtesturl <- "test/subject_test.txt" subtrainurl <- "train/subject_train.txt" featuresurl <- "features.txt" activityurl <- "activity_labels.txt" ########################################################################## ## STEP1: Merges the training and the test sets to create one data set. ## ########################################################################## x_test_data <- read.table(paste(Main_URL, x_testurl, sep = "/"), colClasses = "numeric") x_train_data <- read.table(paste(Main_URL, x_trainurl, sep = "/"), colClasses = "numeric") merged_data <- rbind(x_train_data, x_test_data) # read the feature names features <- read.table(paste(Main_URL, featuresurl, sep = "/")) ##################################################################################################### ## STEP 2: Extracts only the measurements on the mean and standard deviation for each measurement. 
## ## Used a filter that accepts only feature names that have "mean()" or "std()" ## ##################################################################################################### acceptedvars <- grepl("mean\\(\\)|std\\(\\)", features[[2]]) merged_data <- merged_data[, acceptedvars] ## load the subjects data, merge the test and train sets and add to the merged data set sub_test_data <- read.table(paste(Main_URL, subtesturl, sep = "/"), colClasses = "integer") sub_train_data <- read.table(paste(Main_URL, subtrainurl, sep = "/"), colClasses = "integer") sub_data <- rbind(sub_train_data, sub_test_data) merged_data$subject <- sub_data$V1 ######################################################################################### ## STEP 3: Uses descriptive activity names to name the activities in the data set ## ## load the test and train data for activity, merge them and load the activity labels ## ######################################################################################### y_test_data <- read.table(paste(Main_URL, y_testurl, sep = "/"), colClasses = "integer") y_train_data <- read.table(paste(Main_URL, y_trainurl, sep = "/"), colClasses = "integer") activitylabel <- read.table(paste(Main_URL, activityurl, sep = "/")) activities <- rbind(y_train_data, y_test_data) ######################################################################################### ## define function to convert the numeric activity data to activity names ## ## add activity data to the merged data set and convert activity numeric data to names ## ######################################################################################### num_to_label_activity <- function(n){as.character(activitylabel[n,2])} merged_data$activity <- num_to_label_activity(activities$V1) ######################################################################################### ## STEP 4: Appropriately labels the data set with descriptive variable names. 
## ## Selects appropriate feature names and assigns them to names(merged_data) ## ## Variable names are made syntactical by replacing '-' with '.' and removing '()' ## ## Also removed duplicate Body from BodyBody ## ######################################################################################### names(merged_data) <- c(as.character(features[[2]][acceptedvars]), "subject", "activity") names(merged_data) <- gsub("BodyBody", "Body", gsub("\\(\\)", "", gsub("-", ".", names(merged_data)))) library(dplyr) ######################################################################################### ## STEP 5: From the data set in step 4, creates a second, independent tidy data set ## ## with the average of each variable for each activity and each subject. ## ## group the merged data by activities and then by subject ## ## summarise the grouped data and calculate the means of the other variables ## ## grouped by activity and subject ## ######################################################################################### grouped_data <- group_by(merged_data, activity, subject) final_data <- summarise_each(grouped_data, funs(mean)) View(final_data)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/demo_station.R \docType{data} \name{demo_station} \alias{demo_station} \title{Example station} \format{ A \code{\link[base]{data.frame}} with 3 rows and 26 variables: } \usage{ demo_station } \description{ An example dataset for station GIS data. Data are in wide format, one row per station with multiple fields per row. } \examples{ data(demo_station) } \keyword{datasets}
/man/demo_station.Rd
no_license
SCCWRP/ASCI
R
false
true
453
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/demo_station.R \docType{data} \name{demo_station} \alias{demo_station} \title{Example station} \format{ A \code{\link[base]{data.frame}} with 3 rows and 26 variables: } \usage{ demo_station } \description{ An example dataset for station GIS data. Data are in wide format, one row per station with multiple fields per row. } \examples{ data(demo_station) } \keyword{datasets}
# Project 1 # YOUR NAME HERE ### He Ma 22348372 load(url("http://www.stat.berkeley.edu/users/nolan/data/weather2011.rda")) makePlotRegion = function(xlim, ylim, bgcolor, ylabels, margins, cityName, xtop = TRUE) { par(bg = bgcolor,mai = margins) plot(NULL,type="n", xlim=xlim, ylim=ylim, xaxt ="n", yaxt="n",xaxs="i", ylab = "", main = "") axis(side = 2, las =2, at = ylabels, labels = ylabels, tick = TRUE) if (xtop){ axis(side = 3, las =1, at = c(1,32,60,91,121,152,182,213,244,274,305,335)+15, labels = c("Jan", "Feb", "Mar", "Apr","May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"), tick = FALSE, pos = 108,cex = 0.5) }else if (!xtop){ axis(side = 1, las =1, at = c(1,32,60,91,121,152,182,213,244,274,305,335)+15, labels = c("Jan", "Feb", "Mar", "Apr","May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"), tick = FALSE,cex = 0.5) } } drawTempRegion = function(day, high, low, col){ rect(day-0.5, low, day+0.5, high, density =NA, col = col) } addGrid = function(location, col, ltype, vertical = TRUE) { if (vertical){ for (i in location){ abline(v = i, lty = ltype, col = col,lwd = 0.5) } }else if (!vertical){ for (i in location){ abline(h = i, lty = ltype, col = col,lwd = 0.5) } } } monthPrecip = function(day, dailyprecip, normal){ cumPrecip = cumsum(dailyprecip) points(day,cumPrecip, type = "l",col = "blue2",lwd =2) polygon(c(day[1],day,day[1]+length(day)-1),c(0,cumPrecip,0),col="grey50") points(day,rep(normal,length(day)),type = "l", col = "blue4",lwd =2) } finalPlot = function(temp, precip){ # The purpose of this function is to create the whole plot # Include here all of the set up that you need for # calling each of the above functions. 
# temp is the data frame sfoWeather or laxWeather # precip is the data frame sfoMonthlyPrecip or laxMonthlyPrecip # Here are some vectors that you might find handy monthNames = c("January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December") daysInMonth = c(31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31) cumDays = cumsum(c(1, daysInMonth)) normPrecip = as.numeric(as.character(precip$normal)) ### Fill in the various stages with your code ### Add any additional variables that you will need here days = c(1:365) ### Set up the graphics device to plot to pdf and layout ### the two plots on one canvas pdf("/media/sunnymh/Work/Course_sp13/STAT133/HW1/proj1_pic.pdf", width =9 , height =6 ) layout(matrix(1:2, 2, 1, byrow = TRUE), height = c(2,1)) ### Call makePlotRegion to create the plotting region ### for the temperature plot makePlotRegion(c(0,365), c(20,110), "grey85",seq(20,110,10), c(0, 1.1, 0.6, 0.5), "San Francisco", xtop = TRUE) ### Call drawTempRegion 3 times to add the rectangles for ### the record, normal, and observed temps drawTempRegion(days,temp$RecordHigh,temp$RecordLow, "#E41A1C33") drawTempRegion(days,temp$NormalHigh,temp$NormalLow,"#377EB833") drawTempRegion(days,temp$High,temp$Low,"#FFFF3344") ### Call addGrid to add the grid lines to the plot addGrid(cumDays[2:12]-0.5,"grey30", 2, TRUE) addGrid(seq(20,110,10),"white", 2, FALSE) ### Add the markers for the record breaking days BreakHigh = temp$High>=temp$RecordHigh BreakLow = temp$Low<=temp$RecordLow points(days[days[BreakHigh]], temp$High[days[BreakHigh]],cex = 0.2, pch = 19, col = "gray15") for (i in days[BreakHigh][-2]){ text(days[i], temp$High[i], paste(temp$Month[i], ".",temp$Day[i]), cex = 0.6, pos = 4, offset = 0.2, col ="gray10") } text(days[36], temp$High[36], paste(temp$Month[36], ".",temp$Day[36]), cex = 0.6, pos = 3, offset = 0.2, col ="gray10") points(days[days[BreakLow]], temp$Low[days[BreakLow]],cex = 0.2, pch = 19, col = 
"gray15") for (i in days[BreakLow]){ text(days[i], temp$Low[i], paste(temp$Month[i], ".",temp$Day[i]), cex = 0.6, pos = 4, offset = 0.2, col ="gray10") } ### Add the titles legend( x = 200, y = 35, cex = 0.5, legend = c("record temperature", "record temperature", "daily temperature"), fill = c("#E41A1C33","#377EB833","#FFFF3344")) title(main ="San Francisco's weather in 2011", ylab = "Temperature, F") ### Call makePlotRegion to create the plotting region ### for the precipitation plot makePlotRegion(c(1,365), c(0,6), "grey85", 0:6, c(0.5, 1.1, 0, 0.6) , "San Francisco", xtop = FALSE) ### Call monthPrecip 12 times to create each months ### cumulative precipitation plot. To do this use sapply(1:12, function(m) { a = cumDays[m] b = cumDays[m]+daysInMonth[m]-1 monthPrecip(days[a:b],temp$Precip[a:b],normPrecip[m]) }) ### the anonymous function calls monthPrecip with the ### appropriate arguments ### Call addGrid to add the grid lines to the plot addGrid(cumDays[2:12]-0.5,"grey30", 2, TRUE) addGrid(1:6,"white", 2, FALSE) ### Add the titles title(ylab = "precipation") dev.off() } finalPlot(temp = sfoWeather, precip = sfoMonthlyPrecip)
/Plot/code.R
no_license
sunnymh/CodeSamples
R
false
false
5,318
r
# Project 1 # YOUR NAME HERE ### He Ma 22348372 load(url("http://www.stat.berkeley.edu/users/nolan/data/weather2011.rda")) makePlotRegion = function(xlim, ylim, bgcolor, ylabels, margins, cityName, xtop = TRUE) { par(bg = bgcolor,mai = margins) plot(NULL,type="n", xlim=xlim, ylim=ylim, xaxt ="n", yaxt="n",xaxs="i", ylab = "", main = "") axis(side = 2, las =2, at = ylabels, labels = ylabels, tick = TRUE) if (xtop){ axis(side = 3, las =1, at = c(1,32,60,91,121,152,182,213,244,274,305,335)+15, labels = c("Jan", "Feb", "Mar", "Apr","May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"), tick = FALSE, pos = 108,cex = 0.5) }else if (!xtop){ axis(side = 1, las =1, at = c(1,32,60,91,121,152,182,213,244,274,305,335)+15, labels = c("Jan", "Feb", "Mar", "Apr","May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"), tick = FALSE,cex = 0.5) } } drawTempRegion = function(day, high, low, col){ rect(day-0.5, low, day+0.5, high, density =NA, col = col) } addGrid = function(location, col, ltype, vertical = TRUE) { if (vertical){ for (i in location){ abline(v = i, lty = ltype, col = col,lwd = 0.5) } }else if (!vertical){ for (i in location){ abline(h = i, lty = ltype, col = col,lwd = 0.5) } } } monthPrecip = function(day, dailyprecip, normal){ cumPrecip = cumsum(dailyprecip) points(day,cumPrecip, type = "l",col = "blue2",lwd =2) polygon(c(day[1],day,day[1]+length(day)-1),c(0,cumPrecip,0),col="grey50") points(day,rep(normal,length(day)),type = "l", col = "blue4",lwd =2) } finalPlot = function(temp, precip){ # The purpose of this function is to create the whole plot # Include here all of the set up that you need for # calling each of the above functions. 
# temp is the data frame sfoWeather or laxWeather # precip is the data frame sfoMonthlyPrecip or laxMonthlyPrecip # Here are some vectors that you might find handy monthNames = c("January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December") daysInMonth = c(31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31) cumDays = cumsum(c(1, daysInMonth)) normPrecip = as.numeric(as.character(precip$normal)) ### Fill in the various stages with your code ### Add any additional variables that you will need here days = c(1:365) ### Set up the graphics device to plot to pdf and layout ### the two plots on one canvas pdf("/media/sunnymh/Work/Course_sp13/STAT133/HW1/proj1_pic.pdf", width =9 , height =6 ) layout(matrix(1:2, 2, 1, byrow = TRUE), height = c(2,1)) ### Call makePlotRegion to create the plotting region ### for the temperature plot makePlotRegion(c(0,365), c(20,110), "grey85",seq(20,110,10), c(0, 1.1, 0.6, 0.5), "San Francisco", xtop = TRUE) ### Call drawTempRegion 3 times to add the rectangles for ### the record, normal, and observed temps drawTempRegion(days,temp$RecordHigh,temp$RecordLow, "#E41A1C33") drawTempRegion(days,temp$NormalHigh,temp$NormalLow,"#377EB833") drawTempRegion(days,temp$High,temp$Low,"#FFFF3344") ### Call addGrid to add the grid lines to the plot addGrid(cumDays[2:12]-0.5,"grey30", 2, TRUE) addGrid(seq(20,110,10),"white", 2, FALSE) ### Add the markers for the record breaking days BreakHigh = temp$High>=temp$RecordHigh BreakLow = temp$Low<=temp$RecordLow points(days[days[BreakHigh]], temp$High[days[BreakHigh]],cex = 0.2, pch = 19, col = "gray15") for (i in days[BreakHigh][-2]){ text(days[i], temp$High[i], paste(temp$Month[i], ".",temp$Day[i]), cex = 0.6, pos = 4, offset = 0.2, col ="gray10") } text(days[36], temp$High[36], paste(temp$Month[36], ".",temp$Day[36]), cex = 0.6, pos = 3, offset = 0.2, col ="gray10") points(days[days[BreakLow]], temp$Low[days[BreakLow]],cex = 0.2, pch = 19, col = 
"gray15") for (i in days[BreakLow]){ text(days[i], temp$Low[i], paste(temp$Month[i], ".",temp$Day[i]), cex = 0.6, pos = 4, offset = 0.2, col ="gray10") } ### Add the titles legend( x = 200, y = 35, cex = 0.5, legend = c("record temperature", "record temperature", "daily temperature"), fill = c("#E41A1C33","#377EB833","#FFFF3344")) title(main ="San Francisco's weather in 2011", ylab = "Temperature, F") ### Call makePlotRegion to create the plotting region ### for the precipitation plot makePlotRegion(c(1,365), c(0,6), "grey85", 0:6, c(0.5, 1.1, 0, 0.6) , "San Francisco", xtop = FALSE) ### Call monthPrecip 12 times to create each months ### cumulative precipitation plot. To do this use sapply(1:12, function(m) { a = cumDays[m] b = cumDays[m]+daysInMonth[m]-1 monthPrecip(days[a:b],temp$Precip[a:b],normPrecip[m]) }) ### the anonymous function calls monthPrecip with the ### appropriate arguments ### Call addGrid to add the grid lines to the plot addGrid(cumDays[2:12]-0.5,"grey30", 2, TRUE) addGrid(1:6,"white", 2, FALSE) ### Add the titles title(ylab = "precipation") dev.off() } finalPlot(temp = sfoWeather, precip = sfoMonthlyPrecip)
# .calculateSensitivitiesStar <- # function (pSets = list(), exps=NULL, cap=NA, na.rm=TRUE, area.type=c("Fitted","Actual"), nthread=1) { # if (missing(area.type)) { # area.type <- "Fitted" # } # if (is.null(exps)) { # stop("expriments is empty!") # } # for (study in names(pSets)) { # pSets[[study]]@sensitivity$profiles$auc_recomputed_star <- NA # } # if (!is.na(cap)) { # trunc <- TRUE # }else{ # trunc <- FALSE # } # for(i in 1:nrow(exps)) { # ranges <- list() # for (study in names(pSets)) { # ranges[[study]] <- as.numeric(pSets[[study]]@sensitivity$raw[exps[i,study], ,"Dose"]) # } # ranges <- .getCommonConcentrationRange(ranges) # names(ranges) <- names(pSets) # for(study in names(pSets)) { # myx <- as.numeric(pSets[[study]]@sensitivity$raw[exps[i, study],,"Dose"]) %in% ranges[[study]] # pSets[[study]]@sensitivity$raw[exps[i,study],!myx, ] <- NA # } # } # cl <- makeCluster(nthread) # for(study in names(pSets)){ # auc_recomputed_star <- unlist(parSapply(cl=cl, rownames(pSets[[study]]@sensitivity$raw), function(experiment, exps, study, dataset, area.type){ # if(!experiment %in% exps[,study]){return(NA_real_)} # return(computeAUC(concentration=as.numeric(dataset[experiment,,1]), # viability=as.numeric(dataset[experiment,,2]), # trunc=trunc, conc_as_log=FALSE, viability_as_pct=TRUE, area.type=area.type)/100) # }, exps = exps, study = study, dataset = pSets[[study]]@sensitivity$raw, area.type=area.type)) # pSets[[study]]@sensitivity$profiles$auc_recomputed_star <- auc_recomputed_star # } # stopCluster(cl) # return(pSets) # } ## This function computes pars/AUC/SF2/D10 for the whole raw sensitivity data of a rset .calculateFromRaw <- function(raw.sensitivity, trunc=TRUE, nthread=1, family=c("normal", "Cauchy"), scale = 5, n = 1){ family <- match.arg(family) AUC <- vector(length=dim(raw.sensitivity)[1]) names(AUC) <- dimnames(raw.sensitivity)[[1]] SF2 <- vector(length=dim(raw.sensitivity)[1]) names(SF2) <- dimnames(raw.sensitivity)[[1]] D10 <- 
vector(length=dim(raw.sensitivity)[1]) names(D10) <- dimnames(raw.sensitivity)[[1]] #pars <- logLogisticRegression(raw.sensitivity[exp, , "Dose"], raw.sensitivity[exp, , "Viability"], conc_as_log=FALSE, viability_as_pct=TRUE, trunc=trunc) # if (!is.na(cap)) {trunc <- TRUE}else{trunc <- FALSE} if (nthread ==1){ pars <- lapply(names(AUC), function(exp, raw.sensitivity, family, scale, n) { if(length(grep("///", raw.sensitivity[exp, , "Dose"])) > 0 | all(is.na(raw.sensitivity[exp, , "Dose"]))) { NA } else { return(unlist(linearQuadraticModel(raw.sensitivity[exp, , "Dose"], raw.sensitivity[exp, , "Response"], trunc=trunc, family=family, scale=scale, median_n=n))) #computeAUC(concentration=raw.sensitivity[exp, , "Dose"], Hill_fit=Hill_fit, trunc=trunc, conc_as_log=FALSE, viability_as_pct=TRUE) } },raw.sensitivity=raw.sensitivity, family = family, scale = scale, n = n) names(pars) <- dimnames(raw.sensitivity)[[1]] AUC <- unlist(lapply(names(pars), function(exp,raw.sensitivity, pars) { if(any(is.na(pars[[exp]]))) { NA } else { computeAUC(D=raw.sensitivity[exp, , "Dose"], pars=pars[[exp]], trunc=trunc) } },raw.sensitivity=raw.sensitivity, pars=pars)) SF2 <- unlist(lapply(names(pars), function(exp, pars) { if(any(is.na(pars[[exp]]))) { NA } else { computeSF2(pars=pars[[exp]]) } }, pars=pars)) D10 <- unlist(lapply(names(pars), function(exp, pars) { if(any(is.na(pars[[exp]]))) { NA } else { computeD10(pars=pars[[exp]]) } }, pars=pars)) } else { pars <- parallel::mclapply(names(AUC), function(exp, raw.sensitivity, family, scale, n, trunc) { if(length(grep("///", raw.sensitivity[exp, , "Dose"])) > 0 | all(is.na(raw.sensitivity[exp, , "Dose"]))) { NA } else { linearQuadraticModel(raw.sensitivity[exp, , "Dose"], raw.sensitivity[exp, , "Response"], trunc=trunc, family=family, scale=scale, median_n=n) #computeAUC(concentration=raw.sensitivity[exp, , "Dose"], Hill_fit=Hill_fit, trunc=trunc, conc_as_log=FALSE, viability_as_pct=TRUE) } },raw.sensitivity=raw.sensitivity, family = 
family, scale = scale, n = n, trunc = trunc, mc.cores = nthread) names(pars) <- dimnames(raw.sensitivity)[[1]] AUC <- unlist(parallel::mclapply(names(pars), function(exp, raw.sensitivity, pars, trunc) { if(any(is.na(pars[[exp]]))) { NA } else { computeAUC(D=raw.sensitivity[exp, , "Dose"], pars=pars[[exp]], trunc=trunc) } },raw.sensitivity=raw.sensitivity, pars=pars, trunc = trunc, mc.cores = nthread)) SF2 <- unlist(parallel::mclapply(names(pars), function(exp, pars, trunc) { if(any(is.na(pars[[exp]]))) { NA } else { computeSF2(pars=pars[[exp]]) } }, pars=pars, trunc = trunc, mc.cores = nthread)) D10 <- unlist(parallel::mclapply(names(pars), function(exp, pars, trunc) { if(any(is.na(pars[[exp]]))) { NA } else { computeD10(pars=pars[[exp]]) } }, pars=pars, trunc = trunc, mc.cores = nthread)) } names(AUC) <- dimnames(raw.sensitivity)[[1]] names(SF2) <- dimnames(raw.sensitivity)[[1]] names(D10) <- dimnames(raw.sensitivity)[[1]] # pars <- unlist(pars) alpha <- sapply(pars, function(x) return(x[1])) beta <- sapply(pars, function(x) return(x[2])) return(list("AUC"=AUC, "SF2"=SF2, "D10"=D10 ,"alpha"=alpha, "beta"=beta)) } # ## This function computes intersected concentration range between a list of concentration ranges # .getCommonConcentrationRange <- function(doses) # { # min.dose <- 0 # max.dose <- 10^100 # for(i in 1:length(doses)) # { # min.dose <- max(min.dose, min(as.numeric(doses[[i]]), na.rm = TRUE), na.rm = TRUE) # max.dose <- min(max.dose, max(as.numeric(doses[[i]]), na.rm = TRUE), na.rm = TRUE) # } # common.ranges <- list() # for(i in 1:length(doses)) # { # common.ranges[[i]] <- doses[[i]][ # which.min(abs(as.numeric(doses[[i]])-min.dose)):max( # which(abs(as.numeric(doses[[i]]) - max.dose)==min(abs(as.numeric(doses[[i]]) - max.dose), na.rm=TRUE)))] # } # return(common.ranges) # } # ## predict viability from concentration data and curve parameters # .Hill<-function(x, pars) { # return(pars[2] + (1 - pars[2]) / (1 + (10 ^ x / 10 ^ pars[3]) ^ pars[1])) # } # ## 
calculate residual of fit # .residual<-function(x, y, n, pars, scale = 0.07, family = c("normal", "Cauchy"), trunc = FALSE) { # family <- match.arg(family) # Cauchy_flag = (family == "Cauchy") # if (Cauchy_flag == FALSE) { # # return(sum((.Hill(x, pars) - y) ^ 2)) # diffs <- .Hill(x, pars)-y # if (trunc == FALSE) { # return(sum(-log(.dmednnormals(diffs, n, scale)))) # } else { # down_truncated <- abs(y) >= 1 # up_truncated <- abs(y) <= 0 # # For up truncated, integrate the cauchy dist up until -diff because anything less gets truncated to 0, and thus the residual is -diff, and the prob # # function becomes discrete # # For down_truncated, 1-cdf(diffs) = cdf(-diffs) # return(sum(-log(.dmednnormals(diffs[!(down_truncated | up_truncated)], n, scale))) + sum(-log(.edmednnormals(-diffs[up_truncated | down_truncated], n, scale)))) # } # } else { # diffs <- .Hill(x, pars)-y # if (trunc == FALSE) { # # return(sum(-log(6 * scale / (pi * (scale ^ 2 + diffs ^ 2)) * (1 / 2 + 1 / pi * atan(diffs / scale)) * (1 / 2 - 1 / pi * atan(diffs / scale))))) # return(sum(-log(.dmedncauchys(diffs, n, scale)))) # } else { # down_truncated <- abs(y) >= 1 # up_truncated <- abs(y) <= 0 # # For up truncated, integrate the cauchy dist up until -diff because anything less gets truncated to 0, and thus the residual is -diff, and the prob # # function becomes discrete # # For down_truncated, 1-cdf(diffs) = cdf(-diffs) # return(sum(-log(.dmedncauchys(diffs[!(down_truncated | up_truncated)], n, scale))) + sum(-log(.edmedncauchys(-diffs[up_truncated | down_truncated], n, scale)))) # # return(sum(log(6 * scale / (pi * (scale ^ 2 + diffs ^ 2)) * (1 / 2 + 1 / pi * atan(diffs[setdiff(1:length(y), union(down_truncated, up_truncated))] / scale)) # # * (1 / 2 - 1 / pi * atan(diffs[setdiff(1:length(y), union(down_truncated, up_truncated))] / scale))), # # -log(1 / 2 - 3 / (2 * pi) * atan((1 - diffs[down_truncated] - y[down_truncated]) / scale) + 2 / pi ^ 3 * (atan((1 - diffs[down_truncated] - 
y[down_truncated]) / scale)) ^ 3), # # -log(-1 / 2 + 3 / (2 * pi) * atan((-diffs[up_truncated] - y[up_truncated]) / scale) - 2 / pi ^ 3 * (atan((- diffs[up_truncated] - y[up_truncated]) / scale)) ^ 3))) # } # } # } # ## generate an initial guess for dose-response curve parameters by evaluating the residuals at different lattice points of the search space # .meshEval<-function(log_conc, # viability, # lower_bounds = c(0, 0, -6), # upper_bounds = c(4, 1, 6), # density = c(2, 10, 2), # scale = 0.07, # n = 1, # family = c("normal", "Cauchy"), # trunc = FALSE) { # family <- match.arg(family) # guess <- c(pmin(pmax(1, lower_bounds[1]), upper_bounds[1]), # pmin(pmax(min(viability), lower_bounds[2]), upper_bounds[2]), # pmin(pmax(log_conc[which.min(abs(viability - 1/2))], lower_bounds[3]), upper_bounds[3])) # guess_residual<- .residual(log_conc, # viability, # pars = guess, # n=n, # scale = scale, # family = family, # trunc = trunc) # for (i in seq(from = lower_bounds[1], to = upper_bounds[1], by = 1 / density[1])) { # for (j in seq(from = lower_bounds[2], to = upper_bounds[2], by = 1 / density[2])) { # for (k in seq(from = lower_bounds[3], to = upper_bounds[3], by = 1 / density[3])) { # test_guess_residual <- .residual(log_conc, # viability, # pars = c(i, j, k), # n=n, # scale = scale, # family = family, # trunc = trunc) # if(!is.finite(test_guess_residual)){ # warning(paste0(" Test Guess Residual is: ", test_guess_residual, "\n Other Pars: log_conc: ", paste(log_conc, collapse=", "), "\n Viability: ", paste(viability, collapse=", "), "\n Scale: ", scale, "\n Family: ", family, "\n Trunc ", trunc, "\n HS: ", i, ", Einf: ", j, ", logEC50: ", k, "\n n: ", n)) # } # if(!length(test_guess_residual)){ # warning(paste0(" Test Guess Residual is: ", test_guess_residual, "\n Other Pars: log_conc: ", paste(log_conc, collapse=", "), "\n Viability: ", paste(viability, collapse=", "), "\n Scale: ", scale, "\n Family: ", family, "\n Trunc ", trunc, "\n HS: ", i, ", Einf: ", j, ", 
logEC50: ", k, "\n n: ", n)) # } # if (test_guess_residual < guess_residual) { # guess <- c(i, j, k) # guess_residual <- test_guess_residual # } # } # } # } # return(guess) # } # ## get vector of interpolated concentrations for graphing purposes # .GetSupportVec <- function(x, output_length = 1001) { # return(seq(from = min(x), to = max(x), length.out = output_length)) # } # ######## TODO ADD computationg from being passed in params # #' Fits dose-response curves to data given by the user # #' and returns the AUC of the fitted curve, normalized to the length of the concentration range. # #' # #' @param concentration [vector] is a vector of drug concentrations. # #' # #' @param viability [vector] is a vector whose entries are the viability values observed in the presence of the # #' drug concentrations whose logarithms are in the corresponding entries of the log_conc, expressed as percentages # #' of viability in the absence of any drug. # #' # #' @param trunc [logical], if true, causes viability data to be truncated to lie between 0 and 1 before # #' curve-fitting is performed. 
# .computeAUCUnderFittedCurve <- function(concentration, viability, trunc=TRUE, verbose=FALSE) { # # #CHECK THAT FUNCTION INPUTS ARE APPROPRIATE # # if (prod(is.finite(conc)) != 1) { # # print(conc) # # stop("Concentration vector contains elements which are not real numbers.") # # } # # if (prod(is.finite(viability)) != 1) { # # print(viability) # # stop("Viability vector contains elements which are not real numbers.") # # } # # if (is.logical(trunc) == FALSE) { # # print(trunc) # # stop("'trunc' is not a logical.") # # } # # if (length(conc) != length(viability)) { # # print(conc) # # print(viability) # # stop("Concentration vector is not of same length as viability vector.") # # } # # if (min(conc) < 0) { # # stop("Concentration vector contains negative data.") # # } # # if (min(viability) < 0 && verbose) { # # warning("Warning: Negative viability data.") # # } # # if (max(viability) > 100 && verbose) { # # warning("Warning: Viability data exceeds negative control.") # # } # # #CONVERT DOSE-RESPONSE DATA TO APPROPRIATE INTERNAL REPRESENTATION # # log_conc <- log10(conc) # # viability <- viability / 100 # # if (trunc == TRUE) { # # viability[which(viability < 0)] <- 0 # # viability[which(viability > 1)] <- 1 # # } # log_conc <- concentration # #FIT CURVE AND CALCULATE IC50 # pars <- unlist(logLogisticRegression(log_conc, # viability, # conc_as_log = TRUE, # viability_as_pct = FALSE, # trunc = trunc)) # x <- .GetSupportVec(log_conc) # return(1 - trapz(x, .Hill(x, pars)) / (log_conc[length(log_conc)] - log_conc[1])) # } # #This function is being used in computeSlope # .optimizeRegression <- function(x, y, x0 = -3, y0 = 100) # { # beta1 = (sum(x * y) - y0 * sum(x)) / (sum(x * x) - x0 * sum(x)) # return(beta1) # } updateMaxConc <- function(rSet){ rSet@sensitivity$info$max.conc <- apply(rSet@sensitivity$raw[,,"Dose"], 1, max, na.rm=TRUE) return(rSet) }
/R/computeSensitivity.R
no_license
cran/RadioGx
R
false
false
15,278
r
# .calculateSensitivitiesStar <- # function (pSets = list(), exps=NULL, cap=NA, na.rm=TRUE, area.type=c("Fitted","Actual"), nthread=1) { # if (missing(area.type)) { # area.type <- "Fitted" # } # if (is.null(exps)) { # stop("expriments is empty!") # } # for (study in names(pSets)) { # pSets[[study]]@sensitivity$profiles$auc_recomputed_star <- NA # } # if (!is.na(cap)) { # trunc <- TRUE # }else{ # trunc <- FALSE # } # for(i in 1:nrow(exps)) { # ranges <- list() # for (study in names(pSets)) { # ranges[[study]] <- as.numeric(pSets[[study]]@sensitivity$raw[exps[i,study], ,"Dose"]) # } # ranges <- .getCommonConcentrationRange(ranges) # names(ranges) <- names(pSets) # for(study in names(pSets)) { # myx <- as.numeric(pSets[[study]]@sensitivity$raw[exps[i, study],,"Dose"]) %in% ranges[[study]] # pSets[[study]]@sensitivity$raw[exps[i,study],!myx, ] <- NA # } # } # cl <- makeCluster(nthread) # for(study in names(pSets)){ # auc_recomputed_star <- unlist(parSapply(cl=cl, rownames(pSets[[study]]@sensitivity$raw), function(experiment, exps, study, dataset, area.type){ # if(!experiment %in% exps[,study]){return(NA_real_)} # return(computeAUC(concentration=as.numeric(dataset[experiment,,1]), # viability=as.numeric(dataset[experiment,,2]), # trunc=trunc, conc_as_log=FALSE, viability_as_pct=TRUE, area.type=area.type)/100) # }, exps = exps, study = study, dataset = pSets[[study]]@sensitivity$raw, area.type=area.type)) # pSets[[study]]@sensitivity$profiles$auc_recomputed_star <- auc_recomputed_star # } # stopCluster(cl) # return(pSets) # } ## This function computes pars/AUC/SF2/D10 for the whole raw sensitivity data of a rset .calculateFromRaw <- function(raw.sensitivity, trunc=TRUE, nthread=1, family=c("normal", "Cauchy"), scale = 5, n = 1){ family <- match.arg(family) AUC <- vector(length=dim(raw.sensitivity)[1]) names(AUC) <- dimnames(raw.sensitivity)[[1]] SF2 <- vector(length=dim(raw.sensitivity)[1]) names(SF2) <- dimnames(raw.sensitivity)[[1]] D10 <- 
vector(length=dim(raw.sensitivity)[1]) names(D10) <- dimnames(raw.sensitivity)[[1]] #pars <- logLogisticRegression(raw.sensitivity[exp, , "Dose"], raw.sensitivity[exp, , "Viability"], conc_as_log=FALSE, viability_as_pct=TRUE, trunc=trunc) # if (!is.na(cap)) {trunc <- TRUE}else{trunc <- FALSE} if (nthread ==1){ pars <- lapply(names(AUC), function(exp, raw.sensitivity, family, scale, n) { if(length(grep("///", raw.sensitivity[exp, , "Dose"])) > 0 | all(is.na(raw.sensitivity[exp, , "Dose"]))) { NA } else { return(unlist(linearQuadraticModel(raw.sensitivity[exp, , "Dose"], raw.sensitivity[exp, , "Response"], trunc=trunc, family=family, scale=scale, median_n=n))) #computeAUC(concentration=raw.sensitivity[exp, , "Dose"], Hill_fit=Hill_fit, trunc=trunc, conc_as_log=FALSE, viability_as_pct=TRUE) } },raw.sensitivity=raw.sensitivity, family = family, scale = scale, n = n) names(pars) <- dimnames(raw.sensitivity)[[1]] AUC <- unlist(lapply(names(pars), function(exp,raw.sensitivity, pars) { if(any(is.na(pars[[exp]]))) { NA } else { computeAUC(D=raw.sensitivity[exp, , "Dose"], pars=pars[[exp]], trunc=trunc) } },raw.sensitivity=raw.sensitivity, pars=pars)) SF2 <- unlist(lapply(names(pars), function(exp, pars) { if(any(is.na(pars[[exp]]))) { NA } else { computeSF2(pars=pars[[exp]]) } }, pars=pars)) D10 <- unlist(lapply(names(pars), function(exp, pars) { if(any(is.na(pars[[exp]]))) { NA } else { computeD10(pars=pars[[exp]]) } }, pars=pars)) } else { pars <- parallel::mclapply(names(AUC), function(exp, raw.sensitivity, family, scale, n, trunc) { if(length(grep("///", raw.sensitivity[exp, , "Dose"])) > 0 | all(is.na(raw.sensitivity[exp, , "Dose"]))) { NA } else { linearQuadraticModel(raw.sensitivity[exp, , "Dose"], raw.sensitivity[exp, , "Response"], trunc=trunc, family=family, scale=scale, median_n=n) #computeAUC(concentration=raw.sensitivity[exp, , "Dose"], Hill_fit=Hill_fit, trunc=trunc, conc_as_log=FALSE, viability_as_pct=TRUE) } },raw.sensitivity=raw.sensitivity, family = 
family, scale = scale, n = n, trunc = trunc, mc.cores = nthread) names(pars) <- dimnames(raw.sensitivity)[[1]] AUC <- unlist(parallel::mclapply(names(pars), function(exp, raw.sensitivity, pars, trunc) { if(any(is.na(pars[[exp]]))) { NA } else { computeAUC(D=raw.sensitivity[exp, , "Dose"], pars=pars[[exp]], trunc=trunc) } },raw.sensitivity=raw.sensitivity, pars=pars, trunc = trunc, mc.cores = nthread)) SF2 <- unlist(parallel::mclapply(names(pars), function(exp, pars, trunc) { if(any(is.na(pars[[exp]]))) { NA } else { computeSF2(pars=pars[[exp]]) } }, pars=pars, trunc = trunc, mc.cores = nthread)) D10 <- unlist(parallel::mclapply(names(pars), function(exp, pars, trunc) { if(any(is.na(pars[[exp]]))) { NA } else { computeD10(pars=pars[[exp]]) } }, pars=pars, trunc = trunc, mc.cores = nthread)) } names(AUC) <- dimnames(raw.sensitivity)[[1]] names(SF2) <- dimnames(raw.sensitivity)[[1]] names(D10) <- dimnames(raw.sensitivity)[[1]] # pars <- unlist(pars) alpha <- sapply(pars, function(x) return(x[1])) beta <- sapply(pars, function(x) return(x[2])) return(list("AUC"=AUC, "SF2"=SF2, "D10"=D10 ,"alpha"=alpha, "beta"=beta)) } # ## This function computes intersected concentration range between a list of concentration ranges # .getCommonConcentrationRange <- function(doses) # { # min.dose <- 0 # max.dose <- 10^100 # for(i in 1:length(doses)) # { # min.dose <- max(min.dose, min(as.numeric(doses[[i]]), na.rm = TRUE), na.rm = TRUE) # max.dose <- min(max.dose, max(as.numeric(doses[[i]]), na.rm = TRUE), na.rm = TRUE) # } # common.ranges <- list() # for(i in 1:length(doses)) # { # common.ranges[[i]] <- doses[[i]][ # which.min(abs(as.numeric(doses[[i]])-min.dose)):max( # which(abs(as.numeric(doses[[i]]) - max.dose)==min(abs(as.numeric(doses[[i]]) - max.dose), na.rm=TRUE)))] # } # return(common.ranges) # } # ## predict viability from concentration data and curve parameters # .Hill<-function(x, pars) { # return(pars[2] + (1 - pars[2]) / (1 + (10 ^ x / 10 ^ pars[3]) ^ pars[1])) # } # ## 
calculate residual of fit # .residual<-function(x, y, n, pars, scale = 0.07, family = c("normal", "Cauchy"), trunc = FALSE) { # family <- match.arg(family) # Cauchy_flag = (family == "Cauchy") # if (Cauchy_flag == FALSE) { # # return(sum((.Hill(x, pars) - y) ^ 2)) # diffs <- .Hill(x, pars)-y # if (trunc == FALSE) { # return(sum(-log(.dmednnormals(diffs, n, scale)))) # } else { # down_truncated <- abs(y) >= 1 # up_truncated <- abs(y) <= 0 # # For up truncated, integrate the cauchy dist up until -diff because anything less gets truncated to 0, and thus the residual is -diff, and the prob # # function becomes discrete # # For down_truncated, 1-cdf(diffs) = cdf(-diffs) # return(sum(-log(.dmednnormals(diffs[!(down_truncated | up_truncated)], n, scale))) + sum(-log(.edmednnormals(-diffs[up_truncated | down_truncated], n, scale)))) # } # } else { # diffs <- .Hill(x, pars)-y # if (trunc == FALSE) { # # return(sum(-log(6 * scale / (pi * (scale ^ 2 + diffs ^ 2)) * (1 / 2 + 1 / pi * atan(diffs / scale)) * (1 / 2 - 1 / pi * atan(diffs / scale))))) # return(sum(-log(.dmedncauchys(diffs, n, scale)))) # } else { # down_truncated <- abs(y) >= 1 # up_truncated <- abs(y) <= 0 # # For up truncated, integrate the cauchy dist up until -diff because anything less gets truncated to 0, and thus the residual is -diff, and the prob # # function becomes discrete # # For down_truncated, 1-cdf(diffs) = cdf(-diffs) # return(sum(-log(.dmedncauchys(diffs[!(down_truncated | up_truncated)], n, scale))) + sum(-log(.edmedncauchys(-diffs[up_truncated | down_truncated], n, scale)))) # # return(sum(log(6 * scale / (pi * (scale ^ 2 + diffs ^ 2)) * (1 / 2 + 1 / pi * atan(diffs[setdiff(1:length(y), union(down_truncated, up_truncated))] / scale)) # # * (1 / 2 - 1 / pi * atan(diffs[setdiff(1:length(y), union(down_truncated, up_truncated))] / scale))), # # -log(1 / 2 - 3 / (2 * pi) * atan((1 - diffs[down_truncated] - y[down_truncated]) / scale) + 2 / pi ^ 3 * (atan((1 - diffs[down_truncated] - 
y[down_truncated]) / scale)) ^ 3), # # -log(-1 / 2 + 3 / (2 * pi) * atan((-diffs[up_truncated] - y[up_truncated]) / scale) - 2 / pi ^ 3 * (atan((- diffs[up_truncated] - y[up_truncated]) / scale)) ^ 3))) # } # } # } # ## generate an initial guess for dose-response curve parameters by evaluating the residuals at different lattice points of the search space # .meshEval<-function(log_conc, # viability, # lower_bounds = c(0, 0, -6), # upper_bounds = c(4, 1, 6), # density = c(2, 10, 2), # scale = 0.07, # n = 1, # family = c("normal", "Cauchy"), # trunc = FALSE) { # family <- match.arg(family) # guess <- c(pmin(pmax(1, lower_bounds[1]), upper_bounds[1]), # pmin(pmax(min(viability), lower_bounds[2]), upper_bounds[2]), # pmin(pmax(log_conc[which.min(abs(viability - 1/2))], lower_bounds[3]), upper_bounds[3])) # guess_residual<- .residual(log_conc, # viability, # pars = guess, # n=n, # scale = scale, # family = family, # trunc = trunc) # for (i in seq(from = lower_bounds[1], to = upper_bounds[1], by = 1 / density[1])) { # for (j in seq(from = lower_bounds[2], to = upper_bounds[2], by = 1 / density[2])) { # for (k in seq(from = lower_bounds[3], to = upper_bounds[3], by = 1 / density[3])) { # test_guess_residual <- .residual(log_conc, # viability, # pars = c(i, j, k), # n=n, # scale = scale, # family = family, # trunc = trunc) # if(!is.finite(test_guess_residual)){ # warning(paste0(" Test Guess Residual is: ", test_guess_residual, "\n Other Pars: log_conc: ", paste(log_conc, collapse=", "), "\n Viability: ", paste(viability, collapse=", "), "\n Scale: ", scale, "\n Family: ", family, "\n Trunc ", trunc, "\n HS: ", i, ", Einf: ", j, ", logEC50: ", k, "\n n: ", n)) # } # if(!length(test_guess_residual)){ # warning(paste0(" Test Guess Residual is: ", test_guess_residual, "\n Other Pars: log_conc: ", paste(log_conc, collapse=", "), "\n Viability: ", paste(viability, collapse=", "), "\n Scale: ", scale, "\n Family: ", family, "\n Trunc ", trunc, "\n HS: ", i, ", Einf: ", j, ", 
logEC50: ", k, "\n n: ", n)) # } # if (test_guess_residual < guess_residual) { # guess <- c(i, j, k) # guess_residual <- test_guess_residual # } # } # } # } # return(guess) # } # ## get vector of interpolated concentrations for graphing purposes # .GetSupportVec <- function(x, output_length = 1001) { # return(seq(from = min(x), to = max(x), length.out = output_length)) # } # ######## TODO ADD computationg from being passed in params # #' Fits dose-response curves to data given by the user # #' and returns the AUC of the fitted curve, normalized to the length of the concentration range. # #' # #' @param concentration [vector] is a vector of drug concentrations. # #' # #' @param viability [vector] is a vector whose entries are the viability values observed in the presence of the # #' drug concentrations whose logarithms are in the corresponding entries of the log_conc, expressed as percentages # #' of viability in the absence of any drug. # #' # #' @param trunc [logical], if true, causes viability data to be truncated to lie between 0 and 1 before # #' curve-fitting is performed. 
# .computeAUCUnderFittedCurve <- function(concentration, viability, trunc=TRUE, verbose=FALSE) { # # #CHECK THAT FUNCTION INPUTS ARE APPROPRIATE # # if (prod(is.finite(conc)) != 1) { # # print(conc) # # stop("Concentration vector contains elements which are not real numbers.") # # } # # if (prod(is.finite(viability)) != 1) { # # print(viability) # # stop("Viability vector contains elements which are not real numbers.") # # } # # if (is.logical(trunc) == FALSE) { # # print(trunc) # # stop("'trunc' is not a logical.") # # } # # if (length(conc) != length(viability)) { # # print(conc) # # print(viability) # # stop("Concentration vector is not of same length as viability vector.") # # } # # if (min(conc) < 0) { # # stop("Concentration vector contains negative data.") # # } # # if (min(viability) < 0 && verbose) { # # warning("Warning: Negative viability data.") # # } # # if (max(viability) > 100 && verbose) { # # warning("Warning: Viability data exceeds negative control.") # # } # # #CONVERT DOSE-RESPONSE DATA TO APPROPRIATE INTERNAL REPRESENTATION # # log_conc <- log10(conc) # # viability <- viability / 100 # # if (trunc == TRUE) { # # viability[which(viability < 0)] <- 0 # # viability[which(viability > 1)] <- 1 # # } # log_conc <- concentration # #FIT CURVE AND CALCULATE IC50 # pars <- unlist(logLogisticRegression(log_conc, # viability, # conc_as_log = TRUE, # viability_as_pct = FALSE, # trunc = trunc)) # x <- .GetSupportVec(log_conc) # return(1 - trapz(x, .Hill(x, pars)) / (log_conc[length(log_conc)] - log_conc[1])) # } # #This function is being used in computeSlope # .optimizeRegression <- function(x, y, x0 = -3, y0 = 100) # { # beta1 = (sum(x * y) - y0 * sum(x)) / (sum(x * x) - x0 * sum(x)) # return(beta1) # } updateMaxConc <- function(rSet){ rSet@sensitivity$info$max.conc <- apply(rSet@sensitivity$raw[,,"Dose"], 1, max, na.rm=TRUE) return(rSet) }
testlist <- list(iK = 436207616L) result <- do.call(eDMA:::PowerSet,testlist) str(result)
/eDMA/inst/testfiles/PowerSet/AFL_PowerSet/PowerSet_valgrind_files/1609869910-test.R
no_license
akhikolla/updated-only-Issues
R
false
false
89
r
testlist <- list(iK = 436207616L) result <- do.call(eDMA:::PowerSet,testlist) str(result)
Ldef <- list( mergeForwardReverse = FALSE, minOverlap = 0, maxOverlap = 0, maxFracMismatchOverlap = 0, greedyOverlap = TRUE, revComplForward = FALSE, revComplReverse = FALSE, elementsForward = "SUCV", elementsReverse = "SUCV", elementLengthsForward = c(1, 10, 18, 96), elementLengthsReverse = c(1, 8, 20, 96), adapterForward = "GGAAGAGCACACGTC", adapterReverse = "GGAAGAGCGTCGTGT", primerForward = "", primerReverse = "", wildTypeForward = "ACTGATACACTCCAAGCGGAGACAGACCAACTAGAAGATGAGAAGTCTGCTTTGCAGACCGAGATTGCCAACCTGCTGAAGGAGAAGGAAAAACTA", wildTypeReverse = "ATCGCCCGGCTGGAGGAAAAAGTGAAAACCTTGAAAGCTCAGAACTCGGAGCTGGCGTCCACGGCCAACATGCTCAGGGAACAGGTGGCACAGCTT", constantForward = "AACCGGAGGAGGGAGCTG", constantReverse = "GAAAAAGGAAGCTGGAGAGA", avePhredMinForward = 20.0, avePhredMinReverse = 20.0, variableNMaxForward = 0, variableNMaxReverse = 0, umiNMax = 0, nbrMutatedCodonsMaxForward = 1, nbrMutatedCodonsMaxReverse = 1, nbrMutatedBasesMaxForward = -1, nbrMutatedBasesMaxReverse = -1, forbiddenMutatedCodonsForward = "NNW", forbiddenMutatedCodonsReverse = "NNW", useTreeWTmatch = FALSE, collapseToWTForward = FALSE, collapseToWTReverse = FALSE, mutatedPhredMinForward = 0.0, mutatedPhredMinReverse = 0.0, mutNameDelimiter = ".", constantMaxDistForward = -1, constantMaxDistReverse = -1, variableCollapseMaxDist = 0, variableCollapseMinReads = 0, variableCollapseMinRatio = 0, umiCollapseMaxDist = 0, maxNReads = -1, verbose = FALSE ) Ldef1 <- c( list(fastqForward = system.file("extdata/transInput_1.fastq.gz", package = "mutscan"), fastqReverse = system.file("extdata/transInput_2.fastq.gz", package = "mutscan") ), Ldef) Ldef2 <- c( list(fastqForward = system.file("extdata/transOutput_1.fastq.gz", package = "mutscan"), fastqReverse = system.file("extdata/transOutput_2.fastq.gz", package = "mutscan") ), Ldef) out1 <- do.call(digestFastqs, Ldef1) out2 <- do.call(digestFastqs, Ldef2) coldata <- data.frame(Name = c("sample1", "sample2"), Condition = c("input", "output"), Replicate = c(1, 1), OD 
= c(0.05, 1.5), stringsAsFactors = FALSE) test_that("summarizeExperiment fails with incorrect arguments", { ## x must be a named list, with names matching coldata$Name expect_error(summarizeExperiment(x = 1, coldata = coldata)) expect_error(summarizeExperiment(x = list(out1), coldata = coldata)) expect_error(summarizeExperiment(x = out1, coldata = coldata)) expect_error(summarizeExperiment(x = list(s1 = out1, s1 = out2), coldata = coldata)) ## coldata must be a data.frame with a column Name expect_error(summarizeExperiment(x = list(sample1 = out1, sample2 = out2), coldata = 1)) expect_error(summarizeExperiment(x = list(sample1 = out1, sample2 = out2), coldata = coldata[, c("Condition", "OD")])) expect_error(summarizeExperiment(x = list(sample1 = out1, sample2 = out2), coldata = as.list(coldata))) ## countType must be reads or umis expect_error(summarizeExperiment(x = list(sample1 = out1, sample2 = out2), coldata = coldata, countType = 1)) expect_error(summarizeExperiment(x = list(sample1 = out1, sample2 = out2), coldata = coldata, countType = "umi")) expect_error(summarizeExperiment(x = list(sample1 = out1, sample2 = out2), coldata = coldata, countType = c("reads", "umis"))) ## names must match expect_error(summarizeExperiment(x = list(s1 = out1, s2 = out2), coldata = coldata, countType = "umis")) expect_warning(summarizeExperiment(x = list(sample1 = out1, sample2 = out2, sample3 = out1), coldata = coldata, countType = "umis")) ## samples must have the same mutNameDelimiter tmpout2 <- out2 tmpout2$parameters$mutNameDelimiter <- ":" expect_error(summarizeExperiment(x = list(sample1 = out1, sample2 = tmpout2), coldata = coldata, countType = "reads")) }) test_that("summarizeExperiment works as expected with reads output", { se <- summarizeExperiment(x = list(sample1 = out1, sample2 = out2), coldata = coldata, countType = "reads") expect_equal(nrow(se), length(union(out1$summaryTable$mutantName, out2$summaryTable$mutantName))) expect_equal(ncol(se), 2) 
expect_equal(sort(rownames(se)), sort(union(out1$summaryTable$mutantName, out2$summaryTable$mutantName))) expect_equal(Matrix::colSums(SummarizedExperiment::assay(se, "counts")), c(out1$filterSummary$nbrRetained, out2$filterSummary$nbrRetained), ignore_attr = TRUE) expect_equal(Matrix::colSums(SummarizedExperiment::assay(se, "counts")), c(sum(out1$summaryTable$nbrReads), sum(out2$summaryTable$nbrReads)), ignore_attr = TRUE) for (cn in colnames(out1$filterSummary)) { expect_equal(SummarizedExperiment::colData(se)["sample1", cn], out1$filterSummary[, cn]) expect_equal(SummarizedExperiment::colData(se)["sample2", cn], out2$filterSummary[, cn]) } for (cn in colnames(coldata)) { expect_equal(SummarizedExperiment::colData(se)["sample1", cn], coldata[1, cn]) expect_equal(SummarizedExperiment::colData(se)["sample2", cn], coldata[2, cn]) } expect_equal(S4Vectors::metadata(se)$parameters[["sample1"]], out1$parameters) expect_equal(S4Vectors::metadata(se)$parameters[["sample2"]], out2$parameters) expect_true(all(c("mutantName", "sequence", "nbrMutBases", "minNbrMutBases", "maxNbrMutBases", "nbrMutCodons", "minNbrMutCodons", "maxNbrMutCodons", "nbrMutAAs", "minNbrMutAAs", "maxNbrMutAAs", "sequenceAA", "mutantNameAA", "mutationTypes", "varLengths") %in% colnames(SummarizedExperiment::rowData(se)))) ## Check that the number of mutated codons are correct (=equal to the number of ## entries in the mutant name that don't contain WT) expect_equal(sapply(strsplit(SummarizedExperiment::rowData(se)$mutantName, "_"), function(w) length(w[!grepl("WT", w)])), SummarizedExperiment::rowData(se)$minNbrMutCodons, ignore_attr = TRUE) expect_equal(sapply(strsplit(SummarizedExperiment::rowData(se)$mutantName, "_"), function(w) length(w[!grepl("WT", w)])), SummarizedExperiment::rowData(se)$maxNbrMutCodons, ignore_attr = TRUE) ## Check that the number of mutated bases is not larger than 3x the number of mutated codons expect_true(all(SummarizedExperiment::rowData(se)$maxNbrMutBases <= 3 * 
SummarizedExperiment::rowData(se)$maxNbrMutCodons)) expect_true(all(SummarizedExperiment::rowData(se)$maxNbrMutAAs <= SummarizedExperiment::rowData(se)$maxNbrMutCodons)) ## variable lengths expect_equal(SummarizedExperiment::rowData(se)$varLengths, rep("96_96", nrow(se)), ignore_attr = TRUE) ## All variants with no mutated AAs must have a WT in the name expect_true(all(grepl("WT", SummarizedExperiment::rowData(se)$mutantNameAA[SummarizedExperiment::rowData(se)$maxNbrMutAAs == 0]))) ## No mutation for the complete WT expect_equal(SummarizedExperiment::rowData(se)["f.0.WT_r.0.WT", ]$mutationTypes, "") ## Mutation types for variants with no mutated AAs, but mutated bases, should be silent expect_true(all(SummarizedExperiment::rowData(se)$mutationTypes[SummarizedExperiment::rowData(se)$maxNbrMutAAs == 0 & SummarizedExperiment::rowData(se)$maxNbrMutBases > 0] == "silent")) ## check translation expect_equal(SummarizedExperiment::rowData(se)$sequenceAA[3], mutscan:::translateString(SummarizedExperiment::rowData(se)$sequence[3])) ## Spot checks expect_equal(SummarizedExperiment::rowData(se)$minNbrMutBases[SummarizedExperiment::rowData(se)$mutantName == "f.0.WT_r.13.CTC"], 3) ## WT: GCT expect_equal(SummarizedExperiment::rowData(se)$minNbrMutCodons[SummarizedExperiment::rowData(se)$mutantName == "f.0.WT_r.13.CTC"], 1) ## WT: GCT expect_equal(SummarizedExperiment::rowData(se)$minNbrMutAAs[SummarizedExperiment::rowData(se)$mutantName == "f.0.WT_r.13.CTC"], 1) ## WT: A expect_equal(SummarizedExperiment::rowData(se)$mutantNameAA[SummarizedExperiment::rowData(se)$mutantName == "f.0.WT_r.13.CTC"], "f.0.WT_r.13.L") ## WT: A expect_equal(SummarizedExperiment::rowData(se)$mutationTypes[SummarizedExperiment::rowData(se)$mutantName == "f.0.WT_r.13.CTC"], "nonsynonymous") ## WT: A expect_equal(SummarizedExperiment::rowData(se)$minNbrMutBases[SummarizedExperiment::rowData(se)$mutantName == "f.0.WT_r.13.GCG"], 1) ## WT: GCT 
expect_equal(SummarizedExperiment::rowData(se)$minNbrMutCodons[SummarizedExperiment::rowData(se)$mutantName == "f.0.WT_r.13.GCG"], 1) ## WT: GCT expect_equal(SummarizedExperiment::rowData(se)$minNbrMutAAs[SummarizedExperiment::rowData(se)$mutantName == "f.0.WT_r.13.GCG"], 0) ## WT: A expect_equal(SummarizedExperiment::rowData(se)$mutantNameAA[SummarizedExperiment::rowData(se)$mutantName == "f.0.WT_r.13.GCG"], "f.0.WT_r.0.WT") ## WT: A expect_equal(SummarizedExperiment::rowData(se)$mutationTypes[SummarizedExperiment::rowData(se)$mutantName == "f.0.WT_r.13.GCG"], "silent") ## WT: A }) test_that("summarizeExperiment works as expected with umis output", { se <- summarizeExperiment(x = list(sample1 = out1, sample2 = out2), coldata = coldata, countType = "umis") expect_equal(nrow(se), length(union(out1$summaryTable$mutantName, out2$summaryTable$mutantName))) expect_equal(ncol(se), 2) expect_equal(sort(rownames(se)), sort(union(out1$summaryTable$mutantName, out2$summaryTable$mutantName))) expect_equal(Matrix::colSums(SummarizedExperiment::assay(se, "counts")), c(sum(out1$summaryTable$nbrUmis), sum(out2$summaryTable$nbrUmis)), ignore_attr = TRUE) for (cn in colnames(out1$filterSummary)) { expect_equal(SummarizedExperiment::colData(se)["sample1", cn], out1$filterSummary[, cn]) expect_equal(SummarizedExperiment::colData(se)["sample2", cn], out2$filterSummary[, cn]) } for (cn in colnames(coldata)) { expect_equal(SummarizedExperiment::colData(se)["sample1", cn], coldata[1, cn]) expect_equal(SummarizedExperiment::colData(se)["sample2", cn], coldata[2, cn]) } expect_equal(S4Vectors::metadata(se)$parameters[["sample1"]], out1$parameters) expect_equal(S4Vectors::metadata(se)$parameters[["sample2"]], out2$parameters) }) test_that("summarizeExperiment orders samples equally in count matrix/colData", { se1 <- summarizeExperiment(x = list(sample1 = out1, sample2 = out2), coldata = coldata, countType = "umis") se2 <- summarizeExperiment(x = list(sample2 = out2, sample1 = out1), coldata 
= coldata, countType = "umis") m1 <- se1[, match(coldata$Name, colnames(se1))] m2 <- se2[rownames(m1), match(coldata$Name, colnames(se2))] expect_equal(SummarizedExperiment::colData(m1), SummarizedExperiment::colData(m2)) expect_equal(SummarizedExperiment::assay(m1, "counts"), SummarizedExperiment::assay(m2, "counts")) }) test_that("summarizeExperiment recognizes the presence of UMI counts correctly", { L1 <- Ldef1; L1$elementsForward <- "SSCV"; L1$elementsReverse <- "SSCV" L2 <- Ldef2; L2$elementsForward <- "SSCV"; L2$elementsReverse <- "SSCV" outl1 <- do.call(digestFastqs, L1) outl2 <- do.call(digestFastqs, L2) expect_error(summarizeExperiment(x = list(sample1 = outl1, sample2 = outl2), coldata = coldata, countType = "umis")) }) test_that("summarizeExperiment works as expected when collapsing to WT", { Ldef1$collapseToWTForward <- TRUE Ldef2$collapseToWTForward <- TRUE out1 <- do.call(digestFastqs, Ldef1) out2 <- do.call(digestFastqs, Ldef2) se <- summarizeExperiment(x = list(sample1 = out1, sample2 = out2), coldata = coldata, countType = "reads") expect_equal(nrow(se), length(union(out1$summaryTable$mutantName, out2$summaryTable$mutantName))) expect_equal(ncol(se), 2) expect_equal(sort(rownames(se)), sort(union(out1$summaryTable$mutantName, out2$summaryTable$mutantName))) expect_equal(Matrix::colSums(SummarizedExperiment::assay(se, "counts")), c(out1$filterSummary$nbrRetained, out2$filterSummary$nbrRetained), ignore_attr = TRUE) expect_equal(Matrix::colSums(SummarizedExperiment::assay(se, "counts")), c(sum(out1$summaryTable$nbrReads), sum(out2$summaryTable$nbrReads)), ignore_attr = TRUE) for (cn in colnames(out1$filterSummary)) { expect_equal(SummarizedExperiment::colData(se)["sample1", cn], out1$filterSummary[, cn]) expect_equal(SummarizedExperiment::colData(se)["sample2", cn], out2$filterSummary[, cn]) } for (cn in colnames(coldata)) { expect_equal(SummarizedExperiment::colData(se)["sample1", cn], coldata[1, cn]) 
expect_equal(SummarizedExperiment::colData(se)["sample2", cn], coldata[2, cn]) } expect_equal(S4Vectors::metadata(se)$parameters[["sample1"]], out1$parameters) expect_equal(S4Vectors::metadata(se)$parameters[["sample2"]], out2$parameters) expect_true(all(c("mutantName", "sequence", "nbrMutBases", "minNbrMutBases", "maxNbrMutBases", "nbrMutCodons", "minNbrMutCodons", "maxNbrMutCodons", "nbrMutAAs", "minNbrMutAAs", "maxNbrMutAAs", "sequenceAA", "mutantNameAA", "mutationTypes", "varLengths") %in% colnames(SummarizedExperiment::rowData(se)))) ## variable lengths expect_equal(SummarizedExperiment::rowData(se)$varLengths, rep("96_96", nrow(se)), ignore_attr = TRUE) expect_false(any(grepl("^,", SummarizedExperiment::rowData(se)$mutationTypes))) expect_false(any(grepl(",$", SummarizedExperiment::rowData(se)$mutationTypes))) expect_equal(SummarizedExperiment::rowData(se)$sequenceAA[3], mutscan:::translateString(SummarizedExperiment::rowData(se)$sequence[3])) expect_type(SummarizedExperiment::rowData(se)$nbrMutBases, "character") expect_type(SummarizedExperiment::rowData(se)$nbrMutCodons, "character") expect_type(SummarizedExperiment::rowData(se)$nbrMutAAs, "character") expect_equal(SummarizedExperiment::rowData(se)$nbrMutBases[[which(SummarizedExperiment::rowData(se)$mutantName == "f_r.0.WT")]], "0,1,2,3") expect_equal(SummarizedExperiment::rowData(se)$nbrMutCodons[[which(SummarizedExperiment::rowData(se)$mutantName == "f_r.0.WT")]], "0,1") expect_equal(SummarizedExperiment::rowData(se)$nbrMutAAs[[which(SummarizedExperiment::rowData(se)$mutantName == "f_r.0.WT")]], "0,1") expect_true(all(grepl("stop", SummarizedExperiment::rowData(se)$mutationTypes[grep("\\*", SummarizedExperiment::rowData(se)$mutantNameAA)]))) }) test_that("mergeValues works", { res <- mergeValues(c("A", "B", "C", "A", "D", "B"), c("a,b", "b,c", "c", "b,c", "b,a", "d")) expect_s3_class(res, "data.frame") expect_named(res, c("mutantNameColl", "valueColl")) expect_equal(res$mutantNameColl, c("A", "B", "C", 
"D")) expect_equal(res$value, c("a,b,c", "b,c,d", "c", "a,b")) res <- mergeValues(c("B", "A", "C", "D"), c("a,b", "c", "c", "d")) expect_s3_class(res, "data.frame") expect_named(res, c("mutantNameColl", "valueColl")) expect_equal(res$mutantNameColl, c("A", "B", "C", "D")) expect_equal(res$value, c("c", "a,b", "c", "d")) res <- mergeValues(c("A", "B", "C", "A", "D", "B"), c("a:b", "b:c", "c", "b:c", "b:a", "d"), delimiter = ":") expect_s3_class(res, "data.frame") expect_named(res, c("mutantNameColl", "valueColl")) expect_equal(res$mutantNameColl, c("A", "B", "C", "D")) expect_equal(res$value, c("a:b:c", "b:c:d", "c", "a:b")) res <- mergeValues(c("A", "B", "C", "A", "D", "B"), c("a,b", "b:c", "c", "b:c", "b:a", "d"), delimiter = ":") expect_s3_class(res, "data.frame") expect_named(res, c("mutantNameColl", "valueColl")) expect_equal(res$mutantNameColl, c("A", "B", "C", "D")) expect_equal(res$value, c("a,b:b:c", "b:c:d", "c", "a:b")) })
/tests/testthat/test_summarizeExperiment.R
permissive
fmicompbio/mutscan
R
false
false
17,596
r
Ldef <- list( mergeForwardReverse = FALSE, minOverlap = 0, maxOverlap = 0, maxFracMismatchOverlap = 0, greedyOverlap = TRUE, revComplForward = FALSE, revComplReverse = FALSE, elementsForward = "SUCV", elementsReverse = "SUCV", elementLengthsForward = c(1, 10, 18, 96), elementLengthsReverse = c(1, 8, 20, 96), adapterForward = "GGAAGAGCACACGTC", adapterReverse = "GGAAGAGCGTCGTGT", primerForward = "", primerReverse = "", wildTypeForward = "ACTGATACACTCCAAGCGGAGACAGACCAACTAGAAGATGAGAAGTCTGCTTTGCAGACCGAGATTGCCAACCTGCTGAAGGAGAAGGAAAAACTA", wildTypeReverse = "ATCGCCCGGCTGGAGGAAAAAGTGAAAACCTTGAAAGCTCAGAACTCGGAGCTGGCGTCCACGGCCAACATGCTCAGGGAACAGGTGGCACAGCTT", constantForward = "AACCGGAGGAGGGAGCTG", constantReverse = "GAAAAAGGAAGCTGGAGAGA", avePhredMinForward = 20.0, avePhredMinReverse = 20.0, variableNMaxForward = 0, variableNMaxReverse = 0, umiNMax = 0, nbrMutatedCodonsMaxForward = 1, nbrMutatedCodonsMaxReverse = 1, nbrMutatedBasesMaxForward = -1, nbrMutatedBasesMaxReverse = -1, forbiddenMutatedCodonsForward = "NNW", forbiddenMutatedCodonsReverse = "NNW", useTreeWTmatch = FALSE, collapseToWTForward = FALSE, collapseToWTReverse = FALSE, mutatedPhredMinForward = 0.0, mutatedPhredMinReverse = 0.0, mutNameDelimiter = ".", constantMaxDistForward = -1, constantMaxDistReverse = -1, variableCollapseMaxDist = 0, variableCollapseMinReads = 0, variableCollapseMinRatio = 0, umiCollapseMaxDist = 0, maxNReads = -1, verbose = FALSE ) Ldef1 <- c( list(fastqForward = system.file("extdata/transInput_1.fastq.gz", package = "mutscan"), fastqReverse = system.file("extdata/transInput_2.fastq.gz", package = "mutscan") ), Ldef) Ldef2 <- c( list(fastqForward = system.file("extdata/transOutput_1.fastq.gz", package = "mutscan"), fastqReverse = system.file("extdata/transOutput_2.fastq.gz", package = "mutscan") ), Ldef) out1 <- do.call(digestFastqs, Ldef1) out2 <- do.call(digestFastqs, Ldef2) coldata <- data.frame(Name = c("sample1", "sample2"), Condition = c("input", "output"), Replicate = c(1, 1), OD 
= c(0.05, 1.5), stringsAsFactors = FALSE) test_that("summarizeExperiment fails with incorrect arguments", { ## x must be a named list, with names matching coldata$Name expect_error(summarizeExperiment(x = 1, coldata = coldata)) expect_error(summarizeExperiment(x = list(out1), coldata = coldata)) expect_error(summarizeExperiment(x = out1, coldata = coldata)) expect_error(summarizeExperiment(x = list(s1 = out1, s1 = out2), coldata = coldata)) ## coldata must be a data.frame with a column Name expect_error(summarizeExperiment(x = list(sample1 = out1, sample2 = out2), coldata = 1)) expect_error(summarizeExperiment(x = list(sample1 = out1, sample2 = out2), coldata = coldata[, c("Condition", "OD")])) expect_error(summarizeExperiment(x = list(sample1 = out1, sample2 = out2), coldata = as.list(coldata))) ## countType must be reads or umis expect_error(summarizeExperiment(x = list(sample1 = out1, sample2 = out2), coldata = coldata, countType = 1)) expect_error(summarizeExperiment(x = list(sample1 = out1, sample2 = out2), coldata = coldata, countType = "umi")) expect_error(summarizeExperiment(x = list(sample1 = out1, sample2 = out2), coldata = coldata, countType = c("reads", "umis"))) ## names must match expect_error(summarizeExperiment(x = list(s1 = out1, s2 = out2), coldata = coldata, countType = "umis")) expect_warning(summarizeExperiment(x = list(sample1 = out1, sample2 = out2, sample3 = out1), coldata = coldata, countType = "umis")) ## samples must have the same mutNameDelimiter tmpout2 <- out2 tmpout2$parameters$mutNameDelimiter <- ":" expect_error(summarizeExperiment(x = list(sample1 = out1, sample2 = tmpout2), coldata = coldata, countType = "reads")) }) test_that("summarizeExperiment works as expected with reads output", { se <- summarizeExperiment(x = list(sample1 = out1, sample2 = out2), coldata = coldata, countType = "reads") expect_equal(nrow(se), length(union(out1$summaryTable$mutantName, out2$summaryTable$mutantName))) expect_equal(ncol(se), 2) 
expect_equal(sort(rownames(se)), sort(union(out1$summaryTable$mutantName, out2$summaryTable$mutantName))) expect_equal(Matrix::colSums(SummarizedExperiment::assay(se, "counts")), c(out1$filterSummary$nbrRetained, out2$filterSummary$nbrRetained), ignore_attr = TRUE) expect_equal(Matrix::colSums(SummarizedExperiment::assay(se, "counts")), c(sum(out1$summaryTable$nbrReads), sum(out2$summaryTable$nbrReads)), ignore_attr = TRUE) for (cn in colnames(out1$filterSummary)) { expect_equal(SummarizedExperiment::colData(se)["sample1", cn], out1$filterSummary[, cn]) expect_equal(SummarizedExperiment::colData(se)["sample2", cn], out2$filterSummary[, cn]) } for (cn in colnames(coldata)) { expect_equal(SummarizedExperiment::colData(se)["sample1", cn], coldata[1, cn]) expect_equal(SummarizedExperiment::colData(se)["sample2", cn], coldata[2, cn]) } expect_equal(S4Vectors::metadata(se)$parameters[["sample1"]], out1$parameters) expect_equal(S4Vectors::metadata(se)$parameters[["sample2"]], out2$parameters) expect_true(all(c("mutantName", "sequence", "nbrMutBases", "minNbrMutBases", "maxNbrMutBases", "nbrMutCodons", "minNbrMutCodons", "maxNbrMutCodons", "nbrMutAAs", "minNbrMutAAs", "maxNbrMutAAs", "sequenceAA", "mutantNameAA", "mutationTypes", "varLengths") %in% colnames(SummarizedExperiment::rowData(se)))) ## Check that the number of mutated codons are correct (=equal to the number of ## entries in the mutant name that don't contain WT) expect_equal(sapply(strsplit(SummarizedExperiment::rowData(se)$mutantName, "_"), function(w) length(w[!grepl("WT", w)])), SummarizedExperiment::rowData(se)$minNbrMutCodons, ignore_attr = TRUE) expect_equal(sapply(strsplit(SummarizedExperiment::rowData(se)$mutantName, "_"), function(w) length(w[!grepl("WT", w)])), SummarizedExperiment::rowData(se)$maxNbrMutCodons, ignore_attr = TRUE) ## Check that the number of mutated bases is not larger than 3x the number of mutated codons expect_true(all(SummarizedExperiment::rowData(se)$maxNbrMutBases <= 3 * 
SummarizedExperiment::rowData(se)$maxNbrMutCodons)) expect_true(all(SummarizedExperiment::rowData(se)$maxNbrMutAAs <= SummarizedExperiment::rowData(se)$maxNbrMutCodons)) ## variable lengths expect_equal(SummarizedExperiment::rowData(se)$varLengths, rep("96_96", nrow(se)), ignore_attr = TRUE) ## All variants with no mutated AAs must have a WT in the name expect_true(all(grepl("WT", SummarizedExperiment::rowData(se)$mutantNameAA[SummarizedExperiment::rowData(se)$maxNbrMutAAs == 0]))) ## No mutation for the complete WT expect_equal(SummarizedExperiment::rowData(se)["f.0.WT_r.0.WT", ]$mutationTypes, "") ## Mutation types for variants with no mutated AAs, but mutated bases, should be silent expect_true(all(SummarizedExperiment::rowData(se)$mutationTypes[SummarizedExperiment::rowData(se)$maxNbrMutAAs == 0 & SummarizedExperiment::rowData(se)$maxNbrMutBases > 0] == "silent")) ## check translation expect_equal(SummarizedExperiment::rowData(se)$sequenceAA[3], mutscan:::translateString(SummarizedExperiment::rowData(se)$sequence[3])) ## Spot checks expect_equal(SummarizedExperiment::rowData(se)$minNbrMutBases[SummarizedExperiment::rowData(se)$mutantName == "f.0.WT_r.13.CTC"], 3) ## WT: GCT expect_equal(SummarizedExperiment::rowData(se)$minNbrMutCodons[SummarizedExperiment::rowData(se)$mutantName == "f.0.WT_r.13.CTC"], 1) ## WT: GCT expect_equal(SummarizedExperiment::rowData(se)$minNbrMutAAs[SummarizedExperiment::rowData(se)$mutantName == "f.0.WT_r.13.CTC"], 1) ## WT: A expect_equal(SummarizedExperiment::rowData(se)$mutantNameAA[SummarizedExperiment::rowData(se)$mutantName == "f.0.WT_r.13.CTC"], "f.0.WT_r.13.L") ## WT: A expect_equal(SummarizedExperiment::rowData(se)$mutationTypes[SummarizedExperiment::rowData(se)$mutantName == "f.0.WT_r.13.CTC"], "nonsynonymous") ## WT: A expect_equal(SummarizedExperiment::rowData(se)$minNbrMutBases[SummarizedExperiment::rowData(se)$mutantName == "f.0.WT_r.13.GCG"], 1) ## WT: GCT 
expect_equal(SummarizedExperiment::rowData(se)$minNbrMutCodons[SummarizedExperiment::rowData(se)$mutantName == "f.0.WT_r.13.GCG"], 1) ## WT: GCT expect_equal(SummarizedExperiment::rowData(se)$minNbrMutAAs[SummarizedExperiment::rowData(se)$mutantName == "f.0.WT_r.13.GCG"], 0) ## WT: A expect_equal(SummarizedExperiment::rowData(se)$mutantNameAA[SummarizedExperiment::rowData(se)$mutantName == "f.0.WT_r.13.GCG"], "f.0.WT_r.0.WT") ## WT: A expect_equal(SummarizedExperiment::rowData(se)$mutationTypes[SummarizedExperiment::rowData(se)$mutantName == "f.0.WT_r.13.GCG"], "silent") ## WT: A }) test_that("summarizeExperiment works as expected with umis output", { se <- summarizeExperiment(x = list(sample1 = out1, sample2 = out2), coldata = coldata, countType = "umis") expect_equal(nrow(se), length(union(out1$summaryTable$mutantName, out2$summaryTable$mutantName))) expect_equal(ncol(se), 2) expect_equal(sort(rownames(se)), sort(union(out1$summaryTable$mutantName, out2$summaryTable$mutantName))) expect_equal(Matrix::colSums(SummarizedExperiment::assay(se, "counts")), c(sum(out1$summaryTable$nbrUmis), sum(out2$summaryTable$nbrUmis)), ignore_attr = TRUE) for (cn in colnames(out1$filterSummary)) { expect_equal(SummarizedExperiment::colData(se)["sample1", cn], out1$filterSummary[, cn]) expect_equal(SummarizedExperiment::colData(se)["sample2", cn], out2$filterSummary[, cn]) } for (cn in colnames(coldata)) { expect_equal(SummarizedExperiment::colData(se)["sample1", cn], coldata[1, cn]) expect_equal(SummarizedExperiment::colData(se)["sample2", cn], coldata[2, cn]) } expect_equal(S4Vectors::metadata(se)$parameters[["sample1"]], out1$parameters) expect_equal(S4Vectors::metadata(se)$parameters[["sample2"]], out2$parameters) }) test_that("summarizeExperiment orders samples equally in count matrix/colData", { se1 <- summarizeExperiment(x = list(sample1 = out1, sample2 = out2), coldata = coldata, countType = "umis") se2 <- summarizeExperiment(x = list(sample2 = out2, sample1 = out1), coldata 
= coldata, countType = "umis") m1 <- se1[, match(coldata$Name, colnames(se1))] m2 <- se2[rownames(m1), match(coldata$Name, colnames(se2))] expect_equal(SummarizedExperiment::colData(m1), SummarizedExperiment::colData(m2)) expect_equal(SummarizedExperiment::assay(m1, "counts"), SummarizedExperiment::assay(m2, "counts")) }) test_that("summarizeExperiment recognizes the presence of UMI counts correctly", { L1 <- Ldef1; L1$elementsForward <- "SSCV"; L1$elementsReverse <- "SSCV" L2 <- Ldef2; L2$elementsForward <- "SSCV"; L2$elementsReverse <- "SSCV" outl1 <- do.call(digestFastqs, L1) outl2 <- do.call(digestFastqs, L2) expect_error(summarizeExperiment(x = list(sample1 = outl1, sample2 = outl2), coldata = coldata, countType = "umis")) }) test_that("summarizeExperiment works as expected when collapsing to WT", { Ldef1$collapseToWTForward <- TRUE Ldef2$collapseToWTForward <- TRUE out1 <- do.call(digestFastqs, Ldef1) out2 <- do.call(digestFastqs, Ldef2) se <- summarizeExperiment(x = list(sample1 = out1, sample2 = out2), coldata = coldata, countType = "reads") expect_equal(nrow(se), length(union(out1$summaryTable$mutantName, out2$summaryTable$mutantName))) expect_equal(ncol(se), 2) expect_equal(sort(rownames(se)), sort(union(out1$summaryTable$mutantName, out2$summaryTable$mutantName))) expect_equal(Matrix::colSums(SummarizedExperiment::assay(se, "counts")), c(out1$filterSummary$nbrRetained, out2$filterSummary$nbrRetained), ignore_attr = TRUE) expect_equal(Matrix::colSums(SummarizedExperiment::assay(se, "counts")), c(sum(out1$summaryTable$nbrReads), sum(out2$summaryTable$nbrReads)), ignore_attr = TRUE) for (cn in colnames(out1$filterSummary)) { expect_equal(SummarizedExperiment::colData(se)["sample1", cn], out1$filterSummary[, cn]) expect_equal(SummarizedExperiment::colData(se)["sample2", cn], out2$filterSummary[, cn]) } for (cn in colnames(coldata)) { expect_equal(SummarizedExperiment::colData(se)["sample1", cn], coldata[1, cn]) 
expect_equal(SummarizedExperiment::colData(se)["sample2", cn], coldata[2, cn]) } expect_equal(S4Vectors::metadata(se)$parameters[["sample1"]], out1$parameters) expect_equal(S4Vectors::metadata(se)$parameters[["sample2"]], out2$parameters) expect_true(all(c("mutantName", "sequence", "nbrMutBases", "minNbrMutBases", "maxNbrMutBases", "nbrMutCodons", "minNbrMutCodons", "maxNbrMutCodons", "nbrMutAAs", "minNbrMutAAs", "maxNbrMutAAs", "sequenceAA", "mutantNameAA", "mutationTypes", "varLengths") %in% colnames(SummarizedExperiment::rowData(se)))) ## variable lengths expect_equal(SummarizedExperiment::rowData(se)$varLengths, rep("96_96", nrow(se)), ignore_attr = TRUE) expect_false(any(grepl("^,", SummarizedExperiment::rowData(se)$mutationTypes))) expect_false(any(grepl(",$", SummarizedExperiment::rowData(se)$mutationTypes))) expect_equal(SummarizedExperiment::rowData(se)$sequenceAA[3], mutscan:::translateString(SummarizedExperiment::rowData(se)$sequence[3])) expect_type(SummarizedExperiment::rowData(se)$nbrMutBases, "character") expect_type(SummarizedExperiment::rowData(se)$nbrMutCodons, "character") expect_type(SummarizedExperiment::rowData(se)$nbrMutAAs, "character") expect_equal(SummarizedExperiment::rowData(se)$nbrMutBases[[which(SummarizedExperiment::rowData(se)$mutantName == "f_r.0.WT")]], "0,1,2,3") expect_equal(SummarizedExperiment::rowData(se)$nbrMutCodons[[which(SummarizedExperiment::rowData(se)$mutantName == "f_r.0.WT")]], "0,1") expect_equal(SummarizedExperiment::rowData(se)$nbrMutAAs[[which(SummarizedExperiment::rowData(se)$mutantName == "f_r.0.WT")]], "0,1") expect_true(all(grepl("stop", SummarizedExperiment::rowData(se)$mutationTypes[grep("\\*", SummarizedExperiment::rowData(se)$mutantNameAA)]))) }) test_that("mergeValues works", { res <- mergeValues(c("A", "B", "C", "A", "D", "B"), c("a,b", "b,c", "c", "b,c", "b,a", "d")) expect_s3_class(res, "data.frame") expect_named(res, c("mutantNameColl", "valueColl")) expect_equal(res$mutantNameColl, c("A", "B", "C", 
"D")) expect_equal(res$value, c("a,b,c", "b,c,d", "c", "a,b")) res <- mergeValues(c("B", "A", "C", "D"), c("a,b", "c", "c", "d")) expect_s3_class(res, "data.frame") expect_named(res, c("mutantNameColl", "valueColl")) expect_equal(res$mutantNameColl, c("A", "B", "C", "D")) expect_equal(res$value, c("c", "a,b", "c", "d")) res <- mergeValues(c("A", "B", "C", "A", "D", "B"), c("a:b", "b:c", "c", "b:c", "b:a", "d"), delimiter = ":") expect_s3_class(res, "data.frame") expect_named(res, c("mutantNameColl", "valueColl")) expect_equal(res$mutantNameColl, c("A", "B", "C", "D")) expect_equal(res$value, c("a:b:c", "b:c:d", "c", "a:b")) res <- mergeValues(c("A", "B", "C", "A", "D", "B"), c("a,b", "b:c", "c", "b:c", "b:a", "d"), delimiter = ":") expect_s3_class(res, "data.frame") expect_named(res, c("mutantNameColl", "valueColl")) expect_equal(res$mutantNameColl, c("A", "B", "C", "D")) expect_equal(res$value, c("a,b:b:c", "b:c:d", "c", "a:b")) })
df <- data.frame(fromJSON(getURL(URLencode(gsub("\n", " ", '129.152.144.84:5001/rest/native/?query= "select CASE WHEN grouping(job) = 1 THEN \\\'zTotal\\\' ELSE job END as job_name, CASE WHEN grouping(dname) = 1 THEN \\\'yTotal\\\' ELSE dname END as dname, count(*) n from emp e join dept d on(e.deptno=d.deptno) group by cube (job, dname)" ')),httpheader=c(DB='jdbc:oracle:thin:@129.152.144.84:1521/PDB1.usuniversi01134.oraclecloud.internal', USER='DV_Scott', PASS='orcl', MODE='native_mode', MODEL='model', returnDimensions = 'False', returnFor = 'JSON'), verbose = TRUE))); tbl_df(df) spread(df, DNAME, N) # Basic Tableau Table Calculations # http://onlinehelp.tableau.com/current/pro/online/mac/en-us/calculations_tablecalculations_definebasic.html df <- data.frame(fromJSON(getURL(URLencode(gsub("\n", " ", '129.152.144.84:5001/rest/native/?query= "SELECT empno, deptno, sal, rank() OVER (PARTITION BY deptno order by sal desc) as DEPT_RANK FROM emp order by 2,3 desc" ')),httpheader=c(DB='jdbc:oracle:thin:@129.152.144.84:1521/PDBF15DV.usuniversi01134.oraclecloud.internal', USER='cs329e_UTEid', PASS='orcl_UTEid', MODE='native_mode', MODEL='model', returnDimensions = 'False', returnFor = 'JSON'), verbose = TRUE))); tbl_df(df) df <- data.frame(fromJSON(getURL(URLencode(gsub("\n", " ", '129.152.144.84:5001/rest/native/?query= "SELECT empno, deptno, sal, rank() OVER (order by sal desc) as DEPT_RANK FROM emp " ')),httpheader=c(DB='jdbc:oracle:thin:@129.152.144.84:1521/PDBF15DV.usuniversi01134.oraclecloud.internal', USER='cs329e_UTEid', PASS='orcl_UTEid', MODE='native_mode', MODEL='model', returnDimensions = 'False', returnFor = 'JSON'), verbose = TRUE))); tbl_df(df) df <- data.frame(fromJSON(getURL(URLencode(gsub("\n", " ", '129.152.144.84:5001/rest/native/?query= "SELECT * FROM (SELECT empno, deptno, sal, rank() OVER (PARTITION BY deptno order by sal desc) as DEPT_RANK FROM emp) where DEPT_RANK = 1 " 
')),httpheader=c(DB='jdbc:oracle:thin:@129.152.144.84:1521/PDBF15DV.usuniversi01134.oraclecloud.internal', USER='cs329e_UTEid', PASS='orcl_UTEid', MODE='native_mode', MODEL='model', returnDimensions = 'False', returnFor = 'JSON'), verbose = TRUE))); tbl_df(df) df <- data.frame(fromJSON(getURL(URLencode(gsub("\n", " ", '129.152.144.84:5001/rest/native/?query= "select empno, deptno, sal, last_value(max_sal) OVER (PARTITION BY deptno order by sal) max_sal, last_value(max_sal) OVER (PARTITION BY deptno order by sal) - sal sal_diff from (SELECT empno, deptno, sal, max(sal) OVER (PARTITION BY deptno) max_sal FROM emp) order by 2,3 desc" ')),httpheader=c(DB='jdbc:oracle:thin:@129.152.144.84:1521/PDBF15DV.usuniversi01134.oraclecloud.internal', USER='cs329e_UTEid', PASS='orcl_UTEid', MODE='native_mode', MODEL='model', returnDimensions = 'False', returnFor = 'JSON'), verbose = TRUE))); tbl_df(df) df <- data.frame(fromJSON(getURL(URLencode(gsub("\n", " ", '129.152.144.84:5001/rest/native/?query= "SELECT empno, deptno, sal, nth_value(sal, 2) OVER (PARTITION BY deptno) nth_sal FROM emp order by 2,3 desc" ')),httpheader=c(DB='jdbc:oracle:thin:@129.152.144.84:1521/PDBF15DV.usuniversi01134.oraclecloud.internal', USER='cs329e_UTEid', PASS='orcl_UTEid', MODE='native_mode', MODEL='model', returnDimensions = 'False', returnFor = 'JSON'), verbose = TRUE))); tbl_df(df) df <- data.frame(fromJSON(getURL(URLencode(gsub("\n", " ", '129.152.144.84:5001/rest/native/?query= "select empno, deptno, sal, cume_dist() OVER (PARTITION BY deptno order by sal) cume_dist from emp order by 2,3 desc" ')),httpheader=c(DB='jdbc:oracle:thin:@129.152.144.84:1521/PDBF15DV.usuniversi01134.oraclecloud.internal', USER='cs329e_UTEid', PASS='orcl_UTEid', MODE='native_mode', MODEL='model', returnDimensions = 'False', returnFor = 'JSON'), verbose = TRUE))); tbl_df(df)
/RWorkshop/07 Crosstabs/Crosstabs.R
no_license
brauchlen/DataVisualization
R
false
false
3,790
r
df <- data.frame(fromJSON(getURL(URLencode(gsub("\n", " ", '129.152.144.84:5001/rest/native/?query= "select CASE WHEN grouping(job) = 1 THEN \\\'zTotal\\\' ELSE job END as job_name, CASE WHEN grouping(dname) = 1 THEN \\\'yTotal\\\' ELSE dname END as dname, count(*) n from emp e join dept d on(e.deptno=d.deptno) group by cube (job, dname)" ')),httpheader=c(DB='jdbc:oracle:thin:@129.152.144.84:1521/PDB1.usuniversi01134.oraclecloud.internal', USER='DV_Scott', PASS='orcl', MODE='native_mode', MODEL='model', returnDimensions = 'False', returnFor = 'JSON'), verbose = TRUE))); tbl_df(df) spread(df, DNAME, N) # Basic Tableau Table Calculations # http://onlinehelp.tableau.com/current/pro/online/mac/en-us/calculations_tablecalculations_definebasic.html df <- data.frame(fromJSON(getURL(URLencode(gsub("\n", " ", '129.152.144.84:5001/rest/native/?query= "SELECT empno, deptno, sal, rank() OVER (PARTITION BY deptno order by sal desc) as DEPT_RANK FROM emp order by 2,3 desc" ')),httpheader=c(DB='jdbc:oracle:thin:@129.152.144.84:1521/PDBF15DV.usuniversi01134.oraclecloud.internal', USER='cs329e_UTEid', PASS='orcl_UTEid', MODE='native_mode', MODEL='model', returnDimensions = 'False', returnFor = 'JSON'), verbose = TRUE))); tbl_df(df) df <- data.frame(fromJSON(getURL(URLencode(gsub("\n", " ", '129.152.144.84:5001/rest/native/?query= "SELECT empno, deptno, sal, rank() OVER (order by sal desc) as DEPT_RANK FROM emp " ')),httpheader=c(DB='jdbc:oracle:thin:@129.152.144.84:1521/PDBF15DV.usuniversi01134.oraclecloud.internal', USER='cs329e_UTEid', PASS='orcl_UTEid', MODE='native_mode', MODEL='model', returnDimensions = 'False', returnFor = 'JSON'), verbose = TRUE))); tbl_df(df) df <- data.frame(fromJSON(getURL(URLencode(gsub("\n", " ", '129.152.144.84:5001/rest/native/?query= "SELECT * FROM (SELECT empno, deptno, sal, rank() OVER (PARTITION BY deptno order by sal desc) as DEPT_RANK FROM emp) where DEPT_RANK = 1 " 
')),httpheader=c(DB='jdbc:oracle:thin:@129.152.144.84:1521/PDBF15DV.usuniversi01134.oraclecloud.internal', USER='cs329e_UTEid', PASS='orcl_UTEid', MODE='native_mode', MODEL='model', returnDimensions = 'False', returnFor = 'JSON'), verbose = TRUE))); tbl_df(df) df <- data.frame(fromJSON(getURL(URLencode(gsub("\n", " ", '129.152.144.84:5001/rest/native/?query= "select empno, deptno, sal, last_value(max_sal) OVER (PARTITION BY deptno order by sal) max_sal, last_value(max_sal) OVER (PARTITION BY deptno order by sal) - sal sal_diff from (SELECT empno, deptno, sal, max(sal) OVER (PARTITION BY deptno) max_sal FROM emp) order by 2,3 desc" ')),httpheader=c(DB='jdbc:oracle:thin:@129.152.144.84:1521/PDBF15DV.usuniversi01134.oraclecloud.internal', USER='cs329e_UTEid', PASS='orcl_UTEid', MODE='native_mode', MODEL='model', returnDimensions = 'False', returnFor = 'JSON'), verbose = TRUE))); tbl_df(df) df <- data.frame(fromJSON(getURL(URLencode(gsub("\n", " ", '129.152.144.84:5001/rest/native/?query= "SELECT empno, deptno, sal, nth_value(sal, 2) OVER (PARTITION BY deptno) nth_sal FROM emp order by 2,3 desc" ')),httpheader=c(DB='jdbc:oracle:thin:@129.152.144.84:1521/PDBF15DV.usuniversi01134.oraclecloud.internal', USER='cs329e_UTEid', PASS='orcl_UTEid', MODE='native_mode', MODEL='model', returnDimensions = 'False', returnFor = 'JSON'), verbose = TRUE))); tbl_df(df) df <- data.frame(fromJSON(getURL(URLencode(gsub("\n", " ", '129.152.144.84:5001/rest/native/?query= "select empno, deptno, sal, cume_dist() OVER (PARTITION BY deptno order by sal) cume_dist from emp order by 2,3 desc" ')),httpheader=c(DB='jdbc:oracle:thin:@129.152.144.84:1521/PDBF15DV.usuniversi01134.oraclecloud.internal', USER='cs329e_UTEid', PASS='orcl_UTEid', MODE='native_mode', MODEL='model', returnDimensions = 'False', returnFor = 'JSON'), verbose = TRUE))); tbl_df(df)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/add.tree.r \name{add.tree} \alias{add.tree} \title{Plot tool to add phylogenetic trees to ordination plots} \usage{ add.tree( OP, tree, edge.col = 1, edge.lty = 1, edge.lwd = 1, anc.pts = FALSE, return.ancs = FALSE, ... ) } \arguments{ \item{OP}{An object with class \code{\link{plot.ordinate}}.} \item{tree}{An object of class phylo.} \item{edge.col}{A single value or vector equal to the number of edges for edge colors.} \item{edge.lty}{A single value or vector equal to the number of edges for edge line type} \item{edge.lwd}{A single value or vector equal to the number of edges for edge line weight.} \item{anc.pts}{A logical value for whether to add points for ancestral values.} \item{return.ancs}{A logical value for whether ancestral values should be printed.} \item{...}{Arguments passed onto \code{\link{points}}, used only for ancestral points.} } \description{ Function adds a tree based on a description of edges from a class phylo object to an existing plot made from an ordinate object. } \details{ With some \code{\link{ordinate}} plots, it might be desirable to add a tree connecting points in a prescribed way, which would be tedious using \code{\link{points}} or \code{\link{lines}}. This function will project a tree from an object of class phylo into a plot with class, \code{\link{plot.ordinate}}. Using an edges matrix from a phylo object, this function will systematically connect plot points with lines that pass through estimated ancestral character points in the same plot space. Ancestral states are estimated assuming a Brownian motion model of evolutionary divergence. } \examples{ # Examples use residuals from a regression of salamander morphological # traits against body size (snout to vent length, SVL). # Observations are species means and a phylogenetic covariance matrix # describes the relatedness among observations. 
data("PlethMorph") Y <- as.data.frame(PlethMorph[c("TailLength", "HeadLength", "Snout.eye", "BodyWidth", "Forelimb", "Hindlimb")]) Y <- as.matrix(Y) R <- lm.rrpp(Y ~ SVL, data = PlethMorph, iter = 0, print.progress = FALSE)$LM$residuals PCA <- ordinate(R, scale. = TRUE) pc.plot <- plot(PCA, pch = 19, col = "blue") add.tree(pc.plot, tree = PlethMorph$tree, anc.pts = TRUE, pch = 19, cex = 0.5, col = "red") } \seealso{ \code{\link{lines}} and \code{\link{points}} } \author{ Michael Collyer } \keyword{graphics}
/man/add.tree.Rd
no_license
mlcollyer/RRPP
R
false
true
2,495
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/add.tree.r \name{add.tree} \alias{add.tree} \title{Plot tool to add phylogenetic trees to ordination plots} \usage{ add.tree( OP, tree, edge.col = 1, edge.lty = 1, edge.lwd = 1, anc.pts = FALSE, return.ancs = FALSE, ... ) } \arguments{ \item{OP}{An object with class \code{\link{plot.ordinate}}.} \item{tree}{An object of class phylo.} \item{edge.col}{A single value or vector equal to the number of edges for edge colors.} \item{edge.lty}{A single value or vector equal to the number of edges for edge line type} \item{edge.lwd}{A single value or vector equal to the number of edges for edge line weight.} \item{anc.pts}{A logical value for whether to add points for ancestral values.} \item{return.ancs}{A logical value for whether ancestral values should be printed.} \item{...}{Arguments passed onto \code{\link{points}}, used only for ancestral points.} } \description{ Function adds a tree based on a description of edges from a class phylo object to an existing plot made from an ordinate object. } \details{ With some \code{\link{ordinate}} plots, it might be desirable to add a tree connecting points in a prescribed way, which would be tedious using \code{\link{points}} or \code{\link{lines}}. This function will project a tree from an object of class phylo into a plot with class, \code{\link{plot.ordinate}}. Using an edges matrix from a phylo object, this function will systematically connect plot points with lines that pass through estimated ancestral character points in the same plot space. Ancestral states are estimated assuming a Brownian motion model of evolutionary divergence. } \examples{ # Examples use residuals from a regression of salamander morphological # traits against body size (snout to vent length, SVL). # Observations are species means and a phylogenetic covariance matrix # describes the relatedness among observations. 
data("PlethMorph") Y <- as.data.frame(PlethMorph[c("TailLength", "HeadLength", "Snout.eye", "BodyWidth", "Forelimb", "Hindlimb")]) Y <- as.matrix(Y) R <- lm.rrpp(Y ~ SVL, data = PlethMorph, iter = 0, print.progress = FALSE)$LM$residuals PCA <- ordinate(R, scale. = TRUE) pc.plot <- plot(PCA, pch = 19, col = "blue") add.tree(pc.plot, tree = PlethMorph$tree, anc.pts = TRUE, pch = 19, cex = 0.5, col = "red") } \seealso{ \code{\link{lines}} and \code{\link{points}} } \author{ Michael Collyer } \keyword{graphics}
#<<BEGIN>> mcstoc <- function(func=runif, type=c("V","U","VU","0"), ..., nsv=ndvar(), nsu=ndunc(), nvariates=1, outm="each", nsample="n", seed=NULL, rtrunc=FALSE, linf=-Inf, lsup=Inf, lhs=FALSE) #TITLE Creates Stochastic mcnode Objects #DESCRIPTION # Creates a \code{\link{mcnode}} object using a random generating function. #KEYWORDS methods #INPUTS #{func}<<A function providing random data or its name as character.>> #[INPUTS] #{type}<<The type of \samp{mcnode} to be built. By default, a \samp{"V"} node. see \code{\link{mcnode}} for details.>> #{\dots}<<All other arguments but the size of the sample to be passed to \samp{func}. These arguments #should be vectors or \samp{mcnode}s (arrays prohibited).>> #{nsv}<<The number of simulations in the variability dimension.>> #{nsu}<<The number of simulations in the uncertainty dimension.>> #{nvariates}<<The number of variates of the output.>> #{outm}<<The output of the \samp{mcnode} for multivariates nodes. May be "each" (default) #if an output should be provided for each variates considered independently, "none" for no output #or a vector of functions (as a character string) that will be applied on the variates dimension #before any output (ex: \samp{"mean"}, \samp{"median"}, \samp{c("min","max")}). Each function should return 1 #value when applied to 1 value (ex. do not use \samp{"range"}). #Note that the \samp{outm} attribute may be changed further using the \code{\link{outm}} function.>> #{nsample}<<The name of the parameter of the function giving the size of the vector. #By default, \samp{n}, as in most of the random sampling distributions # of the \samp{stats} library (with the exceptions of \samp{rhyper} and \samp{rwilcox} where \samp{nsample="nn"} should be used).>> #{seed}<<The random seed used for the evaluation. If \samp{NULL} the \samp{seed} is unchanged.>> #{rtrunc}<<Should the distribution be truncated? See \code{\link{rtrunc}}.>> #{linf}<<If truncated: lower limit. 
May be a scalar, an array or a mcnode.>> #{lsup}<<If truncated: upper limit. May be a scalar, an array or a mcnode. \samp{lsup} should be pairwise strictly greater then \samp{linf}>> #{lhs}<<Should a Random Latin Hypercube Sampling be used? see \code{\link{lhs}}>> #VALUE #An \samp{mcnode} object. #DETAILS #Note that arguments after \dots must match exactly. # #Any function who accepts vectors/matrix as arguments may be used (notably: all current random generator of the \samp{stats} package). #The arguments may be sent classically but it is STRONGLY recommended to use consistent \samp{mcnode}s #if arguments should be recycled, since a very complex recycling is handled for \samp{mcnode} and not for vectors. #The rules for compliance of \samp{mcnode} arguments are as following (see below for special functions): #{type="V"}<<accepts \samp{"0" mcnode} of dimension \samp{(1 x 1 x nvariates)} or of dimension \samp{(1 x 1 x 1)} (recycled) # and \samp{"V" mcnode} of dimension \samp{(nsv x 1 x nvariates)} or \samp{(nsv x 1 x 1)} (recycled).>> #{type="U"}<<accepts \samp{"0" mcnode} of dimension \samp{(1 x 1 x nvariates)} or of dimension \samp{(1 x 1 x 1)} (recycled) #and \samp{"U" mcnode} of dimension \samp{(1 x nsu x nvariates)} or of dimension \samp{(1 x nsu x 1)} (recycled).>> #{type="VU"}<<accepts \samp{"0" mcnode} of dimension \samp{(1 x 1 x nvariates)} or of dimension \samp{(1 x 1 x 1)} (recycled), #\samp{"V" mcnode} of dimension \samp{(nsv x 1 x nvariates)} (recycled classicaly) or \samp{(nsv x 1 x 1)} (recycled classically), #\samp{"U" mcnode} of dimension \samp{(1 x nsu x nvariates)} (recycled by rows) or \samp{(1 x nsu x 1)} #(recycled by row on the uncertainty dimension and classicaly on variates), #\samp{"VU" mcnode} of dimension \samp{(nsv x nsu x nvariates)} or of dimension \samp{(nsv x nsu x 1)} (recycled).>> #{type="0"}<<accepts \samp{"0" mcnode} of dimension \samp{(1 x 1 x nvariates)} or \samp{(1 x 1 x 1)} #(recycled).>> # #Multivariate nodes and multivariate 
distributions: # #The number of variates should be provided (not guesses by the function). #A multivariates node may be built using a univariate distribution and #\samp{nvariates!=1}. See examples. # #\code{\link{rdirichlet}} needs for \samp{alpha} a vector or a multivariates nodes and returns a multivariate node. #\code{\link{rmultinomial}} needs for \samp{size} and \samp{prob} vectors and/or multivariate nodes and return a univariate or a multivariate node. #\code{\link{rmultinormal}} needs for \samp{mean} and \samp{sigma} vectors and/or multivariate nodes and return a multivariate node. #\code{\link{rempiricalD}} needs for \samp{values} and \samp{prob} vectors and/or multivariate nodes and return a a univariate or a multivariate node. #See examples. # #\samp{trunc=TRUE} is valid for univariates distributions only. #The distribution will be truncated on \samp{(linf, lsup]}. #The function 'func' should have a 'q' form (with first argument 'p') and a 'p' form, as #all current random generator of the \samp{stats} library. #Example : 'rnorm' (has a 'qnorm' and a 'pnorm' form), 'rbeta', 'rbinom', 'rgamma', ... # #If \samp{lhs=TRUE}, a Random Hypercube Sampling will be used on \samp{nsv} and \samp{nsu} #The function 'func' should have a 'q' form (with argument 'p'). #\samp{lhs=TRUE} is thus not allowed on multivariates distributions. # # #SEE ALSO #\code{\link{mcnode}} for a description of \samp{mcnode} object, methods and functions on \samp{mcnode} objects.</> #\code{\link{Ops.mcnode}} for operations on \samp{mcnode} objects. #\code{\link{rtrunc}} for important warnings on the use of the \samp{trunc} option. 
#EXAMPLE #Oldnvar <- ndvar() #Oldnunc <- ndunc() #ndvar(5) #ndunc(4) # ### compatibility with mcdata as arguments #x0 <- mcstoc(runif,type="0") #xV <- mcstoc(runif,type="V") #xU <- mcstoc(runif,type="U") #xVU <- mcstoc(runif,type="VU") # ### "0" accepts mcdata "0" #mcstoc(runif,type="0",min=-10,max=x0) # ### "V" accepts "0" mcdata and "V" mcdata #mcstoc(rnorm,type="V",mean=x0,sd=xV) # ### "U" accepts "0" mcdata and "U" mcdata #mcstoc(rnorm,type="U",mean=x0,sd=xU) # ### "VU" accepts "0" mcdata, "U" mcdata ### "V" mcdata and "U" mcdata with correct recycling #mcstoc(rnorm,type="VU",mean=x0,sd=xVU) #mcstoc(rnorm,type="VU",mean=xV,sd=xU) # ### any function giving a set (vector/matrix) of value of length 'size' works #f <- function(popi) 1:popi #mcstoc(f,type="V",nsample="popi") # ###Multivariates # #ndvar(2) #ndunc(5) ###Build a multivariate node with univariate distribution #mcstoc(rnorm,"0",nvariates=3) #mcstoc(rnorm,"V",nvariates=3) #mcstoc(rnorm,"U",nvariates=3) #mcstoc(rnorm,"VU",nvariates=3) # ###Build a multivariate node with multivariates distribution #alpha <- mcdata(c(1,1000,10,100,100,10,1000,1),"V",nvariates=4) #(p <- mcstoc(rdirichlet,"V",alpha=alpha,nvariates=4)) #mcstoc(rmultinomial,"VU",size=10,p,nvariates=4) # ###Build a univariates node with "multivariates" distribution #size <- mcdata(c(1:5),"U") #mcstoc(rmultinomial,"VU",size,p,nvariates=1) #since a multinomial return one value # ###Build a multivariates node with "multivariates" distribution #mcstoc(rmultinomial,"VU",size,p,nvariates=4) #sent 4 times to fill the array # ###Use of rempiricalD with nodes ###A bootstrap #ndunc(5) #ndvar(5) #dataset <- c(1:9) #(b <- mcstoc(rempiricalD,"U",nvariates=9,values=dataset)) #unclass(b) ###Then we build a VU node by sampling in each set of bootstrap ##in the uncertainty dimensions #(node <- mcstoc(rempiricalD,"VU",values=b)) #unclass(node) # ### truncated #ndvar(2) #ndunc(5) #linf <- mcdata(-1:3,"U") #x <- mcstoc(rnorm,"VU",rtrunc=TRUE,linf=linf) 
#unclass(round(x)) ## lhs and truncated with linf as mcnode #linf <- mcdata(1:5,"U") #mcstoc(rnorm,"VU",nsv=100,rtrunc=TRUE,linf=linf,lhs=TRUE) # #ndvar(Oldnvar) #ndunc(Oldnunc) #CREATED 08-01-25 #-------------------------------------------- { func <- match.fun(func) if(!is.null(seed)) set.seed(seed) if(!is.character(outm) || !(all(outm %in% c("none","each"))) && !all(sapply(outm, exists, mode="function"))) stop("outm should be 'none','each' or a vector of name(s) of valid function(s)") type <- match.arg(type) argsd <- list(...) dimf <- switch(type, "V"=c(nsv,1,nvariates),"U"=c(1,nsu,nvariates),"VU"=c(nsv,nsu,nvariates),"0"=c(1,1,nvariates)) nsv <- dimf[1] nsu <- dimf[2] nva <- dimf[3] if(rtrunc) argsd <- c(argsd,list(linf=linf),list(lsup=lsup)) # launch linf and lsup in the process largsd <- length(argsd) #### A function to deal mcnodes (including linf and lsup) as arguments LAFUNC <- function(argsd,typethismc){ if(!is.null(typethismc)){ #mcnode as arguments if(!(type=="VU" || typethismc=="0" || typethismc==type)) stop("Incompatible type of nodes") # incompatible node dimm <- dim(argsd) if ((typethismc == "V" && dimm[1] != nsv) || (typethismc == "U" && dimm[2] != nsu) || (typethismc == "VU" && (dimm[1] != nsv || dimm[2] != nsu))) stop("Nodes of incompatible dimensions") # incompatible dimension if(maxdim3 > 1){ #at least one multivariate node as parameter, need recycling on the third dimension if(typethismc=="U") argsd <- apply(argsd, 3, matrix, nrow=maxdim1, ncol=maxdim2, byrow=TRUE) # recycling U as matrix (maxdim1*maxdim2) x nvariates else { if(maxdim1 ==1 && maxdim2 ==1) argsd <- matrix(argsd, nrow=1) # Very special case to be added else argsd <- apply(argsd, 3, matrix, nrow = maxdim1, ncol = maxdim2) } # recycling 0, V, VU as matrix (maxdim1*maxdim2) x nvariates } else { dim(argsd) <- NULL # as vector if(typethismc == "U" && maxdim1!=1) argsd <- rep(argsd, each = maxdim1) #recycling U as vector nsv*nsu } } else if(is.array(argsd)) stop("Array prohibited in 
mcstoc as parameter. Use an mcnode instead") return(unclass(argsd)) } #### typemc <- lapply(argsd, attr, which = "type") yamc <- !is.null(unlist(typemc)) # At least one mcnode if(yamc){ # evaluate the minimal common to build the minimal recycling level... maxdim1 <- unlist(lapply(argsd, function(x) dim(x)[1])) maxdim1 <- ifelse(is.null(maxdim1), 1, max(maxdim1)) maxdim2 <- unlist(lapply(argsd, function(x) dim(x)[2])) maxdim2 <- ifelse(is.null(maxdim2), 1, max(maxdim2)) maxdim3 <- unlist(lapply(argsd, function(x) dim(x)[3])) maxdim3 <- ifelse(is.null(maxdim3), 1, max(maxdim3)) } if(largsd != 0){ argsd <- mapply(LAFUNC, argsd, typemc, SIMPLIFY=FALSE) #print(argsd) } ##################################### # If lhs or rtrunc, redefine the function to draw random variables ######### #keep a copy of original function funcorigin <- func if(lhs || rtrunc){ #define good function for the random sampling distr <- as.character(match.call()$func) #retrieve the name of the function distr <- substr(distr, 2, 1000) #remove the r qfun <- paste("q",distr,sep="") #define "qfunc" if(rtrunc){ pfun <- paste("p",distr,sep="") #define pfunc func <- function(...){ argsd <- list(...) 
nnfin <- argsd[[nsample]] linf <- if(length(argsd$linf) <= nnfin) as.vector(argsd$linf) else rep(argsd$linf, length.out=nnfin) # solve a problem when linf was multivariate lsup <- if(length(argsd$lsup) <= nnfin) as.vector(argsd$lsup) else rep(argsd$lsup, length.out=nnfin) # solve a problem when linf was multivariate lmax <- max(length(linf),length(lsup)) if(any(rep(linf, length.out = lmax) >= rep(lsup, length.out = lmax))) stop("linf should be < lsup") #recycle vectors argsd$linf <- argsd$lsup <- argsd[[nsample]] <- NULL #find the p of the limit pinf <- as.vector(do.call(pfun,c(list(q=linf),argsd),quote=TRUE)) psup <- as.vector(do.call(pfun,c(list(q=lsup),argsd),quote=TRUE)) #sample uniformely between the limits if(!lhs) lesp <- runif(nnfin,min=pinf,max=psup) else lesp <- lhs(distr="runif", nsv=dimf[1], nsu=dimf[2], nvariates=dimf[3], min=pinf, max=psup) #get the q data <- (do.call(qfun,c(list(p=lesp),argsd)))[1:nnfin] data[pinf==0 & data > lsup] <- NaN #ex: rtrunc("lnorm",10,linf=-2,lsup=-1) data[psup==1 & data < linf] <- NaN #ex: rtrunc("unif",10,linf=2,lsup=4,max=1) data[is.na(linf) | is.na(lsup)] <- NaN #ex: rtrunc("norm",10,sd=-2) #Two tests for extreme situations. None Catch all possibilities. THe error is first to avoid the warning if(any(data <= linf | data > lsup, na.rm=TRUE)) stop("Error in rtrunc: some values are not in the expected range (maybe due to rounding errors)") if(isTRUE(all.equal(pinf,1)) | isTRUE(all.equal(psup,0)) ) warning("Warning: check the results from rtrunc. It may have reached rounding errors") return(data) }#end redefinition func } else func <- function(...) { # LHS only argsd <- list(...) 
argsd[[nsample]] <- NULL lesp <- lhs(distr="runif", nsv=dimf[1], nsu=dimf[2], nvariates=dimf[3], min=0, max=1) return(do.call(qfun,c(list(p=lesp),argsd)))} } # do a try to test the length if nvariates != 1 if(nvariates != 1){ if(largsd != 0) argsdtest <- mapply(function(x,typemc){ if(is.null(typemc)) return(unclass(x)) if(is.matrix(x)) return(x[1,,drop=FALSE]) # mc (they have been unclassed) return(x[1])}, argsd, typemc, SIMPLIFY=FALSE) else argsdtest <- vector(mode="list",length=0) argsdtest[[nsample]] <- 1 if(rtrunc) argsdtest$linf <- argsdtest$lsup <- NULL dimf <- c(1,1,1) data <- do.call(funcorigin,argsdtest,quote=TRUE) l <- length(data) if(l == nvariates) { if(rtrunc | lhs) stop("mcstoc does not handle rtrunc and lhs for multivariate distributions") dimf <- c(nsv,nsu,1)} # If it returns a vector else if(l == 1) dimf <- c(nsv,nsu,nvariates) # if it returns a number else stop("the function should return a vector of size 1 or nvariates if",nsample,"=1") argsd[[nsample]] <- prod(dimf) data <- do.call(func, argsd, quote = TRUE) #Post Production, multivariate if (yamc){ if(l==1){ # univariate distribution if(maxdim1 == 1 && maxdim2 == 1) data <- aperm(array(data, dim = c(nvariates, nsv, nsu)), c(2, 3, 1)) else if(maxdim1 == 1 && nsv!=1) data <- aperm(array(data, dim = c(nsu, nvariates, nsv)), c(3, 1, 2)) else if(maxdim2 == 1 && nsu!=1) data <- aperm(array(data, dim = c(nsv, nvariates, nsu)), c(1, 3, 2)) else data <- array(data, dim = c(nsv, nsu, nvariates)) } else { # l != 1 : multivariate if(maxdim1 == 1 && nsv != 1) data <- aperm(array(data, dim = c(nsu, nsv, nvariates)), c(2, 1, 3)) else data <- array(data, dim = c(nsv, nsu, nvariates)) } } else data <- array(data, dim = c(nsv, nsu, nvariates)) } #end multivariates else{ # univariate argsd[[nsample]] <- prod(dimf) data <- do.call(func, argsd, quote = TRUE) if (yamc && maxdim1 == 1 && nsv != 1) data <- aperm(array(data, dim = c(nsu, nsv, nvariates)), c(2, 1, 3)) else data <- array(data, dim = c(nsv, nsu, 
nvariates)) } class(data) <- "mcnode" attr(data,"type") <- type attr(data,"outm") <- outm return(data) }
/R/mcstoc.R
no_license
cran/mc2d
R
false
false
16,610
r
#<<BEGIN>> mcstoc <- function(func=runif, type=c("V","U","VU","0"), ..., nsv=ndvar(), nsu=ndunc(), nvariates=1, outm="each", nsample="n", seed=NULL, rtrunc=FALSE, linf=-Inf, lsup=Inf, lhs=FALSE) #TITLE Creates Stochastic mcnode Objects #DESCRIPTION # Creates a \code{\link{mcnode}} object using a random generating function. #KEYWORDS methods #INPUTS #{func}<<A function providing random data or its name as character.>> #[INPUTS] #{type}<<The type of \samp{mcnode} to be built. By default, a \samp{"V"} node. see \code{\link{mcnode}} for details.>> #{\dots}<<All other arguments but the size of the sample to be passed to \samp{func}. These arguments #should be vectors or \samp{mcnode}s (arrays prohibited).>> #{nsv}<<The number of simulations in the variability dimension.>> #{nsu}<<The number of simulations in the uncertainty dimension.>> #{nvariates}<<The number of variates of the output.>> #{outm}<<The output of the \samp{mcnode} for multivariates nodes. May be "each" (default) #if an output should be provided for each variates considered independently, "none" for no output #or a vector of functions (as a character string) that will be applied on the variates dimension #before any output (ex: \samp{"mean"}, \samp{"median"}, \samp{c("min","max")}). Each function should return 1 #value when applied to 1 value (ex. do not use \samp{"range"}). #Note that the \samp{outm} attribute may be changed further using the \code{\link{outm}} function.>> #{nsample}<<The name of the parameter of the function giving the size of the vector. #By default, \samp{n}, as in most of the random sampling distributions # of the \samp{stats} library (with the exceptions of \samp{rhyper} and \samp{rwilcox} where \samp{nsample="nn"} should be used).>> #{seed}<<The random seed used for the evaluation. If \samp{NULL} the \samp{seed} is unchanged.>> #{rtrunc}<<Should the distribution be truncated? See \code{\link{rtrunc}}.>> #{linf}<<If truncated: lower limit. 
May be a scalar, an array or a mcnode.>> #{lsup}<<If truncated: upper limit. May be a scalar, an array or a mcnode. \samp{lsup} should be pairwise strictly greater then \samp{linf}>> #{lhs}<<Should a Random Latin Hypercube Sampling be used? see \code{\link{lhs}}>> #VALUE #An \samp{mcnode} object. #DETAILS #Note that arguments after \dots must match exactly. # #Any function who accepts vectors/matrix as arguments may be used (notably: all current random generator of the \samp{stats} package). #The arguments may be sent classically but it is STRONGLY recommended to use consistent \samp{mcnode}s #if arguments should be recycled, since a very complex recycling is handled for \samp{mcnode} and not for vectors. #The rules for compliance of \samp{mcnode} arguments are as following (see below for special functions): #{type="V"}<<accepts \samp{"0" mcnode} of dimension \samp{(1 x 1 x nvariates)} or of dimension \samp{(1 x 1 x 1)} (recycled) # and \samp{"V" mcnode} of dimension \samp{(nsv x 1 x nvariates)} or \samp{(nsv x 1 x 1)} (recycled).>> #{type="U"}<<accepts \samp{"0" mcnode} of dimension \samp{(1 x 1 x nvariates)} or of dimension \samp{(1 x 1 x 1)} (recycled) #and \samp{"U" mcnode} of dimension \samp{(1 x nsu x nvariates)} or of dimension \samp{(1 x nsu x 1)} (recycled).>> #{type="VU"}<<accepts \samp{"0" mcnode} of dimension \samp{(1 x 1 x nvariates)} or of dimension \samp{(1 x 1 x 1)} (recycled), #\samp{"V" mcnode} of dimension \samp{(nsv x 1 x nvariates)} (recycled classicaly) or \samp{(nsv x 1 x 1)} (recycled classically), #\samp{"U" mcnode} of dimension \samp{(1 x nsu x nvariates)} (recycled by rows) or \samp{(1 x nsu x 1)} #(recycled by row on the uncertainty dimension and classicaly on variates), #\samp{"VU" mcnode} of dimension \samp{(nsv x nsu x nvariates)} or of dimension \samp{(nsv x nsu x 1)} (recycled).>> #{type="0"}<<accepts \samp{"0" mcnode} of dimension \samp{(1 x 1 x nvariates)} or \samp{(1 x 1 x 1)} #(recycled).>> # #Multivariate nodes and multivariate 
distributions: # #The number of variates should be provided (not guesses by the function). #A multivariates node may be built using a univariate distribution and #\samp{nvariates!=1}. See examples. # #\code{\link{rdirichlet}} needs for \samp{alpha} a vector or a multivariates nodes and returns a multivariate node. #\code{\link{rmultinomial}} needs for \samp{size} and \samp{prob} vectors and/or multivariate nodes and return a univariate or a multivariate node. #\code{\link{rmultinormal}} needs for \samp{mean} and \samp{sigma} vectors and/or multivariate nodes and return a multivariate node. #\code{\link{rempiricalD}} needs for \samp{values} and \samp{prob} vectors and/or multivariate nodes and return a a univariate or a multivariate node. #See examples. # #\samp{trunc=TRUE} is valid for univariates distributions only. #The distribution will be truncated on \samp{(linf, lsup]}. #The function 'func' should have a 'q' form (with first argument 'p') and a 'p' form, as #all current random generator of the \samp{stats} library. #Example : 'rnorm' (has a 'qnorm' and a 'pnorm' form), 'rbeta', 'rbinom', 'rgamma', ... # #If \samp{lhs=TRUE}, a Random Hypercube Sampling will be used on \samp{nsv} and \samp{nsu} #The function 'func' should have a 'q' form (with argument 'p'). #\samp{lhs=TRUE} is thus not allowed on multivariates distributions. # # #SEE ALSO #\code{\link{mcnode}} for a description of \samp{mcnode} object, methods and functions on \samp{mcnode} objects.</> #\code{\link{Ops.mcnode}} for operations on \samp{mcnode} objects. #\code{\link{rtrunc}} for important warnings on the use of the \samp{trunc} option. 
#EXAMPLE #Oldnvar <- ndvar() #Oldnunc <- ndunc() #ndvar(5) #ndunc(4) # ### compatibility with mcdata as arguments #x0 <- mcstoc(runif,type="0") #xV <- mcstoc(runif,type="V") #xU <- mcstoc(runif,type="U") #xVU <- mcstoc(runif,type="VU") # ### "0" accepts mcdata "0" #mcstoc(runif,type="0",min=-10,max=x0) # ### "V" accepts "0" mcdata and "V" mcdata #mcstoc(rnorm,type="V",mean=x0,sd=xV) # ### "U" accepts "0" mcdata and "U" mcdata #mcstoc(rnorm,type="U",mean=x0,sd=xU) # ### "VU" accepts "0" mcdata, "U" mcdata ### "V" mcdata and "U" mcdata with correct recycling #mcstoc(rnorm,type="VU",mean=x0,sd=xVU) #mcstoc(rnorm,type="VU",mean=xV,sd=xU) # ### any function giving a set (vector/matrix) of value of length 'size' works #f <- function(popi) 1:popi #mcstoc(f,type="V",nsample="popi") # ###Multivariates # #ndvar(2) #ndunc(5) ###Build a multivariate node with univariate distribution #mcstoc(rnorm,"0",nvariates=3) #mcstoc(rnorm,"V",nvariates=3) #mcstoc(rnorm,"U",nvariates=3) #mcstoc(rnorm,"VU",nvariates=3) # ###Build a multivariate node with multivariates distribution #alpha <- mcdata(c(1,1000,10,100,100,10,1000,1),"V",nvariates=4) #(p <- mcstoc(rdirichlet,"V",alpha=alpha,nvariates=4)) #mcstoc(rmultinomial,"VU",size=10,p,nvariates=4) # ###Build a univariates node with "multivariates" distribution #size <- mcdata(c(1:5),"U") #mcstoc(rmultinomial,"VU",size,p,nvariates=1) #since a multinomial return one value # ###Build a multivariates node with "multivariates" distribution #mcstoc(rmultinomial,"VU",size,p,nvariates=4) #sent 4 times to fill the array # ###Use of rempiricalD with nodes ###A bootstrap #ndunc(5) #ndvar(5) #dataset <- c(1:9) #(b <- mcstoc(rempiricalD,"U",nvariates=9,values=dataset)) #unclass(b) ###Then we build a VU node by sampling in each set of bootstrap ##in the uncertainty dimensions #(node <- mcstoc(rempiricalD,"VU",values=b)) #unclass(node) # ### truncated #ndvar(2) #ndunc(5) #linf <- mcdata(-1:3,"U") #x <- mcstoc(rnorm,"VU",rtrunc=TRUE,linf=linf) 
#unclass(round(x)) ## lhs and truncated with linf as mcnode #linf <- mcdata(1:5,"U") #mcstoc(rnorm,"VU",nsv=100,rtrunc=TRUE,linf=linf,lhs=TRUE) # #ndvar(Oldnvar) #ndunc(Oldnunc) #CREATED 08-01-25 #-------------------------------------------- { func <- match.fun(func) if(!is.null(seed)) set.seed(seed) if(!is.character(outm) || !(all(outm %in% c("none","each"))) && !all(sapply(outm, exists, mode="function"))) stop("outm should be 'none','each' or a vector of name(s) of valid function(s)") type <- match.arg(type) argsd <- list(...) dimf <- switch(type, "V"=c(nsv,1,nvariates),"U"=c(1,nsu,nvariates),"VU"=c(nsv,nsu,nvariates),"0"=c(1,1,nvariates)) nsv <- dimf[1] nsu <- dimf[2] nva <- dimf[3] if(rtrunc) argsd <- c(argsd,list(linf=linf),list(lsup=lsup)) # launch linf and lsup in the process largsd <- length(argsd) #### A function to deal mcnodes (including linf and lsup) as arguments LAFUNC <- function(argsd,typethismc){ if(!is.null(typethismc)){ #mcnode as arguments if(!(type=="VU" || typethismc=="0" || typethismc==type)) stop("Incompatible type of nodes") # incompatible node dimm <- dim(argsd) if ((typethismc == "V" && dimm[1] != nsv) || (typethismc == "U" && dimm[2] != nsu) || (typethismc == "VU" && (dimm[1] != nsv || dimm[2] != nsu))) stop("Nodes of incompatible dimensions") # incompatible dimension if(maxdim3 > 1){ #at least one multivariate node as parameter, need recycling on the third dimension if(typethismc=="U") argsd <- apply(argsd, 3, matrix, nrow=maxdim1, ncol=maxdim2, byrow=TRUE) # recycling U as matrix (maxdim1*maxdim2) x nvariates else { if(maxdim1 ==1 && maxdim2 ==1) argsd <- matrix(argsd, nrow=1) # Very special case to be added else argsd <- apply(argsd, 3, matrix, nrow = maxdim1, ncol = maxdim2) } # recycling 0, V, VU as matrix (maxdim1*maxdim2) x nvariates } else { dim(argsd) <- NULL # as vector if(typethismc == "U" && maxdim1!=1) argsd <- rep(argsd, each = maxdim1) #recycling U as vector nsv*nsu } } else if(is.array(argsd)) stop("Array prohibited in 
mcstoc as parameter. Use an mcnode instead") return(unclass(argsd)) } #### typemc <- lapply(argsd, attr, which = "type") yamc <- !is.null(unlist(typemc)) # At least one mcnode if(yamc){ # evaluate the minimal common to build the minimal recycling level... maxdim1 <- unlist(lapply(argsd, function(x) dim(x)[1])) maxdim1 <- ifelse(is.null(maxdim1), 1, max(maxdim1)) maxdim2 <- unlist(lapply(argsd, function(x) dim(x)[2])) maxdim2 <- ifelse(is.null(maxdim2), 1, max(maxdim2)) maxdim3 <- unlist(lapply(argsd, function(x) dim(x)[3])) maxdim3 <- ifelse(is.null(maxdim3), 1, max(maxdim3)) } if(largsd != 0){ argsd <- mapply(LAFUNC, argsd, typemc, SIMPLIFY=FALSE) #print(argsd) } ##################################### # If lhs or rtrunc, redefine the function to draw random variables ######### #keep a copy of original function funcorigin <- func if(lhs || rtrunc){ #define good function for the random sampling distr <- as.character(match.call()$func) #retrieve the name of the function distr <- substr(distr, 2, 1000) #remove the r qfun <- paste("q",distr,sep="") #define "qfunc" if(rtrunc){ pfun <- paste("p",distr,sep="") #define pfunc func <- function(...){ argsd <- list(...) 
nnfin <- argsd[[nsample]] linf <- if(length(argsd$linf) <= nnfin) as.vector(argsd$linf) else rep(argsd$linf, length.out=nnfin) # solve a problem when linf was multivariate lsup <- if(length(argsd$lsup) <= nnfin) as.vector(argsd$lsup) else rep(argsd$lsup, length.out=nnfin) # solve a problem when linf was multivariate lmax <- max(length(linf),length(lsup)) if(any(rep(linf, length.out = lmax) >= rep(lsup, length.out = lmax))) stop("linf should be < lsup") #recycle vectors argsd$linf <- argsd$lsup <- argsd[[nsample]] <- NULL #find the p of the limit pinf <- as.vector(do.call(pfun,c(list(q=linf),argsd),quote=TRUE)) psup <- as.vector(do.call(pfun,c(list(q=lsup),argsd),quote=TRUE)) #sample uniformely between the limits if(!lhs) lesp <- runif(nnfin,min=pinf,max=psup) else lesp <- lhs(distr="runif", nsv=dimf[1], nsu=dimf[2], nvariates=dimf[3], min=pinf, max=psup) #get the q data <- (do.call(qfun,c(list(p=lesp),argsd)))[1:nnfin] data[pinf==0 & data > lsup] <- NaN #ex: rtrunc("lnorm",10,linf=-2,lsup=-1) data[psup==1 & data < linf] <- NaN #ex: rtrunc("unif",10,linf=2,lsup=4,max=1) data[is.na(linf) | is.na(lsup)] <- NaN #ex: rtrunc("norm",10,sd=-2) #Two tests for extreme situations. None Catch all possibilities. THe error is first to avoid the warning if(any(data <= linf | data > lsup, na.rm=TRUE)) stop("Error in rtrunc: some values are not in the expected range (maybe due to rounding errors)") if(isTRUE(all.equal(pinf,1)) | isTRUE(all.equal(psup,0)) ) warning("Warning: check the results from rtrunc. It may have reached rounding errors") return(data) }#end redefinition func } else func <- function(...) { # LHS only argsd <- list(...) 
argsd[[nsample]] <- NULL lesp <- lhs(distr="runif", nsv=dimf[1], nsu=dimf[2], nvariates=dimf[3], min=0, max=1) return(do.call(qfun,c(list(p=lesp),argsd)))} } # do a try to test the length if nvariates != 1 if(nvariates != 1){ if(largsd != 0) argsdtest <- mapply(function(x,typemc){ if(is.null(typemc)) return(unclass(x)) if(is.matrix(x)) return(x[1,,drop=FALSE]) # mc (they have been unclassed) return(x[1])}, argsd, typemc, SIMPLIFY=FALSE) else argsdtest <- vector(mode="list",length=0) argsdtest[[nsample]] <- 1 if(rtrunc) argsdtest$linf <- argsdtest$lsup <- NULL dimf <- c(1,1,1) data <- do.call(funcorigin,argsdtest,quote=TRUE) l <- length(data) if(l == nvariates) { if(rtrunc | lhs) stop("mcstoc does not handle rtrunc and lhs for multivariate distributions") dimf <- c(nsv,nsu,1)} # If it returns a vector else if(l == 1) dimf <- c(nsv,nsu,nvariates) # if it returns a number else stop("the function should return a vector of size 1 or nvariates if",nsample,"=1") argsd[[nsample]] <- prod(dimf) data <- do.call(func, argsd, quote = TRUE) #Post Production, multivariate if (yamc){ if(l==1){ # univariate distribution if(maxdim1 == 1 && maxdim2 == 1) data <- aperm(array(data, dim = c(nvariates, nsv, nsu)), c(2, 3, 1)) else if(maxdim1 == 1 && nsv!=1) data <- aperm(array(data, dim = c(nsu, nvariates, nsv)), c(3, 1, 2)) else if(maxdim2 == 1 && nsu!=1) data <- aperm(array(data, dim = c(nsv, nvariates, nsu)), c(1, 3, 2)) else data <- array(data, dim = c(nsv, nsu, nvariates)) } else { # l != 1 : multivariate if(maxdim1 == 1 && nsv != 1) data <- aperm(array(data, dim = c(nsu, nsv, nvariates)), c(2, 1, 3)) else data <- array(data, dim = c(nsv, nsu, nvariates)) } } else data <- array(data, dim = c(nsv, nsu, nvariates)) } #end multivariates else{ # univariate argsd[[nsample]] <- prod(dimf) data <- do.call(func, argsd, quote = TRUE) if (yamc && maxdim1 == 1 && nsv != 1) data <- aperm(array(data, dim = c(nsu, nsv, nvariates)), c(2, 1, 3)) else data <- array(data, dim = c(nsv, nsu, 
nvariates)) } class(data) <- "mcnode" attr(data,"type") <- type attr(data,"outm") <- outm return(data) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils-sf.R \name{sf_build_user_emails_to_id} \alias{sf_build_user_emails_to_id} \title{User ids and email list} \usage{ sf_build_user_emails_to_id(conn1) } \arguments{ \item{conn1}{connection to the anahita DB} } \value{ a tibble } \description{ Sometimes, mixpanel stores emails instead of SF ids. We need to replace emails by ids using a reference list of email to id mappings. We need to note more than 1 email can be associated to an single id since we have the person emnail and the toplink email. In principle, the toplink email is the right one but we never now. } \examples{ \dontrun{ conn1 <- connect_anahita() df <- mxp_build_user_ids_emails(conn1) } }
/man/sf_build_user_emails_to_id.Rd
permissive
koad7/anahita
R
false
true
742
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils-sf.R \name{sf_build_user_emails_to_id} \alias{sf_build_user_emails_to_id} \title{User ids and email list} \usage{ sf_build_user_emails_to_id(conn1) } \arguments{ \item{conn1}{connection to the anahita DB} } \value{ a tibble } \description{ Sometimes, mixpanel stores emails instead of SF ids. We need to replace emails by ids using a reference list of email to id mappings. We need to note more than 1 email can be associated to an single id since we have the person emnail and the toplink email. In principle, the toplink email is the right one but we never now. } \examples{ \dontrun{ conn1 <- connect_anahita() df <- mxp_build_user_ids_emails(conn1) } }
data = NULL for(i in 2010:2011){ # the data goes up to 2017, but the files are extremely large from 2011 onwards - you can decide to just use a subset file = paste0('https://s3.amazonaws.com/capitalbikeshare-data/',i,'-capitalbikeshare-tripdata.zip') download.file(file,destfile='bikedata.zip') unzip('bikedata.zip') data = rbind(data,read.csv(paste0(i,'-capitalbikeshare-tripdata.csv'))) } n = dim(data)[1] starttime = as.numeric(strsplit(toString(data[,2]),split='[-:, ]')[[1]][-7*(1:n)]) # start time of ride #i dim(starttime) = c(6,n); starttime = t(starttime) # row i = year/month/date/hour/minute/second for ride #i duration = data[,1] # duration of the ride in seconds station_start = data[,4] # station ID where the bike was checked out station_end = data[,6] # station ID where the bike was returned member = (data[,9]=='Member') # member (1) or nonmember (0) bikenum = as.numeric((strsplit(toString(data[,8]),'[?wW, ]')[[1]][3*(1:n)-1])) # some are NA, the data is messy for this one stations = NULL # stations[i,1] = station ID for the i-th station, stations[i,2] = station location for the i-th station for(i in unique(c(station_start,station_end))){ if(any(data[,4]==i)){ ind = min(which(data[,4]==i)) location = toString(data[ind,5]) }else{ ind = min(which(data[,6]==i)) location = toString(data[ind,7]) } stations = rbind(stations,c(i,location)) } # note that stations get added to the program over time days_in_month = rep(c(31,28,31,30,31,30,31,31,30,31,30,31),2) # Jan 2010, ..., Dec 2011 days_since_Jan1_2010 = (starttime[,1]-2010)*365 + cumsum(days_in_month)[starttime[,2]] - days_in_month[starttime[,2]] + (starttime[,3]-1) day_of_week = c('Monday','Tuesday','Wednesday','Thursday','Friday', 'Saturday','Sunday')[(days_since_Jan1_2010 + 4)%% 7 + 1] save('starttime','duration','bikenum','stations','station_start','station_end', 'member','days_since_Jan1_2010','day_of_week',file='bikedata.RData')
/GroupProject1_Permutation Test/getdata_bikeshare.R
no_license
gulishana/UChicago_STAT30850_Multiple_Testing_and_Modern_Inference
R
false
false
1,953
r
data = NULL for(i in 2010:2011){ # the data goes up to 2017, but the files are extremely large from 2011 onwards - you can decide to just use a subset file = paste0('https://s3.amazonaws.com/capitalbikeshare-data/',i,'-capitalbikeshare-tripdata.zip') download.file(file,destfile='bikedata.zip') unzip('bikedata.zip') data = rbind(data,read.csv(paste0(i,'-capitalbikeshare-tripdata.csv'))) } n = dim(data)[1] starttime = as.numeric(strsplit(toString(data[,2]),split='[-:, ]')[[1]][-7*(1:n)]) # start time of ride #i dim(starttime) = c(6,n); starttime = t(starttime) # row i = year/month/date/hour/minute/second for ride #i duration = data[,1] # duration of the ride in seconds station_start = data[,4] # station ID where the bike was checked out station_end = data[,6] # station ID where the bike was returned member = (data[,9]=='Member') # member (1) or nonmember (0) bikenum = as.numeric((strsplit(toString(data[,8]),'[?wW, ]')[[1]][3*(1:n)-1])) # some are NA, the data is messy for this one stations = NULL # stations[i,1] = station ID for the i-th station, stations[i,2] = station location for the i-th station for(i in unique(c(station_start,station_end))){ if(any(data[,4]==i)){ ind = min(which(data[,4]==i)) location = toString(data[ind,5]) }else{ ind = min(which(data[,6]==i)) location = toString(data[ind,7]) } stations = rbind(stations,c(i,location)) } # note that stations get added to the program over time days_in_month = rep(c(31,28,31,30,31,30,31,31,30,31,30,31),2) # Jan 2010, ..., Dec 2011 days_since_Jan1_2010 = (starttime[,1]-2010)*365 + cumsum(days_in_month)[starttime[,2]] - days_in_month[starttime[,2]] + (starttime[,3]-1) day_of_week = c('Monday','Tuesday','Wednesday','Thursday','Friday', 'Saturday','Sunday')[(days_since_Jan1_2010 + 4)%% 7 + 1] save('starttime','duration','bikenum','stations','station_start','station_end', 'member','days_since_Jan1_2010','day_of_week',file='bikedata.RData')
###### --------- > calculating seasonal contemporary layers + projections library(raster) library(tidyverse) library(maps) ######### ----------------------------> create contemporary rasters averaged by season (run once) #### contempDir_monthly="/Volumes/SDM /Lacie backup October 2016/Lacie share/Climate_paper/GAM_1/project" ## monthly current rasters contempDir_seaonal="/Volumes/SDM /Lacie backup October 2016/Lacie share/Climate_paper/GAM_1/project_20y_avs_seasonal/contemporary" #;dir.create(contempDir_seaonal) ## folder to house current rasters averaged by season winter=c("m12","m01","m02") spring=c("m03","m04","m05") summer=c("m06","m07","m08") fall=c("m09","m10","m11") seasons=c("DJF","MAM","JJA","SON") for(season in seasons){ print(season) a=list() assign(season,a) } vars=c("bt","bs","st","SS","sh") allfiles=list.files(contempDir_monthly,recursive = T,full.names = T) for(file in allfiles){ print(file) a=strsplit(file,"/")[[1]][9] b=strsplit(a,"_") month=b[[1]][1] year=b[[1]][2] ########## 20-40 if(month %in% winter){ DJF=unlist(list(file,DJF)) } if(month %in% spring){ MAM=unlist(list(file,MAM)) } if(month %in% summer){ JJA=unlist(list(file,JJA)) } if(month %in% fall){ SON=unlist(list(file,SON)) } } master=list() for(season in seasons){ print(season) master=unlist(list(master,season)) } for(mas in master){ print(mas) a=paste0(contempDir_seaonal,"/contemp_",mas);dir.create(a) bs=grep("bs",get(mas),value = T)%>%stack(.)%>%calc(.,fun = mean);writeRaster(bs,paste0(contempDir_seaonal,"/contemp_",mas,"/bs.tif"),format="GTiff",overwrite=T) bt=grep("bt",get(mas),value = T)%>%stack(.)%>%calc(.,fun = mean);writeRaster(bt,paste0(contempDir_seaonal,"/contemp_",mas,"/bt.tif"),format="GTiff",overwrite=T) st=grep("st",get(mas),value = T)%>%stack(.)%>%calc(.,fun = mean);writeRaster(st,paste0(contempDir_seaonal,"/contemp_",mas,"/st.tif"),format="GTiff",overwrite=T) SS=grep("SS",get(mas),value = T)%>%stack(.)%>%calc(.,fun = 
mean);writeRaster(SS,paste0(contempDir_seaonal,"/contemp_",mas,"/SS.tif"),format="GTiff",overwrite=T) sh=grep("sh.tif",get(mas),value = T)%>%stack(.)%>%calc(.,fun = mean);writeRaster(sh,paste0(contempDir_seaonal,"/contemp_",mas,"/sh.tif"),format="GTiff",overwrite=T) } ######### (end run once) ######### ----------------------------> projecting species over seasonal contemporary layers ##### ----------------------------> copying over the static bathymetric variables #### seasonalDir="/Volumes/SDM /Lacie backup October 2016/Lacie share/Climate_paper/GAM_1/project_20y_avs_seasonal/contemporary" monthlyDir="/Volumes/SDM /Lacie backup October 2016/Lacie share/Climate_paper/GAM_1/project_future" folders=list.files(seasonalDir,full.names = T) depth=paste0(monthlyDir,"/m01_2000/Depth.tif") rugosity=paste0(monthlyDir,"/m01_2000/Rugosity.tif") copy=function(x){ file.copy(depth,x) file.copy(rugosity,x) } lapply(folders,FUN=copy) ##### ----------------------------> Project full and partial models over these surfaces #### seasonalDir="/Volumes/SDM /Lacie backup October 2016/Lacie share/Climate_paper/GAM_1/project_20y_avs_seasonal/contemporary" years=list.files(seasonalDir) spp_dir="/Volumes/SDM /Lacie backup October 2016/Lacie share/Climate_paper/GAM_1/species" species=list.files(spp_dir)%>%lapply(.,function(x)gsub(".csv","",x))%>%unlist proj_full="/Volumes/SDM /Lacie backup October 2016/Lacie share/Climate_paper/GAM_1/project_20y_avs_seasonal/species_projections_full" #;dir.create(proj_full) proj_partial="/Volumes/SDM /Lacie backup October 2016/Lacie share/Climate_paper/GAM_1/project_20y_avs_seasonal/species_projections_partial" #;dir.create(proj_partial) model_full="/Volumes/SeaGate/ClimatePaperCM2.6/species_models" model_partial="/Volumes/SeaGate/ClimatePaperCM2.6/species_models_partial" proj_dir="/Volumes/SDM /Lacie backup October 2016/Lacie share/Climate_paper/GAM_1/project_20y_avs_seasonal/contemporary" #####partial models first#### for(sp in species){ print(sp) 
modelRDS=readRDS(paste0(model_partial,"/",sp,".rds")) path=paste0(proj_partial,"/",sp);dir.create(path) for(year in years){ print(year) layers_dir=paste0(proj_dir,"/",year) print(layers_dir) Depth<-raster(paste(layers_dir,"/Depth.tif",sep="")) Rugosity<-raster(paste(layers_dir,"/Rugosity.tif",sep="")) SS<-raster(paste(layers_dir,"/SS.tif",sep="")) bs<-raster(paste(layers_dir,"/bs.tif",sep="")) st<-raster(paste(layers_dir,"/st.tif",sep="")) bt<-raster(paste(layers_dir,"/bt.tif",sep="")) sh<-raster(paste(layers_dir,"/sh.tif",sep="")) rasterStack<-stack(Depth,Rugosity,SS,bs,st,bt,sh) names(rasterStack)<-c("Depth","Rugosity","SS","bs","st","bt","sh") ## make a raster stack for the missing date filename=paste0(path,"/",sp,"_",year,".tif") print(filename) raster::predict(rasterStack,modelRDS,filename=paste0(path,"/",sp,"_",year,".tif"),fun=predict,format="GTiff", type="response",na.rm=TRUE,overwrite=TRUE,progress='text') } } #####full models second#### for(sp in species){ print(sp) modelRDS=readRDS(paste0(model_full,"/",sp,".rds")) path=paste0(proj_full,"/",sp);dir.create(path) for(year in years){ print(year) layers_dir=paste0(proj_dir,"/",year) print(layers_dir) Depth<-raster(paste(layers_dir,"/Depth.tif",sep="")) Rugosity<-raster(paste(layers_dir,"/Rugosity.tif",sep="")) SS<-raster(paste(layers_dir,"/SS.tif",sep="")) bs<-raster(paste(layers_dir,"/bs.tif",sep="")) st<-raster(paste(layers_dir,"/st.tif",sep="")) bt<-raster(paste(layers_dir,"/bt.tif",sep="")) sh<-raster(paste(layers_dir,"/sh.tif",sep="")) rasterStack<-stack(Depth,Rugosity,SS,bs,st,bt,sh) names(rasterStack)<-c("Depth","Rugosity","SS","bs","st","bt","sh") ## make a raster stack for the missing date filename=paste0(path,"/",sp,"_",year,".tif") print(filename) raster::predict(rasterStack,modelRDS,filename=paste0(path,"/",sp,"_",year,".tif"),fun=predict,format="GTiff", type="response",na.rm=TRUE,overwrite=TRUE,progress='text') } } make_png=function(r,year,species,model_type){ ### does what it says 
png(paste0("/Volumes/SDM /Lacie backup October 2016/Lacie share/Climate_paper/GAM_1/project_20y_avs_seasonal/pngs/",species,"_",year,"_",model_type,".png"), width=7, height=5, units="in", res=400) par(ps=10) #settings before layout layout(matrix(c(1,2), nrow=2, ncol=1, byrow=TRUE), heights=c(4,1), widths=7) #layout.show(2) # run to see layout; comment out to prevent plotting during .pdf par(cex=1) # layout has the tendency change par()$cex, so this step is important for control par(mar=c(4,4,1,1)) # I usually set my margins before each plot #pal <- colorRampPalette(c("blue", "grey", "red")) pal <- colorRampPalette(c("darkblue","blue", "cyan", "yellow", "red","dark red")) #pal <- colorRampPalette(c("purple4", "white", "blue")) ncolors <- 200 breaks <- seq(0,1,,ncolors+1) image(r, col=pal(ncolors), breaks=breaks) map("world", add=TRUE, lwd=2) #contour(r, add=TRUE, col="black",levels=c(-.75,-.5,.5,.75)) box() par(mar=c(4,4,0,1)) # I usually set my margins before each plot levs <- breaks[-1] - diff(breaks)/2 image(x=levs, y=1, z=as.matrix(levs), col=pal(ncolors), breaks=breaks, ylab="", xlab="", yaxt="n") # if(species=="d193"){ # common_name="ocean pout" # } # # if(species=="d171"){ # common_name="northern sea robin" # } # # if(species=="d24"){ # common_name="clearnose skate" # } # # if(species=="bp15"){ # common_name="spiny dogfish" # } # # if(species=="d103"){ # common_name="summer flounder" # } # # if(species=="d106"){ # common_name="winter flounder" # } # # if(species=="d73"){ # common_name="atlantic cod" # } # # if(species=="d74"){ # common_name="haddock" # } # # if(species=="e301"){ # common_name="american lobster" # } # # if(species=="bp502"){ # common_name="longfin squid" # } # # if(species=="d141"){ # common_name="black sea bass" # } # # if(species=="d143"){ # common_name="scup" # } # # if(species=="p135"){ # common_name="bluefish" # } # # if(species=="bp131"){ # common_name="butterfish" # } # # if(species=="d84"){ # common_name="cusk" # } # 
if(species=="d631"){ # common_name="sheepshead" # } # if(species=="p121"){ # common_name="Atlantic mackerel" # } # if(species=="bp32"){ # common_name="Atlantic herring" # } # if(species=="d654"){ # common_name="red drum" # } # if(species=="d147"){ # common_name="black drum" # } # if(species=="e401"){ # common_name="sea scallop" # } # if(species=="d139"){ # common_name="striped bass" # } common_name=species ### will need to remove this for paper to use real names mtext(paste0("Habitat suitability for ",common_name,". ",year,". Model type = ",model_type), side=1, line=2.5) box() dev.off() # closes device } for(sp in species){ path=paste0(proj_full,"/",sp) for(year in years){ ras=paste0(path,"/",sp,"_",year,".tif") r=raster(ras) make_png(r=r,year=year,species=sp,model_type = "full") } } for(sp in species){ path=paste0(proj_partial,"/",sp) for(year in years){ ras=paste0(path,"/",sp,"_",year,".tif") r=raster(ras) make_png(r=r,year=year,species=sp,model_type = "partial") } }
/Scripts/var_comparison_seasonal_contemporary.R
no_license
CM26-Climate-Paper/Analysis-code
R
false
false
9,446
r
###### --------- > calculating seasonal contemporary layers + projections library(raster) library(tidyverse) library(maps) ######### ----------------------------> create contemporary rasters averaged by season (run once) #### contempDir_monthly="/Volumes/SDM /Lacie backup October 2016/Lacie share/Climate_paper/GAM_1/project" ## monthly current rasters contempDir_seaonal="/Volumes/SDM /Lacie backup October 2016/Lacie share/Climate_paper/GAM_1/project_20y_avs_seasonal/contemporary" #;dir.create(contempDir_seaonal) ## folder to house current rasters averaged by season winter=c("m12","m01","m02") spring=c("m03","m04","m05") summer=c("m06","m07","m08") fall=c("m09","m10","m11") seasons=c("DJF","MAM","JJA","SON") for(season in seasons){ print(season) a=list() assign(season,a) } vars=c("bt","bs","st","SS","sh") allfiles=list.files(contempDir_monthly,recursive = T,full.names = T) for(file in allfiles){ print(file) a=strsplit(file,"/")[[1]][9] b=strsplit(a,"_") month=b[[1]][1] year=b[[1]][2] ########## 20-40 if(month %in% winter){ DJF=unlist(list(file,DJF)) } if(month %in% spring){ MAM=unlist(list(file,MAM)) } if(month %in% summer){ JJA=unlist(list(file,JJA)) } if(month %in% fall){ SON=unlist(list(file,SON)) } } master=list() for(season in seasons){ print(season) master=unlist(list(master,season)) } for(mas in master){ print(mas) a=paste0(contempDir_seaonal,"/contemp_",mas);dir.create(a) bs=grep("bs",get(mas),value = T)%>%stack(.)%>%calc(.,fun = mean);writeRaster(bs,paste0(contempDir_seaonal,"/contemp_",mas,"/bs.tif"),format="GTiff",overwrite=T) bt=grep("bt",get(mas),value = T)%>%stack(.)%>%calc(.,fun = mean);writeRaster(bt,paste0(contempDir_seaonal,"/contemp_",mas,"/bt.tif"),format="GTiff",overwrite=T) st=grep("st",get(mas),value = T)%>%stack(.)%>%calc(.,fun = mean);writeRaster(st,paste0(contempDir_seaonal,"/contemp_",mas,"/st.tif"),format="GTiff",overwrite=T) SS=grep("SS",get(mas),value = T)%>%stack(.)%>%calc(.,fun = 
mean);writeRaster(SS,paste0(contempDir_seaonal,"/contemp_",mas,"/SS.tif"),format="GTiff",overwrite=T) sh=grep("sh.tif",get(mas),value = T)%>%stack(.)%>%calc(.,fun = mean);writeRaster(sh,paste0(contempDir_seaonal,"/contemp_",mas,"/sh.tif"),format="GTiff",overwrite=T) } ######### (end run once) ######### ----------------------------> projecting species over seasonal contemporary layers ##### ----------------------------> copying over the static bathymetric variables #### seasonalDir="/Volumes/SDM /Lacie backup October 2016/Lacie share/Climate_paper/GAM_1/project_20y_avs_seasonal/contemporary" monthlyDir="/Volumes/SDM /Lacie backup October 2016/Lacie share/Climate_paper/GAM_1/project_future" folders=list.files(seasonalDir,full.names = T) depth=paste0(monthlyDir,"/m01_2000/Depth.tif") rugosity=paste0(monthlyDir,"/m01_2000/Rugosity.tif") copy=function(x){ file.copy(depth,x) file.copy(rugosity,x) } lapply(folders,FUN=copy) ##### ----------------------------> Project full and partial models over these surfaces #### seasonalDir="/Volumes/SDM /Lacie backup October 2016/Lacie share/Climate_paper/GAM_1/project_20y_avs_seasonal/contemporary" years=list.files(seasonalDir) spp_dir="/Volumes/SDM /Lacie backup October 2016/Lacie share/Climate_paper/GAM_1/species" species=list.files(spp_dir)%>%lapply(.,function(x)gsub(".csv","",x))%>%unlist proj_full="/Volumes/SDM /Lacie backup October 2016/Lacie share/Climate_paper/GAM_1/project_20y_avs_seasonal/species_projections_full" #;dir.create(proj_full) proj_partial="/Volumes/SDM /Lacie backup October 2016/Lacie share/Climate_paper/GAM_1/project_20y_avs_seasonal/species_projections_partial" #;dir.create(proj_partial) model_full="/Volumes/SeaGate/ClimatePaperCM2.6/species_models" model_partial="/Volumes/SeaGate/ClimatePaperCM2.6/species_models_partial" proj_dir="/Volumes/SDM /Lacie backup October 2016/Lacie share/Climate_paper/GAM_1/project_20y_avs_seasonal/contemporary" #####partial models first#### for(sp in species){ print(sp) 
modelRDS=readRDS(paste0(model_partial,"/",sp,".rds")) path=paste0(proj_partial,"/",sp);dir.create(path) for(year in years){ print(year) layers_dir=paste0(proj_dir,"/",year) print(layers_dir) Depth<-raster(paste(layers_dir,"/Depth.tif",sep="")) Rugosity<-raster(paste(layers_dir,"/Rugosity.tif",sep="")) SS<-raster(paste(layers_dir,"/SS.tif",sep="")) bs<-raster(paste(layers_dir,"/bs.tif",sep="")) st<-raster(paste(layers_dir,"/st.tif",sep="")) bt<-raster(paste(layers_dir,"/bt.tif",sep="")) sh<-raster(paste(layers_dir,"/sh.tif",sep="")) rasterStack<-stack(Depth,Rugosity,SS,bs,st,bt,sh) names(rasterStack)<-c("Depth","Rugosity","SS","bs","st","bt","sh") ## make a raster stack for the missing date filename=paste0(path,"/",sp,"_",year,".tif") print(filename) raster::predict(rasterStack,modelRDS,filename=paste0(path,"/",sp,"_",year,".tif"),fun=predict,format="GTiff", type="response",na.rm=TRUE,overwrite=TRUE,progress='text') } } #####full models second#### for(sp in species){ print(sp) modelRDS=readRDS(paste0(model_full,"/",sp,".rds")) path=paste0(proj_full,"/",sp);dir.create(path) for(year in years){ print(year) layers_dir=paste0(proj_dir,"/",year) print(layers_dir) Depth<-raster(paste(layers_dir,"/Depth.tif",sep="")) Rugosity<-raster(paste(layers_dir,"/Rugosity.tif",sep="")) SS<-raster(paste(layers_dir,"/SS.tif",sep="")) bs<-raster(paste(layers_dir,"/bs.tif",sep="")) st<-raster(paste(layers_dir,"/st.tif",sep="")) bt<-raster(paste(layers_dir,"/bt.tif",sep="")) sh<-raster(paste(layers_dir,"/sh.tif",sep="")) rasterStack<-stack(Depth,Rugosity,SS,bs,st,bt,sh) names(rasterStack)<-c("Depth","Rugosity","SS","bs","st","bt","sh") ## make a raster stack for the missing date filename=paste0(path,"/",sp,"_",year,".tif") print(filename) raster::predict(rasterStack,modelRDS,filename=paste0(path,"/",sp,"_",year,".tif"),fun=predict,format="GTiff", type="response",na.rm=TRUE,overwrite=TRUE,progress='text') } } make_png=function(r,year,species,model_type){ ### does what it says 
png(paste0("/Volumes/SDM /Lacie backup October 2016/Lacie share/Climate_paper/GAM_1/project_20y_avs_seasonal/pngs/",species,"_",year,"_",model_type,".png"), width=7, height=5, units="in", res=400) par(ps=10) #settings before layout layout(matrix(c(1,2), nrow=2, ncol=1, byrow=TRUE), heights=c(4,1), widths=7) #layout.show(2) # run to see layout; comment out to prevent plotting during .pdf par(cex=1) # layout has the tendency change par()$cex, so this step is important for control par(mar=c(4,4,1,1)) # I usually set my margins before each plot #pal <- colorRampPalette(c("blue", "grey", "red")) pal <- colorRampPalette(c("darkblue","blue", "cyan", "yellow", "red","dark red")) #pal <- colorRampPalette(c("purple4", "white", "blue")) ncolors <- 200 breaks <- seq(0,1,,ncolors+1) image(r, col=pal(ncolors), breaks=breaks) map("world", add=TRUE, lwd=2) #contour(r, add=TRUE, col="black",levels=c(-.75,-.5,.5,.75)) box() par(mar=c(4,4,0,1)) # I usually set my margins before each plot levs <- breaks[-1] - diff(breaks)/2 image(x=levs, y=1, z=as.matrix(levs), col=pal(ncolors), breaks=breaks, ylab="", xlab="", yaxt="n") # if(species=="d193"){ # common_name="ocean pout" # } # # if(species=="d171"){ # common_name="northern sea robin" # } # # if(species=="d24"){ # common_name="clearnose skate" # } # # if(species=="bp15"){ # common_name="spiny dogfish" # } # # if(species=="d103"){ # common_name="summer flounder" # } # # if(species=="d106"){ # common_name="winter flounder" # } # # if(species=="d73"){ # common_name="atlantic cod" # } # # if(species=="d74"){ # common_name="haddock" # } # # if(species=="e301"){ # common_name="american lobster" # } # # if(species=="bp502"){ # common_name="longfin squid" # } # # if(species=="d141"){ # common_name="black sea bass" # } # # if(species=="d143"){ # common_name="scup" # } # # if(species=="p135"){ # common_name="bluefish" # } # # if(species=="bp131"){ # common_name="butterfish" # } # # if(species=="d84"){ # common_name="cusk" # } # 
if(species=="d631"){ # common_name="sheepshead" # } # if(species=="p121"){ # common_name="Atlantic mackerel" # } # if(species=="bp32"){ # common_name="Atlantic herring" # } # if(species=="d654"){ # common_name="red drum" # } # if(species=="d147"){ # common_name="black drum" # } # if(species=="e401"){ # common_name="sea scallop" # } # if(species=="d139"){ # common_name="striped bass" # } common_name=species ### will need to remove this for paper to use real names mtext(paste0("Habitat suitability for ",common_name,". ",year,". Model type = ",model_type), side=1, line=2.5) box() dev.off() # closes device } for(sp in species){ path=paste0(proj_full,"/",sp) for(year in years){ ras=paste0(path,"/",sp,"_",year,".tif") r=raster(ras) make_png(r=r,year=year,species=sp,model_type = "full") } } for(sp in species){ path=paste0(proj_partial,"/",sp) for(year in years){ ras=paste0(path,"/",sp,"_",year,".tif") r=raster(ras) make_png(r=r,year=year,species=sp,model_type = "partial") } }
## Format a number for better printing when assembling an equation string.
## Print "- 0.3" instead of "+ -0.3".
##
## Args:
##   x: numeric vector of coefficients.
##   noPlus: if TRUE, return a bare signed number ("0.3" / "-0.3");
##           otherwise return an infix operator form (" + 0.3" / " - 0.3").
##
## Returns: a character vector the same length as x.
cf <- function(x, noPlus = FALSE){
  ## Use >= so a zero coefficient renders with a plus sign (" + 0")
  ## instead of the misleading " - 0" / "-0" that strict > produced.
  if(noPlus)
    ifelse(x >= 0, paste0(x), paste0("-", -x))
  else
    ifelse(x >= 0, paste0(" + ", x), paste0(" - ", -x))
}
/R/cf.R
no_license
weibokong27/netSEM
R
false
false
242
r
## Format a coefficient for pretty-printing in an equation string:
## renders "- 0.3" rather than "+ -0.3".
##
## x:      numeric vector of coefficients.
## noPlus: when TRUE, emit a bare signed number ("0.3" / "-0.3");
##         when FALSE (default), emit an infix form (" + 0.3" / " - 0.3").
## Returns a character vector the same length as x.
cf <- function(x, noPlus = FALSE){
  positive <- x > 0
  if(noPlus){
    ifelse(positive, paste0(x), paste0("-", -x))
  } else {
    ifelse(positive, paste0(" + ", x), paste0(" - ", -x))
  }
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calculate.R
\name{BNPR}
\alias{BNPR}
\alias{BNPR_PS}
\title{Bayesian nonparametric phylodynamic reconstruction.}
\usage{
BNPR(data, lengthout = 100, pref = FALSE, prec_alpha = 0.01,
  prec_beta = 0.01, beta1_prec = 0.001, fns = NULL, log_fns = TRUE,
  simplify = TRUE, derivative = FALSE, forward = TRUE)

BNPR_PS(data, lengthout = 100, prec_alpha = 0.01, prec_beta = 0.01,
  beta1_prec = 0.001, fns = NULL, log_fns = TRUE, simplify = TRUE,
  derivative = FALSE, forward = TRUE)
}
\arguments{
\item{data}{\code{phylo} object or list containing vectors of coalescent
times \code{coal_times}, sampling times \code{samp_times}, and number
sampled per sampling time \code{n_sampled}.}

\item{lengthout}{numeric specifying number of grid points.}

\item{pref}{logical. Should the preferential sampling model be used?}

\item{prec_alpha, prec_beta}{numerics specifying gamma prior for precision
\eqn{\tau}.}

\item{beta1_prec}{numeric specifying precision for normal prior on
\eqn{\beta_1}.}

\item{fns}{list containing functions of covariates.}

\item{log_fns}{logical whether or not to apply a log-transformation to
the output of the functions in \code{fns}.}

\item{simplify}{logical whether to fully bucket all Poisson points.}

\item{derivative}{logical whether to calculate estimates of the
log-derivative.}

\item{forward}{logical whether to use the finite difference approximations
of the log-derivative as a forward or backward derivative.}
}
\value{
Phylodynamic reconstruction of effective population size at grid points.
\code{result} contains the INLA output, \code{data} contains the information passed to INLA, \code{grid} contains the grid end points, \code{x} contains the grid point centers, \code{effpop} contains a vector of the posterior median effective population size estimates, \code{effpop025} and \code{effpop975} contain the 2.5th and 97.5th posterior percentiles, \code{summary} contains a data.frame of the estimates, and \code{derivative} (if \code{derivative = TRUE}) contains a data.frame summarizing the log-derivative. } \description{ Bayesian nonparametric phylodynamic reconstruction. } \section{Functions}{ \itemize{ \item \code{BNPR_PS}: Uses preferential sampling model. }} \examples{ data("NY_flu") res = BNPR(NY_flu) plot_BNPR(res) }
/man/BNPR.Rd
no_license
lucyxia/phylodyn
R
false
true
2,380
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calculate.R
\name{BNPR}
\alias{BNPR}
\alias{BNPR_PS}
\title{Bayesian nonparametric phylodynamic reconstruction.}
\usage{
BNPR(data, lengthout = 100, pref = FALSE, prec_alpha = 0.01,
  prec_beta = 0.01, beta1_prec = 0.001, fns = NULL, log_fns = TRUE,
  simplify = TRUE, derivative = FALSE, forward = TRUE)

BNPR_PS(data, lengthout = 100, prec_alpha = 0.01, prec_beta = 0.01,
  beta1_prec = 0.001, fns = NULL, log_fns = TRUE, simplify = TRUE,
  derivative = FALSE, forward = TRUE)
}
\arguments{
\item{data}{\code{phylo} object or list containing vectors of coalescent
times \code{coal_times}, sampling times \code{samp_times}, and number
sampled per sampling time \code{n_sampled}.}

\item{lengthout}{numeric specifying number of grid points.}

\item{pref}{logical. Should the preferential sampling model be used?}

\item{prec_alpha, prec_beta}{numerics specifying gamma prior for precision
\eqn{\tau}.}

\item{beta1_prec}{numeric specifying precision for normal prior on
\eqn{\beta_1}.}

\item{fns}{list containing functions of covariates.}

\item{log_fns}{logical whether or not to apply a log-transformation to
the output of the functions in \code{fns}.}

\item{simplify}{logical whether to fully bucket all Poisson points.}

\item{derivative}{logical whether to calculate estimates of the
log-derivative.}

\item{forward}{logical whether to use the finite difference approximations
of the log-derivative as a forward or backward derivative.}
}
\value{
Phylodynamic reconstruction of effective population size at grid points.
\code{result} contains the INLA output, \code{data} contains the information passed to INLA, \code{grid} contains the grid end points, \code{x} contains the grid point centers, \code{effpop} contains a vector of the posterior median effective population size estimates, \code{effpop025} and \code{effpop975} contain the 2.5th and 97.5th posterior percentiles, \code{summary} contains a data.frame of the estimates, and \code{derivative} (if \code{derivative = TRUE}) contains a data.frame summarizing the log-derivative. } \description{ Bayesian nonparametric phylodynamic reconstruction. } \section{Functions}{ \itemize{ \item \code{BNPR_PS}: Uses preferential sampling model. }} \examples{ data("NY_flu") res = BNPR(NY_flu) plot_BNPR(res) }