blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
2
327
content_id
stringlengths
40
40
detected_licenses
listlengths
0
91
license_type
stringclasses
2 values
repo_name
stringlengths
5
134
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringclasses
46 values
visit_date
timestamp[us]date
2016-08-02 22:44:29
2023-09-06 08:39:28
revision_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
committer_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
github_id
int64
19.4k
671M
star_events_count
int64
0
40k
fork_events_count
int64
0
32.4k
gha_license_id
stringclasses
14 values
gha_event_created_at
timestamp[us]date
2012-06-21 16:39:19
2023-09-14 21:52:42
gha_created_at
timestamp[us]date
2008-05-25 01:21:32
2023-06-28 13:19:12
gha_language
stringclasses
60 values
src_encoding
stringclasses
24 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
7
9.18M
extension
stringclasses
20 values
filename
stringlengths
1
141
content
stringlengths
7
9.18M
fd1a31d54ea1ade4810eddc77b835047cb1f3c57
d234c1625aad71230609b61fad75de61c263b84c
/functions/download_wnv.R
52e0c820b5ec76bb7dafa57c311d2cb9362b3012
[]
no_license
geneorama/wnv_map_demo
1cf355a97264b59fbee47beab38a8311eaeaa3f6
1d16e29441071cd5c3494763f0981a63bec11aeb
refs/heads/master
2020-12-25T14:13:42.666594
2017-10-16T16:24:49
2017-10-16T16:24:49
65,930,491
4
4
null
null
null
null
UTF-8
R
false
false
636
r
download_wnv.R
download_wnv <- function(infile = "data/wnv.csv", inurl = "https://data.cityofchicago.org/api/views/jqe8-8r6s/rows.csv?accessType=DOWNLOAD"){ if(!file.exists(infile)){ download.file(url = inurl, destfile = infile) } dat <- data.table::fread(infile) setnames(dat, tolower(colnames(dat))) setnames(dat, gsub(" ", "_", colnames(dat))) setnames(dat, "test_date", "date") dat <- dat[ , date := as.IDate(date, "%m/%d/%Y")][] dat <- dat[ , result := result == "positive"][] dat <- dat[ , location := NULL][] setkey(dat, date, trap, species, result) return(dat) }
327d1793dc053f5ab832526d43a4c7c6a9f38bd3
2a2d3489886a0e4bd5b76ca726adc3b7f44386cb
/shiny/MagicWeb/ui.R
9145377d373d7b1d7b75ed19951f3ad65af2dc2e
[ "MIT" ]
permissive
liufan-creat/magic
68d51fdf847dda49500f5a963d4fce74198c9462
a672b94c9262335cbec68e6817cd4de8eb701c65
refs/heads/master
2021-10-23T16:11:02.362069
2019-03-18T18:29:45
2019-03-18T18:29:45
null
0
0
null
null
null
null
UTF-8
R
false
false
3,389
r
ui.R
# Copyright (C) 2017 Dana-Farber Cancer Institute Inc. # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # Questions, comments and concerns can be directed to # Alexander Gimelbrant: alexander_gimelbrant@dfci.harvard.edu # Sebastien Vigneau: Sebastien_Vigneau@dfci.harvard.edu # Svetlana Vinogradova: Svetlana_Vinogradova@dfci.harvard.edu # Henry Ward: henry.neil.ward@gmail.com # Sachit Saksena: sachitdsaksena@utexas.edu ###### ### UI LIBRARIES ###### # Gets custom install directory if used in install.R lib <- get_install_dir(paste0(getwd(), "/../../")) if (is.null(lib)) lib <- NA if (is.na(lib)) { library(shiny) library(markdown) library(shinythemes) library(GGally) library(shinyFiles) library(bsplus) }else { library(shiny, lib.loc = lib) library(markdown, lib.loc = lib) library(shinythemes, lib.loc = lib) library(GGally, lib.loc = lib) library(shinyFiles, lib.loc = lib) library(bsplus, lib.loc = lib) } ###### ### UI GLOBALS ###### # All ui-specific global variables organism <- c("human", "mouse", "other") assembly <- c("mm9","mm10", "other") assembly <- c(assembly, "hg19", "hg38", "other") tg_names <- get_names(reference_folder, pattern = "*_tg.tsv") tg_names <- c("human", "mouse", "none", "other") model_names <- get_names(models_folder, pattern = "*_model.rds") acceptable_file_types <- c("text/plain", "text/csv", "text/comma-separated-values", ".csv", "text/tsv", "text/tab-separated-values", 
".tsv") selection_rules <- c("best", "oneSE", "tolerance") metric_names <- c("Kappa", "Accuracy", "ROC") sampling_method_names <- c("none", "down", "up") positive_classes <- c("MAE", "BAE", "other") model_list <- c("ada", "svmPoly", "rf", "nnet", "rpart", "mlpML", "knn", "evtree", "glmStepAIC") filtering <- c("olfactory receptor genes", "sex chromosomes", "imprinted genes") if (!is.na(lib)) { load_process_libraries(lib) load_analyze_libraries(lib) load_generate_libraries(lib) load_shiny_libraries(lib) } else { load_process_libraries() load_analyze_libraries() load_generate_libraries() load_shiny_libraries() } ###### ### UI ###### shinyUI( tagList( # make navbar look cool navbarPage( title = "", id="main_panel", theme = shinytheme("flatly"), # source tabPanels source("ui/ui-main-tab.R", local=TRUE)$value, source("ui/ui-process-tab.R", local=TRUE)$value, source("ui/ui-generate-tab.R", local=TRUE)$value, source("ui/ui-analyze-tab.R", local=TRUE)$value, source("ui/ui-tutorial-tab.R", local=TRUE)$value ), # activate tooltips, popovers use_bs_tooltip(), use_bs_popover() ) )
4e0a54d1261bf92da116913ef8c7991daea453f4
64b0d18eb0e78a963ef19599c2dec448da6603d3
/man/test_engine.Rd
69fa2273731f1593c8856a69ecde265ec42e0859
[ "MIT" ]
permissive
Chicago-R-User-Group/2017-n4-Meetup-Syberia
0bb8cf04112ba236e373e89b01db8f92b857b000
dc248c8702fc851ae50335ad6406f14e414c0744
refs/heads/master
2021-01-01T04:22:57.806786
2017-07-14T04:19:50
2017-07-14T04:19:50
97,166,590
5
0
null
null
null
null
UTF-8
R
false
true
3,293
rd
test_engine.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/test.R \name{test_engine} \alias{test_engine} \title{Run all tests in a syberia project or engine.} \usage{ test_engine(engine = syberia_engine(), base = "test", config = file.path("config", "environments", "test"), ignored_tests = ignored_tests_from_config(engine, base, config), optional_tests = optional_tests_from_config(engine, base, config), required = TRUE, reporter = c("summary", "check", "list", "minimal", "multi", "rstudio", "silent", "stop", "tap", "teamcity")[1L], error_on_failure = TRUE) } \arguments{ \item{engine}{syberia_engine. The syberia engine to test. If a \code{character}, it will be passed to \code{\link{syberia_engine}} first.} \item{base}{character. Any subdirectory to test specifically. By default, \code{"test"}.} \item{config}{character. The relative path to the configuration resource, by default \code{"config/environments/test"}.} \item{ignored_tests}{character. The list of tests to ignore, by default the local variable \code{ignored_tests} extracted from the configuration resource specific by the \code{config} parameter.} \item{optional_tests}{character. The list of tests to ignore, by default the local variable \code{optional_tests} extracted from the configuration resource specific by the \code{config} parameter.} \item{required}{logical. Whether or not all tests are required to have resources, by default \code{TRUE}. If \code{TRUE}, the \code{ignored_tests} resources will not be required to have an accompanying test. It is highly recommended that all your projects have full test coverage.} \item{reporter}{character. The testthat package test reporter to use. The options are \code{c("check", "list", "summary", "minimal", "multi", "rstudio", "silent", "stop", "tap", "teamcity")}, with the default being \code{"summary"}.} \item{error_on_failure}{logical. Whether or not to raise an error if there are any failures. 
By default, \code{TRUE}.} } \value{ A list of \code{testthat_results} objects giving the details for the tests executed on each tested resource. If \code{error_on_failure} is \code{TRUE}, error instead if there are any failures. } \description{ The tests that will be run are all those in the \code{test} subdirectory of the root of the syberia engine, unless otherwise specified. } \details{ It is possible to introduce additional behavior prior to and after tests. This can be used to perform additional testing not covered by sourcing all files in the "test/" directory of the syberia engine. To provide a setup or teardown hook, simply place a function or list of functions in a local variable \code{setup} or \code{teardown}, respectively, in \code{config/environments/test} relative to the root of the syberia engine, or pass the relevant \code{setup} or \code{teardown} parameters to this function. For example, creating a file \code{config/environments/test.R} with the code \code{setup <- function(env) cat("Running all tests.")} will print a message before all the tests are run. The one parameter the function must take is an environment which will contain a single key, \code{director}, pointing to the object returned by calling \code{\link{syberia_engine}}. } \seealso{ \code{\link{syberia_engine}} }
fb8b7e396872589cd6cdde713a97e24aedc4b2e3
a91c8d6928115e7ba12c76db197bc61fff3eab85
/Visuals/Scatter.R
7dfbb2611c6248f7cfb7c0015d7ec97cd79790ef
[]
no_license
no33mis/MSc-Dissertation
f308799cdbce8f780bcbee4dbb7ee7145b286f6e
f7d5676872d2d7388a556a79a79e3a8aa62e480f
refs/heads/master
2022-11-30T19:35:02.659772
2020-08-12T04:02:48
2020-08-12T04:02:48
null
0
0
null
null
null
null
UTF-8
R
false
false
5,631
r
Scatter.R
######################################## VISUALISING THE RESULTS ############################################# ### SCATTERPLOTS ############################################################################################################## ##CLEAR R MEMORY rm(list = ls()) ##call packages library(ggplot2) library(ggpubr) ##set the working directory and check the files within setwd("//../") list.files() ##read the files pop <- read.csv("final_pop.csv", stringsAsFactors = FALSE) avrg <- read.csv("final_avrg.csv", stringsAsFactors = FALSE) elder <- read.csv("final_elder.csv", stringsAsFactors = FALSE) ######################################################################## ### population count ##scatterplot for LM pop1 <- ggplot(pop, aes(x=estimations_lm, y=HDB_pop)) + geom_point()+ geom_abline(slope = 1) + scale_x_continuous(name ="Predicted LM", breaks = seq(from = 0, to = 125000, by = 25000), limits=c(0,135000)) + scale_y_continuous(name ="Observed", breaks = seq(from = 0, to = 125000, by = 25000), limits=c(0,135000)) + labs(title = "Population Estimation", subtitle = "LM without model tuning")+ stat_cor(label.x = 15000, label.y = 125000, size = 3) ##scatterplot for SVM pop2 <- ggplot(pop, aes(x=estimations_svm, y=HDB_pop)) + geom_point()+ geom_abline(slope = 1) + scale_x_continuous(name ="Predicted SVM", breaks = seq(from = 0, to = 125000, by = 25000), limits=c(0,135000)) + scale_y_continuous(name ="Observed", breaks = seq(from = 0, to = 125000, by = 25000), limits=c(0,135000)) + labs(title = "", subtitle = "SVM with feature combination")+ stat_cor(label.x = 15000, label.y = 125000, size = 3) ##scatterplot of SVM vs. 
LM pop3 <- ggplot(pop, aes(x=estimations_svm, y=estimations_lm)) + geom_point()+ geom_abline(slope = 1) + scale_x_continuous(name ="Predicted SVM", breaks = seq(from = 0, to = 125000, by = 25000), limits=c(0,135000)) + scale_y_continuous(name ="Predicted LM", breaks = seq(from = 0, to = 125000, by = 25000), limits=c(0,135000)) + labs(title = "", subtitle = "SVM vs. LM")+ stat_cor(label.x = 15000, label.y = 125000, size = 3) ######################################################################## ### average age ##scatterplot for LM avrg1 <- ggplot(avrg, aes(x=estimations_lm, y=avrg)) + geom_point()+ geom_abline(slope = 1) + scale_x_continuous(name ="Predicted LM", breaks = seq(from = 30, to = 50, by = 5), limits=c(28,52)) + scale_y_continuous(name ="Observed", breaks = seq(from = 30, to = 50, by = 5), limits=c(28,52)) + labs(title = "Average Age Estimation", subtitle = "LM without model tuning") + stat_cor(label.x = 30, label.y = 50, size = 3) ##scatterplot for SVM avrg2 <- ggplot(avrg, aes(x=estimations_svm, y=avrg)) + geom_point()+ geom_abline(slope = 1) + scale_x_continuous(name ="Predicted SVM", breaks = seq(from = 30, to = 50, by = 5), limits=c(28,52)) + scale_y_continuous(name ="Observed", breaks = seq(from = 30, to = 50, by = 5), limits=c(28,52)) + labs(title ="", subtitle = "SVM with feature combination")+ stat_cor(label.x = 30, label.y = 50, size = 3) ##scatterplot SVM vs. LM avrg3 <- ggplot(avrg, aes(x=estimations_svm, y=estimations_lm)) + geom_point()+ geom_abline(slope = 1) + scale_x_continuous(name ="Predicted SVM", breaks = seq(from = 30, to = 50, by = 5), limits=c(28,52)) + scale_y_continuous(name ="Predicted LM", breaks = seq(from = 30, to = 50, by = 5), limits=c(28,52)) + labs(title ="", subtitle = "SVM vs. 
LM") + stat_cor(label.x = 30, label.y = 50, size = 3) ######################################################################## ### elderly ##scatterplot for LM elder1 <- ggplot(elder, aes(x=estimations_lm, y=elder)) + geom_point()+ geom_abline(slope = 1) + scale_x_continuous(name ="Predicted LM", breaks = seq(from = 0, to = 0.35, by = 0.05), limits=c(0,0.35)) + scale_y_continuous(name ="Observed", breaks = seq(from = 0, to = 0.35, by = 0.05), limits=c(0,0.35)) + labs(title = "Elderly Proportion Estimation", subtitle = "LM without model tuning") + stat_cor(label.x = 0.025, label.y = 0.32, size = 3) ##scatterplot for RF elder2 <- ggplot(elder, aes(x=estimations_rf, y=elder)) + geom_point()+ geom_abline(slope = 1) + scale_x_continuous(name ="Predicted RF", breaks = seq(from = 0, to = 0.35, by = 0.05), limits=c(0,0.35)) + scale_y_continuous(name ="Observed", breaks = seq(from = 0, to = 0.35, by = 0.05), limits=c(0,0.35)) + labs(title ="", subtitle = "RF with feature combination") + stat_cor(label.x = 0.025, label.y = 0.32, size = 3) ##scatterplot RF vs. LM elder3 <- ggplot(elder, aes(x=estimations_rf, y=estimations_lm)) + geom_point()+ geom_abline(slope = 1) + scale_x_continuous(name ="Predicted RF", breaks = seq(from = 0, to = 0.35, by = 0.05), limits=c(0,0.35)) + scale_y_continuous(name ="Predicted LM", breaks = seq(from = 0, to = 0.35, by = 0.05), limits=c(0,0.35)) + labs(title ="", subtitle = "RF vs. LM")+ stat_cor(label.x = 0.025, label.y = 0.32, size = 3) ######################################################################## ##combine the plots ggarrange(pop1, pop2, pop3, avrg1, avrg2, avrg3, elder1, elder2, elder3, nrow = 3, ncol = 3)
35bd948b21b1660f64c57ec2cb4f40baf4df8a64
9cbc8d7ae4c57f4948d47f11e2edcba21a1ba334
/sources/modules/VEPowertrainsAndFuels/man/calcAverageFuelCI.Rd
c6e6bb650450b57047ec167fbd47ff9c6330a1ca
[ "Apache-2.0" ]
permissive
rickdonnelly/VisionEval-Dev
c01c7aa9ff669af75765d1dfed763a23216d4c66
433c3d407727dc5062ec4bf013abced4f8f17b10
refs/heads/master
2022-11-28T22:31:31.772517
2020-04-29T17:53:33
2020-04-29T17:53:33
285,674,503
0
0
Apache-2.0
2020-08-06T21:26:05
2020-08-06T21:26:05
null
UTF-8
R
false
true
2,192
rd
calcAverageFuelCI.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/CalculateCarbonIntensity.R \name{calcAverageFuelCI} \alias{calcAverageFuelCI} \title{Calculate average fuel carbon intensity of a transportation mode and type} \usage{ calcAverageFuelCI(FuelCI_, FuelProp_, BiofuelProp_) } \arguments{ \item{FuelCI_}{a named numeric vector of carbon intensity of fuel types where the values are grams of carbon dioxide equivalents per megajoule and the names are Gasoline, Diesel, Cng (compressed natural gas), Lng (liquified natural gas), Ethanol, Biodiesel, and Rng (renewable natural gas).} \item{FuelProp_}{a named vector of fuel proportions used by the mode and type, or in the case of transit with multiple metropolitan area data, a matrix of fuel proportions by type and metropolitan area. The names must be the names of the base fuel types consistent with the names used in FuelCI_ although only the names of fuels used by the mode and type need to be included.} \item{BiofuelProp_}{a named vector of the biofuel proportions of base fuels, or in the case of transit with multiple metropolitan area data, a matrix of biofuel proportions by type and metropolitan area. The names must be in form of the biofuel name concatenated with 'Prop' and concatenated with the base fuel name (e.g. EthanolPropGasoline).} } \description{ \code{calcAverageFuelCI} calculates the average carbon intensity of fuels used by a transportation mode and type considering the carbon intensities of the base fuels, biofuel mixtures, and the proportions of fuels used. } \details{ The function calculates the average carbon intensity of fuels used by a transportation mode (e.g. household, car service, commercial service, public transit, freight) and type (e.g. auto, light truck, van, bus, rail, heavy truck). The average carbon intensity is calculated from the base fuel mix for the mode and type (e.g. gasoline, diesel, compressed natural gas), the mix of biofuels used for the mode and type (e.g. 
ethanol mix in gasoline), and the mix of powertrains geared to the different base fuel types (e.g. proportion of light-duty vehicles that run on gasoline vs. the proportion running on diesel). }
317b1d30bddb1ac729e411ad2c0b467748ac026b
dafcf71d115e09846d3f901af97e19c0b56abc9e
/2_pops_2.R
1d8e77c81199d9c8d492e0a0d132f4ad505a3f5e
[]
no_license
mgrundler/Sonora-Code
4a4e78cb03357651119fd0e382ce39f33537d64a
3ea23b40e95780a610d3badc3fd117ebb2801b1c
refs/heads/master
2020-05-20T04:31:08.222926
2015-09-29T14:23:44
2015-09-29T14:23:44
29,192,278
0
1
null
null
null
null
UTF-8
R
false
false
27,705
r
2_pops_2.R
# I made most of the chunks into indpendent functions so that we can run them on as many # populations as we want start.pop <- 50 LF <- 0.3 percent.breed <- 0.5 carrying.capacity <- 2000 baseAttack <- c(.5, .5, .5, .5) n.off <- 4 s1=c(1,.1,.1,.1) s2=c(.1,1,.1,.1) s3=c(.1,.1,1,.1) s4=c(.1,.1,.1,1) sim <- rbind(s1,s2,s3,s4) T1=c(1,1,1,1) T2=c(1,1,1,1) T3=c(1,1,1,1) T4=c(1,1,1,1) hand <- rbind(T1,T2,T3,T4) # make the starting matrices - we'll make the linked allele later geno1 <- matrix(rbinom(start.pop*6, 1, (1/3)), ncol=6) colnames(geno1) <- c("bands1", "bands2", "red1", "red2", "neutral1", "neutral2") geno2 <- matrix(rbinom(start.pop*6, 1, (1/3)), ncol=6) colnames(geno2) <- c("bands1", "bands2", "red1", "red2", "neutral1", "neutral2") # set recombination frequency, select which individuals will recombine recombination=rbinom(start.pop, 1, LF) # do the recombination linked1 <- matrix(NA, nrow=start.pop, ncol=2) for(i in 1:start.pop){ if(recombination[i]==0){linked1[i,] <- geno1[,3:4][i,]} else linked1[i,]<- geno1[,3:4][i,c(2,1)] } linked2 <- matrix(NA, nrow=start.pop, ncol=2) for(i in 1:start.pop){ if(recombination[i]==0){linked2[i,] <- geno2[,3:4][i,]} else linked2[i,] <- geno2[,3:4][i,c(2,1)] } # the function for getting phenotypes from genotypes phenotype=function(offspring.phenotype){ offspring.phenotype1=ifelse(offspring.phenotype==0, 1, offspring.phenotype) offspring.phenotype2=ifelse(offspring.phenotype==1, 2, offspring.phenotype1) offspring.phenotype3=ifelse(offspring.phenotype==2, 2, offspring.phenotype2) offspring.phenotype4=ifelse(offspring.phenotype==3, 3, offspring.phenotype3) offspring.phenotype5=ifelse(offspring.phenotype==4, 4, offspring.phenotype4) offspring.phenotype6=ifelse(offspring.phenotype==5, 4, offspring.phenotype5) offspring.phenotype7=ifelse(offspring.phenotype==6, 3, offspring.phenotype6) offspring.phenotype8=ifelse(offspring.phenotype==7, 4, offspring.phenotype7) offspring.phenotype9=ifelse(offspring.phenotype==8, 4, 
offspring.phenotype8) offspring.phenotype10=ifelse(offspring.phenotype==9, 1, offspring.phenotype9) offspring.phenotype11=ifelse(offspring.phenotype==10, 2, offspring.phenotype10) offspring.phenotype12=ifelse(offspring.phenotype==11, 3, offspring.phenotype11) offspring.phenotype13=ifelse(offspring.phenotype==12, 4, offspring.phenotype12) return(offspring.phenotype13) } geno1 <- cbind(geno1[,1:4], linked1, geno1[,5:6]) g1ph <- phenotype(rowSums(cbind(geno1[,1:2], geno1[,3:4]*3))) geno1 <- cbind(g1ph, geno1[,1:4], linked1, geno1[,5:6]) colnames(geno1) <- c("phenotype","bands1", "bands2", "red1", "red2", "linked1", "linked2","neutral1", "neutral2") geno2 <- cbind(geno2[,1:4], linked2, geno2[,5:6]) g2ph <- phenotype(rowSums(cbind(geno2[,1:2], geno2[,3:4]*3))) geno2 <- cbind(g2ph, geno2[,1:4], linked2, geno2[,5:6]) colnames(geno2) <- c("phenotype","bands1", "bands2", "red1", "red2", "linked1", "linked2","neutral1", "neutral2") # breeding make.off <- function(n.off, mat, start.pop, percent.breed){ lucky <- sample(start.pop, percent.breed*start.pop) pairs <- mat[lucky,] pair1 <- pairs[1:(nrow(pairs)/2),] pair2 <- pairs[(1+nrow(pairs)/2):nrow(pairs),] bands.off1 <- matrix(nrow=n.off, ncol=nrow(pair1)) red.off1 <- matrix(nrow=n.off, ncol=nrow(pair1)) linked.off1 <- matrix(nrow=n.off, ncol=nrow(pair1)) neutral.off1 <- matrix(nrow=n.off, ncol=nrow(pair1)) for(i in 1:nrow(pair1)){ which.bands <- rbinom(n.off, 1, 0.5)+1 bands.off1[,i] <- pair1[i,1:2][which.bands] which.allele <- rbinom(n.off, 1, 0.5)+1 red.off1[,i] <- pair1[i,3:4][which.allele] linked.off1[,i] <- pair1[i,5:6][which.allele] which.neu <- rbinom(n.off, 1, 0.5)+1 neutral.off1[,i] <- pair1[i,7:8][which.neu] } bands.off2 <- matrix(nrow=n.off, ncol=nrow(pair1)) red.off2 <- matrix(nrow=n.off, ncol=nrow(pair1)) linked.off2 <- matrix(nrow=n.off, ncol=nrow(pair1)) neutral.off2 <- matrix(nrow=n.off, ncol=nrow(pair1)) for(i in 1:nrow(pair1)){ which.bands <- rbinom(n.off, 1, 0.5)+1 bands.off2[,i] <- pair2[i,1:2][which.bands] 
which.allele <- rbinom(n.off, 1, 0.5)+1 red.off2[,i] <- pair2[i,3:4][which.allele] linked.off2[,i] <- pair2[i,5:6][which.allele] which.neu <- rbinom(n.off, 1, 0.5)+1 neutral.off2[,i] <- pair2[i,7:8][which.neu] } offspring <- cbind(as.vector(bands.off1), as.vector(bands.off2), as.vector(red.off1), as.vector(red.off2), as.vector(linked.off1), as.vector(linked.off2), as.vector(neutral.off1), as.vector(neutral.off2)) return(offspring) } # negative frequency dependent selection NFDS <- function(pgmat, base.attack, similarity, handling){ pt <- c(sum(pgmat[,1]==1), sum(pgmat[,1]==2), sum(pgmat[,1]==3), sum(pgmat[,1]==4)) pheno1 <- pgmat[pgmat[,1]==1,] pheno2 <- pgmat[pgmat[,1]==2,] pheno3 <- pgmat[pgmat[,1]==3,] pheno4 <- pgmat[pgmat[,1]==4,] denom1=matrix(NA, nrow=4, ncol=4) for(k in 1:4){ for (j in 1:4){ denom1[j,k]=base.attack[k]*pt[k]*(1+similarity[k,j]*handling[k,j]*base.attack[j]*pt[j]) } } denom=sum(denom1) f1=matrix(NA, nrow=4, ncol=4) for(l in 1:4){ for(m in 1:4){ f1[l,m]=base.attack[l]*pt[l]*similarity[l,m]*base.attack[m]*pt[m] } } f=colSums(f1) surv1=round(pt-pt*(f/denom)) surv=(abs(surv1)+surv1)/2 both=ifelse(pt<surv, pt, surv) phenolist=list(pheno1, pheno2, pheno3,pheno4) phenolist2=list() phenosub=c() for(q in 1:4){ if(both[q]>1){phenolist2[[q]] <- phenolist[[q]][1:both[q],]} else if(both[q]==1){phenolist2[[q]] <- phenolist[[q]]} else if(both[q]==0){phenolist2[[q]] <- phenosub} else{phenolist2[[q]] <- phenosub} } # now we have a matrix of individuals that survived the morph-specific # predation next.gen.2 <- do.call(rbind, phenolist2) return(next.gen.2) } # normal LV selection LV <- function(NFmat, carrying.capacity, percent.breed, n.off){ rate.inc <- percent.breed*n.off nt <- nrow(NFmat) threshold <- nt*exp(rate.inc*(1-nt/carrying.capacity)) if(threshold > nrow(NFmat)){ rand <- sample(nt) next.gen.1 <- NFmat[rand,] next.gen <- next.gen.1 }else{ rand1 <- sample(nt) next.gen.1 <- NFmat[rand1,] next.gen <- next.gen.1[1:threshold,] } return(next.gen) } # make 
two alleles worth of genotypes - don't differentiate sexes - these are the first elements in a list # this is for later, to get the average difference in allele frequency between the two populations freqDiffs <- function(list){ a1 <- colMeans(list[[1]]) m1 <- cbind(mean(a1[2], a1[3]), mean(a1[4], a1[5]), mean(a1[6], a1[7]), mean(a1[8], a1[9])) a2 <- colMeans(list[[2]]) m2 <- cbind(mean(a2[2], a2[3]), mean(a2[4], a2[5]), mean(a2[6], a2[7]), mean(a2[8], a2[9])) # I include an absolute value because we care about the magnitude of the distance, not the # sign diff <- abs(m1-m2) } #################################### # test for loop #################### #################################### pops <- list() pops[[1]] <- list(geno1, geno2) for(i in 1:n.gen){ g1 <- pops[[i]][[1]][,2:9] g2 <- pops[[i]][[2]][,2:9] # exchange migrants n.mig <- round(nrow(g1)*percent.migrate) geno1m <- rbind(g2[1:n.mig,], g1[(n.mig+1):start.pop,]) geno2m <- rbind(g1[1:n.mig,], g2[(n.mig+1):start.pop,]) off1 <- make.off(4, geno1m, start.pop, percent.breed) off2 <- make.off(4, geno2m, start.pop, percent.breed) # make phenotypes g1 <- rbind(geno1m, off1) pheno1 <- phenotype(rowSums(cbind(g1[,1:2], g1[,3:4]*3))) pg1 <- cbind(pheno1, g1) order1 <- order(pg1[,1]) pg1 <- pg1[order1,] g2 <- rbind(geno2m, off2) pheno2 <- phenotype(rowSums(cbind(g2[,1:2], g2[,3:4]*3))) pg2 <- cbind(pheno2, g2) order2 <- order(pg2[,1]) pg2 <- pg2[order2,] pt1 <- c(sum(pg1[,1]==1), sum(pg1[,1]==2), sum(pg1[,1]==3), sum(pg1[,1]==4)) pheno1.1 <- pg1[pg1[,1]==1,] pheno1.2 <- pg1[pg1[,1]==2,] pheno1.3 <- pg1[pg1[,1]==3,] pheno1.4 <- pg2[pg2[,1]==4,] pt2 <- c(sum(pg2[,1]==1), sum(pg2[,1]==2), sum(pg2[,1]==3), sum(pg2[,1]==4)) pheno2.1 <- pg2[pg2[,1]==1,] pheno2.2 <- pg2[pg2[,1]==2,] pheno2.3 <- pg2[pg2[,1]==3,] pheno2.4 <- pg2[pg2[,1]==4,] ############################################## # NFDS ####################################### ############################################## # this needs more thought - should frequencies in 
one population affect what the predator sees? # we'll probably need two separate functions for that NF1 <- NFDS(pg1, baseAttack, sim, hand) NF2 <- NFDS(pg2, baseAttack, sim, hand) # randomize NF1 <- NF1[sample(nrow(NF1)),] NF2 <- NF2[sample(nrow(NF2)),] # normal selection fin1 <- LV(NF1, carrying.capacity, percent.breed, n.off) fin2 <- LV(NF2, carrying.capacity, percent.breed, n.off) fin <- list(fin1, fin2) # output this final pop to a list and pull it back to start over pops[[i+1]] <- fin } allele.freq <- function(list){ a1 <- colSums(list[[1]])/nrow(list[[1]]) af1 <- c(mean(a1[2], a1[3]), mean(a1[4], a1[5]), mean(a1[6], a1[7]), mean(a1[8], a1[9])) a2 <- colSums(list[[2]])/nrow(list[[2]]) af2 <- c(mean(a2[2], a2[3]), mean(a2[4], a2[5]), mean(a2[6], a2[7]), mean(a2[8], a2[9])) return(list(af1, af2)) } ################################################################### # bands vs. red plot ############################################## ################################################################### allele.freq.br <- function(list){ a1 <- colSums(list[[1]])/nrow(list[[1]]) af1 <- c(mean(a1[2], a1[3]), mean(a1[4], a1[5])) a2 <- colSums(list[[2]])/nrow(list[[2]]) af2 <- c(mean(a2[2], a2[3]), mean(a2[4], a2[5])) return(list(af1, af2)) } br.freq <- lapply(pops, allele.freq.br) plot(x=seq(0,1,by=0.1), y=seq(0,1,by=0.1), type="n", xlab="bands frequency", ylab="red frequency") bands.x <- c() red.y <- c() for(i in 1:length(pops)){ bands.x[i] <- br.freq[[i]][[1]][1] red.y[i] <- br.freq[[i]][[1]][2] } bands.x2 <- c() red.y2 <- c() for(i in 1:length(pops)){ bands.x2[i] <- br.freq[[i]][[2]][1] red.y2[i] <- br.freq[[i]][[2]][2] } points(bands.x, red.y, col='red') points(bands.x2, red.y2, pch=15) ################################### # function ######################## ################################### # set the parameters. 
The purpose of this function is to compare average # differences in allele frequencies between the two populations, so n.gen # should be >50 to get a decent average percent.breed <- 0.5 carrying.capacity <- 100 start.pop <- 50 n.gen <- 50 # the function - takes a two element vector of percent migrating and recomb. frequency # everything else is set. This is so we can feed it a wide range of parameter values # quickly and easily migLD <- function(vec){ # get the starting genotypes - this needs to be inside the function because # we will do multiple iterations later - so we need independent starting populations # for each run of the simulation #vec=c(0.1,0.1) geno1 <- matrix(rbinom(start.pop*6, 1, (2/3)), ncol=6) colnames(geno1) <- c("bands1", "bands2", "red1", "red2", "neutral1", "neutral2") geno2 <- matrix(rbinom(start.pop*6, 1, (1/3)), ncol=6) colnames(geno2) <- c("bands1", "bands2", "red1", "red2", "neutral1", "neutral2") # do the recombination recombination1 <- rbinom(start.pop, 1, vec[2]) recombination2 <- rbinom(start.pop, 1, vec[2]) linked1 <- matrix(NA, nrow=start.pop, ncol=2) for(i in 1:start.pop){ if(recombination1[i]==0){linked1[i,] <- geno1[,3:4][i,]} else linked1[i,]<- geno1[,3:4][i,c(2,1)] } linked2 <- matrix(NA, nrow=start.pop, ncol=2) for(i in 1:start.pop){ if(recombination2[i]==0){linked2[i,] <- geno2[,3:4][i,]} else linked2[i,] <- geno2[,3:4][i,c(2,1)] } geno1 <- cbind(geno1[,1:4], linked1, geno1[,5:6]) g1ph <- phenotype(rowSums(cbind(geno1[,1:2], geno1[,3:4]*3))) geno1 <- cbind(g1ph, geno1[,1:4], linked1, geno1[,5:6]) colnames(geno1) <- c("phenotype","bands1", "bands2", "red1", "red2", "linked1", "linked2","neutral1", "neutral2") geno2 <- cbind(geno2[,1:4], linked2, geno2[,5:6]) g2ph <- phenotype(rowSums(cbind(geno2[,1:2], geno2[,3:4]*3))) geno2 <- cbind(g2ph, geno2[,1:4], linked2, geno2[,5:6]) colnames(geno2) <- c("phenotype","bands1", "bands2", "red1", "red2", "linked1", "linked2","neutral1", "neutral2") pops <- list() pops[[1]] <- list(geno1, 
geno2) # now we do the for loop to fill the list #i=1 for(i in 1:n.gen){ g1 <- pops[[i]][[1]][,2:9] g2 <- pops[[i]][[2]][,2:9] # exchange migrants n.mig <- round(nrow(g1)*vec[1]) if(n.mig==0){ geno1m <- g1 geno2m <- g2 }else{ geno1m <- rbind(g2[1:n.mig,], g1[(n.mig+1):nrow(g1),]) geno2m <- rbind(g1[1:n.mig,], g2[(n.mig+1):nrow(g2),]) } off1 <- make.off(4, geno1m, nrow(geno1m), percent.breed) off2 <- make.off(4, geno2m, nrow(geno2m), percent.breed) # make phenotypes G1 <- rbind(geno1m, off1) pheno1 <- phenotype(rowSums(cbind(G1[,1:2], G1[,3:4]*3))) pg1 <- cbind(pheno1, G1) order1 <- order(pg1[,1]) pg1 <- pg1[order1,] G2 <- rbind(geno2m, off2) pheno2 <- phenotype(rowSums(cbind(G2[,1:2], G2[,3:4]*3))) pg2 <- cbind(pheno2, G2) order2 <- order(pg2[,1]) pg2 <- pg2[order2,] pt1 <- c(sum(pg1[,1]==1), sum(pg1[,1]==2), sum(pg1[,1]==3), sum(pg1[,1]==4)) pheno1.1 <- pg1[pg1[,1]==1,] pheno1.2 <- pg1[pg1[,1]==2,] pheno1.3 <- pg1[pg1[,1]==3,] pheno1.4 <- pg1[pg1[,1]==4,] pt2 <- c(sum(pg2[,1]==1), sum(pg2[,1]==2), sum(pg2[,1]==3), sum(pg2[,1]==4)) pheno2.1 <- pg2[pg2[,1]==1,] pheno2.2 <- pg2[pg2[,1]==2,] pheno2.3 <- pg2[pg2[,1]==3,] pheno2.4 <- pg2[pg2[,1]==4,] ############################################## # NFDS ####################################### ############################################## # this needs more thought - should frequencies in one population affect what the predator sees? 
# we'll probably need two separate functions for that NF1 <- NFDS(pg1, baseAttack, sim, hand) NF2 <- NFDS(pg2, baseAttack, sim, hand) # randomize NF1 <- NF1[sample(nrow(NF1)),] NF2 <- NF2[sample(nrow(NF2)),] # normal selection fin1 <- LV(NF1, carrying.capacity, percent.breed, n.off) fin2 <- LV(NF2, carrying.capacity, percent.breed, n.off) # make sure they recombine again r1 <- rbinom(nrow(fin1), 1, vec[2]) r2 <- rbinom(nrow(fin2), 1, vec[2]) l1 <- matrix(NA, nrow=nrow(fin1), ncol=2) for(k in 1:nrow(fin1)){ if(r1[k]==0){l1[k,] <- fin1[,6:7][k,]} else l1[k,]<- fin1[,6:7][k,c(2,1)] } l2 <- matrix(NA, nrow=nrow(fin2), ncol=2) for(k in 1:nrow(fin2)){ if(r2[k]==0){l2[k,] <- fin2[,6:7][k,]} else l2[k,] <- fin2[,6:7][k,c(2,1)] } FIN1 <- cbind(fin1[,2:5], l1, fin1[,8:9]) FINPH1 <- phenotype(rowSums(cbind(FIN1[,1:2], FIN1[,3:4]*3))) fin.1 <- cbind(FINPH1, FIN1) colnames(fin.1) <- c("phenotype","bands1", "bands2", "red1", "red2", "linked1", "linked2","neutral1", "neutral2") FIN2 <- cbind(fin2[,2:5], l2, fin2[,8:9]) FINPH2 <- phenotype(rowSums(cbind(FIN2[,1:2], FIN2[,3:4]*3))) fin.2 <- cbind(FINPH2, FIN2) colnames(fin.2) <- c("phenotype","bands1", "bands2", "red1", "red2", "linked1", "linked2","neutral1", "neutral2") fin <- list(fin.1, fin.2) # output this final pop to a list and pull it back to start over pops[[i+1]] <- fin } # once the list is made, we find the difference in allele frequency between the # two populations at each generation diffs <- lapply(pops, freqDiffs) fMat <- matrix(unlist(diffs), ncol=4, byrow=T) return(list(fMat,pops)) } # decide on the ranges of the migration % and recomb frequency we want to test pm1 <- seq(0, 0.1, by=0.01) rf1 <- seq(0, 0.5, by=0.1) # now repeat the complete first vector the same number of times as the length of second vector pm <- rep(pm1, length(rf1)) # repeat each element of the second vector the same number of times as the length of the first vector rf <- rep(rf1, each=length(pm1)) # now make a matrix of the two vectors bound 
together - this way each value of migration # is paired with each value of recomb frequency to test the entire range of parameters test <- cbind(pm, rf) # make each row of the matrix into an element in a list - just makes the apply easier ltest <- list() for(i in 1:nrow(test)){ ltest[[i]] <- test[i,] } # now iterate the function and lapply multiple times to get averages of behavior # of the model at each paramter value repLD <- list() for(j in 1:10){ # ltest is the same for each iteration, but re-running migLD will get us different # starting points and progression through the generations repLD[[j]] <- lapply(ltest, migLD) # get colmeans for each run - the columns are the loci, the rows are the # difference in allele frequencies between population 1 and population 2 # at each generation, so taking colmeans gets you the mean difference between # populations at that locus across mutliple generations #means <- lapply(af, function(mat){x <- colMeans(mat); return(x)}) # this gets the list of means into a matrix, which is output into a list #repLD[[j]] <- matrix(unlist(means), ncol=4, byrow=T) } # get the mean of the means across runs - each row is an allele # bands, red, linked, unlinked # each row is a set of parameter values mean <- Reduce('+', repLD, repLD[[1]])/10 # take the mean values for the "band" locus, make them into a matrix # with values of pm along the rows and values of rf for the columns xbandMeans <- matrix(mean[,1], ncol=length(rf1)) xredMeans <- matrix(mean[,2], ncol=length(rf1)) xlMeans <- matrix(mean[,3], ncol=length(rf1)) xulMeans <- matrix(mean[,4], ncol=length(rf1)) # plots! 
par(mfrow=c(2,2)) par(mar=c(1,1,1,1)) persp(pm1, rf1, xbandMeans,theta=30, phi=30, col="lightblue", shade=0.4, ticktype="detailed", zlim=c(0,0.25)) persp(pm1, rf1, xredMeans,theta=30, phi=30, col="lightblue", shade=0.4, ticktype="detailed", zlim=c(0,0.25)) persp(pm1, rf1, xlMeans, theta=30, phi=30, col="lightblue", shade=0.4, ticktype="detailed", zlim=c(0,0.5)) persp(pm1, rf1, xulMeans,theta=30, phi=30, col="lightblue", shade=0.4, ticktype="detailed", zlim=c(0,0.5)) # allele freq plot aflist1 <- list() aflist2 <- list() for(j in 1:10){ aflist1[[j]] <- matrix(NA, nrow=50, ncol=4) aflist2[[j]] <- matrix(NA, nrow=50, ncol=4) for(i in 1:50){ mat1 <- repLD[[j]][[1]][[2]][[i]][[1]] mat2 <- repLD[[j]][[1]][[2]][[i]][[2]] af <- colSums(mat1) af2 <- colSums(mat2) aflist1[[j]][i,] <- c(sum(af[2], af[3])/(2*nrow(mat1)), sum(af[4], af[5])/(2*nrow(mat1)), sum(af[6], af[7])/(2*nrow(mat1)), sum(af[8], af[9])/(2*nrow(mat1))) aflist2[[j]][i,] <- c(sum(af2[2], af2[3])/(2*nrow(mat2)), sum(af2[4], af2[5])/(2*nrow(mat2)), sum(af2[6], af2[7])/(2*nrow(mat2)), sum(af2[8], af2[9])/(2*nrow(mat2))) } } ALmean1 <- Reduce('+', aflist1, aflist1[[1]])/10 ALmean2 <- Reduce('+', aflist2, aflist2[[1]])/10 plot(x=1:50, y=seq(0,1,1/49), type="n") lines(ALmean1[,1]) lines(ALmean1[,2], col="red") lines(ALmean1[,4], col="yellow") lines(ALmean2[,1], col="grey") lines(ALmean2[,2], col="pink") lines(ALmean2[,4], col="orange") ######################################################### # predators see both populations ######################## ######################################################### NFDS2 <- function(fullpg, poppg, base.attack, similarity, handling){ #fullpg <- pg #poppg <- pg1 #base.attack <- baseAttack #similarity <- sim #handling <- hand pt <- c(sum(fullpg[,1]==1), sum(fullpg[,1]==2), sum(fullpg[,1]==3), sum(fullpg[,1]==4)) pheno1 <- fullpg[fullpg[,1]==1,] pheno2 <- fullpg[fullpg[,1]==2,] pheno3 <- fullpg[fullpg[,1]==3,] pheno4 <- fullpg[fullpg[,1]==4,] denom1 <- matrix(NA, nrow=4, ncol=4) 
for(k in 1:4){ for (j in 1:4){ denom1[j,k]=base.attack[k]*pt[k]*(1+similarity[k,j]*handling[k,j]*base.attack[j]*pt[j]) } } denom <- sum(denom1) f1 <- matrix(NA, nrow=4, ncol=4) for(l in 1:4){ for(m in 1:4){ f1[l,m]=base.attack[l]*pt[l]*similarity[l,m]*base.attack[m]*pt[m] } } f <- colSums(f1) # apply to actual matrices pt2 <- c(sum(poppg[,1]==1), sum(poppg[,1]==2), sum(poppg[,1]==3), sum(poppg[,1]==4)) pheno1.2 <- poppg[poppg[,1]==1,] pheno2.2 <- poppg[poppg[,1]==2,] pheno3.2 <- poppg[poppg[,1]==3,] pheno4.2 <- poppg[poppg[,1]==4,] sur2 <- round(pt2-pt2*(f/denom)) surv2 <- (abs(sur2)+sur2)/2 both <- ifelse(pt2<surv2, pt2, surv2) phenolist=list(pheno1.2, pheno2.2, pheno3.2, pheno4.2) phenolist2=list() phenosub=c() for(q in 1:4){ if(both[q]>1){phenolist2[[q]] <- phenolist[[q]][1:both[q],]} else if(both[q]==1){phenolist2[[q]] <- phenolist[[q]]} else if(both[q]==0){phenolist2[[q]] <- phenosub} else{phenolist2[[q]] <- phenosub} } # now we have a matrix of individuals that survived the morph-specific # predation next.gen.2 <- do.call(rbind, phenolist2) return(next.gen.2) } migLD2 <- function(vec){ # get the starting genotypes - this needs to be inside the function because # we will do multiple iterations later - so we need independent starting populations # for each run of the simulation #vec=c(0.1,0.1) geno1 <- matrix(rbinom(start.pop*6, 1, (1/3)), ncol=6) colnames(geno1) <- c("bands1", "bands2", "red1", "red2", "neutral1", "neutral2") geno2 <- matrix(rbinom(start.pop*6, 1, (2/3)), ncol=6) colnames(geno2) <- c("bands1", "bands2", "red1", "red2", "neutral1", "neutral2") # do the recombination recombination1 <- rbinom(start.pop, 1, vec[2]) recombination2 <- rbinom(start.pop, 1, vec[2]) linked1 <- matrix(NA, nrow=start.pop, ncol=2) for(i in 1:start.pop){ if(recombination1[i]==0){linked1[i,] <- geno1[,3:4][i,]} else linked1[i,]<- geno1[,3:4][i,c(2,1)] } linked2 <- matrix(NA, nrow=start.pop, ncol=2) for(i in 1:start.pop){ if(recombination2[i]==0){linked2[i,] <- 
geno2[,3:4][i,]} else linked2[i,] <- geno2[,3:4][i,c(2,1)] } geno1 <- cbind(geno1[,1:4], linked1, geno1[,5:6]) g1ph <- phenotype(rowSums(cbind(geno1[,1:2], geno1[,3:4]*3))) geno1 <- cbind(g1ph, geno1[,1:4], linked1, geno1[,5:6]) colnames(geno1) <- c("phenotype","bands1", "bands2", "red1", "red2", "linked1", "linked2","neutral1", "neutral2") geno2 <- cbind(geno2[,1:4], linked2, geno2[,5:6]) g2ph <- phenotype(rowSums(cbind(geno2[,1:2], geno2[,3:4]*3))) geno2 <- cbind(g2ph, geno2[,1:4], linked2, geno2[,5:6]) colnames(geno2) <- c("phenotype","bands1", "bands2", "red1", "red2", "linked1", "linked2","neutral1", "neutral2") pops <- list() pops[[1]] <- list(geno1, geno2) # now we do the for loop to fill the list #i=1 for(i in 1:n.gen){ g1 <- pops[[i]][[1]][,2:9] g2 <- pops[[i]][[2]][,2:9] # exchange migrants n.mig <- round(nrow(g1)*vec[1]) if(n.mig==0){ geno1m <- g1 geno2m <- g2 }else{ geno1m <- rbind(g2[1:n.mig,], g1[(n.mig+1):nrow(g1),]) geno2m <- rbind(g1[1:n.mig,], g2[(n.mig+1):nrow(g2),]) } off1 <- make.off(4, geno1m, nrow(geno1m), percent.breed) off2 <- make.off(4, geno2m, nrow(geno2m), percent.breed) # make phenotypes G1 <- rbind(geno1m, off1) pheno1 <- phenotype(rowSums(cbind(G1[,1:2], G1[,3:4]*3))) pg1 <- cbind(pheno1, G1) order1 <- order(pg1[,1]) pg1 <- pg1[order1,] G2 <- rbind(geno2m, off2) pheno2 <- phenotype(rowSums(cbind(G2[,1:2], G2[,3:4]*3))) pg2 <- cbind(pheno2, G2) order2 <- order(pg2[,1]) pg2 <- pg2[order2,] G <- rbind(G1, G2) ph <- phenotype(rowSums(cbind(G[,1:2], G[,3:4]*3))) pg <- cbind(ph, G) order <- order(pg[,1]) pg <- pg[order,] pt1 <- c(sum(pg1[,1]==1), sum(pg1[,1]==2), sum(pg1[,1]==3), sum(pg1[,1]==4)) pheno1.1 <- pg1[pg1[,1]==1,] pheno1.2 <- pg1[pg1[,1]==2,] pheno1.3 <- pg1[pg1[,1]==3,] pheno1.4 <- pg1[pg1[,1]==4,] pt2 <- c(sum(pg2[,1]==1), sum(pg2[,1]==2), sum(pg2[,1]==3), sum(pg2[,1]==4)) pheno2.1 <- pg2[pg2[,1]==1,] pheno2.2 <- pg2[pg2[,1]==2,] pheno2.3 <- pg2[pg2[,1]==3,] pheno2.4 <- pg2[pg2[,1]==4,] # NFDS 
####################################### # this needs more thought - should frequencies in one population affect what the predator sees? # we'll probably need two separate functions for that NF1 <- NFDS2(pg,pg1, baseAttack, sim, hand) NF2 <- NFDS2(pg,pg2, baseAttack, sim, hand) # randomize NF1 <- NF1[sample(nrow(NF1)),] NF2 <- NF2[sample(nrow(NF2)),] # normal selection fin1 <- LV(NF1, carrying.capacity, percent.breed, n.off) fin2 <- LV(NF2, carrying.capacity, percent.breed, n.off) # make sure they recombine again r1 <- rbinom(nrow(fin1), 1, vec[2]) r2 <- rbinom(nrow(fin2), 1, vec[2]) l1 <- matrix(NA, nrow=nrow(fin1), ncol=2) for(k in 1:nrow(fin1)){ if(r1[k]==0){l1[k,] <- fin1[,6:7][k,]} else l1[k,]<- fin1[,6:7][k,c(2,1)] } l2 <- matrix(NA, nrow=nrow(fin2), ncol=2) for(k in 1:nrow(fin2)){ if(r2[k]==0){l2[k,] <- fin2[,6:7][k,]} else l2[k,] <- fin2[,6:7][k,c(2,1)] } FIN1 <- cbind(fin1[,2:5], l1, fin1[,8:9]) FINPH1 <- phenotype(rowSums(cbind(FIN1[,1:2], FIN1[,3:4]*3))) fin.1 <- cbind(FINPH1, FIN1) colnames(fin.1) <- c("phenotype","bands1", "bands2", "red1", "red2", "linked1", "linked2","neutral1", "neutral2") FIN2 <- cbind(fin2[,2:5], l2, fin2[,8:9]) FINPH2 <- phenotype(rowSums(cbind(FIN2[,1:2], FIN2[,3:4]*3))) fin.2 <- cbind(FINPH2, FIN2) colnames(fin.2) <- c("phenotype","bands1", "bands2", "red1", "red2", "linked1", "linked2","neutral1", "neutral2") fin <- list(fin.1, fin.2) # output this final pop to a list and pull it back to start over pops[[i+1]] <- fin } # once the list is made, we find the difference in allele frequency between the # two populations at each generation diffs <- lapply(pops, freqDiffs) fMat <- matrix(unlist(diffs), ncol=4, byrow=T) return(list(fMat, pops)) } repLD2 <- list() for(j in 1:10){ # ltest is the same for each iteration, but re-running migLD will get us different # starting points and progression through the generations repLD2[[j]] <- lapply(ltest, migLD2) # get colmeans for each run - the columns are the loci, the rows are the # 
difference in allele frequencies between population 1 and population 2 # at each generation, so taking colmeans gets you the mean difference between # populations at that locus across mutliple generations #means <- lapply(af, function(mat){x <- colMeans(mat); return(x)}) # this gets the list of means into a matrix, which is output into a list #repLD2[[j]] <- matrix(unlist(means), ncol=4, byrow=T) } # get the mean of the means across runs - each row is an allele # bands, red, linked, unlinked # each row is a set of parameter values mean <- Reduce('+', repLD2, repLD2[[1]])/10 # take the mean values for the "band" locus, make them into a matrix # with values of pm along the rows and values of rf for the columns xbandMeans <- matrix(mean[,1], ncol=length(rf1)) xredMeans <- matrix(mean[,2], ncol=length(rf1)) xlMeans <- matrix(mean[,3], ncol=length(rf1)) xulMeans <- matrix(mean[,4], ncol=length(rf1)) # plots! par(mfrow=c(2,2)) par(mar=c(1,1,1,1)) persp(pm1, rf1, xbandMeans,theta=30, phi=30, col="lightblue", shade=0.4, ticktype="detailed", zlim=c(0,0.5)) persp(pm1, rf1, xredMeans,theta=30, phi=30, col="lightblue", shade=0.4, ticktype="detailed", zlim=c(0,0.5)) persp(pm1, rf1, xlMeans, theta=30, phi=30, col="lightblue", shade=0.4, ticktype="detailed", zlim=c(0,0.5)) persp(pm1, rf1, xulMeans,theta=30, phi=30, col="lightblue", shade=0.4, ticktype="detailed", zlim=c(0,0.5)) # allele freqs aflist1loc <- list() aflist2loc <- list() for(j in 1:10){ aflist1loc[[j]] <- matrix(NA, nrow=50, ncol=4) aflist2loc[[j]] <- matrix(NA, nrow=50, ncol=4) for(i in 1:50){ mat1 <- repLD2[[j]][[1]][[2]][[i]][[1]] mat2 <- repLD2[[j]][[1]][[2]][[i]][[2]] af <- colSums(mat1) af2 <- colSums(mat2) aflist1loc[[j]][i,] <- c(sum(af[2], af[3])/(2*nrow(mat1)), sum(af[4], af[5])/(2*nrow(mat1)), sum(af[6], af[7])/(2*nrow(mat1)), sum(af[8], af[9])/(2*nrow(mat1))) aflist2loc[[j]][i,] <- c(sum(af2[2], af2[3])/(2*nrow(mat2)), sum(af2[4], af2[5])/(2*nrow(mat2)), sum(af2[6], af2[7])/(2*nrow(mat2)), sum(af2[8], 
af2[9])/(2*nrow(mat2))) } } ALmean1loc <- Reduce('+', aflist1loc, aflist1loc[[1]])/10 ALmean2loc <- Reduce('+', aflist2loc, aflist2loc[[1]])/10 plot(x=1:50, y=seq(0,1,1/49), type="n") lines(ALmean1loc[,1]) lines(ALmean1loc[,2], col="red") lines(ALmean1loc[,4], col="yellow") lines(ALmean2loc[,1], col="grey") lines(ALmean2loc[,2], col="pink") lines(ALmean2loc[,4], col="orange")
72e3089b262523734ee9d3129d553fd417a8a74a
303cec757865d4187456554b6c8fff032e6ada19
/inst/tests/testthat/test-out_faux_InitChoose.R
7b2548ec9d1c2f343ac6a90598868dece9f0c2dd
[ "MIT" ]
permissive
shamindras/ars
591363b88d56ff2540996b7a4ba9c8d311baa146
d76b9d0f60743212beba2377729c25548c3f9d52
refs/heads/master
2020-12-26T04:16:03.406484
2015-12-17T19:55:02
2015-12-17T19:55:02
47,591,568
0
0
null
null
null
null
UTF-8
R
false
false
4,189
r
test-out_faux_InitChoose.R
context("test-out_faux_InitChoose: Check Initially chosen 2 sample points are reasonable") test_that("test-out_faux_InitChoose: Outputs are Validated", { # Test 1 - Check that we correctly sample 2 points as a default set.seed(0) g <- function(x) dnorm(x = x, mean = 0, sd = 1) # valid function Dvec <- c(-Inf, Inf) # valid Support y_test <- faux_InitChoose(inp_gfun = g, inp_Dvec = Dvec) expect_equal(length(y_test$init_sample_points), 2) # Test 2 - Check that we correctly sample 4 points if specified # Should pass as 4 points is an even integer set.seed(0) g <- function(x) dnorm(x = x, mean = 0, sd = 1) # valid function Dvec <- c(-Inf, Inf) # valid Support y_test <- faux_InitChoose(inp_gfun = g, inp_Dvec = Dvec, inp_Initnumsampvec = 4) expect_equal(length(y_test$init_sample_points), 4) # Test 3 - Check that we get an error if we try and initialise with # a positive decimal number instead of a positive even integer set.seed(0) g <- function(x) dnorm(x = x, mean = 0, sd = 1) # valid function Dvec <- c(-Inf, Inf) # valid Support # expect_that(faux_InitChoose(inp_gfun = g, inp_Dvec = Dvec # , inp_Initnumsampvec = 3.5), throws_error()) expect_error(faux_InitChoose(inp_gfun = g, inp_Dvec = Dvec , inp_Initnumsampvec = 3.5)) # Test 4 Check that the mode found in the function is correct for the standard # normal distribution set.seed(0) g <- function(x) dnorm(x) # valid function Dvec <- c(-Inf, Inf) # valid Support out <- faux_InitChoose(inp_gfun = g, inp_Dvec = Dvec) expect_equal(out$mode,0, tolerance=.00001) # Test 5 Check that the mode found in the function is correct for the chisquare # distribution with 5 df, which means the mode should be 2. 
set.seed(0) g <- function(x) dchisq(x,10) # valid function Dvec <- c(0, Inf) # valid Support out <- faux_InitChoose(inp_gfun = g, inp_Dvec = Dvec) expect_equal(out$mode,8, tolerance=.00001) # Test 6 Check that the points chosen have correcty sloped tangent lines for # the standard normal distribution set.seed(0) g <- function(x) dnorm(x) # valid function Dvec <- c(-Inf, Inf) # valid Support out <- faux_InitChoose(inp_gfun = g, inp_Dvec = Dvec) expect_that(faux_hPrimex(function(x) dnorm(x),out$init_sample_points[1])>0, is_true()) expect_that(faux_hPrimex(function(x) dnorm(x),out$init_sample_points[2])<0, is_true()) # Test 7 Check that the points chosen have correcty sloped tangent lines set.seed(0) g <- function(x) {2*exp(-2*x)} # valid function Dvec <- c(0, Inf) # valid Support out <- faux_InitChoose(inp_gfun = g, inp_Dvec = Dvec) expect_that(faux_hPrimex(function(x) 2*exp(-2*x),out$init_sample_points[2])<0, is_true()) # Test 8 Check that the points chosen have correcty sloped tangent lines for # the chisquare distribution with df=5 set.seed(0) g <- function(x) dchisq(x, df=5) # valid function Dvec <- c(0, Inf) # valid Support out <- faux_InitChoose(inp_gfun = g, inp_Dvec = Dvec) expect_that(faux_hPrimex(function(x) 2*exp(-2*x),out$init_sample_points[2])<0, is_true()) # g <- function(x) dnorm(x, mean = 100, sd = 10) # valid function # g <- function(x) dchisq(x,10) # valid function # Dvec <- c(0, Inf) # valid Support # out <- faux_InitChoose(inp_gfun = g, inp_Dvec = Dvec, inp_Initnumsampvec = 4) # out # g <- function(x) 2*exp(-2*x) # valid function # Dvec <- c(0, Inf) # valid Support # out <- faux_InitChoose(inp_gfun = g, inp_Dvec = Dvec, inp_Initnumsampvec = 4) # unique(out[[1]]) # g <- function(x) dunif(x, min = 0, max = 1) # valid function # Dvec <- c(0, 1) # valid Support # out <- faux_InitChoose(inp_gfun = g, inp_Dvec = Dvec, inp_Initnumsampvec = 4) # unique(out[[1]]) # UPDATE: Come back and finish this! 
# inp_gfun <- function(x) dnorm(x = x, mean = 7000, sd = 45) # inp_gfun <- function(x) {2 - (x-5)^2} # inp_gfun <- function(x) {2*exp(-2*x)} # inp_Dvec <- c(0, Inf) # test_faux_InitChoose <- faux_InitChoose(inp_gfun = inp_gfun, inp_Dvec = inp_Dvec) })
1fff341911feae15827ba31679d4a881022757f0
ab5b7b9030cf9f0aa7ca85ecb76182f7efbb680e
/run_analysis.R
8482da80350863c3195df36108836555c9ded8a9
[]
no_license
ptomasa/Getting-and-cleaning-data
660236c617f52dd095765e6108b4cefe244ed6ca
63b132e500296d7bd5557e2812432b43bf54cb64
refs/heads/master
2019-01-02T04:55:14.307070
2015-01-25T21:07:00
2015-01-25T21:07:00
29,830,620
0
0
null
null
null
null
UTF-8
R
false
false
2,984
r
run_analysis.R
# set the working directory (containing .txt files) setwd("/Users/MariaRamos/Documents/Coursera/Getting and cleaning data/project") # FIRST STEP ################################################################ ##### Merges the training and the test sets to create one data set. ######### ############################################################################# # read data subject_train <- read.table("subject_train.txt") subject_test <- read.table("subject_test.txt") X_train <- read.table("X_train.txt") X_test <- read.table("X_test.txt") y_train <- read.table("y_train.txt") y_test <- read.table("y_test.txt") # add column names ## add column name for subject files names(subject_train) <- "subjectID" names(subject_test) <- "subjectID" ## add column names for measurement files featureNames <- read.table("features.txt") names(X_train) <- featureNames$V2 names(X_test) <- featureNames$V2 ## add column name for label files names(y_train) <- "activity" names(y_test) <- "activity" # merge files into one dataset train <- cbind(subject_train, y_train, X_train) test <- cbind(subject_test, y_test, X_test) rundata <- rbind(train, test) # SECOND STEP ############################################################### ##### Extracts only the measurements on the mean and standard deviation ##### ##### for each measurement. 
################################################# ############################################################################# # determine the columns containing "mean()" & "std()" meanstdcols <- grepl("mean\\(\\)", names(rundata)) | grepl("std\\(\\)", names(rundata)) # keep the subjectID and activity columns meanstdcols[1:2] <- TRUE # remove unnecessary columns rundata <- rundata[, meanstdcols] # THIRD AND FOURTH STEPS ############################################################### ##### Uses descriptive activity names to name the activities in the data set, and ###### ##### appropriately labels the data set with descriptive ############################### # describe and label activities rundata$activity <- factor(rundata$activity, labels=c("Walking", "Walking Upstairs", "Walking Downstairs", "Sitting", "Standing", "Laying")) ## STEP 5: Creates a second, independent tidy data set with the ## average of each variable for each activity and each subject. library(reshape2) # melt the data frame id_vars = c("activity", "subjectID") measure_vars = setdiff(colnames(rundata), id_vars) melted_data <- melt(rundata, id=id_vars, measure.vars=measure_vars) # recast the data frame dcast(melted_data, activity + subjectID ~ variable, mean) # FIFTH STEP ########################################################################### ##### From the data set in step 4, creates a second, independent tidy data set with ### ##### the average of each variable for each activity and each subject.################## tidy <- dcast(melted_data, activity + subjectID ~ variable, mean) write.csv(tidy, "tidy.csv", row.names=FALSE)
0815cb58e0ff8cf0bc64f9d4103ac3545dfc18ab
c1f6c0c5e8760cdf215ea00a033742d5d1a7c6c0
/users/oscarm524/exploring.R
599e1aede30ee98a344fb2382b71bf7007afd65f
[]
no_license
abhicc/dmc2016
0cc0e547ca6fac568138cefa7fab396d2d841c81
ab2d68168f4f40a6816fba9420780e995afee7a2
refs/heads/master
2021-04-06T01:55:57.398373
2016-08-10T18:18:13
2016-08-10T18:18:13
null
0
0
null
null
null
null
UTF-8
R
false
false
144
r
exploring.R
#################### ## Exploring Data ## #################### rm(list=ls()) data=read.csv(file="orders_class.txt",header=T,sep=";") head(data)
b4c4e040aedc56cb75b3a9b56278459a0b0a6878
7b2d324ed7b9985957d62eddb818869350463413
/man/compute_growth.Rd
366cf1ab0777d5f5bfefeffe089d38727cf73255
[ "MIT" ]
permissive
rudeboybert/forestecology
a0fe7168cb18841ec953e22342778542ed6ab9c3
cbae4d97155ee8d07f245ec17c082080b894b960
refs/heads/master
2023-05-23T20:55:54.097983
2021-10-21T12:27:39
2021-10-21T12:27:39
144,327,964
12
3
NOASSERTION
2021-10-02T14:06:24
2018-08-10T20:09:00
TeX
UTF-8
R
false
true
1,191
rd
compute_growth.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data_processing_functions.R \name{compute_growth} \alias{compute_growth} \title{Compute growth of trees} \usage{ compute_growth(census_1, census_2, id) } \arguments{ \item{census_1}{A data frame of the first census.} \item{census_2}{A data frame of the second (later) census} \item{id}{Name of variable that uniquely identifies each tree common to \code{census_1} and \code{census_2} allowing you to join/merge both data frames.} } \value{ An \code{sf} data frame with column \code{growth} giving the average annual growth in \code{dbh}. } \description{ Based on two tree censuses, compute the average annual growth in \code{dbh} for all trees. } \examples{ library(dplyr) library(stringr) growth_ex <- compute_growth( census_1 = census_1_ex \%>\% mutate(sp = to_any_case(sp) \%>\% factor()), census_2 = census_2_ex \%>\% filter(!str_detect(codes, "R")) \%>\% mutate(sp = to_any_case(sp) \%>\% factor()), id = "ID" ) } \seealso{ Other data processing functions: \code{\link{create_bayes_lm_data}()}, \code{\link{create_focal_vs_comp}()} } \concept{data processing functions}
5f0df1254970e3b133d4d5e86f8b2a124a0c50ec
e2701f16b4b5b9791e6c587716fe7436caf47d7e
/Network examples/network.R
555cb4403aea426b3fc1c9460c9b80a01f128970
[]
no_license
itaguas/Mathematical-modelization-of-disease-propagation
5bb11e08d98aa683c0d4bfe7edc7e06108abfc68
bdd0a3400a815169e30c56f606266ff4ad1cffdd
refs/heads/master
2023-01-04T13:46:48.859793
2020-10-02T10:44:17
2020-10-02T10:44:17
300,036,427
0
0
null
null
null
null
UTF-8
R
false
false
1,552
r
network.R
library(ggplot2) setwd ("D:/Users/Nacho/Desktop/TFM/conectivity/network") data <- read.csv("components_g.txt", header = T, sep = ";") data$i <- NULL head(data) probs <- unique(data$prob) mean <- c() for (i in probs) { mean <- c(mean, mean(subset(data, data$prob == i)$comp)) } data <- data.frame(probs, mean) data <- subset(data, data$probs <= 0.4) plot <- ggplot(data, aes(x = probs, y = mean)) + geom_line(size = 2, color = "blue") + theme_minimal() + labs(title = paste0(""), x = "R", y = "Number of nodes in the main component") + theme(plot.title = element_text (hjust = 0.5, size = 28, face = "bold"), axis.title.x = element_text (size = 45, face = "bold", vjust = 0), axis.title.y = element_text (size = 45, face = "bold", vjust = 1.5), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.ticks = element_line(colour = "black", size=1), panel.border = element_rect(colour = "black", fill=NA, size=1)) + theme(legend.title = element_text(size = 25), legend.text = element_text(size = 25), axis.text = element_text(size = 25)) + theme(axis.title.x = element_text(margin = margin(t = 0, r = 0, b = 8, l = 0))) + scale_x_continuous(breaks = seq(0, 0.4, by = 0.05)) + scale_y_continuous(breaks = seq(0, 100, by = 5)) mypath <- file.path("D:/Users/Nacho/Desktop/TFM/conectivity/network/geom_network.jpg") jpeg(mypath, width = 1100, height = 1100) print(plot) dev.off()
e554d095546e9ac6d249157de9d0ef586c9adf7c
6fb04083c9d4ee38349fc04f499a4bf83f6b32c9
/tests/next/test_operations.R
9411daae18dc39ae77bed52de599870181e780d4
[]
no_license
phani-srikar/AdapteR
39c6995853198f01d17a85ac60f319de47637f89
81c481df487f3cbb3d5d8b3787441ba1f8a96580
refs/heads/master
2020-08-09T10:33:28.096123
2017-09-07T09:39:25
2017-09-07T09:39:25
214,069,176
0
1
null
null
null
null
UTF-8
R
false
false
551
r
test_operations.R
## gk: these need fixing!!!! ## Testing M_Subtraction test_that("-: vector and matrix subtraction, integer and double", { M1 <- initF.FLMatrix(n=5,isSquare=TRUE) M2 <- FLMatrix("tblmatrixMulti", 5,"MATRIX_ID","ROW_ID","COL_ID","CELL_VAL") M2R <- as.matrix(M2) V1 <- as.FLVector(sample(1:100,10)) V1R <- as.vector(V1) V2 <- as.FLVector(sample(1:100,10)) V2R <- as.vector(V2) P1 <- initF.FLVector(n=10,isRowVec=TRUE) FLexpect_equal((V1-M2),V1R-M2R,check.attributes=FALSE) FLexpect_equal((V1/V2), V1R/V2R, check.attributes=FALSE) })
b56c01f458608474c8c96335e469d3f7f8ba76fb
b9e54258e540f0a0447045729bb4eecb0e490426
/Bölüm 19 - Makine Öğrenmesi II - Regresyon Modelleri/21.16 - Regresyon - Regresyon Uygulama VII - Mean Absolute Percentage Error (MAPE) .R
8991f5d10390a67d47104f7902364172815dc823
[]
no_license
sudedanisman/RUdemy
b36b67b9e875206a5424f33cc784fd13506f8d8d
28a9814706873f5d2e5985e4ba795354144d52c4
refs/heads/master
2023-01-30T01:54:26.321218
2020-12-14T11:36:00
2020-12-14T11:36:00
null
0
0
null
null
null
null
UTF-8
R
false
false
2,937
r
21.16 - Regresyon - Regresyon Uygulama VII - Mean Absolute Percentage Error (MAPE) .R
### Regresyon Uygulama 1 - Train ve Test Ayırma model_data <- kc_house_data[c("price" ,"sqft_living")] View(model_data) # random sample set.seed(145) sampleIndex <- sample(1:nrow(model_data) , size = 0.8*nrow(model_data)) trainSet <- model_data[sampleIndex , ] testSet <- model_data[-sampleIndex , ] nrow(trainSet) nrow(testSet) ### Train Veri Seti Incelemeleri ve Aykırı Değer Kontrolü cor(trainSet) hist(trainSet$price) hist(trainSet$sqft_living) library(ggplot2) fig <- ggplot(data = trainSet , aes(x = sqft_living , y = price)) + geom_point(size = 2) + ylab("Fiyatlar") + xlab("Salon Büyüklüğü") fig library(outliers) scores <- scores(trainSet , type = "z" , prob = 0.95) anyTrue <- apply(scores , 1 , FUN = function(x) { any(x) } ) index <- which(anyTrue) trainSetRemovedOut <- trainSet[-index , ] nrow(trainSet) nrow(trainSetRemovedOut) fig2 <- ggplot(data = trainSetRemovedOut , aes(x = sqft_living , y = price)) + geom_point(size = 2) + ylab("Fiyatlar") + xlab("Salon Büyüklüğü") fig2 cor(trainSetRemovedOut) # Kayıp gözlem kontrolü library(mice) md.pattern(trainSet) ### Model Oluşturma ve Değerlendirme model1 <- lm(price ~ sqft_living , data = trainSet) model2 <- lm(price ~ sqft_living , data = trainSetRemovedOut) summary(model1) summary(model2) AIC(model1) AIC(model2) BIC(model1) BIC(model2) ## Prediction model1Pred <- predict(model1, testSet) model2Pred <- predict(model2, testSet) model1PredData <- data.frame("actuals" = testSet$price , "predictions" = model1Pred) model2PredData <- data.frame("actuals" = testSet$price , "predictions" = model2Pred) View(model1PredData) View(model2PredData) model1Hata <- model1PredData$actuals - model1PredData$predictions model2Hata <- model2PredData$actuals - model2PredData$predictions mse1 <- sum(model1Hata^2) / nrow(model1PredData) mse2 <- sum(model2Hata^2) / nrow(model2PredData) sqrt(mse1);sqrt(mse2) ## R2 RMSE VE MAE install.packages("caret") library(caret) R2(model1PredData$predictions , model1PredData$actuals ) 
R2(model2PredData$predictions , model2PredData$actuals ) RMSE(model1PredData$predictions , model1PredData$actuals ) RMSE(model2PredData$predictions , model2PredData$actuals ) MAE(model1PredData$predictions , model1PredData$actuals ) MAE(model2PredData$predictions , model2PredData$actuals ) ## Min - Max Accuracy model1MinMaxAccur <- mean(apply(model1PredData , 1 , min) / apply(model1PredData , 1 , max) ) model1MinMaxAccur model2MinMaxAccur <- mean(apply(model2PredData , 1 , min) / apply(model2PredData , 1 , max) ) model2MinMaxAccur ## Mean Absolute Percentage Error (MAPE) model1MAPE <- mean( abs(model1PredData$actuals - model1PredData$predictions) / model1PredData$actuals) model2MAPE <- mean( abs(model2PredData$actuals - model2PredData$predictions) / model2PredData$actuals) model1MAPE;model2MAPE
6c9f16808367a426cc6aa34f3160a4d5d5ee663e
d859174ad3cb31ab87088437cd1f0411a9d7449b
/autonomics.support/man/is_max.Rd
3215e50390cde20f45a3f68eae0832cb68ccd438
[]
no_license
bhagwataditya/autonomics0
97c73d0a809aea5b4c9ef2bf3f886614eceb7a3c
c7ca7b69161e5181409c6b1ebcbeede4afde9974
refs/heads/master
2023-02-24T21:33:02.717621
2021-01-29T16:30:54
2021-01-29T16:30:54
133,491,102
0
0
null
null
null
null
UTF-8
R
false
true
304
rd
is_max.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/extract_max.R \name{is_max} \alias{is_max} \title{Is maximal} \usage{ is_max(x) } \arguments{ \item{x}{numeric vector} } \value{ logical vector } \description{ Is maximal } \examples{ x <- c(A=1,B=3,C=2,D=3, E=NA) is_max(x) }
ad9d4c1de15394155b42fe7a1b7d5c1e6153f54d
e5604981a0ae5102f33e58218946e625e1e25fd3
/R/sp_tidiers.R
5e13c82b334aed96d9577e1c35203f00bd06dd89
[]
no_license
talgalili/broom
d77633d58ba81ddae2e65328fc487b1943e91020
8bb9902b62a566ec2b7a4c37a36c32ef4a6ecfb6
refs/heads/master
2021-01-12T09:19:56.804074
2018-06-14T18:40:33
2018-06-14T18:40:33
81,334,167
0
1
null
2017-02-08T13:44:59
2017-02-08T13:44:59
null
UTF-8
R
false
false
2,751
r
sp_tidiers.R
#' tidying methods for classes from the sp package. #' #' Tidy classes from the sp package to allow them to be plotted using ggplot2. #' To figure out the correct variable name for region, inspect #' `as.data.frame(x)`. #' #' These functions originated in the ggplot2 package as "fortify" functions. #' #' @param x `SpatialPolygonsDataFrame` to convert into a dataframe. #' @param region name of variable used to split up regions #' @param ... not used by this method #' #' @name sp_tidiers #' #' @examples #' if (require("maptools")) { #' sids <- system.file("shapes/sids.shp", package="maptools") #' nc1 <- readShapePoly(sids, #' proj4string = CRS("+proj=longlat +datum=NAD27")) #' nc1_df <- tidy(nc1) #' } #' #' @importFrom plyr ldply NULL #' @rdname sp_tidiers #' @export #' @method tidy SpatialPolygonsDataFrame tidy.SpatialPolygonsDataFrame <- function(x, region = NULL, ...) { attr <- as.data.frame(x) # If not specified, split into regions based on polygons if (is.null(region)) { coords <- ldply(x@polygons, tidy) message("Regions defined for each Polygons") } else { cp <- sp::polygons(x) # Union together all polygons that make up a region unioned <- maptools::unionSpatialPolygons(cp, attr[, region]) coords <- tidy(unioned) coords$order <- 1:nrow(coords) } coords } #' @rdname sp_tidiers #' @export #' @method tidy SpatialPolygons tidy.SpatialPolygons <- function(x, ...) { ldply(x@polygons, tidy) } #' @rdname sp_tidiers #' @export #' @method tidy Polygons tidy.Polygons <- function(x, ...) { subpolys <- x@Polygons pieces <- ldply(seq_along(subpolys), function(i) { df <- tidy(subpolys[[x@plotOrder[i]]]) df$piece <- i df }) within(pieces, { order <- 1:nrow(pieces) id <- x@ID piece <- factor(piece) group <- interaction(id, piece) }) } #' @rdname sp_tidiers #' @export #' @method tidy Polygon tidy.Polygon <- function(x, ...) 
{ df <- as.data.frame(x@coords) names(df) <- c("long", "lat") df$order <- 1:nrow(df) df$hole <- x@hole df } #' @rdname sp_tidiers #' @export #' @method tidy SpatialLinesDataFrame tidy.SpatialLinesDataFrame <- function(x, ...) { ldply(x@lines, tidy) } #' @rdname sp_tidiers #' @export #' @method tidy Lines tidy.Lines <- function(x, ...) { lines <- x@Lines pieces <- ldply(seq_along(lines), function(i) { df <- tidy(lines[[i]]) df$piece <- i df }) within(pieces, { order <- 1:nrow(pieces) id <- x@ID piece <- factor(piece) group <- interaction(id, piece) }) } #' @rdname sp_tidiers #' @export #' @method tidy Line tidy.Line <- function(x, ...) { df <- as.data.frame(x@coords) names(df) <- c("long", "lat") df$order <- 1:nrow(df) unrowname(df) }
4c477ea6c0a48f7b8c3b712d5070fda795b9dc37
7f28759b8f7d4e2e4f0d00db8a051aecb5aa1357
/R/Preliminary_analysis/Control_workflow.R
de1a085859014a44f3fea26a144b809a6b9aa946
[]
no_license
DataFusion18/TreeRings
02b077d7ed2a5980ae35be7c04a60c28f0ba3928
e57f6ee4d774d2bda943f009b148e6e054e6c1d1
refs/heads/master
2023-03-29T02:44:34.186155
2021-03-31T00:01:13
2021-03-31T00:01:13
null
0
0
null
null
null
null
UTF-8
R
false
false
29,493
r
Control_workflow.R
# Control_workflow.R ---------------------------------------------------------
# Runs the bootstrapped climate correlations over all sites and all wood
# types (WW = whole ring, EW = earlywood, LW = latewood), then assembles
# the monthly-correlation summary figures from the saved PRISM bootstrap
# output in data/BootCors/PRISM/.
#
# Side effects: sources R/corr_P.R and R/corr_PRISM_data.R (which define
# clim.corrs() and clim.PRISM.corrs()), and writes PNG figures under
# outputs/growth_model/paper_figures/.

library(dplR)
# ggplot(), plot_grid() and get_legend() are used below; load their
# packages explicitly instead of relying on the interactive session.
library(ggplot2)
library(cowplot)

# ---- chronology loading ----------------------------------------------------

# Read the ring-width series for every site for one wood type.
#
# wood         : "WW", "EW" or "LW"; lower-cased into the rwl file suffix.
# include_avon : also read the Avon site (its rwl is only read for WW).
#
# Returns a named list of rwl data frames.
#
# FIX: the original copy-pasted loaders read GLL2 from GLL1's file, GLL3
# from GLL2's file and GLL4 from GLL3's file (and, in one dead copy,
# Townsend's earlywood from the whole-wood file). File names are corrected
# below -- confirm that GLL4ew.rwl / GLL4lw.rwl exist on disk.
read_site_rwls <- function(wood, include_avon = FALSE) {
  sfx <- tolower(wood)
  rwl_path <- function(code) paste0("cleanrwl/", code, sfx, ".rwl")
  rwls <- list(
    Bonanza  = read.tucson(rwl_path("BON"), header = TRUE),
    Hickory  = read.tucson(rwl_path("HIC"), header = FALSE),
    StCroix  = read.tucson(rwl_path("STC")),   # Saint Croix savanna, MN
    Townsend = read.tucson(rwl_path("TOW"), header = TRUE),  # Townsend woods
    Coral    = read.tucson(rwl_path("COR")),
    Uncas    = read.tucson(rwl_path("UNC")),
    Glacial  = read.tucson(rwl_path("GLA")),
    Englund  = read.tucson(rwl_path("ENG")),
    Mound    = read.tucson(rwl_path("MOU")),
    GLL1     = read.tucson(rwl_path("GLL1")),
    GLL2     = read.tucson(rwl_path("GLL2")),
    GLL3     = read.tucson(rwl_path("GLL3")),
    GLL4     = read.tucson(rwl_path("GLL4")),
    PVC      = read.tucson(rwl_path("PVC"))    # Pleasant Valley Conservancy
  )
  if (include_avon) {
    rwls$Avon <- read.tucson(rwl_path("AVO"))
  }
  rwls
}

woods <- c("WW", "EW", "LW")

# ---- bootstrapped correlations, station climate ----------------------------

source("R/corr_P.R")  # defines clim.corrs()

site.codes <- c("TOW", "HIC", "BON", "STC", "COR", "UNC", "GLA", "ENG",
                "MOU", "GL1", "GL2", "GL3", "GL4", "PVC")
for (wood in woods) {
  # `wood` is assigned at top level because the sourced correlation code
  # may read it as a global (as in the original script).
  rwls <- read_site_rwls(wood)
  sites <- list(rwls$Townsend, rwls$Hickory, rwls$Bonanza, rwls$StCroix,
                rwls$Coral, rwls$Uncas, rwls$Glacial, rwls$Englund,
                rwls$Mound, rwls$GLL1, rwls$GLL2, rwls$GLL3, rwls$GLL4,
                rwls$PVC)
  for (s in seq_along(sites)) {
    clim.corrs(sites[[s]], site.codes[s])
  }
}

# ---- bootstrapped correlations, PRISM climate ------------------------------

source("R/corr_PRISM_data.R")  # defines clim.PRISM.corrs()

site.codes <- c("TOW", "HIC", "BON", "STC", "COR", "UNC", "GLA", "ENG",
                "MOU", "GL1", "GL2", "GL3", "GL4", "AVO")
for (wood in woods) {
  # Avon is only read for WW (the first pass); the EW/LW passes reuse the
  # whole-wood Avon series, exactly as in the original script.
  rwls <- read_site_rwls(wood, include_avon = (wood == "WW"))
  if (wood == "WW") {
    Avon <- rwls$Avon
  }
  sites <- list(rwls$Townsend, rwls$Hickory, rwls$Bonanza, rwls$StCroix,
                rwls$Coral, rwls$Uncas, rwls$Glacial, rwls$Englund,
                rwls$Mound, rwls$GLL1, rwls$GLL2, rwls$GLL3, rwls$GLL4,
                Avon)
  # NOTE(review): the original loop ran `s in 11:length(sites)`, i.e. only
  # GL2, GL3, GL4 and Avon. This looks like a debugging leftover but is
  # preserved here -- confirm whether all sites should be rerun.
  for (s in 11:length(sites)) {
    clim.PRISM.corrs(sites[[s]], site.codes[s])
  }
}

# ---- summary figures of monthly PRISM correlations -------------------------

# Sites shown in the figures, their display order, and their fill colours.
plot.sites  <- c("AVO", "BON", "ENG", "GLA", "GL1", "GL2", "GL3", "MOU", "UNC")
site.levels <- c("BON", "GL1", "GL2", "GL3", "ENG", "UNC", "AVO", "MOU", "GLA")
site.colors <- c(BON = "#d73027", GL1 = "#f46d43", GL2 = "#fdae61",
                 GL3 = "#fee090", ENG = "#ffffbf", UNC = "#e0f3f8",
                 AVO = "#abd9e9", MOU = "#74add1", GLA = "#4575b4")

# Month labels: previous-year Jan..Dec ("pJan".."pDec") then current-year
# Jan..Dec, matching rows 1..24 of the saved correlation CSVs.
month.labels <- c(paste0("p", month.abb), month.abb)

# Read the saved whole-wood bootstrap correlations for one climate metric
# ("Precip", "tmax", "VPDmax" or "BAL") for every site, restrict to the
# plotted sites, and attach ordered month-label factors.
# `wateryear = TRUE` appends the 25th "wateryear" row used for precipitation.
prep_cors <- function(metric, wateryear = FALSE) {
  labs <- if (wateryear) c(month.labels, "wateryear") else month.labels
  per.site <- lapply(site.codes, function(code) {
    cors <- read.csv(paste0("data/BootCors/PRISM/", code, "-", "WW",
                            metric, "cor.csv"))
    cors$site <- code
    cors
  })
  cors <- do.call(rbind, per.site)
  cors <- cors[cors$site %in% plot.sites, ]
  cors$site <- factor(cors$site, levels = site.levels)
  cors <- merge(cors, data.frame(month = seq_along(labs), mo.clim = labs),
                by = "month")
  cors$mo.clim <- factor(cors$mo.clim, levels = labs)
  cors
}

# Dodged bar chart of monthly correlations for all plotted sites.
# `errorbars` adds the bootstrap CI whiskers; `base_size = NULL` keeps the
# ggplot2 default text size (used for the precipitation panels, matching
# the original theme_bw() calls without base_size).
cor_barplot <- function(dat, xlab, errorbars = TRUE, base_size = 12) {
  if (errorbars) {
    p <- ggplot(dat, aes(x = mo.clim, y = cor, ymin = ci.min, ymax = ci.max,
                         fill = site)) +
      geom_bar(position = "dodge", stat = "identity") +
      geom_errorbar(position = position_dodge(), colour = "grey")
  } else {
    # ymin/ymax deliberately not mapped so the y-axis range reflects the
    # bars alone, as in the original no-CI plots.
    p <- ggplot(dat, aes(x = mo.clim, y = cor, fill = site)) +
      geom_bar(position = "dodge", stat = "identity")
  }
  thm <- if (is.null(base_size)) theme_bw() else theme_bw(base_size = base_size)
  p +
    scale_fill_manual(values = site.colors) +
    thm +
    theme(panel.grid = element_blank(),
          axis.text.x = element_text(angle = 45, hjust = 1)) +
    ylab("Correlation Coefficient") +
    xlab(xlab)
}

# Write a ggplot/cowplot object to a 300-dpi PNG.
save_png <- function(path, plot, height = 4, width = 12) {
  png(height = height, width = width, units = "in", res = 300, path)
  print(plot)
  dev.off()
}

fig.dir <- "outputs/growth_model/paper_figures/"

# Precipitation (includes the water-year total as a 25th bar).
# FIX: the axis-label typo "Precipiation" is corrected.
precip.dat <- prep_cors("Precip", wateryear = TRUE)
precipitation.cors <- cor_barplot(precip.dat, "Precipitation",
                                  base_size = NULL)
save_png(paste0(fig.dir, "full_PRISM_wateryr_all_sites_correlation_bootci.png"),
         precipitation.cors)
precipitation.cors.noboot <- cor_barplot(precip.dat, "Precipitation",
                                         errorbars = FALSE, base_size = NULL)
save_png(paste0(fig.dir, "full_PRISM_wateryr_all_sites_correlation.png"),
         precipitation.cors.noboot)

# Maximum temperature.
tmax.dat <- prep_cors("tmax")
tmax.cors <- cor_barplot(tmax.dat, "Maximum Temperature")
save_png(paste0(fig.dir, "full_PRISM_tmax_all_sites_correlation_bootci.png"),
         tmax.cors)
tmax.cors.noboot <- cor_barplot(tmax.dat, "Maximum Temperature",
                                errorbars = FALSE)
save_png(paste0(fig.dir, "full_PRISM_tmax_all_sites_correlation.png"),
         tmax.cors.noboot)

# Maximum vapour-pressure deficit.
VPDmax.dat <- prep_cors("VPDmax")
VPDmax.cors <- cor_barplot(VPDmax.dat, "Maximum VPD")
save_png(paste0(fig.dir, "full_PRISM_VPDmax_all_sites_correlation_bootci.png"),
         VPDmax.cors)
VPDmax.cors.noboot <- cor_barplot(VPDmax.dat, "Maximum VPD",
                                  errorbars = FALSE)
save_png(paste0(fig.dir, "full_PRISM_VPDmax_all_sites_correlation.png"),
         VPDmax.cors.noboot)

site.legend <- get_legend(precipitation.cors.noboot)

# Three-panel combined figures (precip / tmax / VPDmax). As in the original
# script, these file names are overwritten by the four-panel versions below.
save_png(paste0(fig.dir, "full_PRISM_3clim_all_sites_correlation_bootci.png"),
         plot_grid(plot_grid(precipitation.cors, tmax.cors, VPDmax.cors,
                             ncol = 1, labels = "AUTO"),
                   site.legend, ncol = 2, rel_widths = c(1, 0.05)),
         height = 12, width = 15)
save_png(paste0(fig.dir, "full_PRISM_3clim_all_sites_correlation.png"),
         plot_grid(plot_grid(precipitation.cors.noboot +
                               theme(legend.position = "none"),
                             tmax.cors.noboot +
                               theme(legend.position = "none"),
                             VPDmax.cors.noboot +
                               theme(legend.position = "none"),
                             ncol = 1, labels = "AUTO"),
                   site.legend, ncol = 2, rel_widths = c(1, 0.05)),
         height = 12, width = 15)

# Precipitation minus potential evapotranspiration (climatic water balance).
BAL.dat <- prep_cors("BAL")
BAL.cors <- cor_barplot(BAL.dat,
                        "Precipitation - Potential Evapotranspiration")
save_png(paste0(fig.dir, "full_PRISM_BAL_all_sites_correlation_bootci.png"),
         BAL.cors)
BAL.cors.noboot <- cor_barplot(BAL.dat,
                               "Precipitation - Potential Evapotranspiration",
                               errorbars = FALSE)
save_png(paste0(fig.dir, "full_PRISM_BAL_all_sites_correlation.png"),
         BAL.cors.noboot)

site.legend <- get_legend(precipitation.cors.noboot)

# Four-panel combined figures (precip / tmax / BAL / VPDmax); these reuse
# and overwrite the "3clim" file names, as in the original script.
save_png(paste0(fig.dir, "full_PRISM_3clim_all_sites_correlation_bootci.png"),
         plot_grid(plot_grid(precipitation.cors, tmax.cors, BAL.cors,
                             VPDmax.cors, ncol = 1, labels = "AUTO"),
                   site.legend, ncol = 2, rel_widths = c(1, 0.05)),
         height = 16, width = 15)
save_png(paste0(fig.dir, "full_PRISM_3clim_all_sites_correlation.png"),
         plot_grid(plot_grid(precipitation.cors.noboot +
                               theme(legend.position = "none"),
                             tmax.cors.noboot +
                               theme(legend.position = "none"),
                             BAL.cors.noboot +
                               theme(legend.position = "none"),
                             VPDmax.cors.noboot +
                               theme(legend.position = "none"),
                             ncol = 1, labels = "AUTO"),
                   site.legend, ncol = 2, rel_widths = c(1, 0.05)),
         height = 16, width = 15)

# NOTE(review): two stray exploratory read.csv() calls (`vpdmaxcors` and a
# hard-coded "COR"/"LW" `precipcors`) that depended on loop leftovers and
# were never used have been removed, along with the dead duplicate loader
# block that preceded the first loop.
2447031c38b20f3f3ba3bb293797ecb8b496df3c
8e66d17601c1435da0f556d610fb238e8b4416bf
/cachematrix.R
309b428ce4464e9b9231f25e7d10df6b21d941b2
[]
no_license
pisharod/ProgrammingAssignment2
73a4f6495bd5a29167c88c57b5686b5a097711d7
ef54ecb0032bc07bd7324b4473f45fc2da3572f9
refs/heads/master
2020-12-25T08:38:15.149097
2015-03-22T18:44:16
2015-03-22T18:44:16
32,365,502
0
0
null
2015-03-17T02:06:29
2015-03-17T02:06:29
null
UTF-8
R
false
false
4,136
r
cachematrix.R
## cachematrix.R --------------------------------------------------------------
## A memoised matrix inverse: makeCacheMatrix() wraps a matrix together with
## a cache slot for its inverse, and cacheSolve() returns the inverse,
## reusing the cached value whenever it has already been computed so that
## solve() is called at most once per wrapped matrix.

## Wrap `normalMatrix` in a closure-based cache object.
##
## Returns a list of three accessor functions:
##   getData()             - the wrapped matrix
##   setInverseMatrix(inv) - store an inverse in the cache
##   getInverseMatrix()    - the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(normalMatrix = matrix()) {
  # Cache slot, private to this closure; NULL means "not computed yet".
  cached_inverse <- NULL

  # Accessor: the matrix supplied at construction time.
  getData <- function() {
    normalMatrix
  }

  # Mutator: remember an inverse for later calls (<<- writes the
  # enclosing environment's cache slot).
  setInverseMatrix <- function(sentInversedMatrix) {
    cached_inverse <<- sentInversedMatrix
  }

  # Accessor: whatever is currently cached (possibly NULL).
  getInverseMatrix <- function() {
    cached_inverse
  }

  list(
    getData = getData,
    setInverseMatrix = setInverseMatrix,
    getInverseMatrix = getInverseMatrix
  )
}

## Return the inverse of the matrix wrapped by `specialMatrix` (an object
## built by makeCacheMatrix()). The first call computes the inverse with
## solve() and stores it; subsequent calls return the stored copy and
## announce the cache hit with a message.
cacheSolve <- function(specialMatrix, ...) {
  cached <- specialMatrix$getInverseMatrix()

  # Cache hit: nothing to compute.
  if (!is.null(cached)) {
    message("got from cache")
    return(cached)
  }

  # Cache miss: invert the wrapped matrix and remember the result.
  fresh_inverse <- solve(specialMatrix$getData())
  specialMatrix$setInverseMatrix(fresh_inverse)
  fresh_inverse
}
92fe4e305e5ff4a40ae0b4554e49551bb24f8764
f671065f3668f945bd395afc00b1032437ea5bcf
/R/meta.R
00608d2dc8bece65e486313819547bf2bf3f8493
[ "MIT" ]
permissive
kashenfelter/vueR
925ce23b05541668a4abc43066305598b537a03c
3f2a128c69470ae25a09d387c71bba2299635f66
refs/heads/master
2021-07-10T21:38:31.396200
2017-10-11T02:21:26
2017-10-11T02:21:26
null
0
0
null
null
null
null
UTF-8
R
false
false
56
r
meta.R
#' Report the version of the bundled Vue.js library.
#'
#' @return A character scalar with the Vue.js version string.
#' @keywords internal
vue_version <- function() {
  "2.4.4"
}
ec6faa80304c82aad3686c97d000b664e3db95ae
031c7679665929846b7093738a098e31c009c5f7
/IH_Mercy_Analysis/L1_MASTER_ID.R
85389a919487ad8b3453cf6a8552de1b74236966
[]
no_license
mrp011/interactive_health
04b1133af8a4e5456097875207c8a5c28ac23ecb
55d441cb53487a9d282dde5fd5e10d94394b8f6d
refs/heads/master
2021-01-22T01:11:00.752129
2017-10-20T19:22:20
2017-10-20T19:22:20
102,205,781
0
2
null
null
null
null
UTF-8
R
false
false
11,173
r
L1_MASTER_ID.R
############ Level 1 ############# ### Reads in raw census information to create master ID's ### Input Tables: raw_census ### risky_zips ### Output Tables: id_bridge ### human_flags ### Author: Michelle Powell ### Sourced By: master_id() ########################################### ########################################### ##### Column Parameters ##### census_last_name <- "LAST" census_first_name <- 'FIRST' census_sex <- "Gender" census_dob <- "DOB" census_zip <- "Supplemental Zip Code" census_state <- "Supplemental State" census_city <- "Supplemental City" census_address_1 <- "Supplemental Address Line 1" census_address_2 <- "Supplemental Address Line 2" census_start_date <- "Start Date" census_end_date <- "Stop Date" census_employee_spouse <- "Relationship" census_insurer_id <- "Mercy ID" census_IH_ID <- "IH PID" ##### Functions ##### format_sex<-function(x, male_start = ".*M.*",female_start = ".*F.*",ignore.case = FALSE){ y<-as.character(x) y<-gsub(pattern = male_start, replacement = "0", x = y, ignore.case = ignore.case) y<-gsub(pattern = female_start, replacement = "1", x = y, ignore.case = ignore.case) y<-as.numeric(y) return(y) } format_address <- function(address){ shortcuts <- read_csv(paste0(directory, 'Data/Fixed_Tables/postal_shortcuts.csv')) %>% filter(Long != Abbreviation) address <- gsub("[[:punct:]]", "", gsub("-", " ", address)) address <- gsub(" apt ", " ", gsub(" unit ", " ", address)) for(i in 1:length(shortcuts$Long)){ address <- gsub(paste0(" ",tolower(shortcuts$Long[i])," "), paste0(" ",tolower(shortcuts$Abbreviation[i])," "), address, ignore.case = TRUE) address <- gsub(paste0(" ",tolower(shortcuts$Long[i]),"$"), paste0(" ",tolower(shortcuts$Abbreviation[i])), address, ignore.case = TRUE) } return(address) } source('../Pipeline/L2_IH_DEID.R') ##### Read and Trim Raw Data ##### census_cols<-c(census_last_name, census_first_name, census_sex, census_dob, census_zip, census_state, census_city, census_address_1, census_address_2, 
census_start_date, census_end_date, census_employee_spouse, census_insurer_id, census_IH_ID) new_census_cols <- c("last", "first", "sex", "dob", "zip", "state", "city", "address_1", "address_2", "cov_start_dt", "cov_end_dt", 'emp_spouse', 'insurer_id', 'ih_id') census <- read_csv(paste0(directory, "Data/Raw/raw_census_2.csv"), col_types = cols(.default = "c")) %>% full_join(read_csv(paste0(directory, "Data/Raw/raw_census.csv"), col_types = cols(.default = "c")), by = c("Mercy ID" = "Lawson ID")) %>% distinct() census <- census %>% mutate("IH PID" = coalesce(`IH -PID`, `IH PID`), "LAST" = coalesce(LAST.y, LAST.x), 'FIRST' = coalesce(FIRST.y, FIRST.x), 'MIDDLE' = coalesce(MIDDLE.y, MIDDLE.x), 'Benefit Date 1' = coalesce(`Benefit Date 1.y`, `Benefit Date 1.x`), 'Cov Opt Desc' = coalesce(`Cov Opt Desc.y`, `Cov Opt Desc.x`), 'Supplemental Address Line 1' = coalesce(`Supplemental Address Line 1.y`, `Supplemental Address Line 1.x`), 'Supplemental Address Line 2' = coalesce(`Supplemental Address Line 2.y`, `Supplemental Address Line 2.x`), 'Supplemental City' = coalesce(`Supplemental City.y`, `Supplemental City.x`), 'Supplemental State' = coalesce(`Supplemental State.y`, `Supplemental State.x`), 'Supplemental Zip Code' = coalesce(`Supplemental Zip Code.y`, `Supplemental Zip Code.x`)) %>% select(`Mercy ID`, `IH PID`, `LAST`, `FIRST`, `MIDDLE`, `Relationship`, `DOB`, Gender, `Benefit Date 1`, `Cov Opt Desc`, `Supplemental Address Line 1`, `Supplemental Address Line 2`, `Supplemental City`, `Supplemental State`, `Supplemental Zip Code`, `Status`, `Total FTE`, `Start Date`, `Stop Date`, `Cov Opt`, `Dependent Count`, PL, `Process Level Name`, Dept, `Department Description`, `Job Code`, `Job Code Description`, `Location Code`, `Location Description`, `Supplemental County`) census <- census[match(census_cols, colnames(census))] colnames(census) <- new_census_cols risk_zips<-read_csv(paste0(directory, "Data/Fixed_Tables/risky_zips.csv"), col_types = 'c')$ZipCode rm('census_IH_ID', 
"census_last_name", "census_first_name", "census_sex", "census_dob", "census_zip", "census_state", "census_city", "census_address_1", "census_address_2", "census_start_date", "census_end_date", "census_employee_spouse", 'census_insurer_id') ##### Format Data ##### census_full <- census %>% transmute('insurer_id' = insurer_id, 'ih_id' = ih_id, 'last' = gsub("[[:punct:]]", "", gsub("-", " ", tolower(last))), 'first' = gsub("[[:punct:]]", "", gsub("-", " ", tolower(first))), 'sex' = format_sex(sex, male_start = 'M', female_start = 'F'), 'dob' = mdy(dob), 'address' = ifelse(is.na(address_2), tolower(address_1), do.call(paste, list(tolower(address_1), tolower(address_2)))), 'city' = tolower(city), 'state' = tolower(state), 'zip' = str_sub(zip, 1, 5), 'cov_start_dt' = mdy(cov_start_dt), 'cov_end_dt' = mdy(cov_end_dt), 'emp_spouse' = case_when(.$emp_spouse == "X" ~ "e", .$emp_spouse == "S" ~ "s")) %>% distinct() %>% filter(!is.na(emp_spouse)) %>% mutate(unique_id = do.call(paste0, list(insurer_id, dob))) %>% mutate(address = format_address(address)) %>% filter(!is.na(cov_start_dt), !is.na(cov_end_dt), !is.na(dob)) ##### Create Master_ID's With Distinct Individuals ##### census_master_id <- census_full %>% distinct(unique_id) %>% arrange(unique_id) %>% mutate(master_id = seq(from = 999999 - trunc(dim(.)[1]*trunc(trunc(899999/dim(.)[1])/2)), by = trunc(trunc(899999/dim(.)[1])/2), length.out = dim(.)[1])) id_bridge <- census_full %>% left_join(census_master_id) %>% distinct(master_id, unique_id, insurer_id, ih_id, last, first, sex, dob, address, city, state, zip) id_bridge_2 <- id_bridge %>% rename('master_id_2' = master_id, 'unique_id_2' = unique_id, 'insurer_id_2' = insurer_id) id_dedupe <- ih_deidentify(data = id_bridge_2, census_id_bridge = id_bridge, data_id = 'master_id_2', census_id = 'master_id', id_match = FALSE, pii_match = TRUE, fuzzy_match = FALSE, return_id_bridge = TRUE) %>% filter(master_id != master_id_2) %>% arrange(master_id) %>% mutate(master_id_a = 
pmax(master_id, master_id_2), master_id_b = pmin(master_id, master_id_2)) %>% distinct(master_id_a, master_id_b) census_master_id$master_id[match(id_dedupe$master_id_a, census_master_id$master_id)] <- id_dedupe$master_id_b ##### Build id_bridge for matching to other data sources ##### id_bridge <- census_full %>% left_join(census_master_id) %>% ungroup() %>% distinct(master_id, unique_id, insurer_id, ih_id, last, first, sex, dob, address, city, state, zip) ##### Build Human Flags Table ##### # Determine Who has been - and will therefore always be considered - and Employee # employee_flag <- census_full %>% left_join(census_master_id) %>% filter(emp_spouse == 'e') %>% distinct(master_id) # assign sex and zip to latest version of data, determine continuity of coverage # human_flags <- census_full %>% left_join(census_master_id) %>% distinct(master_id, dob, cov_start_dt, cov_end_dt, sex, zip) %>% group_by(master_id) %>% mutate(sex = last(sex, order_by = order(cov_start_dt)), zip = last(zip, order_by = order(cov_start_dt))) %>% ungroup() %>% distinct() %>% group_by(master_id) %>% arrange(master_id, cov_start_dt) %>% mutate(continuous = (master_id == lag(master_id, 1) & cov_start_dt == lag(cov_end_dt, 1))) %>% mutate(gap = ifelse(is.na(continuous), FALSE, !continuous)) %>% mutate(continuous = ifelse(is.na(continuous), FALSE, continuous)) %>% ungroup() # filter out individuals with gaps in coverage for loops # human_flags_gaps <- human_flags %>% filter(gap) %>% distinct() %>% select(master_id, 'gap_date' = cov_start_dt) human_flags_gappers <- human_flags %>% filter(master_id %in% human_flags_gaps$master_id) %>% select(-continuous, -gap) # coalesce continuous coverage records for complete start and end dates # human_flags <- human_flags %>% anti_join(human_flags_gappers) %>% group_by(master_id) %>% arrange(master_id, cov_start_dt) %>% mutate(cov_start_dt = min(cov_start_dt), cov_end_dt = max(cov_end_dt)) %>% select(-continuous, -gap) %>% ungroup() %>% distinct() # loop 
through individuals with gaps in coverage and assign appropriate dates # for(id in human_flags_gaps$master_id){ gapper <- human_flags_gappers %>% filter(master_id == id) gap_dates <- c(min(gapper$cov_start_dt), human_flags_gaps$gap_date[human_flags_gaps$master_id == id]) gapper$cov_start_dt <- gap_dates[findInterval(gapper$cov_start_dt, gap_dates)] gapper <- gapper %>% group_by(cov_start_dt) %>% mutate(cov_end_dt = max(cov_end_dt)) %>% ungroup() %>% distinct() human_flags <- bind_rows(human_flags, gapper) } # create flags # human_flags <- human_flags %>% distinct() %>% mutate('age' = round(interval(dob, analysis_date)/duration(1,'years'))) %>% mutate('geo_risk' = as.numeric(zip %in% risk_zips), 'emp_flag' = as.numeric(master_id %in% employee_flag$master_id), 'age_45' = as.numeric(age >= 45), 'age_18.45' = as.numeric(age >= 18 & age < 45)) %>% select(master_id, sex, geo_risk, emp_flag, cov_start_dt, cov_end_dt, age_45, age_18.45) %>% distinct() ##### Write Data ##### write_csv(id_bridge, paste0(directory, "Data/Sub_Tables/id_bridge.csv")) write_csv(human_flags, paste0(directory, "Data/Sub_Tables/human_flags.csv")) print("human_flags written to Data/Sub_Tables") print("id_bridge written to Data/Sub_Tables") human_flags_tab <- human_flags %>% ungroup() rm("human_flags", "id_bridge", "risk_zips", "census", "census_cols", "new_census_cols", 'census_full', 'census_master_id', 'census_rows', 'employee_flag', 'gapper', 'gap_dates', 'id', "format_sex", "format_address", 'human_flags_gappers', 'human_flags_gaps', 'id_bridge_2', 'id_dedupe')
ee891c8ac48f70e4e8576e197433d9e47b72811f
db357ce293c701f679c90e0733c692f8c0518ec2
/man/score_aus.Rd
6d847c91a947eb962ebc8fab8e41ed459f1b910e
[]
no_license
d-bohn/facereadeR
2daa98519a7f921f268772cc8e589e08a58a4b35
2824408902c5d7159dfb21c367668428a92594e3
refs/heads/master
2021-08-28T21:06:17.356219
2021-08-19T05:36:33
2021-08-19T05:36:33
56,341,949
0
0
null
null
null
null
UTF-8
R
false
true
324
rd
score_aus.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/read_clean.R \name{score_aus} \alias{score_aus} \title{Score Action Units numerically} \usage{ score_aus(data) } \arguments{ \item{data}{} } \value{ Dataframe with AUs recoded into numeric values. } \description{ Score Action Units numerically }
15d42c5b3c3e0c2daf55ae0cad1893f0d1710b4e
53204f71c6bc42f396a6a3279f09cac73c97fb0a
/finalReport/docksideMonitoring2019/Table1_subset_for_Carrie.R
2739a74ac8c01506ec18da436b3996b1b9773bd1
[]
no_license
Oyster-Recovery-Partnership/Ereporting
21f503d7addb3703ca2688c8cb0ff61f18da3d10
a3a09a8cd47acacfe2ad796d66c94bc7dd789bed
refs/heads/master
2022-05-13T01:29:30.661972
2022-03-18T21:16:19
2022-03-18T21:16:19
209,818,783
0
1
null
null
null
null
UTF-8
R
false
false
17,830
r
Table1_subset_for_Carrie.R
# -------------------- # # This script is for the roving monitor final report tables and figures # -------------------- # # -------------------- # # load packages # -------------------- # require(dplyr) require(ggplot2) library(readxl) library(tidyr) library(lubridate) library(utf8) #unsure whats up with this library(htmlTable) library(tableHTML) # -------------------- # # -------------------- # # set directories # -------------------- # dir.in = "//orp-dc01/Users/ORP Operations/Fisheries Program/E-Reporting/4.0 Pilot projects/Data/FACTSdata/rawdata/" dir.in2 = "//orp-dc01/Users/ORP Operations/Fisheries Program/E-Reporting/4.0 Pilot projects/Pilot Projects/Roving Monitor Pilot/Documentation/Resources for RMs/RM scheduling and priority lists/" dir.in3 = "//orp-dc01/Users/ORP Operations/Fisheries Program/E-Reporting/4.0 Pilot projects/Data/temp/" dir.out = "//orp-dc01/Users/ORP Operations/Fisheries Program/E-Reporting/4.0 Pilot projects/Data/FACTSdata/output/final_report_2019/" # -------------------- # # -------------------- # # load data # -------------------- # # regions source("U:/ORP Operations/Fisheries Program/E-Reporting/4.0 Pilot projects/Pilot Projects/Roving Monitor Pilot/code/importRegions.R") # load fishing data RM <- read_excel(paste(dir.in,"FACTSMD-684.xlsx", sep=""), sheet = 1) WM <- read_excel(paste(dir.in,"FACTSMD-684.xlsx", sep=""), sheet = 2) # rename names(RM) = c("TripID","DNRID","MonitorReportNum","AssignedMonitor", "ReportedBy","SpeciesGrade","Quantity","Unit", "Count", "Comments","Result","Scheduled","CrewCount","Time") names(WM) = c("TripID","DNRID","WatermenName","License","Date", "SH","EH","SHSubmittedTime","EHSubmittedTime","SHLandingTime", "EHLandingTime","SHAddress","SHZip","EHAddress","EHZip", "CrewCount","Fishery","Gear","SpeciesGrade","Quantity", "Unit", "Count") # take spaces out of names #names(WM) = gsub(" ", "", names(WM), fixed = TRUE) # needs to be changed in the data RM = RM %>% mutate(AssignedMonitor = replace(AssignedMonitor, 
TripID %in% c(565820, 569269, 569582, 574640, 578963, 569640, 569665, 579730, 566638, 584714, 584748, 584813, 588244), "Becky Rusteberg K"), AssignedMonitor = replace(AssignedMonitor, TripID %in% c(582379, 582924, 583278, 585968), "Steve Harris Womack")) # correct data RM$Quantity[RM$TripID %in% 596007 & RM$SpeciesGrade %in% "FEMALES"] = 16 RM$Quantity[RM$TripID %in% 596007 & RM$SpeciesGrade %in% "MIXED MALES"] = 2 # likely an error but same on the paper report so leaving as is #RM$Quantity[RM$TripID %in% 606012 & RM$SpeciesGrade %in% "PEELERS"] = 20 #RM$Quantity[RM$TripID %in% 606012 & RM$SpeciesGrade %in% "SOFT SHELL"] = 2 # -------------------- # # -------------------- # # manipulate data # -------------------- # # join fishery and name to RM based on trip ID RM = left_join(RM, dplyr::select(WM, TripID, Fishery, WatermenName, Date) %>% distinct, by = "TripID") # add regions WM = left_join(WM, mutate(zip_region_list, Zip = as.numeric(Zip)), by = c("EHZip" = "Zip")) %>% mutate(region = replace(region, is.na(region), "undefined")) RM = left_join(RM, dplyr::select(WM, TripID, region) %>% distinct, by = "TripID") # attr(WM$Date, "tzone") <- "EST" # attr(RM$Date, "tzone") <- "EST" RM = mutate(RM, Date = as.Date(as.character(Date), format = "%Y-%m-%d")) WM = mutate(WM, Date = as.Date(as.character(Date), format = "%Y-%m-%d")) WM = WM %>% filter(Date <= "2019-12-15") # -------------------- # # create table # subset for Jul1 - Dec 15 WM = WM %>% filter(Date >= "2019-7-1") RM = RM %>% filter(Date >= "2019-7-1") tripSummary = as.data.frame(matrix(data = NA, ncol=7, nrow=7)) names(tripSummary) = c("Regions","AvailTrips","AttemptedTrips","SuccessfulTrips","AvailWM","AttemptedWM","SuccessfulWM") tripSummary$Regions = c("1","2","3","4","5","6","Total") tripSummary[tripSummary$Regions %in% "Total",2:7] = c(prettyNum(length(unique(WM$TripID)), big.mark = ","), paste(formatC(length(unique(RM$TripID))/length(unique(WM$TripID))*100, digits = 3), "% (n = ", length(unique(RM$TripID)), 
")", sep=""), paste(formatC((length(unique(RM$TripID[RM$Result %in% c("MONITORED (on paper)","MONITORED")]))/length(unique(WM$TripID)))*100, digits=3), "% (n = ", length(unique(RM$TripID[RM$Result %in% c("MONITORED (on paper)","MONITORED")])), ")", sep=""), length(unique(WM$WatermenName)), paste(formatC((length(unique(RM$DNRID))/length(unique(WM$DNRID)))*100, digits=4), "% (n = ", length(unique(RM$DNRID)), ")", sep=""), paste(formatC((length(unique(RM$DNRID[RM$Result %in% c("MONITORED (on paper)","MONITORED")]))/length(unique(WM$DNRID)))*100, digits=4), "% (n = ",length(unique(RM$DNRID[RM$Result %in% c("MONITORED (on paper)","MONITORED")])), ")", sep="")) for(n in c(1:6)){ tripSummary$AvailTrips[n] = prettyNum(length(unique(WM$TripID[WM$region %in% n])), big.mark = ",") tripSummary$AttemptedTrips[n] = paste(formatC(length(unique(RM$TripID[RM$region %in% n]))/length(unique(WM$TripID[WM$region %in% n]))*100, digits = 3), "% (n = ", length(unique(RM$TripID[RM$region %in% n])), ")", sep="") tripSummary$SuccessfulTrips[n] = paste(formatC((length(unique(RM$TripID[RM$Result %in% c("MONITORED (on paper)","MONITORED") &RM$region %in% n]))/length(unique(WM$TripID[WM$region %in% n])))*100, digits=3), "% (n = ", length(unique(RM$TripID[RM$Result %in% c("MONITORED (on paper)","MONITORED") &RM$region %in% n])), ")", sep="") tripSummary$AvailWM[n] = length(unique(WM$DNRID[WM$region %in% n])) tripSummary$AttemptedWM[n] = paste(formatC((length(unique(RM$DNRID[RM$region %in% n]))/length(unique(WM$DNRID[WM$region %in% n])))*100, digits=4), "% (n = ", length(unique(RM$DNRID[RM$region %in% n])), ")", sep="") tripSummary$SuccessfulWM[n] = paste(formatC((length(unique(RM$DNRID[RM$Result %in% c("MONITORED (on paper)","MONITORED") & RM$region %in% n]))/length(unique(WM$DNRID[WM$region %in% n])))*100, digits=4), "% (n = ",length(unique(RM$DNRID[RM$Result %in% c("MONITORED (on paper)","MONITORED") & RM$region %in% n])), ")", sep="") } rm(n) xTable = htmlTable(tripSummary, rnames = FALSE, 
caption="Table 1. Trip Summary for Roving Monitors July 1 to December 15, 2019", header = c("Region", "Total Available Trips", "Attempted Trips Monitored", "Successful Trips Monitored", "Number of Available Watermen", "Number of Individual Watermen Attempted to be Monitored", "Number of Individual Watermen Successfully Monitored"), n.rgroup = c(6,1), align = "lc", align.header = "lccc", css.cell = rbind(rep("font-size: 1.1em; padding-right: 0.6em", times=7), matrix("font-size: 1.1em; padding-right: 0.6em", ncol=7, nrow=7)), css.table = "margin-top: 1em; margin-bottom: 1em; table-layout: fixed; width: 1000px", total = "tspanner", css.total = c("border-top: 1px solid grey; font-weight: 900"), n.tspanner = c(nrow(tripSummary))) xTable write.table(xTable, file=paste(dir.out, "Table1_for_Carrie.html",sep=""), quote = FALSE, col.names = FALSE, row.names = FALSE) # -------------- # # -------------- # # FF # -------------- # WM_FF = WM %>% filter(Fishery %in% "Finfish") RM_FF = RM %>% filter(Fishery %in% "Finfish") tripSummary = as.data.frame(matrix(data = NA, ncol=7, nrow=7)) names(tripSummary) = c("Regions","AvailTrips","AttemptedTrips","SuccessfulTrips","AvailWM","AttemptedWM","SuccessfulWM") tripSummary$Regions = c("1","2","3","4","5","6","Total") tripSummary[tripSummary$Regions %in% "Total",2:7] = c(prettyNum(length(unique(WM_FF$TripID)), big.mark = ","), paste(formatC(length(unique(RM_FF$TripID))/length(unique(WM_FF$TripID))*100, digits = 3), "% (n = ", length(unique(RM_FF$TripID)), ")", sep=""), paste(formatC((length(unique(RM_FF$TripID[RM_FF$Result %in% c("MONITORED (on paper)","MONITORED")]))/length(unique(WM_FF$TripID)))*100, digits=3), "% (n = ", length(unique(RM_FF$TripID[RM_FF$Result %in% c("MONITORED (on paper)","MONITORED")])), ")", sep=""), length(unique(WM_FF$DNRID)), paste(formatC((length(unique(RM_FF$DNRID))/length(unique(WM_FF$DNRID)))*100, digits=4), "% (n = ", length(unique(RM_FF$DNRID)), ")", sep=""), 
paste(formatC((length(unique(RM_FF$DNRID[RM_FF$Result %in% c("MONITORED (on paper)","MONITORED")]))/length(unique(WM_FF$DNRID)))*100, digits=4), "% (n = ",length(unique(RM_FF$DNRID[RM_FF$Result %in% c("MONITORED (on paper)","MONITORED")])), ")", sep="")) for(n in c(1:6)){ tripSummary$AvailTrips[n] = prettyNum(length(unique(WM_FF$TripID[WM_FF$region %in% n])), big.mark = ",") tripSummary$AttemptedTrips[n] = paste(formatC(length(unique(RM_FF$TripID[RM_FF$region %in% n]))/length(unique(WM_FF$TripID[WM_FF$region %in% n]))*100, digits = 3), "% (n = ", length(unique(RM_FF$TripID[RM_FF$region %in% n])), ")", sep="") tripSummary$SuccessfulTrips[n] = paste(formatC((length(unique(RM_FF$TripID[RM_FF$Result %in% c("MONITORED (on paper)","MONITORED") & RM_FF$region %in% n]))/length(unique(WM_FF$TripID[WM_FF$region %in% n])))*100, digits=3), "% (n = ", length(unique(RM_FF$TripID[RM_FF$Result %in% c("MONITORED (on paper)","MONITORED") & RM_FF$region %in% n])), ")", sep="") tripSummary$AvailWM[n] = length(unique(WM_FF$DNRID[WM_FF$region %in% n])) tripSummary$AttemptedWM[n] = paste(formatC((length(unique(RM_FF$DNRID[RM_FF$region %in% n]))/length(unique(WM_FF$DNRID[WM_FF$region %in% n])))*100, digits=4), "% (n = ", length(unique(RM_FF$DNRID[RM_FF$region %in% n])), ")", sep="") tripSummary$SuccessfulWM[n] = paste(formatC((length(unique(RM_FF$DNRID[RM_FF$Result %in% c("MONITORED (on paper)","MONITORED") & RM_FF$region %in% n]))/length(unique(WM_FF$DNRID[WM_FF$region %in% n])))*100, digits=4), "% (n = ",length(unique(RM_FF$DNRID[RM_FF$Result %in% c("MONITORED (on paper)","MONITORED") & RM_FF$region %in% n])), ")", sep="") } rm(n) xTable = htmlTable(tripSummary, rnames = FALSE, caption="Table 2. 
Finfish Trip Summary for Roving Monitors July 1 to December 15, 2019", header = c("Region", "Total Available Trips", "Attempted Trips Monitored", "Successful Trips Monitored", "Number of Available Watermen", "Number of Individual Watermen Attempted to be Monitored", "Number of Individual Watermen Successfully Monitored"), n.rgroup = c(6,1), align = "lc", align.header = "lccc", css.cell = rbind(rep("font-size: 1.1em; padding-right: 0.6em", times=7), matrix("font-size: 1.1em; padding-right: 0.6em", ncol=7, nrow=7)), css.table = "margin-top: 1em; margin-bottom: 1em; table-layout: fixed; width: 1000px", total = "tspanner", css.total = c("border-top: 1px solid grey; font-weight: 900"), n.tspanner = c(nrow(tripSummary))) xTable write.table(xTable, file=paste(dir.out, "Table1FF_for_Carrie.html",sep=""), quote = FALSE, col.names = FALSE, row.names = FALSE) # # -------------- # # -------------- # # BC # -------------- # WM_BC = WM %>% filter(Fishery %in% "Blue Crab") RM_BC = RM %>% filter(Fishery %in% "Blue Crab") tripSummary = as.data.frame(matrix(data = NA, ncol=7, nrow=7)) names(tripSummary) = c("Regions","AvailTrips","AttemptedTrips","SuccessfulTrips","AvailWM","AttemptedWM","SuccessfulWM") tripSummary$Regions = c("1","2","3","4","5","6","Total") tripSummary[tripSummary$Regions %in% "Total",2:7] = c(prettyNum(length(unique(WM_BC$TripID)), big.mark = ","), paste(formatC(length(unique(RM_BC$TripID))/length(unique(WM_BC$TripID))*100, digits = 3), "% (n = ", length(unique(RM_BC$TripID)), ")", sep=""), paste(formatC((length(unique(RM_BC$TripID[RM_BC$Result %in% c("MONITORED (on paper)","MONITORED")]))/length(unique(WM_BC$TripID)))*100, digits=3), "% (n = ", length(unique(RM_BC$TripID[RM_BC$Result %in% c("MONITORED (on paper)","MONITORED")])), ")", sep=""), length(unique(WM_BC$DNRID)), paste(formatC((length(unique(RM_BC$DNRID))/length(unique(WM_BC$DNRID)))*100, digits=4), "% (n = ", length(unique(RM_BC$DNRID)), ")", sep=""), 
paste(formatC((length(unique(RM_BC$DNRID[RM_BC$Result %in% c("MONITORED (on paper)","MONITORED")]))/length(unique(WM_BC$DNRID)))*100, digits=4), "% (n = ",length(unique(RM_BC$DNRID[RM_BC$Result %in% c("MONITORED (on paper)","MONITORED")])), ")", sep="")) for(n in c(1:6)){ tripSummary$AvailTrips[n] = prettyNum(length(unique(WM_BC$TripID[WM_BC$region %in% n])), big.mark = ",") tripSummary$AttemptedTrips[n] = paste(formatC(length(unique(RM_BC$TripID[RM_BC$region %in% n]))/length(unique(WM_BC$TripID[WM_BC$region %in% n]))*100, digits = 3), "% (n = ", length(unique(RM_BC$TripID[RM_BC$region %in% n])), ")", sep="") tripSummary$SuccessfulTrips[n] = paste(formatC((length(unique(RM_BC$TripID[RM_BC$Result %in% c("MONITORED (on paper)","MONITORED") & RM_BC$region %in% n]))/length(unique(WM_BC$TripID[WM_BC$region %in% n])))*100, digits=3), "% (n = ", length(unique(RM_BC$TripID[RM_BC$Result %in% c("MONITORED (on paper)","MONITORED") & RM_BC$region %in% n])), ")", sep="") tripSummary$AvailWM[n] = length(unique(WM_BC$DNRID[WM_BC$region %in% n])) tripSummary$AttemptedWM[n] = paste(formatC((length(unique(RM_BC$DNRID[RM_BC$region %in% n]))/length(unique(WM_BC$DNRID[WM_BC$region %in% n])))*100, digits=4), "% (n = ", length(unique(RM_BC$DNRID[RM_BC$region %in% n])), ")", sep="") tripSummary$SuccessfulWM[n] = paste(formatC((length(unique(RM_BC$DNRID[RM_BC$Result %in% c("MONITORED (on paper)","MONITORED") & RM_BC$region %in% n]))/length(unique(WM_BC$DNRID[WM_BC$region %in% n])))*100, digits=4), "% (n = ",length(unique(RM_BC$DNRID[RM_BC$Result %in% c("MONITORED (on paper)","MONITORED") & RM_BC$region %in% n])), ")", sep="") } rm(n) xTable = htmlTable(tripSummary, rnames = FALSE, caption="Table 3. 
Blue Crab Trip Summary for Roving Monitors July 1 to December 15, 2019", header = c("Region", "Total Available Trips", "Attempted Trips Monitored", "Successful Trips Monitored", "Number of Available Watermen", "Number of Individual Watermen Attempted to be Monitored", "Number of Individual Watermen Successfully Monitored"), n.rgroup = c(6,1), align = "lc", align.header = "lccc", css.cell = rbind(rep("font-size: 1.1em; padding-right: 0.6em", times=7), matrix("font-size: 1.1em; padding-right: 0.6em", ncol=7, nrow=7)), css.table = "margin-top: 1em; margin-bottom: 1em; table-layout: fixed; width: 1000px", total = "tspanner", css.total = c("border-top: 1px solid grey; font-weight: 900"), n.tspanner = c(nrow(tripSummary))) xTable write.table(xTable, file=paste(dir.out, "Table1BC_for_Carrie.html",sep=""), quote = FALSE, col.names = FALSE, row.names = FALSE) # # -------------- #
dd645c3e2d8984cd56905b4c28116c4feaa90c66
29585dff702209dd446c0ab52ceea046c58e384e
/msos/R/bothsidesmodel.chisquare.R
70acc256c692dfb2cad60fd7b9f6716cad68d38b
[]
no_license
ingted/R-Examples
825440ce468ce608c4d73e2af4c0a0213b81c0fe
d0917dbaf698cb8bc0789db0c3ab07453016eab9
refs/heads/master
2020-04-14T12:29:22.336088
2016-07-21T14:01:14
2016-07-21T14:01:14
null
0
0
null
null
null
null
UTF-8
R
false
false
425
r
bothsidesmodel.chisquare.R
bothsidesmodel.chisquare <- function(x,y,z,pattern0,patternA=matrix(1,nrow=ncol(x),ncol=ncol(z))) { bsm <- bothsidesmodel(x,y,z,patternA) which <- patternA*(1-pattern0) which <- c(t(which)) == 1 theta <- c(t(bsm$Beta))[which] covtheta <- bsm$Covbeta[which,which] chisq <- theta%*%solve(covtheta,theta) df <- sum(which) list(Theta=theta,Covtheta = covtheta,df = df, Chisq=chisq,pvalue=1-pchisq(chisq,df)) }
944a066cf4b78e188dc0de8242ed9bef4ff550e1
c16e3a0b0fd3b017242dcf1f16078b528d227abe
/man/deleteGP.Rd
4ecacd4995f8d4a543bc5586a6b3cbbc5e409dc5
[]
no_license
cran/laGP
906f6d217c7adbcaf2a06dada9cf633f9b28c580
e2ad6bdf6bf9864571a4cce063be3fe10b842848
refs/heads/master
2023-03-19T09:20:02.211617
2023-03-14T07:30:06
2023-03-14T07:30:06
17,696,958
8
1
null
null
null
null
UTF-8
R
false
false
1,073
rd
deleteGP.Rd
\name{deleteGP} \alias{deleteGP} \alias{deleteGPs} \alias{deleteGPsep} \alias{deleteGPseps} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Delete C-side Gaussian Process Objects } \description{ Frees memory allocated by a particular C-side Gaussian process object, or all GP objects currently allocated } \usage{ deleteGP(gpi) deleteGPsep(gpsepi) deleteGPs() deleteGPseps() } %- maybe also 'usage' for other objects documented here. \arguments{ \item{gpi}{ a scalar positive integer specifying an allocated isotropic GP object } \item{gpsepi}{ similar to \code{gpi} but indicating a separable GP object} } \details{ Any function calling \code{\link{newGP}} or \code{\link{newGPsep}} will require destruction via these functions or there will be a memory leak } \value{ Nothing is returned } \author{ Robert B. Gramacy \email{rbg@vt.edu} } \seealso{ \code{vignette("laGP")}, \code{\link{newGP}}, \code{\link{predGP}}, \code{\link{mleGP}} } \examples{ ## see examples for newGP, predGP, or mleGP } \keyword{ utilities }
3dea36a3f38187b646458dd764d437ecf6c458ba
8d29c9f8faa03eb55764ac9d2c60499c6b16b48c
/man/getReadClass.Rd
43f0e5566f6f3adcb22b4894a7675f219482b18e
[]
no_license
jsemple19/EMclassifieR
fd2671a5be8ac97cc67e20c94afe97223f44b0ba
a3626d02d5c73046073f04dbdba4b246a7278bcc
refs/heads/master
2022-08-10T06:27:20.721490
2022-08-08T11:34:43
2022-08-08T11:34:43
198,430,374
0
0
null
null
null
null
UTF-8
R
false
true
683
rd
getReadClass.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/EMbasic.R \name{getReadClass} \alias{getReadClass} \title{Extract class info from dataOrderedByClass} \usage{ getReadClass(dataOrderedByClass, readNames) } \arguments{ \item{dataOrderedByClass}{A matrix of methylation frequency or bin counts for indivudal reads at particular positions where the reads have been sorted by class and the row names contain the read name and the class joined together: readName__classX} \item{readNames}{A vector of read names by which to order the classes} } \value{ Classification of reads ordered by readNames } \description{ Extract class info from dataOrderedByClass }
f734aa9093ef6a403ff0f45c3df8e164e284fe54
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/hypervolume/examples/hypervolume_project.Rd.R
1ff14a41cd56855eba2b5812daefef6fc73cbbbd
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
1,914
r
hypervolume_project.Rd.R
library(hypervolume) ### Name: hypervolume_project ### Title: Geographical projection of hypervolume for species distribution ### modeling, using the hypervolume as the environmental niche model. ### Aliases: hypervolume_project ### ** Examples # example does not run to meet CRAN runtime guidelines - set TRUE to run hypervolume_project_demo = FALSE if (hypervolume_project_demo==TRUE) { # load in lat/lon data data('quercus') data_alba = subset(quercus, Species=="Quercus alba")[,c("Longitude","Latitude")] data_alba = data_alba[sample(1:nrow(data_alba),500),] # get worldclim data from internet require(maps) require(raster) climatelayers = getData('worldclim', var='bio', res=10, path=tempdir()) # z-transform climate layers to make axes comparable climatelayers_ss = climatelayers[[c(1,12)]] for (i in 1:nlayers(climatelayers_ss)) { climatelayers_ss[[i]] <- (climatelayers_ss[[i]] - cellStats(climatelayers_ss[[i]], 'mean')) / cellStats(climatelayers_ss[[i]], 'sd') } climatelayers_ss = crop(climatelayers_ss, extent(-150,-50,15,60)) # extract transformed climate values climate_alba = extract(climatelayers_ss, data_alba[1:300,]) # compute hypervolume hv_alba <- hypervolume_gaussian(climate_alba) # do geographical projection raster_alba_projected_accurate <- hypervolume_project(hv_alba, rasters=climatelayers_ss) raster_alba_projected_fast = hypervolume_project(hv_alba, rasters=climatelayers_ss, type='inclusion', fast.or.accurate='fast') # draw map of suitability scores plot(raster_alba_projected_accurate,xlim=c(-100,-60),ylim=c(25,55)) map('usa',add=TRUE) plot(raster_alba_projected_fast,xlim=c(-100,-60),ylim=c(25,55)) map('usa',add=TRUE) }
d5bc3d3af14cb681c220dad8e536aca0856e4144
7a3792da66c63aa81c671469656ac19fe8f9451c
/shiny/nwscode_to_rivgroup.R
5295f284865194b6726be3fb0cceaaf6c46d4043
[]
no_license
dbo99/19nohrsc
82f3bd62e7c71c1a776dee3f4ea15ec52b111fef
cdf8df319f5027bd17532613a87527c257cb958e
refs/heads/master
2023-03-07T16:41:36.167709
2020-01-08T15:59:10
2020-01-08T15:59:10
177,946,572
0
0
null
null
null
null
UTF-8
R
false
false
864
r
nwscode_to_rivgroup.R
spdf_entbasins <- readOGR("basins.kml", "cnrfc_09122018_basins_thin") #spdf_basinzones <- readOGR(".","cnrfc_zones_wgs84aux") #pnts1 <- readOGR("riverFcast.kml", "![CDATA[River Guidance <br><a href="http://www.cnrfc.noaa.gov/rfc_guidance.php">CNRFC River Forecast Web Page</a>]]") # Convert spatialpolydf to an sf object sf_entbasins <- spdf_entbasins %>% st_as_sf() %>% mutate(Description = as.character(Description), rivgroupkml = gsub(".*<tr> <td>Group</td> <td>(.+)</td> </tr> <tr bgcolor=\"#D4E4F3.*", "\\1", Description ), desckml = gsub(".*Basin</td> <td>(.+)</td> </tr> </table> </td> </tr> </table> </body> </html>", "\\1", Description ), nwscodekml = Name) %>% select(-Description, -Name) #%>% nwscode <- sf_entbasins$nwscodekml rivgroup <- sf_entbasins$rivgroupkml nwscode_to_rivgroup <- data.frame(nwscode, rivgroup)
0024965464e464816258f24babc0cb4dd428028e
4d853f62cf346a624789859d3e81e211d422700a
/UNIGE_ovitrap/transform_ovitrap.R
3d6554f56dca790b40c5731c2b351ceea8ec6868
[]
no_license
rodekruis/epidemic-risk-assessment
fda6a3f502943a46ee999506f910ed0369b91342
ca9f5048d9d4088bd0e9acd3d2c77b07194deaf5
refs/heads/master
2020-08-02T06:51:41.256212
2020-07-20T12:00:11
2020-07-20T12:00:11
211,268,464
5
0
null
null
null
null
UTF-8
R
false
false
878
r
transform_ovitrap.R
#R script to transform the ovitrap data in bi-weekly averages #In case of any questions you can send an e-mail to fleur.hierink@unige.ch #libraries library(dplyr) library(lubridate) #fetch data from personal directory #load in ovitrap data ovitrap <- read.csv("/Users/...../....csv") #convert the date column into a date ovitrap$date <- as.Date(ovitrap$date) #create bi-weekly averages ovitrap_week <- ovitrap %>% mutate(two_weeks = round_date(date, "14 days")) %>% group_by(id, longitude, latitude, two_weeks) %>% summarise(average_ovi = mean(value)) #2014 is data richest year, subset 2014 and continue with this ovitrap_2014 <- ovitrap_week %>% mutate(two_weeks = as.Date(two_weeks)) %>% filter(two_weeks >= "2014-01-01" & two_weeks <= "2014-12-31") #save data as csv write.csv(ovitrap_week, "/Users/.../...csv") write.csv(ovitrap_2014, "/Users/.../...csv")
d792cfa1e6ccd0a08f622649d732b1be08d4ae51
4ea9492221d48e89eb9c29b5fa7cc5610ad4138e
/man/make_filename.Rd
09d76c8e53005c2732432cc66c02e4c59f888ff8
[]
no_license
krinard/MSDRFars
0329b35086238d00f8c8e3a2226a616719d9b535
5ccddb857c5dee6eea54a8363c5260cdfae53234
refs/heads/master
2023-02-12T02:20:12.571823
2021-01-11T15:14:36
2021-01-11T15:14:36
326,797,515
0
0
null
null
null
null
UTF-8
R
false
true
542
rd
make_filename.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fars.R \name{make_filename} \alias{make_filename} \title{Create a string to use as a filename for bz2 compressed csv file for accident data in the Fatality Analysis Reporting System.} \usage{ make_filename(year) } \arguments{ \item{year}{to use a suffix in the filename} } \value{ String in format accident_<year>/csv/bz2 } \description{ This function takes the year and returns a string in the format accident_<year>.csv.bz2 } \examples{ make_filename("2013") }
8bcd0e05799e3a68fd85f3f922f51f8aa9364b97
c144e367b414e28998ae6c07c7e86048a940a352
/plot3.R
9475dd557c4c3a873990a1f99cafadad6ff2be70
[]
no_license
attuquayejames/ExData_Plotting1
b536c6eec9535c7cc7ec208e7ac436b8af69cbbf
97dac6a27b015ff350194c992de565c725a34b8f
refs/heads/master
2021-01-18T01:52:24.059937
2015-07-09T10:59:40
2015-07-09T10:59:40
38,813,564
0
0
null
2015-07-09T10:16:05
2015-07-09T10:16:05
null
UTF-8
R
false
false
1,335
r
plot3.R
# read the data mydata <- read.table("household_power_consumption.txt", header=T, sep=";") # convert the Date variable to Date classes in R using the as.Date() function mydata$Date <- as.Date(mydata$Date, format="%d/%m/%Y") # subset the data mydata <- mydata[(mydata$Date=="2007-02-01" | mydata$Date=="2007-02-02"), ] # convert the Global_active_power variable to numeric class in R mydata$Global_active_power <- as.numeric(as.character(mydata$Global_active_power)) # transform timestamps to weekdays mydata <- transform(mydata, weekdays=as.POSIXct(paste(Date, Time)), "%d/%m/%Y %H:%M:%S") # transform Sub_metering variables to numeric variables mydata$Sub_metering_1 <- as.numeric(as.character(mydata$Sub_metering_1)) mydata$Sub_metering_2 <- as.numeric(as.character(mydata$Sub_metering_2)) mydata$Sub_metering_3 <- as.numeric(as.character(mydata$Sub_metering_3)) png(filename = "plot3.png", width = 480, height = 480) # generate plot3 plot(mydata$weekdays, mydata$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering") lines(mydata$weekdays, mydata$Sub_metering_2, type="l", col="red") lines(mydata$weekdays, mydata$Sub_metering_3, type="l", col="blue") # add legend to the plot legend("topright", col=c("black","red","blue"), c("Sub_metering_1 ","Sub_metering_2 ", "Sub_metering_3 "),lty=c(1,1), lwd=c(1,1)) dev.off()
b0b54a23eb97c3141a227450432e30d579b422aa
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
/fuzzedpackages/stratEst/man/is.stratEst.check.Rd
71b207b9467ca9b9ba3744ee920b4accdae24230
[]
no_license
akhikolla/testpackages
62ccaeed866e2194652b65e7360987b3b20df7e7
01259c3543febc89955ea5b79f3a08d3afe57e95
refs/heads/master
2023-02-18T03:50:28.288006
2021-01-18T13:23:32
2021-01-18T13:23:32
329,981,898
7
1
null
null
null
null
UTF-8
R
false
true
457
rd
is.stratEst.check.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/is_stratEst_check.R \name{is.stratEst.check} \alias{is.stratEst.check} \title{Class stratEst.check} \usage{ is.stratEst.check(x) } \arguments{ \item{x}{object to be tested.} } \description{ Checks if an object is of class \code{stratEst.check}. } \details{ Objects of class \code{stratEst.check} are returned by the function \code{stratEst.check()} of package \code{stratEst}. }
bc42f2673d70c62e2650ba04292af3368d0b146d
3f02cb4dfd2e35fb7346830341e29df511f0137e
/man/is_valid_day.Rd
ad0446abeba470830103671d587eb37fec1f237e
[]
no_license
rajkboddu/admiral
ce08cb2698b62ca45ba6c0e8ed2ac5095f41b932
ffbf10d7ffdda1c997f431d4f019c072217188b1
refs/heads/master
2023-08-11T11:14:44.016519
2021-09-08T10:24:45
2021-09-08T10:24:45
null
0
0
null
null
null
null
UTF-8
R
false
true
494
rd
is_valid_day.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/assertions.R \name{is_valid_day} \alias{is_valid_day} \title{Check Validity of the Day Portion in the Date Input} \usage{ is_valid_day(arg) } \arguments{ \item{arg}{The argument to check} } \value{ \code{TRUE} if the argument is a day input, \code{FALSE} otherwise } \description{ Days are expected to range from 1 to 31 } \examples{ assertthat::assert_that(is_valid_day(20)) } \author{ Samia Kabi } \keyword{check}
92e5559d89507133e3bab2625420dec3c82f2a14
674e66b177dc35e12831a0923419d9d9aa336a66
/examples_Control_Structures.R
9cab6cee2e706a06ac16dd022fc6dab7846d6abf
[]
no_license
oreclios/datasciencecoursera
cd7d1b049e7d142e509e63eca7ae0edb6f51f478
87e6d7c757c9f194c1623fd2e578e18d9d3d6c4a
refs/heads/master
2021-01-13T07:15:00.474314
2016-10-25T21:13:30
2016-10-25T21:13:30
71,500,940
0
0
null
null
null
null
UTF-8
R
false
false
1,156
r
examples_Control_Structures.R
##Examples in R: Control Structures ##if - else: if(x > 3){ y <- 10 }else if (x < 3){ y <- 5 }else{ y <- 0 } ##Another way: y <- if(x > 3){ 10 }else if (x < 3){ 5 }else{ 0 } ################################################## ##for loops: for(i in 1:10){ print(i) } ##Other examples: x <- c("a", "b", "c", "d") for(i in 1:4){ print(x[i]) } for(i in seq_along(x)){ print(x[i]) } for(letter in x){ print(letter) } for(i in 1:4)print(x[i]) ##Nested Loops: x <- matrix(1:6, 2, 3) for(i in seq_len(nrow(x))){ for(j in seq_len(ncol(x))){ print(x[i, j]) } } ################################################################################ ##While Loops: count <- 0 while(count < 10){ print(count) count <- count +1 } ##Other example: z <- 5 while(z >= 3 && z <= 10){ print(z) coin <- rbinom(1, 1, 0.5) if(coin == 1){ z <- z+1 }else{ z <- z-1 } } ############################################################################################# ##Repeat Loops: x0 <- 1 tol <- 1e8 repeat{ x1 <- computeEstimate() if(abs(x1 - x0) < tol){ break }else{ x0 <- x1 } }
0609cfa6d4c00c3e84a37a1e6418e33abff6c4bd
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/HAC/examples/plot.hac.Rd.R
58668fd4ce0c3a630c583b185001bbf2b0650c4d
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
382
r
plot.hac.Rd.R
library(HAC) ### Name: plot.hac ### Title: Plot of a HAC ### Aliases: plot.hac ### ** Examples # a hac object is created tree = list(list("X1", "X5", 3), list("X2", "X3", "X4", 4), 2) model = hac(type = 1, tree = tree) plot(model) # the same procedure works for an estimated object sample = rHAC(2000, model) est.obj = estimate.copula(sample, epsilon = 0.2) plot(est.obj)
01781c9ead9d3944dbcbb2eda8ddecaea2934e00
247168dd727c19cef2ce885476d3e4102d2ca7de
/man/AuthenticationManager-class.Rd
af13c24d1aa785458c396f08a097eb5672fc1b94
[ "Apache-2.0" ]
permissive
DataONEorg/rdataone
cdb0a3a7b8c3f66ce5b2af41505d89d2201cce90
97ef173bce6e4cb3bf09698324185964299a8df1
refs/heads/main
2022-06-15T08:31:18.102298
2022-06-09T21:07:26
2022-06-09T21:07:26
14,430,641
27
19
null
2022-06-01T14:48:02
2013-11-15T17:27:47
R
UTF-8
R
false
true
3,241
rd
AuthenticationManager-class.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/AuthenticationManager.R \docType{class} \name{AuthenticationManager-class} \alias{AuthenticationManager-class} \title{Manage DataONE authentication.} \description{ AuthenticationManager provides mechanisms to validate DataONE authentication, when either a DataONE authentication token or X.509 Certificate is used. } \details{ Understanding how your identity is managed is important for working with DataONE, especially to avoid unexpected results. For example, depending your authorization status, searches may return only public records, or the full set of public and private records. Object and package retrievals might fail if some or all of the objects being retrieved are private. Creating or updating objects on DataONE nodes and reserving identifiers might fail if your authorization credentials are missing or expired. DataONE version 1.0 identifies you using CILogon-provided x509 certificates. DataONE has partnered with CILogon to provide a widely-accessible certificate issuing mechanism that allows DataONE users to use existing trusted institutional and public accounts. DataONE version 2.0 provides an addition authentication mechanism known as authentication tokens. For information about tokens and instructions for generating a token for use with the dataone R package, view the overview document by entering the command: \code{'vignette("dataone-overview")'}. DataONE authentication tokens can be obtained by signing in to your DataONE account at https://search.dataone.org. CILogon recognizes many identity providers, including many universities as well as Google, so most times users new to DataONE can get certificates using one of their existing accounts. For more information about the CILogon service, see \url{https://cilogon.org/?skin=DataONE} . 
} \section{Slots}{ \describe{ \item{\code{obscured}}{Value of type \code{"character"} Is authentication disabled (obscured)?} }} \section{Methods}{ \itemize{ \item{\code{\link{AuthenticationManager}}}{: Create an AuthenticationManager object.} \item{\code{\link{isAuthValid}}}{: Verify authentication for a member node.} \item{\code{\link{getToken}}}{: Get the value of the DataONE Authentication Token, if one exists.} \item{\code{\link{getCert}}}{: Get the DataONE X.509 Certificate location.} \item{\code{\link{getAuthMethod}}}{: Get the current valid authentication mechanism.} \item{\code{\link{getAuthSubject}}}{: Get the authentication subject.} \item{\code{\link{getAuthExpires}}}{: Get the expiration date of the current authentication method.} \item{\code{\link{isAuthExpired}}}{: Check if the currently valid authentication method has reached the expiration time.} \item{\code{\link{obscureAuth}}}{: Temporarily disable DataONE authentication.} \item{\code{\link{restoreAuth}}}{: Restore authentication (after being disabled with \code{obscureAuth}).} \item{\code{\link{showAuth}}}{: Display all authentication information.} \item{\code{\link{getTokenInfo}}}{: Display all authentication token information.} \item{\code{\link{getCertInfo}}}{: Display all X.509 certificate information.} } } \seealso{ \code{\link{dataone}}{ package description.} }
b39a2227558415f576ad102f7ed5e9eaeec4321a
3048f86163f979ccd5f218f3d1a8007b3b2d5c08
/00-clean-data.R
a2017558fcb23587d8858097884bbeea2ab3adaf
[]
no_license
PedroHFreire/Desafio-Quantamental-UFFinance
3b5ff1a04df3266b758dd29d296dcdc4dee30a27
b4594f7a4c5f74717548cd88ee4a534e6710e6ae
refs/heads/master
2022-11-28T14:41:18.251884
2020-08-14T11:25:15
2020-08-14T11:25:15
273,997,592
0
0
null
null
null
null
UTF-8
R
false
false
1,217
r
00-clean-data.R
setwd("D:/Google Drive/Desafio Quantamental/GARCH Vol. forecast/Scripts") dados <- read.csv("cotacoes_ativos_inicio_2000.csv", sep = ";", header = FALSE, dec = ",", stringsAsFactors = FALSE) dados <- dados[, c(-3, -5)] colnames(dados) <- c("Data", "Preco", "Ativo") dados[1, "Data"] <- "2000-01-03 00:00:00.000" dados$Data <- sapply(dados$Data, FUN = substr, start = 1, stop = 10, USE.NAMES = FALSE) dados <- reshape(dados, idvar = "Data", timevar = "Ativo", direction = "wide") colnames(dados)[-1] <- sapply(colnames(dados)[-1], FUN = substr, start = 7, stop = 10000, USE.NAMES = FALSE) dados <- dados[order(dados$Data), ] dados$Data <- as.Date(dados$Data) library(xts) dados <- xts::xts(x = dados[, -1], order.by = dados$Data) dados <- dados["2004-12-31/"] save(dados, file = "dados.RData") # Fazendo double checks
b0c2c7e87ed7f96ed3d4f529ee7e3b348c3b65db
278c702f6192ffbf262a15a76fadb2b50e4886f3
/man/splityield.Rd
924a80935db9031a587e76fc1e0c52718dfa4851
[ "MIT" ]
permissive
nganbaohuynh/stat340
747f8d894d829190797d07dce9fdcd40ade02e17
1233e40400a363503e4375dc1f43c0c731959d55
refs/heads/master
2023-06-06T13:30:05.337264
2021-06-23T08:46:56
2021-06-23T08:46:56
375,981,140
0
0
NOASSERTION
2021-06-11T10:00:54
2021-06-11T10:00:54
null
UTF-8
R
false
true
484
rd
splityield.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{splityield} \alias{splityield} \title{yields data in R book, Ch 19.4, called \code{splityield} here} \format{ A data frame } \usage{ splityield } \description{ 72 observations, 5 variables } \examples{ library(nlme) data(splityield) model <- lme(yield ~ irrigation*density*fertilizer, random = ~1|block/irrigation/density, data = splityield) summary(model) } \keyword{datasets}
3be0066adefc39fb4e6ff1c6783eb9d60cca6d01
29585dff702209dd446c0ab52ceea046c58e384e
/plsgenomics/R/rpls.R
8162274eefa3e8b5b8e96726c38c54d1a8c48137
[]
no_license
ingted/R-Examples
825440ce468ce608c4d73e2af4c0a0213b81c0fe
d0917dbaf698cb8bc0789db0c3ab07453016eab9
refs/heads/master
2020-04-14T12:29:22.336088
2016-07-21T14:01:14
2016-07-21T14:01:14
null
0
0
null
null
null
null
UTF-8
R
false
false
8,254
r
rpls.R
### rpls.R (2006-01) ### ### Ridge Partial Least square for binary data ### ### Copyright 2006-01 Sophie Lambert-Lacroix ### ### ### This file is part of the `plsgenomics' library for R and related languages. ### It is made available under the terms of the GNU General Public ### License, version 2, or at your option, any later version, ### incorporated herein by reference. ### ### This program is distributed in the hope that it will be ### useful, but WITHOUT ANY WARRANTY; without even the implied ### warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR ### PURPOSE. See the GNU General Public License for more ### details. ### ### You should have received a copy of the GNU General Public ### License along with this program; if not, write to the Free ### Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, ### MA 02111-1307, USA rpls <- function (Ytrain,Xtrain,Lambda,ncomp,Xtest=NULL,NbIterMax=50) { ## INPUT VARIABLES ######################### ## Xtrain : matrix ntrain x p ## train data matrix ## Ytrain : vector ntrain ## response variable {1,2}-valued vector ## Xtest : NULL or matrix ntest x p ## if no NULL Xtest is the test data matrix ## Lambda : real ## value for the regularization parameter Lambda ## NbIterMax : positive integer ## maximal number of iteration in the WIRRLS part ## ncomp : maximal number of PLS components ## 0 = Ridge ## OUTPUT VARIABLES ########################## ## hatY : matrix of size ntest x ncomp in such a way ## that the kieme column corresponds to the result ## for ncomp=k for ncomp !=0,1 ## Ytest : matrix ntest x 1 ## predicted label for ncomp ## Coefficients : vector p+1 x 1 ## regression coefficients w.r.t. the columns of [1 Xtest] ## DeletedCol : vector ## if some covariables have nul variance, DeletedCol gives the ## corresponding column number. 
Otherwise DeletedCol = NULL ## TEST ON INPUT VARIABLES ############################## #On Xtrain if ((is.matrix(Xtrain)==FALSE)||(is.numeric(Xtrain)==FALSE)) { stop("Message from rpls.R: Xtrain is not of valid type")} if (dim(Xtrain)[2]==1) { stop("Message from rpls.R: p=1 is not valid")} ntrain <- dim(Xtrain)[1] p <- dim(Xtrain)[2] #On Xtest if (is.null(Xtest)==FALSE) { if (is.vector(Xtest)==TRUE) {Xtest <- matrix(Xtest,nrow=1)} if ((is.matrix(Xtest)==FALSE)||(is.numeric(Xtest)==FALSE)) { stop("Message from rpls.R: Xtest is not of valid type")} if (dim(Xtrain)[2]!=dim(Xtest)[2]) { stop("Message from rpls.R: columns of Xtest and columns of Xtrain must be equal")} ntest <- dim(Xtest)[1] } #On Ytrain if ((is.vector(Ytrain)==FALSE)||(is.numeric(Ytrain)==FALSE)) { stop("Message from rpls.R: Ytrain is not of valid type")} if (length(Ytrain)!=ntrain) { stop("Message from rpls.R: the length of Ytrain is not equal to the Xtrain row number")} Ytrain <- Ytrain-1 if ((sum(floor(Ytrain)-Ytrain)!=0)||(sum(Ytrain<0)>0)){ stop("Message from rpls.R: Ytrain is not of valid type")} c <- max(Ytrain) if (c!=1) { stop("Message from rpls.R: Ytrain is not of valid type")} eff<-rep(0,2) for (i in 0:1) { eff[i+1]<-sum(Ytrain==i)} if (sum(eff==0)>0) { stop("Message from rpls.R: there are empty classes")} #On hyper parameters if ((is.numeric(Lambda)==FALSE)||(Lambda<0)){ stop("Message from rpls.R: Lambda is not of valid type")} if ((is.numeric(ncomp)==FALSE)||(round(ncomp)-ncomp!=0)||(ncomp<0)){ stop("Message from rpls.R: ncomp is not of valid type")} if ((is.numeric(NbIterMax)==FALSE)||(round(NbIterMax)-NbIterMax!=0)||(NbIterMax<1)){ stop("Message from rpls.R: NbIterMax is not of valid type")} #Some initializations r <- min(p,ntrain) DeletedCol <- NULL ## MOVE IN THE REDUCED SPACE ################################ # Standardize the Xtrain matrix Sigma2train <- apply(Xtrain,2,var)*(ntrain-1)/ntrain if (sum(Sigma2train==0)!=0){ if (sum(Sigma2train==0)>(p-2)){ stop("Message from rpls.R: the 
procedure stops because number of predictor variables with no null variance is less than 1.")} warning("There are covariables with nul variance") Xtrain <- Xtrain[,which(Sigma2train!=0)] Xtest <- Xtest[,which(Sigma2train!=0)] if (is.vector(Xtest)==TRUE) {Xtest <- matrix(Xtest,nrow=1)} index <- 1:p DeletedCol <- index[which(Sigma2train==0)] Sigma2train <-Sigma2train[which(Sigma2train!=0)] p <- dim(Xtrain)[2] r <- min(p,ntrain)} MeanXtrain <- apply(Xtrain,2,mean) sXtrain <- sweep(Xtrain,2,MeanXtrain,FUN="-") sXtrain <- sweep(sXtrain,2,sqrt(Sigma2train),FUN="/") #Compute the svd when necessary if (p>ntrain) {svd.sXtrain <- svd(t(sXtrain)) r<-length(svd.sXtrain$d[abs(svd.sXtrain$d)>10^(-13)]) V <- svd.sXtrain$u[,1:r] D <- diag(c(svd.sXtrain$d[1:r])) U <- svd.sXtrain$v[,1:r] sXtrain <- U%*%D rm(D) rm(U) rm(svd.sXtrain)} if (is.null(Xtest)==FALSE) { sXtest <- sweep(Xtest,2,MeanXtrain,FUN="-") sXtest <- sweep(sXtest,2,sqrt(Sigma2train),FUN="/") if (p>ntrain) {sXtest <- sXtest%*%V} Xtest <- 0} rm(Xtrain) ## RUN RPLS IN THE REDUCED SPACE ######################################## fit <- wirrls(Y=Ytrain,Z=cbind(rep(1,ntrain),sXtrain),Lambda=Lambda,NbrIterMax=NbIterMax,WKernel=diag(rep(1,ntrain))) #Check WIRRLS convergence if (fit$Cvg==0) stop("Message from rpls : WIRRLS did not converge; try another Lambda value") if (ncomp==0) #Ridge procedure {GAMMA <- fit$Coefficients} if (ncomp!=0) { #Compute Weight and pseudo variable #Pseudovar = Eta + W^-1 Psi Eta <- cbind(rep(1,ntrain),sXtrain)%*%fit$Coefficients mu<-1/(1+exp(-Eta)) diagW <- mu*(1-mu) W <- diag(c(diagW)) Psi <- Ytrain-mu ## Run PLS # W-Center the sXtrain and pseudo variable Sum=sum(diagW) # Weighted centering of Pseudo variable WMeanPseudoVar <- sum(W%*%Eta+Psi)/Sum WCtrPsi <- Psi WCtrEta <- Eta-c(WMeanPseudoVar) # Weighted centering of sXtrain WMeansXtrain <- t(diagW)%*%sXtrain/Sum WCtrsXtrain <- sXtrain-rep(1,ntrain)%*%WMeansXtrain #Initialize some variables PsiAux <- diag(c(rep(1,r))) E <- WCtrsXtrain f1 <- WCtrEta 
f2 <- WCtrPsi Omega <- matrix(0,r,ncomp) Scores <- matrix(0,ntrain,ncomp) TildePsi <- matrix(0,r,ncomp) Loadings <- matrix(0,r,ncomp) qcoeff <- vector(ncomp,mode="numeric") GAMMA <- matrix(0,nrow=(r+1),ncol=ncomp) #WPLS loop for (count in 1:ncomp) {Omega[,count]<-t(E)%*%(W%*%f1+f2) #Score vector t<-E%*%Omega[,count] c<-t(Omega[,count])%*%t(E)%*%W%*%E%*%Omega[,count] Scores[,count]<-t TildePsi[,count] <- PsiAux%*%Omega[,count] #Deflation of X Loadings[,count]<-t(t(t)%*%W%*%E)/c[1,1] E<-E-t%*%t(Loadings[,count]) #Deflation of f1 qcoeff[count]<-t(W%*%f1+f2)%*%t/c[1,1] f1 <- f1-qcoeff[count]*t #Recursve definition of RMatrix PsiAux<-PsiAux%*%(diag(c(rep(1,r)))-Omega[,count]%*%t(Loadings[,count])) #Express regression coefficients w.r.t. the columns of [1 sX] for ncomp=count if (count==1) {GAMMA[-1,count]<-TildePsi[,1:count]%*%t(c(qcoeff[1:count]))} if (count!=1) {GAMMA[-1,count]<-TildePsi[,1:count]%*%qcoeff[1:count]} GAMMA[1,count]=WMeanPseudoVar-WMeansXtrain%*%GAMMA[-1,count]}} ## CLASSIFICATION STEP ####################### if (is.null(Xtest)==FALSE) { hatY <- cbind(rep(1,ntest),sXtest)%*%GAMMA hatY <- (hatY>0)+0} ## CONCLUDE ############## ##Compute the coefficients w.r.t. [1 X] if (ncomp!=0) {GAMMA <- GAMMA[,ncomp]} Coefficients <- rep(0,p+1) if (p>ntrain) {Coefficients[-1] <- diag(c(1/sqrt(Sigma2train)))%*%V%*%GAMMA[-1]} if (p<=ntrain) {Coefficients[-1] <- diag(c(1/sqrt(Sigma2train)))%*%GAMMA[-1]} Coefficients[1] <- GAMMA[1]-MeanXtrain%*%Coefficients[-1] List <- list(Coefficients=Coefficients,Ytest=NULL,DeletedCol=DeletedCol) if (is.null(Xtest)==FALSE) { if ((ncomp==0)|(ncomp==1)) {List <- list(Coefficients=Coefficients,Ytest=(hatY[,1]+1),DeletedCol=DeletedCol)} if ((ncomp!=0)&(ncomp!=1)) {colnames(hatY)=1:ncomp rownames(hatY)=1:ntest List <- list(Coefficients=Coefficients,hatY=(hatY+1),Ytest=(hatY[,ncomp]+1),DeletedCol=DeletedCol)} } return(List) }
3c6e7c3791aedf6a507a26713d114858f17e03a7
9c96f302c63d7bdab317e573a2c4b66d4150979a
/Sim_Replication_TM_DR/TMA_GATES_vs_DO_GATES.R
379703a0ba988fded70e898ca80379945c31b103
[]
no_license
QuantLet/DR_GATES
480dd18652a827d91a9aead40a9e6c35115b51b4
54756cd146c15363ae5371995049919a79682d19
refs/heads/master
2020-09-12T13:32:07.183814
2019-12-18T01:58:32
2019-12-18T01:58:32
222,441,087
0
1
null
null
null
null
UTF-8
R
false
false
11,006
r
TMA_GATES_vs_DO_GATES.R
# create matrix of DGP settings settings <- as.data.frame(matrix(c(500,500,500,500,500,500,50,50,50,20,20,20), ncol=2)) settings$V3 <- c(F,F,F,F,T,T) settings$V4 <- c("constant","con_lin","con_non","binary","con_non","binary") settings$V5 <- c(0.5,NA,NA,NA,NA,NA) S = 10 M <- 50 ntile <- 5 error_matrix <- matrix(NA,S,3) colnames(error_matrix) <- c("GATES_MAE","DO_GATES_MAE", "True_Treatment") error_result <- list() GATES_result <- list() for(t in 1:nrow(settings)) { N <- settings[t,1] k <- settings[t,2] ID <- c(1:N) prop <- matrix(NA,N,M) prop <- cbind(prop,ID) pred_tm_gates <- matrix(NA,N,M) pred_dr_gates <- matrix(NA,N,M) pred_doubleML_gates <- matrix(NA,N,M) pred_tm_gates <- cbind(pred_tm_gates,ID) pred_dr_gates <- cbind(pred_dr_gates,ID) pred_doubleML_gates <- cbind(pred_doubleML_gates,ID) list_res_TM <- vector("list", M) list_res_DR <- vector("list", M) list_res_DoubleML <- vector("list", M) for(j in 1:S){ theta_set <- ifelse(settings[t,4]=="constant",settings[t,5],settings[t,4]) dataset <- datagen(y="con", N=settings[t,1],k=settings[t,2],random_d=settings[t,3],theta=theta_set,var=1) dataset$ID <- c(1:N) k <- ncol(dataset)-4 covariates <- c(paste0("V", 1:k)) covariates covariates_d <- c(paste0("V", 1:k),"d") dataset$d <- as.factor(ifelse(dataset$d==1,1,0)) for(i in 1:M){ ##### Parameter and datasets ##### trainIndex <- createDataPartition(dataset$d, p = .5, list = FALSE) df_aux <- dataset[trainIndex,] df_main <- dataset[-trainIndex,] # On the auxiliary sample # ----------------------- # Propensity score using regression forests rf_prop <- ranger(d~.,data=df_aux[covariates_d],probability = T, importance= "impurity") p_dr <- predict(rf_prop,data=df_aux[,covariates_d])$predictions[,2] p <- predict(rf_prop,data=df_main[,covariates_d])$predictions[,2] # Conditional mean proxy using regression forests aux_1 <- df_aux[which(df_aux$d==1),] aux_0 <- df_aux[which(df_aux$d==0),] form <- as.formula(paste("y", paste(covariates, collapse=" + "), sep=" ~ ")) rf_1 <- 
ranger(form,data=aux_1) rf_0 <- ranger(form,data=aux_0) y1_dr <- predict(rf_1,df_aux)$predictions y0_dr <- predict(rf_0,df_aux)$predictions y1 <- predict(rf_1,df_main)$predictions y0 <- predict(rf_0,df_main)$predictions # On the main sample # ----------------------- # Propensity score offset W - e(X) df_main$d <- as.numeric(as.character(df_main$d)) - p ind_1 <- (p_dr>0.02 & p_dr<0.98) ind <- (p>0.02 & p<0.98) y1_dr <- y1_dr[ind_1] y0_dr <- y0_dr[ind_1] p_dr <- p_dr[ind_1] df_aux <- df_aux[ind_1,] p <- p[ind] y1 <- y1[ind] y0 <- y0[ind] df_main <- df_main[ind,] # Score function for distribution df_main$S <- (y1 - y0) prop[,i][df_main$ID] <- df_main$S # Divide observations into k-tiles S2 <- df_main$S +runif(length(df_main$S), 0, 0.00001) # Include white noise to guarantee that the score (S) differs from the baseline effect breaks <- quantile(S2, seq(0,1, 0.2), include.lowest =T) breaks[1] <- breaks[1] - 0.01 # Offset for lower tails breaks[6] <- breaks[6] + 0.01 # Offset for upper tails SG <- cut(S2, breaks = breaks) SGX <- model.matrix(~-1+SG) # -1 Ereases the Intercept. Possible is also to keep the Intercept. 
DSG <- data.frame(as.numeric(I(as.numeric(df_main[,"d"])))*SGX) colnames(DSG) <- c("G1", "G2", "G3", "G4", "G5") df_main[,c("G1", "G2", "G3", "G4", "G5", "weight")] <- cbind( DSG$G1, DSG$G2, DSG$G3, DSG$G4, DSG$G5, as.numeric((1/(p*(1-p))))) form1 <- as.formula(paste("y", "~", "G1+G2+G3+G4+G5 ", sep="")) df_main$y <- as.numeric(df_main$y) # Now regress on group membership variables model <- lm(form1,df_main, weights = df_main$weight) groups <- c(paste0("G",1:ntile)) groups <- dput(as.character(groups)) thetahat1 <- model%>% .$coefficients %>% .[groups] #### gates_zero_help <- df_main[colnames(DSG)] gates_zero <- as.data.frame(which(gates_zero_help!=0,arr.ind = T)) gates_zero[,c("ID")] <- rownames(gates_zero) gates_zero <- gates_zero[,-1] thetahat2 <- as.data.frame(thetahat1) rownames(thetahat2) <- c("1","2","3","4","5") thetahat2["col"] <- rownames(thetahat2) head(thetahat2) gates_y <- merge(thetahat2,gates_zero,"col") gates_y$ID <- as.integer(gates_y$ID) pred_tm_gates[,i][gates_y$ID] <- gates_y$thetahat1 #### # Confidence intervals cihat <- confint(model,level=0.9)[groups,] list_res_TM[[i]] <- tibble(coefficient = dput(as.character(c(paste0("Group", 1:ntile)))), estimates = thetahat1, ci_lower_90 = cihat[,1], ci_upper_90 = cihat[,2]) #### This part is Doubly-Robust #################### # Doubly Robust df_aux$d <- as.numeric(ifelse(df_aux$d==1,1,0)) y_mo <- (y1_dr - y0_dr) + ((df_aux$d*(df_aux$y-y1_dr))/p_dr) - ((1-df_aux$d)*(df_aux$y-y0_dr)/(1-p_dr)) rf_dr <- ranger(y_mo~.,data=df_aux[covariates], importance = "impurity") score_dr <- predict(rf_dr,data=df_main[covariates])$predictions # Divide observations into k-tiles df_main$S <- (score_dr) S2 <- df_main$S +runif(length(df_main$S), 0, 0.00001) SG <- cut(S2, breaks = ntile) ## Double Orthogonal Scores - using u_hat ################### # Predict conditional mean of Y without D form_mu <- as.formula(paste("y", paste(covariates, collapse=" + "), sep=" ~ ")) rf_mu <- ranger(form_mu,data=df_aux) y_hat <- 
predict(rf_mu,data=df_main)$predictions df_main$u_hat <- df_main$y - y_hat SGX <- model.matrix(~-1+SG) # -1 Ereases the Intercept. Possible is also to keep the Intercept. DSG <- data.frame(as.numeric(I(as.numeric(df_main[,"d"])))*SGX) colnames(DSG) <- c("G1", "G2", "G3", "G4", "G5") df_main[,c("S", "G1", "G2", "G3", "G4", "G5", "weight")] <- cbind(df_main$S, DSG$G1, DSG$G2, DSG$G3, DSG$G4, DSG$G5, as.numeric((1/(p*(1-p))))) form1 <- as.formula(paste("u_hat", "~", "G1+G2+G3+G4+G5 ", sep="")) # Now regress on group membership variables model <- lm(form1,df_main) groups <- c(paste0("G",1:ntile)) groups <- dput(as.character(groups)) thetahat1 <- model%>% .$coefficients %>% .[groups] # Confidence intervals cihat <- confint(model,level=0.9)[groups,] list_res_DoubleML[[i]] <- tibble(coefficient = dput(as.character(c(paste0("Group", 1:ntile)))), estimates = thetahat1, ci_lower_90 = cihat[,1], ci_upper_90 = cihat[,2]) #### gates_zero_help <- df_main[colnames(DSG)] gates_zero <- as.data.frame(which(gates_zero_help!=0,arr.ind = T)) gates_zero[,c("ID")] <- rownames(gates_zero) gates_zero <- gates_zero[,-1] thetahat2 <- as.data.frame(thetahat1) rownames(thetahat2) <- c("1","2","3","4","5") thetahat2["col"] <- rownames(thetahat2) head(thetahat2) gates_y <- merge(thetahat2,gates_zero,"col") gates_y$ID <- as.integer(gates_y$ID) pred_doubleML_gates[,i][gates_y$ID] <- gates_y$thetahat1 } GATES_TM <- list_res_TM[] %>% bind_rows %>% na.omit() %>% group_by(coefficient) %>% summarize_all(median) pred_tm_gates_median <- pred_tm_gates[,-ncol(pred_tm_gates)] apply(pred_tm_gates_median,1, median, na.rm = TRUE) # Calculate the row median which is then used to classify each obs. into a "group". 
error_matrix[j,1] <- mean(abs(dataset$theta-pred_tm_gates_median),na.rm=T) GATES_DoubleML <- list_res_DoubleML[] %>% bind_rows %>% group_by(coefficient) %>% summarize_all(median) pred_doubleML_gates_median <- pred_doubleML_gates[,-ncol(pred_doubleML_gates)] apply(pred_doubleML_gates_median,1, median, na.rm = TRUE) # Calculate the row median which is then used to classify each obs. into a "group". error_matrix[j,2] <- mean(abs(dataset$theta-pred_doubleML_gates_median),na.rm=T) error_matrix[j,3] <- mean (dataset$theta) print(paste0("................................... ","The current iteration is: ", j, " out of " ,S)) } error_result[[t]] <- error_matrix GATES_result[[t]] <- c(GATES_TM, GATES_DR) print(paste0("................................... ","This is DGP : ", t, " out of " ,nrow(settings))) } ########################## error_result error_all <- matrix(NA,S*nrow(settings),3) error_all b=0 for(j in 1:nrow(settings)){ for(i in 1:S){ error_all[i+b,1] <- error_result[[j]][i] error_all[i+b,2] <- error_result[[j]][i,2] error_all[i+b,3] <- as.numeric(j) } b = b+S } error_all <- as.data.frame(error_all) colnames(error_all) <- c("GATES_MAE", "DO_GATES_MAE", "SETTING") # wilcox-test for mean differences between groups wilcox.test(error_matrix[1:10,1],error_matrix[1:10,2]) # Better use non-parametric test since the assumption that X and Y are ~ N(.) is not fulfilled. 
mean_error_1 <- c() mean_error_2 <- c() j = 1 for(i in 1:6){ mean_error_1[i] <- mean(safe_error_all_newDGP_lowDim[j:j+9,1]) mean_error_2[i] <- mean(safe_error_all_newDGP_lowDim[j:j+9,3]) j = j +10 } round(mean_error_1,2) round(mean_error_2,2) ggplot(error_all, aes(x=GATES_MAE, y=DO_GATES_MAE)) + xlim(0.0,1.0) + ylim(0.0,1.0) + geom_abline(mapping= aes(intercept=0.0,slope = 1.0, color="45 Degree line")) + scale_colour_manual(values="red") + labs(colour="") + geom_point() + theme_cowplot() + facet_wrap( ~ SETTING, scales="free", ncol=3) + # Facet wrap with common scales guides(fill = FALSE, color = FALSE, linetype = FALSE, shape = FALSE) + labs(x = "TMA GATES", y = "DO GATES")
0d1783c0cd622e85f2ae6525e94eafe9e5c5d6ee
fdedcc4fb558790169c100efd3a396614e815067
/R/analyze.population.R
ec96184fe59e54d1a6e7d5bfc35530fea411eb3a
[]
no_license
cran/MoBPS
b7f7fdbde92a190c800185e2dd005d514d81f1d3
c2aeedfcba8ebf563cc64e4d4a5d2d5f186e95e1
refs/heads/master
2021-11-22T18:48:13.109306
2021-11-09T15:50:18
2021-11-09T15:50:18
251,009,278
0
0
null
null
null
null
UTF-8
R
false
false
3,477
r
analyze.population.R
'# Authors
Torsten Pook, torsten.pook@uni-goettingen.de

Copyright (C) 2017 -- 2020 Torsten Pook

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
'#

#' Analyze allele frequency of a single marker
#'
#' Analyze allele frequency of a single marker
#' @param population Population list
#' @param database Groups of individuals to consider for the export
#' @param gen Quick-insert for database (vector of all generations to export)
#' @param cohorts Quick-insert for database (vector of names of cohorts to export)
#' @param chromosome Number of the chromosome of the relevant SNP
#' @param snp Number of the relevant SNP
#' @param snp.name Name of the SNP to analyze
#' @examples
#' data(ex_pop)
#' analyze.population(ex_pop, snp=1, chromosome=1, gen=1:5)
#' @return Frequency of AA/AB/BB in selected gen/database/cohorts
#' @export
analyze.population <- function(population, chromosome = NULL, snp = NULL, snp.name = NULL,
                               database = NULL, gen = NULL, cohorts = NULL) {

  # Resolve the marker to a global SNP row index: either directly by its name,
  # or as the cumulative SNP count of the preceding chromosomes plus the
  # within-chromosome index. (A previously computed physical position `p.snp`
  # was never used and has been removed.)
  if (length(snp.name) == 1) {
    n.snp <- which(population$info$snp.name == snp.name)
  } else {
    n.snp <- sum(population$info$snp[0:(chromosome - 1)]) + snp
  }

  # One column of genotype counts (AA / AB / BB) per requested group,
  # filled in the order: generations, database rows, cohorts.
  groups <- sum(nrow(database), length(gen), length(cohorts))
  state <- matrix(0, nrow = 3, ncol = groups)
  col <- 1

  # Genotype counts per generation.
  if (length(gen) > 0) {
    for (index in 1:length(gen)) {
      genos <- get.geno(population, gen = gen[index])[n.snp, ]
      state[, col] <- c(sum(genos == 0), sum(genos == 1), sum(genos == 2))
      col <- col + 1
    }
  }

  # Genotype counts per database row.
  if (length(database) > 0) {
    for (index in 1:nrow(database)) {
      genos <- get.geno(population, database = database[index, , drop = FALSE])[n.snp, ]
      state[, col] <- c(sum(genos == 0), sum(genos == 1), sum(genos == 2))
      col <- col + 1
    }
  }

  # Genotype counts per cohort.
  if (length(cohorts) > 0) {
    for (index in 1:length(cohorts)) {
      genos <- get.geno(population, cohorts = cohorts[index])[n.snp, ]
      state[, col] <- c(sum(genos == 0), sum(genos == 1), sum(genos == 2))
      col <- col + 1
    }
  }

  # x-axis values (generation of each group), in the same order the columns
  # of `state` were filled above.
  datatime <- c(gen, database[, 1], as.numeric(population$info$cohorts[cohorts, 2]))

  # Convert counts to within-group frequencies and plot one curve per genotype.
  state.prob <- t(t(state) / colSums(state))
  maxp <- max(state.prob)
  graphics::plot(datatime, state.prob[1, ], xlim = c(min(datatime), max(datatime)),
                 ylim = c(0, maxp), type = "l", xlab = "generation",
                 ylab = "frequency", lwd = 3, main = "")
  graphics::lines(datatime, state.prob[2, ], lty = 2, lwd = 3)
  graphics::lines(datatime, state.prob[3, ], lty = 3, lwd = 3)
  graphics::points(datatime, state.prob[1, ], pch = 0)
  graphics::points(datatime, state.prob[2, ], pch = 1)
  graphics::points(datatime, state.prob[3, ], pch = 2)
  graphics::legend("topleft", legend = c("AA", "AB", "BB"), lty = c(1, 2, 3),
                   lwd = c(3, 3, 3), pch = c(0, 1, 2))

  # Raw genotype counts (3 x groups matrix).
  return(state)
}
57834af86c4245269d6d632d1f8dbc08b5174230
b948824fe5eb9253a9c44b99067d8df954f2e8d2
/AllFunctions.R
838fded7548c8921e32b5e3a206dcd01b7556b3b
[]
no_license
willthomson1/GDGT_Models
63b875db1018951fd98ae65782e95a59d411a634
5d81c2027eddcb25a915a2d2f45b555ecb061dc2
refs/heads/master
2020-04-26T04:59:47.255821
2019-03-08T13:54:41
2019-03-08T13:54:41
173,319,681
0
0
null
null
null
null
UTF-8
R
false
false
12,109
r
AllFunctions.R
### This file contains functions to
###  - fit the GP regression model             (fitGPR)
###  - compute nearest neighbour distances     (Dnear)
###  - make predictions from a GPR model       (predictGPR)
###  - fit the forward model                   (fitFWD)
###  - make predictions from the forward model (predictFWD)
###  - extract full posterior predictive distributions (posteriorFWD)
###
### You will need to install Python and also the GPy library for Python
### (run '!pip install GPy' in Python's command line); both are free to
### install. This code is based on Python 3.6.

### Point to your Python distribution. YOUR_PYTHON_PATH is a placeholder the
### user must define before sourcing this file.
Sys.setenv(RETICULATE_PYTHON = YOUR_PYTHON_PATH)
###
require(reticulate)
GPy <- import("GPy")
np <- import("numpy")

### Weights and nodes for 500-point Gauss-Hermite quadrature.
### Columns used below: `x` (nodes) and `w` (weights).
weightsAndNodes <- read.csv("ghWeightsNodes.csv")[, 2:3]

### Simple wrapper around the GPy functions for fitting GP regression models.
### Takes the modern data and returns a GPy model object.
### load_existing = TRUE loads an already-optimised model from "mb.npy"
### (faster); FALSE fits and optimises a fresh RBF-ARD GP regression.
fitGPR <- function(modern.data, modern.temperatures, load_existing = TRUE) {
  if (load_existing) {
    mb <- np$load("mb.npy")
  } else {
    KK <- GPy$kern$RBF(input_dim = ncol(modern.data), ARD = TRUE)
    mb <- GPy$models$GPRegression(as.matrix(modern.data),
                                  as.matrix(modern.temperatures), KK)
    mb$optimize()
  }
  return(mb)
}

### Nearest-neighbour distances from each row of newX to the model's training
### data, weighted by the lengthscales of the fitted kernel (distances are
### recovered from the kernel matrix as -log(K / variance)).
Dnear <- function(newX, model) {
  if (ncol(newX) != ncol(model$X)) {
    stop("newX has the wrong number of columns")
  }
  K <- model$kern$K(as.matrix(model$X), as.matrix(newX))
  dists <- -log(K / as.numeric(model$kern$variance))
  return(apply(dists, 2, min))
}

### Predict mean temperatures and standard deviations for the points in newX
### from a model object obtained via fitGPR().
predictGPR <- function(newX, model) {
  if (ncol(newX) != ncol(model$X)) {
    stop("newX has the wrong number of columns")
  }
  # BUG FIX: was `mb$predict(newX)`, which silently used a global `mb`
  # instead of the `model` argument passed in.
  pred <- model$predict(newX)
  list(means = pred[[1]], sds = sqrt(pred[[2]]))
}

### Fit the forward (multi-output GP) model. Takes the modern data and returns
### a model object for use with the other functions.
###  - load_existing = 1 loads an MOGP model based on GDGTs 0-3 ("mf4.npy")
###  - load_existing = 2 loads an MOGP model based on GDGTs 0-5 ("mf6.npy")
###  - load_existing = NULL fits the model using GPy (can take some time)
fitFWD <- function(modern.data, modern.temp, load_existing = c(1, 2, NULL)) {
  # Use only the first element so the vector default behaves sanely: the
  # original `if (load_existing == 1)` errored on the default c(1, 2) in
  # R >= 4.2 and on the documented NULL option in every R version.
  load_existing <- load_existing[1]
  if (!is.null(load_existing) && load_existing == 1) {
    message('Loading MOGP model object based on GDGTs 0-3')
    mf <- np$load('mf4.npy')
  } else if (!is.null(load_existing) && load_existing == 2) {
    message('Loading MOGP model object based on all 6 GDGTs')
    mf <- np$load('mf6.npy')
  } else {
    message("Loading required package robCompositions")
    require(robCompositions)
    message("Imputing zeros")
    # Compositional zeros cannot be log-ratio transformed; impute them first.
    modern.data[modern.data == 0] <- NA
    modern.data <- impCoda(modern.data)$xImp
    message("ilr transforming the data")
    modern.data.ilr <- pivotCoord(modern.data)
    message("Setting up Multi-Output GP model")
    KK <- GPy$kern$Matern32(1)
    icm <- GPy$util$multioutput$ICM(input_dim = 1,
                                    num_outputs = ncol(modern.data.ilr),
                                    kernel = KK)
    temp.list <- lapply(seq_len(ncol(modern.data.ilr)),
                        function(j) as.matrix(modern.temp))
    ilr.list <- lapply(seq_len(ncol(modern.data.ilr)),
                       function(j) as.matrix(modern.data.ilr[, j]))
    mf <- GPy$models$GPCoregionalizedRegression(temp.list, ilr.list, kernel = icm)
    ### horrible hacky way to fix kernel variance parameter
    mf$constraints$add('fixed', c(0L, 1L, 2L))
    mf$constraints$remove('fixed', c(1L, 2L))
    message("Optimising hyperparameters; this might take some time (tens of minutes) depending on your machine")
    mf$optimize()
  }
  return(mf)
}

### Make predictions from the forward model.
### inputs: newX  :- a matrix or data.frame of GDGT values
###         model :- a model object obtained via fitFWD()
###         prior :- a 2-vector containing the mean and sd of the Gaussian
###                  prior on temperature. Defaults to (15, 10).
###         PofXgivenT :- a list containing means, invcovs, dets of p(X|T_j)
###                  for each Gauss-Hermite node T_j (see getPofXgivenT())
###         returnFullPosterior :- one of:
###                  - FALSE (default): only return means and variances
###                  - a vector of indices for which the full posterior should
###                    be computed
###                  - TRUE: return the full posterior for every new point
###         transformed :- TRUE if newX is already ilr-transformed
predictFWD <- function(newX, model, prior = c(15, 10), PofXgivenT = NULL,
                       returnFullPosterior = FALSE, transformed = FALSE) {
  dd <- max(model$Y_metadata[[1]]) + 1
  npred <- nrow(newX)
  if (ncol(newX) != (dd + 1)) {
    stop("newX has the wrong number of columns")
  }
  # Expand TRUE to "all rows". isTRUE() avoids the original `if (vector)`,
  # which errored (R >= 4.2) or silently clobbered a user-supplied index
  # vector with 1:npred (older R).
  if (isTRUE(returnFullPosterior)) {
    returnFullPosterior <- seq_len(npred)
  }
  whichzerorows <- NULL
  if (!transformed) {
    if (npred > 2 * dd) {
      message("Loading required package robCompositions")
      require(robCompositions)
      message("Imputing zeros")
      newX[newX == 0] <- NA
      newX <- impCoda(newX)$xImp
    } else {
      message("Not enough data points to impute zeros; removing rows containing zeros")
      whichzerorows <- which(apply(newX, 1, function(x) any(x == 0)))
    }
    message("ilr transforming the data")
    newX <- as.matrix(pivotCoord(newX))
  }

  ## 500-node Gauss-Hermite quadrature (straightforward to use the fastGHquad
  ## package to change this if desired). Nodes are rescaled to the prior.
  n_nodes <- 500
  xx <- sqrt(2) * prior[2] * weightsAndNodes$x + prior[1]
  ww <- weightsAndNodes$w
  # Prior density at the nodes; needed whenever full posteriors are requested.
  priorAtNodes <- dnorm(xx, prior[1], prior[2])

  if (is.null(PofXgivenT)) {
    warning("For speed on repeated runs, it is recommended to provide PofXgivenT, which can be obtained via getPofXgivenT()")
    inds <- as.integer(0:(dd - 1))
    noise_dict <- dict(list(output_index = matrix(inds, dd, 1)))
    message("Computing p(X|T) at each quadrature node...")
    pb <- txtProgressBar(0, n_nodes)
    means <- matrix(NA, n_nodes, dd)
    invcovs <- array(NA, c(n_nodes, dd, dd))
    dets <- rep(NA, n_nodes)
    for (j in seq_len(n_nodes)) {
      # One prediction row per output; was hard-coded rep(xx[j], 5), which is
      # only correct when the model has exactly 5 outputs.
      X <- rep(xx[j], dd)
      X <- cbind(X, inds)
      tmpp <- model$predict(X, Y_metadata = noise_dict, full_cov = TRUE)
      means[j, ] <- tmpp[[1]]
      # Invert the predictive covariance via its Cholesky factor; the
      # determinant falls out of the same factorisation.
      cholInvCov <- chol(tmpp[[2]])
      invcovs[j, , ] <- chol2inv(cholInvCov)
      dets[j] <- prod(diag(cholInvCov)^2)
      setTxtProgressBar(pb, j)
    }
    message("DONE")
  } else {
    means <- PofXgivenT$means
    invcovs <- PofXgivenT$invcovs
    dets <- PofXgivenT$dets
  }

  posterior_means <- rep(NA, npred)
  posterior_vars <- rep(NA, npred)
  full_posteriors <- list()
  Zout <- rep(NA, npred)
  message("Computing p(T|X) for new data...")
  pb <- txtProgressBar(0, npred)
  for (i in which(!((1:npred) %in% whichzerorows))) {
    ff <- rep(NA, n_nodes)
    xi <- newX[i, ]
    for (j in seq_len(n_nodes)) {
      # Multivariate Gaussian density of the i-th composition at the j-th
      # temperature node, p(x_i | T_j).
      qf <- t(xi - means[j, ]) %*% invcovs[j, , ] %*% (xi - means[j, ])
      ff[j] <- exp(-0.5 * qf) / sqrt((2 * pi)^dd * dets[j])
    }
    ## Normalising factor, int p(t) dt, by Gauss-Hermite quadrature.
    Z <- t(ww) %*% ff
    mu <- t(ww) %*% (ff * xx) / Z  ## Gauss-Hermite quadrature again
    posterior_means[i] <- mu
    posterior_vars[i] <- t(ww) %*% (ff * (xx - rep(mu, n_nodes))^2) / Z
    Zout[i] <- Z
    if (i %in% returnFullPosterior) {
      full_posteriors[[i]] <- data.frame(
        xx = xx,
        posterior = (ff * priorAtNodes) / rep(Z, n_nodes))
    }
    setTxtProgressBar(pb, i)
  }
  message("DONE")
  if (length(whichzerorows) > 0) {
    message(paste("Predictions not made for points", whichzerorows,
                  "because they contained zero entries"))
  }
  return(list(mean = posterior_means, variance = posterior_vars,
              full_posteriors = full_posteriors, Z = Zout,
              transformedData = newX))
}

#### Obtain the densities p(X|T) at the quadrature nodes, for reuse across
#### repeated predictFWD() calls.
#### prior was previously read from the calling environment (a free-variable
#### bug); it is now an explicit argument with the same default as predictFWD.
getPofXgivenT <- function(model, prior = c(15, 10)) {
  dd <- max(model$Y_metadata[[1]]) + 1
  ## 500-node Gauss-Hermite quadrature (straightforward to use the fastGHquad
  ## package to change this if desired).
  n_nodes <- 500
  xx <- sqrt(2) * prior[2] * weightsAndNodes$x + prior[1]
  inds <- as.integer(0:(dd - 1))
  noise_dict <- dict(list(output_index = matrix(inds, dd, 1)))
  message("Computing p(X|T) at each quadrature node...")
  pb <- txtProgressBar(0, n_nodes)
  means <- matrix(NA, n_nodes, dd)
  invcovs <- array(NA, c(n_nodes, dd, dd))
  dets <- rep(NA, n_nodes)
  for (j in seq_len(n_nodes)) {
    # Was rep(xx[j], 5): only correct for a 5-output model.
    X <- rep(xx[j], dd)
    X <- cbind(X, inds)
    tmpp <- model$predict(X, Y_metadata = noise_dict, full_cov = TRUE)
    means[j, ] <- tmpp[[1]]
    cholInvCov <- chol(tmpp[[2]])
    invcovs[j, , ] <- chol2inv(cholInvCov)
    dets[j] <- prod(diag(cholInvCov)^2)
    setTxtProgressBar(pb, j)
  }
  return(list(means = means, invcovs = invcovs, dets = dets))
}

#### Compute the (unnormalised) posterior predictive density at the specified
#### points for the data points in newX.
#### Z is a vector of normalising constants (obtainable via predictFWD()).
#### transformed indicates whether newX already contains ilr-transformed data.
#### Returns an npred x npoints matrix: row i is the density for data point i
#### evaluated at `points`.
posteriorFWD <- function(newX, model, points = seq(-10, 60, length.out = 200),
                         prior = c(15, 10), Z = NULL, transformed = FALSE) {
  dd <- max(model$Y_metadata[[1]]) + 1
  npoints <- length(points)
  npred <- nrow(newX)
  priorAtPoints <- dnorm(points, prior[1], prior[2])
  inds <- as.integer(0:(dd - 1))
  noise_dict <- dict(list(output_index = matrix(inds, dd, 1)))
  whichzerorows <- NULL
  if (!transformed) {
    if (npred > 2 * dd) {
      message("Loading required package robCompositions")
      require(robCompositions)
      message("Imputing zeros")
      newX[newX == 0] <- NA
      newX <- impCoda(newX)$xImp
    } else {
      whichzerorows <- which(apply(newX, 1, function(x) any(x == 0)))
    }
    message("ilr transforming the data")
    newX <- as.matrix(pivotCoord(newX))
  }

  # Gaussian parameters of p(X|T) at every evaluation point.
  means <- matrix(NA, npoints, dd)
  invcovs <- array(NA, c(npoints, dd, dd))
  dets <- rep(NA, npoints)
  for (j in seq_len(npoints)) {
    # Was rep(points[j], 5): only correct for a 5-output model.
    X <- rep(points[j], dd)
    X <- cbind(X, inds)
    tmpp <- model$predict(X, Y_metadata = noise_dict, full_cov = TRUE)
    means[j, ] <- tmpp[[1]]
    cholInvCov <- chol(tmpp[[2]])
    invcovs[j, , ] <- chol2inv(cholInvCov)
    dets[j] <- prod(diag(cholInvCov)^2)
  }

  # BUG FIX: was matrix(NA, npoints, npred) -- dimensions swapped, so the
  # row assignments below failed whenever npoints != npred.
  PPD <- matrix(NA, npred, npoints)
  for (i in which(!((1:npred) %in% whichzerorows))) {
    ff <- rep(NA, npoints)
    xi <- newX[i, ]
    for (j in seq_len(npoints)) {
      # Multivariate Gaussian density of the i-th composition at the j-th
      # temperature point, p(x_i | T_j).
      qf <- t(xi - means[j, ]) %*% invcovs[j, , ] %*% (xi - means[j, ])
      ff[j] <- exp(-0.5 * qf) / sqrt((2 * pi)^dd * dets[j])
    }
    # Scalar normalisation; `if` instead of ifelse() on a scalar condition.
    PPD[i, ] <- ff * priorAtPoints / (if (!is.null(Z)) Z[i] else 1)
  }
  if (length(whichzerorows) > 0) {
    message(paste("Predictions not made for points", whichzerorows,
                  "because they contained zero entries"))
  }
  return(PPD)
}
cd674b310f80bea12f106842cb8b0af3908bcc85
40e327c3782d6dd8dc04a566da30e831ec58a3b6
/Project1-ExploreVis/kylegallatin_shiny/ui.R
ce883a84b50f61b94735d90098bcb0bdded607a2
[]
no_license
liyuhaojohn/bootcamp008_project
86f20f384f0d2cabc73eb1e8a00d74c0e03d9a98
014e183b37d2fca0b0bc963b8634b089432f03b2
refs/heads/master
2020-03-17T16:48:14.924383
2017-04-08T17:17:35
2017-04-08T17:17:35
null
0
0
null
null
null
null
UTF-8
R
false
false
1,498
r
ui.R
library(shiny) library(shinysky) fluidPage( titlePanel("Onco/Tumor Supressor Gene Database"), sidebarPanel( helpText('This app shows you the number of mutations, corresponding cancer types and mutations types by gene. There are over 27,000 genes in this dataset. Click the "Gene Map" tab for a visual representation of the gene and its mutations. The location of each mutation refers to its location on cDNA.'), #textInput.typeahead( #id="thti" #,placeholder="type a gene" #,local= data.frame(unique(mutations$GENE_NAME)) #,valueKey = unique(mutations$GENE_NAME) #,tokens=c(1:length(unique(mutations$GENE_NAME))) # ,template = HTML("<p class='repo-language'>{{info}}</p> <p class='repo-name'>{{name}}</p> <p class='repo-description'>You need to learn more CSS to customize this further</p>") #), textInput(inputId = "gene",label = "Enter a Gene Name"), selectizeInput(inputId = "NT", label = "Select Mutation Type for the Gene Map", choices = c("NT_Change", "Deletion", "Insertion"))), mainPanel( tabsetPanel(type = "tabs", tabPanel("Cancer Mutations", plotOutput("plot")), tabPanel("Gene Map", plotOutput("new"), textOutput("text1")), tabPanel("DNA Repair Mechanisms", img(src="DNA_Repair.png")), tabPanel("About the Author", img(src="handsome_man.jpg", height = 500, width = 500), textOutput("Author")) )))
021966c22e3431489dfe530daea551ff821ed918
7917fc0a7108a994bf39359385fb5728d189c182
/cran/paws.database/man/redshift_describe_cluster_parameter_groups.Rd
9f8ae11ab9d9d36f22293147130c61e3fd42be2e
[ "Apache-2.0" ]
permissive
TWarczak/paws
b59300a5c41e374542a80aba223f84e1e2538bec
e70532e3e245286452e97e3286b5decce5c4eb90
refs/heads/main
2023-07-06T21:51:31.572720
2021-08-06T02:08:53
2021-08-06T02:08:53
396,131,582
1
0
NOASSERTION
2021-08-14T21:11:04
2021-08-14T21:11:04
null
UTF-8
R
false
true
4,190
rd
redshift_describe_cluster_parameter_groups.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/redshift_operations.R \name{redshift_describe_cluster_parameter_groups} \alias{redshift_describe_cluster_parameter_groups} \title{Returns a list of Amazon Redshift parameter groups, including parameter groups you created and the default parameter group} \usage{ redshift_describe_cluster_parameter_groups(ParameterGroupName, MaxRecords, Marker, TagKeys, TagValues) } \arguments{ \item{ParameterGroupName}{The name of a specific parameter group for which to return details. By default, details about all parameter groups and the default parameter group are returned.} \item{MaxRecords}{The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified \code{MaxRecords} value, a value is returned in a \code{marker} field of the response. You can retrieve the next set of records by retrying the command with the returned marker value. Default: \code{100} Constraints: minimum 20, maximum 100.} \item{Marker}{An optional parameter that specifies the starting point to return a set of response records. When the results of a \code{\link[=redshift_describe_cluster_parameter_groups]{describe_cluster_parameter_groups}} request exceed the value specified in \code{MaxRecords}, AWS returns a value in the \code{Marker} field of the response. You can retrieve the next set of response records by providing the returned marker value in the \code{Marker} parameter and retrying the request.} \item{TagKeys}{A tag key or keys for which you want to return all matching cluster parameter groups that are associated with the specified key or keys. For example, suppose that you have parameter groups that are tagged with keys called \code{owner} and \code{environment}. 
If you specify both of these tag keys in the request, Amazon Redshift returns a response with the parameter groups that have either or both of these tag keys associated with them.} \item{TagValues}{A tag value or values for which you want to return all matching cluster parameter groups that are associated with the specified tag value or values. For example, suppose that you have parameter groups that are tagged with values called \code{admin} and \code{test}. If you specify both of these tag values in the request, Amazon Redshift returns a response with the parameter groups that have either or both of these tag values associated with them.} } \value{ A list with the following syntax:\preformatted{list( Marker = "string", ParameterGroups = list( list( ParameterGroupName = "string", ParameterGroupFamily = "string", Description = "string", Tags = list( list( Key = "string", Value = "string" ) ) ) ) ) } } \description{ Returns a list of Amazon Redshift parameter groups, including parameter groups you created and the default parameter group. For each parameter group, the response includes the parameter group name, description, and parameter group family name. You can optionally specify a name to retrieve the description of a specific parameter group. For more information about parameters and parameter groups, go to \href{https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html}{Amazon Redshift Parameter Groups} in the \emph{Amazon Redshift Cluster Management Guide}. If you specify both tag keys and tag values in the same request, Amazon Redshift returns all parameter groups that match any combination of the specified keys and values. For example, if you have \code{owner} and \code{environment} for tag keys, and \code{admin} and \code{test} for tag values, all parameter groups that have any combination of those values are returned. 
If both tag keys and values are omitted from the request, parameter groups are returned regardless of whether they have tag keys or values associated with them. } \section{Request syntax}{ \preformatted{svc$describe_cluster_parameter_groups( ParameterGroupName = "string", MaxRecords = 123, Marker = "string", TagKeys = list( "string" ), TagValues = list( "string" ) ) } } \keyword{internal}
d96f5a06728c2cf2d0d6adbda33341236d4cbb93
a7a6d898b7aadeb556d6f04c943bd45b4fd2a205
/run_analysis.R
e3965e7f7705d1da75df1a2baf97efe53aefa24e
[]
no_license
SergeyPokalyaev/GettingAndCleaningData
7dd8a6787836086146a2d418357b3058f3ca9419
b74fd0adb5a79007e3ba56c7fdc8d2dc50b3c1f2
refs/heads/master
2021-01-13T02:30:31.519040
2015-06-07T21:53:32
2015-06-07T21:53:32
37,033,424
0
0
null
null
null
null
UTF-8
R
false
false
2,676
r
run_analysis.R
## run_analysis.R
## Downloads the UCI "Human Activity Recognition Using Smartphones" dataset,
## merges the training and test sets, keeps only mean()/std() measurements,
## applies descriptive variable and activity names, and writes a tidy data set
## of per-subject / per-activity averages to "tidyResultAverage.txt".
run_analysis <- function() {

  # Download the raw data archive.
  destfileName <- "downloadedData.zip"
  status <- download.file(
    url = "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip",
    destfile = destfileName)
  if (status != 0) {
    stop("Download of the UCI HAR dataset failed (status ", status, ")")
  }

  # BUG FIX: unzip() was commented out while the zip was still deleted below,
  # so the "./UCI HAR Dataset/..." reads later in this function could never
  # succeed on a fresh run. Extract first, then remove the archive.
  unzip(destfileName, exdir = ".", overwrite = TRUE)
  file.remove(destfileName)

  # Descriptive activity names, indexed by the activity codes 1..6 in y_*.txt.
  activityNames <- c("WALKING", "WALKING_UPSTAIRS", "WALKING_DOWNSTAIRS",
                     "SITTING", "STANDING", "LAYING")

  # Feature (column) names of the 561-feature measurement files.
  features <- read.table("./UCI HAR Dataset/features.txt")[, 2]

  # 2. Extract only the measurements on the mean and standard deviation
  # for each measurement (columns whose name contains "mean(" or "std(").
  featuresColumns <- grep(".*(mean\\(|std\\().*", features)
  featuresNames <- features[featuresColumns]

  # 4. Appropriately label the data set with descriptive variable names.
  featuresNames <- gsub("^t", "Time", featuresNames)
  featuresNames <- gsub("^f", "Frequency", featuresNames)
  featuresNames <- gsub("-mean\\(\\)", "Mean", featuresNames)
  featuresNames <- gsub("-std\\(\\)", "StdDev", featuresNames)
  featuresNames <- gsub("-", "", featuresNames)

  # Measurements (mean/std columns only), subject ids, and activity labels
  # for both the training and the test partitions.
  XTrain <- read.table("./UCI HAR Dataset/train/X_train.txt")[, featuresColumns]
  XTest <- read.table("./UCI HAR Dataset/test/X_test.txt")[, featuresColumns]
  XTrainSubject <- read.table("./UCI HAR Dataset/train/subject_train.txt")[, 1]
  XTestSubject <- read.table("./UCI HAR Dataset/test/subject_test.txt")[, 1]
  YTrain <- activityNames[read.table("./UCI HAR Dataset/train/y_train.txt")[, 1]]
  YTest <- activityNames[read.table("./UCI HAR Dataset/test/y_test.txt")[, 1]]

  # Clean up the extracted directory. recursive = TRUE is required: without
  # it (as in the original) unlink() on a directory is a silent no-op.
  unlink("./UCI HAR Dataset", recursive = TRUE)

  # 1. Merge the training and the test sets to create one data set.
  XMerged <- rbind(XTrain, XTest)
  XMergedSubject <- c(XTrainSubject, XTestSubject)
  YMerged <- c(YTrain, YTest)

  # 3. Use descriptive activity names to name the activities in the data set.
  colnames(XMerged) <- featuresNames
  tidyResult <- cbind(subject = XMergedSubject, activity = YMerged, XMerged)

  # 5. From the data set in step 4, create a second, independent tidy data
  # set with the average of each variable for each activity and each subject.
  library(plyr)
  tidyResultAverage <- ddply(tidyResult, .(subject, activity),
                             function(data) { colMeans(data[, -c(1, 2)]) })
  names(tidyResultAverage)[-c(1, 2)] <-
    paste0("Mean", names(tidyResultAverage)[-c(1, 2)])
  write.table(tidyResultAverage, file = "tidyResultAverage.txt",
              row.names = FALSE)
}
31c68ca88a1082cb3a925a3e2f65208adcd61bc6
897f0581bfc3403318f56072f7af1163b8189733
/rosetta-motifs.R
b2834476c16a87c651c91954bd31bd496814bfa2
[]
no_license
jashworth-UTS/ja-scripts
2985891e628bae59b1f4b8696739cbf63b5a2dc2
ac837ac0fee63c27b3b8ac4d9a5022810fb31976
refs/heads/master
2021-05-28T18:39:20.272694
2015-02-04T02:35:17
2015-02-04T02:35:17
null
0
0
null
null
null
null
UTF-8
R
false
false
6,854
r
rosetta-motifs.R
### rosetta-motifs.R
### Utilities for reading DNA motifs (position probability matrices, PPMs)
### from FASTA files and for comparing motifs with several distance or
### similarity measures (Euclidean, Kullback-Leibler, Jensen-Shannon, ALLR,
### and BLiC variants).

# Read a FASTA file of aligned DNA sequences and return a position probability
# matrix (bases A/C/G/T/other in rows, alignment positions in columns).
# filter: optional pattern removed from every sequence before counting.
probMatrixFromFasta <- function(fastafile, filter = '') {
  require(Biostrings)
  seqs <- readDNAStringSet(fastafile)
  if (filter != '') seqs <- gsub(filter, '', seqs)
  ppm <- consensusMatrix(seqs, as.prob = TRUE, baseOnly = TRUE)
  ppm <- as.matrix(ppm)
  return(ppm)
}

# Build (and optionally plot) a seqLogo-ready PPM from a FASTA file.
seqlogoFromFasta <- function(fastafile, plot = FALSE) {
  require(seqLogo)
  ppm <- probMatrixFromFasta(fastafile)
  # limit to first four rows (ACGT) (drops fifth row, 'other')
  ppm <- ppm[seq(1, 4), ]
  # hardcoded arbitrary subsequence for 2e1c DNA (otherwise rosetta
  # 'design_mutations' DNA .fa files read through both strands)
  ppm <- ppm[, seq(2, 16)]
  # normalize columns (if necessary)
  ppm <- t(t(ppm) * 1 / colSums(ppm))
  if (plot) { seqLogo(ppm) }
  # if (plot) { seqLogo(ppm, ic.scale = FALSE) }
  return(ppm)
}

# Euclidean distance between two probability vectors.
dis.ED <- function(P, Q) {
  sqrt(sum((P - Q)^2))
}

# Kullback-Leibler divergence (column vs. column).
# NOTE: returns NaN/Inf if P or Q contain zeros; callers using pseudocounts
# avoid this.
dis.KL <- function(P, Q) {
  sum(P * log(P / Q))
}

# Jensen-Shannon divergence (column vs. column): symmetrised, smoothed KL.
dis.JS <- function(P, Q) {
  M <- 0.5 * (P + Q)
  0.5 * (dis.KL(P, M) + dis.KL(Q, M))
}

# Sum of per-column Euclidean distances between two aligned PPMs.
ppm.distance.ED <- function(ppm1, ppm2) {
  motif_distance <- 0
  for (col in seq_len(ncol(ppm1))) {
    motif_distance <- motif_distance + dis.ED(ppm1[, col], ppm2[, col])
  }
  return(motif_distance)
}

# Symmetric Kullback-Leibler distance between two aligned PPMs (bases in
# rows, positions in columns; correspondence is checked upstream).
ppm.distance.KL.symm <- function(ppm1, ppm2) {
  motif_distance <- 0
  for (col in seq_len(ncol(ppm1))) {
    motif_distance <- motif_distance +
      0.5 * (dis.KL(ppm1[, col], ppm2[, col]) + dis.KL(ppm2[, col], ppm1[, col]))
  }
  return(motif_distance)
}

# Wang & Stormo average log likelihood ratio (ALLR) statistic between two
# aligned PPMs. Because the input is probabilities and not counts, total
# column counts are assumed constant between columns and motifs, and base
# probabilities are used in place of base counts. Zeros are floored at
# `nonzero` to keep the logs finite.
ppm.distance.ALLR <- function(ppm1, ppm2, bg = rep(0.25, 4), nonzero = 1e-2) {
  motif_dist <- 0
  # BUG FIX: the one-time "zeros encountered" warning could never fire
  # because the flag was initialised to 0; it now starts TRUE and is
  # cleared after the first warning.
  zeroflag <- TRUE
  for (col in seq_len(ncol(ppm1))) {
    ALLR <- 0
    for (base in seq_len(nrow(ppm1))) {
      p1 <- ppm1[base, col]
      p2 <- ppm2[base, col]
      if ((p1 == 0 || p2 == 0) && zeroflag) {
        cat('zeros encountered. Substituting', nonzero,
            'This can be avoided by using pseudocounts\n')
        zeroflag <- FALSE
      }
      p1 <- max(p1, nonzero)
      p2 <- max(p2, nonzero)
      ballr <- (p1 * log(p1 / bg[base]) + p2 * log(p2 / bg[base])) / (p1 + p2)
      ALLR <- ALLR + ballr
    }
    motif_dist <- motif_dist + ALLR
  }
  return(motif_dist)
}

# BLiC-like similarity of Yanover 2011: per column, JS divergence of the
# summed motif columns from background minus the JS divergence between the
# two motif columns. Higher = more similar.
ppm.similarity.BLiC.Yanover <- function(ppm1, ppm2, bg, ...) {
  motif_distance <- 0
  for (col in seq_len(ncol(ppm1))) {
    col_dis <- dis.JS(ppm1[, col] + ppm2[, col], bg) -
      dis.JS(ppm1[, col], ppm2[, col])
    motif_distance <- motif_distance + col_dis
  }
  return(motif_distance)
}

# Normalising constant of a Dirichlet distribution with parameters `alphas`.
# (Toying with Dirichlet distributions after reading Habib 2008.)
dirichlet.norm <- function(alphas) {
  gamma(sum(alphas)) / prod(gamma(alphas))
}

# Dirichlet density of probability vector x under parameters `alphas`.
# Returns Inf (after a message) on a length mismatch, preserving the
# original best-effort behaviour.
dirichlet <- function(x, alphas) {
  norm <- dirichlet.norm(alphas)
  cat('norm', norm, '\n')
  if (length(x) != length(alphas)) {
    cat('ERROR a,alphas unequal length\n')
    return(Inf)
  }
  val <- 1
  for (i in seq_along(x)) {
    val <- val * (x[i]^(alphas[i] - 1))
  }
  return(val * norm)
}

# Bayesian Likelihood 2-Component (BLiC) score (Habib et al. 2008) between
# two aligned PPMs, with a Dirichlet prior intended to represent the
# "common source distribution".
# WARNING(review): this function was left INCOMPLETE by the original author
# ("WHAT IS P12??" below): P12 is never defined, so calling it will error.
# The fixes here only repair outright coding mistakes without changing the
# (unfinished) intent: `param==NULL` -> is.null(param), the `pos` typo for
# `npos`, and the per-base score expression now actually assigned to base_lr.
ppm.similarity.BLiC <- function(ppm1, ppm2, bg = rep(0.25, 4),
                                nonzero = 1e-2, param = NULL) {
  if (is.null(param)) {
    npos <- ncol(ppm1)
    param <- rep(1, npos)
  }
  # INCOMPLETE
  # WHAT IS P12??
  motif_distance <- 0
  zeroflag <- TRUE
  for (col in seq_len(ncol(ppm1))) {
    column_distance <- 0
    for (base in seq_len(nrow(ppm1))) {
      p1 <- ppm1[base, col]
      p2 <- ppm2[base, col]
      if ((p1 == 0 || p2 == 0) && zeroflag) {
        cat('zeros encountered. Substituting', nonzero,
            'This can be avoided by using pseudocounts\n')
        zeroflag <- FALSE
      }
      p1 <- max(p1, nonzero)
      p2 <- max(p2, nonzero)
      # "score is composed of two components: the first measures whether the
      # two motifs were generated from a common distribution, while the second
      # reflects the distance of that common distribution from the background"
      # BLiC = log( P(m1,m2|common-source)/P(m1,m2|independent-source) )
      #      + log( P(m1,m2|common-source)/P(m1,m2|background) )
      # P12 is a Dirichlet mixture prior -- still undefined here.
      base_lr <- 2 * (p1 + p2) * log(P12[base]) - p1 * log(p1) -
        p2 * log(p2) - (p1 + p2) * log(bg[base])
      column_distance <- column_distance + base_lr
    }
    motif_distance <- motif_distance + column_distance
  }
  return(motif_distance)
}

# Pairwise distance matrix between a named list of aligned PPMs, using the
# chosen distance function ('ED', 'ALLR', 'KL', or 'BLiC.inv'; defaults to
# symmetric KL).
ppm.dis.matrix <- function(matrixlist, disfunc = 'KL', bg = rep(0.25, 4), ...) {
  if (0 %in% bg) {
    cat('ERROR, no background probabilities may be zero\n')
    return(0)
  }
  names <- names(matrixlist)
  l <- length(matrixlist)
  dmat <- matrix(0, nrow = l, ncol = l, dimnames = list(names, names))
  for (i in seq_len(l)) {
    for (j in seq_len(l)) {
      ppm1 <- matrixlist[[i]]
      ppm2 <- matrixlist[[j]]
      if (ncol(ppm1) != ncol(ppm2)) { cat('ERROR: column mismatch\n'); next }
      if (nrow(ppm1) != nrow(ppm2)) { cat('ERROR: column mismatch\n'); next }
      if (disfunc == 'ED') {
        dmat[i, j] <- ppm.distance.ED(ppm1, ppm2, ...)
      } else if (disfunc == 'ALLR') {
        dmat[i, j] <- ppm.distance.ALLR(ppm1, ppm2, bg, ...)
      } else if (disfunc == 'KL') {
        dmat[i, j] <- ppm.distance.KL.symm(ppm1, ppm2, ...)
      } else if (disfunc == 'BLiC.inv') {
        # BLiC-like similarity metric from Yanover 2011,
        # sign-inverted for uniformity with the distance metrics.
        dmat[i, j] <- -1 * ppm.similarity.BLiC.Yanover(ppm1, ppm2, bg, ...)
      } else {
        dmat[i, j] <- ppm.distance.KL.symm(ppm1, ppm2, ...)
      }
    }
  }
  return(dmat)
}

# Sanity check: distance matrix over five hand-built 4-position motifs
# ranging from all-A to all-T.
ppm.dis.test <- function(disfunc = 'KL', bg = rep(0.25, 4), ...) {
  A <- c(1, 0, 0, 0)
  C <- c(0, 1, 0, 0)
  G <- c(0, 0, 1, 0)
  T <- c(0, 0, 0, 1)
  ppms <- list(
    'AAAA' = matrix(c(A, A, A, A), 4),
    'AAAT' = matrix(c(A, A, A, T), 4),
    'AATT' = matrix(c(A, A, T, T), 4),
    'ATTT' = matrix(c(A, T, T, T), 4),
    'TTTT' = matrix(c(T, T, T, T), 4)
  )
  return(ppm.dis.matrix(ppms, disfunc, bg, ...))
}

# Replace self-distances (diagonal entries matched by name) with NA.
# Matrices without dimnames, or with mismatched row/column names, are
# returned unchanged.
self.distance.to.NA <- function(mat) {
  rns <- rownames(mat)
  cns <- colnames(mat)
  # BUG FIX: was `if (rns != cns)`, a vectorized comparison inside if()
  # (an error for >1 name in modern R, and for NULL dimnames in any R).
  if (is.null(rns) || is.null(cns) || !identical(rns, cns)) {
    return(mat)
  }
  for (rn in rns) {
    # BUG FIX: was mat[rns=rn, cns=rn] -- `=` named-argument typo for `==`;
    # character indexing by name achieves the intended diagonal NA.
    mat[rn, rn] <- NA
  }
  return(mat)
}
e02555b2f592f84830fb8ea0484d6913910ea5dc
cad3724a1a85fa998a42a12489079498cc62b688
/man/afmReadVeeco.Rd
343ab0e3c873d91ceda49d389bb9b4244a401b9c
[]
no_license
rbensua/afmToolkit
f045a63cc88fbf157947106ffd33789a4c31eb31
c6f62e9a5316dcd76002f6e609fe5f0fe99856d7
refs/heads/master
2021-06-26T08:07:26.411266
2020-11-29T09:07:45
2020-11-29T09:07:45
42,361,434
6
0
null
null
null
null
UTF-8
R
false
true
1,123
rd
afmReadVeeco.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/afmReadVeeco.R
\name{afmReadVeeco}
\alias{afmReadVeeco}
\title{Read Bruker Nanoscope Veeco ascii file}
\usage{
afmReadVeeco(filename, path = "")
}
\arguments{
\item{filename}{String with the name of the Veeco file.}

\item{path}{Path to the folder where the file is.}

\item{FColStr}{String pattern identifying the Force columns (defaults to "pN")}

\item{ZColStr}{String pattern identifying the Z columns (defaults to "Ramp")}

\item{tColStr}{String pattern identifying the Time columns (defaults to "Time")}

\item{TimeCol}{Logical value. If TRUE (default) there is a Time column.}
}
\value{
An afmdata structure list containing a field 'data' which is a data frame
with variables Force, Z, Time (if applicable) and Segment ("approach",
"retract" and/or "pause") and a field 'params' which is a list with the
fields 'curvename' and 'SpringConstant'.
}
\description{
Read an ascii Veeco file. Reads an ascii Veeco file with one or two segments.
}
\examples{
data <- afmReadVeeco("veeco_file.txt.gz",path = path.package("afmToolkit"))
str(data)
}
0d4ba65dbbff9fbe59c9787fd610a4a751385d24
9fc5e5d9388beea21812bd5adffdcfb67d1190ba
/ProgrammingAssignment2/cachematrix.R
aecc1a7752295d5902570cf0b983822ca0fc9e3f
[]
no_license
psridhar23/ProgrammingAssignment2
f2445e6dc9e0ed8957af6d7a844af44dce983612
2de37b46bda5919e54952c1f676cb91b4a64a2ba
refs/heads/master
2021-05-11T02:07:17.337853
2018-01-21T17:21:53
2018-01-21T17:21:53
118,350,555
0
0
null
2018-01-21T16:09:59
2018-01-21T16:09:58
null
UTF-8
R
false
false
772
r
cachematrix.R
## Put comments here that give an overall description of what your ## functions do ## The following functions maintains a matrix and its inverse in a cache. ## It is used to set the value in the cache of the matrix and its inverse ## and retrieve them when reuired. makeCacheMatrix <- function(x = matrix()) { m <- NULL set <- function(y) { x <<- y mcache <<- NULL } get <- function() x setsolve <- function(m) mcache <<- m getsolve <- function() mcache list(set = set, get = get, setsolve = setsolve, getsolve = getsolve) } ## Write a short comment describing this function cacheSolve <- function(x, ...) { mcache <- x$getsolve() if (!is.null(mcache)) { return(mcache) } data <- x$get() m <- solve(x) x$setsolve(m) m }
a55c66736d9de37c173f7828d31493ff2c896a58
47629a6296c81a812d1d92f3060aafd0fd1e020f
/words/randomize_words.R
1118a61109a60870100b03a2e346b67c09a81361
[]
no_license
stephmsherman/memory_consolidation_task
8fa8a8b3ec720b94bd8949ec56cdacd362a18d79
ce49d4f703464fe4c252599a53f4992d23123661
refs/heads/master
2021-01-22T03:45:09.139535
2017-03-16T19:00:40
2017-03-16T19:00:40
81,460,552
0
0
null
null
null
null
UTF-8
R
false
false
1,444
r
randomize_words.R
participant = "01" ## two digit number in quotes list = 1 # either 1 or 2 #define path where you download memory_consolidation_task. Make sure to end in the / path="/" one=read.csv(paste(path,"memory_consolidation_task/words/all_words_list",list,".csv",sep="")) head(one) #create a sequence from 1 to however many word pairs are in the list (54) number=seq(1,dim(one)[1]) #randomize numbers random1=sample(number) random2=sample(number) random3=sample(number) ###order the list using the randomized numbers #for the study phase (random_study) random_study=one[random1,] #for the first test phase (random_study_recall) random_study_recall=one[random2,] #for the actual recall tests random_recall=one[random3,] night_test= random_recall[1:((dim(one)[1])/2),] morning_test= random_recall[(((dim(one)[1])/2)+1):(dim(one)[1]),] #write out write.csv(random_study,paste(path,"memory_consolidation_task/study_list",list,"sub",participant,".csv",sep=""),row.names=FALSE,quote=FALSE) write.csv(random_study_recall,paste(path,"memory_consolidation_task/study_recall_list",list,"sub",participant,".csv",sep=""),row.names=FALSE,quote=FALSE) write.csv(night_test,paste(path,"memory_consolidation_task/night_recall_list",list,"sub",participant,".csv",sep=""),row.names=FALSE,quote=FALSE) write.csv(morning_test,paste(path,"memory_consolidation_task/morning_recall_list",list,"sub",participant,".csv",sep=""),row.names=FALSE,quote=FALSE)
69f576b60e586ac77ed7c5934a241b7c9107cfbf
d07c2602c5820b1868da52f557e655847e46a821
/man/toys.Rd
b198abfbd16cc484ad2ad4167817779281f257f8
[]
no_license
robingenuer/VSURF
f344cfdd8e19561ab005a6efd1bfeee40ba87d1d
af607ebf77acd40860e24fe88c295dff363369bf
refs/heads/master
2023-02-20T17:28:14.640950
2023-02-07T15:41:45
2023-02-07T15:41:45
32,991,821
29
15
null
2016-03-09T07:35:31
2015-03-27T14:49:41
R
UTF-8
R
false
true
1,450
rd
toys.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/toys.R \docType{data} \name{toys} \alias{toys} \title{A simulated dataset called toys data} \format{ The format is a list of 2 components: \describe{ \item{x}{a dataframe containing input variables: with 100 obs. of 200 variables} \item{y}{output variable: a factor with 2 levels "-1" and "1"} } } \source{ Weston, J., Elisseff, A., Schoelkopf, B., Tipping, M. (2003), \emph{Use of the zero norm with linear models and Kernel methods}, J. Machine Learn. Res. 3, 1439-1461 } \description{ \code{toys} is a simple simulated dataset of a binary classification problem, introduced by Weston et.al.. } \details{ It is an equiprobable two class problem, Y belongs to \{-1,1\}, with six true variables, the others being some noise. The simulation model is defined through the conditional distribution of the \eqn{X_i} for Y=y: \itemize{ \item with probability 0.7, X^j ~ N(yj,1) for j=1,2,3 and X^j ~ N(0,1) for j=4,5,6 ; \item with probability 0.3, X^j ~ N(0,1) for j=1,2,3 and X^j ~ N(y(j-3),1) for j=4,5,6 ; \item the other variables are noise, X^j ~ N(0,1) for j=7,\dots,p. } After simulation, the obtained variables are finally standardized. } \examples{ data(toys) toys.rf <- randomForest::randomForest(toys$x, toys$y) toys.rf \dontrun{ # VSURF applied for toys data: # (a few minutes to execute) data(toys) toys.vsurf <- VSURF(toys$x, toys$y) toys.vsurf } }
7ed6641f0e1dd07c8e529822ae9dcc68074c9489
9de3b2b8b28f89cfb13723b6be99f157fc13a313
/2_Functions/2_Analysis/Function_process_covariates.R
3ccf2eec3cd67f0c6ab0afec4445682444ddf79e
[]
no_license
WWF-ConsEvidence/MPAMystery
0e730dd4d0e39e6c44b36d5f9244a0bfa0ba319b
6201c07950206a4eb92531ff5ebb9a30c4ec2de9
refs/heads/master
2023-06-22T04:39:12.209784
2021-07-20T17:53:51
2021-07-20T19:34:34
84,862,221
8
1
null
2019-07-24T08:21:16
2017-03-13T18:43:30
R
UTF-8
R
false
false
9,348
r
Function_process_covariates.R
# # code: Preprocess matching covariates function # # author: Louise Glew, louise.glew@gmail.com # date: May 2019 # modified: -- # # # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ # # ---- SECTION 1: SOURCE DATA ---- # # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ # source('1_Data_wrangling/1_Social/2_Source_data/Source_social_data_flat_files.R', local = T) source('1_Data_wrangling/1_Social/3_Calculating_indicators/Calculate_household_indices.R') # Creating yearsPost to create a continuous variable of time after baseline HHData <- HHData %>% mutate(yearsPost = ifelse(MonitoringYear=="Baseline", 0, as.integer(substr(MonitoringYear, 1, 1)))) #---- Import look up tables ---- ethnic.lkp <- import("x_Flat_data_files/1_Social/Inputs/master_ethnic_lookup_2017_117.xlsx") education.lkp <- import("x_Flat_data_files/1_Social/Inputs/education_lkp.xlsx") # ---Create functions # Function to remove all white space in string variables trim <- function(x) gsub("^\\s+|\\s+$","",x) # Function to clean string variables (lower case, remove punctuation) str_clean <- function(strings) { require(dplyr) require(tm) strings %>% tolower() %>% removePunctuation(preserve_intra_word_dashes = TRUE) %>% stripWhitespace() %>% trim() } #----Define function # Age age.bin<-c(0,20,30,40,50,60,70,990) # duplicates created because of multiple HH heads in a household (in IndDemos) HH.age <- IndDemos %>% filter(RelationHHH==0) %>% dplyr::select(HouseholdID,IndividualAge) %>% left_join(dplyr::select(HHData,HouseholdID,yearsPost),by="HouseholdID") %>% mutate(IndividualAge=.bincode(IndividualAge-yearsPost,age.bin,TRUE,TRUE)) %>% dplyr::select(HouseholdID,IndividualAge)%>% distinct(HouseholdID,.keep_all = T) # Gender of Household Head (temp fix using distinct) gender.HHH <- IndDemos %>% filter(RelationHHH==0) %>% dplyr::select("HouseholdID","IndividualGender") %>% distinct(HouseholdID,.keep_all = T) # Residency resident.bin<-c(0,10,20,30,40,50,60,990) HH.residency <- HHData %>% 
dplyr::select(HouseholdID,YrResident,MonitoringYear,yearsPost) %>% mutate(YearsResident=ifelse(MonitoringYear=="Baseline",.bincode(YrResident,resident.bin,TRUE,TRUE), ifelse(YrResident>yearsPost,.bincode(YrResident-yearsPost,resident.bin,TRUE,TRUE), 1))) %>% dplyr::select(HouseholdID,YearsResident) %>% na.omit() # Dominant ethnicity # some duplicates in ethnicity table (NAs), filtering out these here ethnic.lkp1 <- ethnic.lkp %>% distinct(std.eth.str,eth.iso,.keep_all = T) %>% filter(eth.iso!="NA") # filter(!ethnic.id%in%c(2734,2813,5422,5425,5643)) # select out the specific five duplicates HH.eth <- HHData %>% dplyr::select(HouseholdID,PaternalEthnicity, MonitoringYear, SettlementID) %>% mutate(PaternalEthnicity=str_clean(PaternalEthnicity)) %>% left_join(ethnic.lkp1, by=c("PaternalEthnicity"="std.eth.str")) %>% mutate(SettlYear=paste0(MonitoringYear,"_",SettlementID)) # this code gives you the top ethnicity for each settlement at each sampling period max.eth <- HH.eth %>% group_by(SettlYear,eth.iso)%>% dplyr::summarise(freq.eth=n()) %>% top_n(1, freq.eth) HH.eth$dom.eth <- NA # assign dominant ethnicity in a loop will assign a 0 if parentalEthinicity==NA for (i in unique(HH.eth$SettlYear)){ max.eth.dom<- max.eth$eth.iso[max.eth$SettlYear==i] HH.eth$dom.eth[HH.eth$SettlYear==i] <- ifelse(HH.eth$eth.iso[HH.eth$SettlYear==i]%in%max.eth.dom,1,0) } HH.eth <- dplyr::select(HH.eth,HouseholdID,eth.iso,dom.eth) # Education level of household head # some duplicates in education table (NAs, perhaps white spaces), filtering out these here # dupl <- education.lkp$IndividualEducation[duplicated(education.lkp$IndividualEducation)] # education.lkp[education.lkp$IndividualEducation%in%dupl,] education.lkp1 <- education.lkp %>% distinct(IndividualEducation,ed.level,.keep_all = T) %>% filter(ed.level!="NA") # duplicates created because of multiple HH heads in a household (in IndDemos) HH.ed <- IndDemos %>% filter(RelationHHH==0) %>% dplyr::select(HouseholdID,IndividualEducation) 
%>% left_join(education.lkp1, by=c("IndividualEducation")) %>% dplyr::select(-IndividualEducation) %>% mutate(ed.level=ifelse(is.na(ed.level) | ed.level>=989, NA, as.numeric(ed.level))) %>% distinct(HouseholdID,.keep_all = T) # dupl <- unique(IndDemos$HouseholdID[duplicated(IndDemos$HouseholdID) & IndDemos$RelationHHH==0]) # test <- HH.ed %>% # filter(HouseholdID%in%dupl ) %>% # arrange(HouseholdID) # Children in Household IndDemos$Child <- ifelse(IndDemos$IndividualAge<19,1,0) # create new variable, child/adult N.Child <- IndDemos%>% group_by(HouseholdID) %>% summarise(n.child=sum(Child)) # Market distance #create mean by settlement-year market.mean.sett.yr <- HHData %>% group_by(SettlementID,MonitoringYear)%>% summarise (TimeMean.sett.yr=mean(TimeMarket, trim = 0.9,na.rm = T)) #create mean by settlement market.mean.sett <- HHData %>% group_by(SettlementID)%>% summarise (TimeMean.sett=mean(TimeMarket, trim = 0.9,na.rm = T)) market.distance <- HHData %>% dplyr::select(HouseholdID,TimeMarket,MonitoringYear,SettlementID) %>% left_join(market.mean.sett.yr,by=c("SettlementID" = "SettlementID", "MonitoringYear"="MonitoringYear")) %>% left_join(market.mean.sett,by=c("SettlementID" = "SettlementID")) %>% mutate(TimeMarket=ifelse(is.na(TimeMarket),TimeMean.sett.yr,TimeMarket), TimeMarket=ifelse(is.na(TimeMarket),TimeMean.sett,TimeMarket)) %>% dplyr::select(HouseholdID,TimeMarket) head(market.distance) # market.distance<-subset(HHData,select=c("HouseholdID","TimeMarket", "MonitoringYear","SettlementID")) # market.distance$TimeMarket[market.distance$TimeMarket >=990] <- 990 # market.mean <-market.distance %>% # group_by(SettlementID,MonitoringYear)%>% # summarise (mean=mean(TimeMarket[TimeMarket!=990])) # subsequent rows handle blind codes, and missing data # # market.mean$mean[is.na(market.mean$mean)]<- ave(market.mean$mean, # market.mean$SettlementID, # FUN=function(x)mean(x,na.rm = T))[is.na(market.mean$mean)] # # impute.market <- filter(market.distance,TimeMarket==990) # 
impute.market <-inner_join(subset(impute.market, select=c("HouseholdID","MonitoringYear", "SettlementID")),market.mean, by=c("MonitoringYear", "SettlementID")) # colnames(impute.market) <-c("HouseholdID","MonitoringYear", "SettlementID", "TimeMarket") # market.distance <-rbind((subset(market.distance, TimeMarket!=990)),impute.market) # # rm(market.mean, impute.market) # Compile match covariate match.covariate <- HHData %>% dplyr::select(HouseholdID, MPAID, SettlementID, MonitoringYear, yearsPost, Treatment) %>% left_join(market.distance[,c("HouseholdID","TimeMarket")],by="HouseholdID") %>% left_join(N.Child,by="HouseholdID") %>% left_join(HH.ed,by="HouseholdID") %>% left_join(HH.eth,by="HouseholdID") %>% left_join(HH.residency,by="HouseholdID") %>% left_join(gender.HHH,by="HouseholdID") %>% left_join(HH.age,by="HouseholdID") #rm(market.distance,N.Child,HH.ed, HH.eth,HH.residency,gender.HHH, HH.age, market.mean.sett,market.mean.sett.yr,max.eth) covariate.means <- match.covariate %>% group_by(SettlementID, MPAID, MonitoringYear) %>% summarise(mean.age=mean(IndividualAge,na.rm=T), mean.year.res=mean(YearsResident,na.rm=T), mean.ed.level=mean(ed.level,na.rm=T), mean.ind.gender=mean(IndividualGender,na.rm=T), mean.time.market=mean(TimeMarket,na.rm=T)) %>% mutate(mean.time.market=ifelse(MPAID==1 & MonitoringYear=="Baseline", mean.time.market[MPAID==1 & MonitoringYear=="2 Year Post"], ifelse(MPAID==2 & MonitoringYear=="Baseline", mean.time.market[MPAID==2 & MonitoringYear=="2 Year Post"], mean.time.market))) match.covariate <- left_join(match.covariate,covariate.means,by=c("SettlementID","MPAID","MonitoringYear")) %>% transmute(HouseholdID=HouseholdID, MPAID=MPAID, SettlementID=SettlementID, MonitoringYear=MonitoringYear, yearsPost=yearsPost, Treatment=Treatment, TimeMarket=ifelse(is.na(TimeMarket), mean.time.market, as.numeric(TimeMarket)), n.child=ifelse(is.na(n.child), 0,as.numeric(n.child)), ed.level=ifelse(is.na(ed.level) | ed.level>=989, mean.ed.level, 
as.numeric(ed.level)), dom.eth=dom.eth, YearsResident=ifelse(is.na(YearsResident), mean.year.res, as.numeric(YearsResident)), IndividualGender=ifelse(is.na(IndividualGender), mean.ind.gender, IndividualGender), IndividualAge=ifelse(is.na(IndividualAge), mean.age, as.numeric(IndividualAge)))
9c60502e65d6643f0b88df078ecb49818b54f325
5e6636f824327482c44c0f175387c39801fd1e02
/Week 3/Leap year example.r
5d1ae06ca586b48dad0b9c1c53c44ba1a46a73e0
[]
no_license
funshoelias/social_media_data_analytics
05f2ca0b27fa93a896544e8b62e2651d6b2ee37f
972a6f65aa85e49ad3f1f80a1ee44eca28d3e9ec
refs/heads/master
2021-02-16T18:09:53.229809
2018-08-10T20:19:32
2018-08-10T20:19:32
null
0
0
null
null
null
null
UTF-8
R
false
false
277
r
Leap year example.r
readinteger <- function() { n <- readline(prompt="Enter a year: ") if(!grepl("^[0-9]+$",n)) { return(readinteger()) } return(as.integer(n)) } year = readinteger() if (year%%4 == 0){ print("Leap year.") }else{ print("Not a leap year.") }
d860702dfc44bb69432c29d0ba198fe2cf856961
9b1984473184f69312ffcf3b42a7e3f7e27209f1
/cachematrix.R
90c4f494681eebc9ecb268d3f8714e4da823ba49
[]
no_license
mackenziewildman/ProgrammingAssignment2
068c36f9f518de4ccfdfaaacb17971f33fcec8e0
d3e83b8665436bce0591c5291f54fde3948b3ef0
refs/heads/master
2021-01-12T06:18:15.520017
2016-12-25T19:35:02
2016-12-25T19:35:02
77,338,291
0
0
null
2016-12-25T18:39:49
2016-12-25T18:39:49
null
UTF-8
R
false
false
1,715
r
cachematrix.R
## Programming Assignment 2 ## Mackenzie Wildman ## The following functions compute the inverse of a matrix ## by caching the the value of the matrix and its inverse. ## When computing a matrix inverse, the functions first check ## whether that matrix inverse has already been computed, ## and if so, returns the already computed inverse. If the ## matrix inverse is not already cached, then the functions ## compute the inverse and also cache the value. ## The function makeCacheMatrix creates a vector of functions. ## These four functions are named and perform the following: ## 1 $set(M) set the value of the matrix, takes matrix as input ## 2 $get() get the value of the matrix ## 3 $setinverse() set the value of the inverse, takes matrix as ## input ## 4 $getinverse() get the value of the inverse makeCacheMatrix <- function(x = matrix()) { invx <- NULL set <- function(y) { x <<- y invx <<- NULL } get <- function() x setinverse <- function(solve) invx <<- solve getinverse <- function() invx list(set = set, get = get, setinverse = setinverse, getinverse = getinverse) } ## The function cacheSolve returns the inverse of a ## matrix. It first checks to see if the inverse has ## already been computed. If so, it returns the inverse ## from the cache. If not, it computes the matrix ## inverse and stores the value in the cache using the ## setinverse function. ## It requires input argument of the type makeCacheMatrix cacheSolve <- function(x, ...) { m <- x$getinverse() if(!is.null(m)) { message("getting cached data") return(m) } data <- x$get() m <- solve(data, ...) x$setinverse(m) m }
8d1c3f0290d7aaae73fb2cb41378f100ef8a3163
fab8ecf98fef30704511173e23ee3d8117c594ff
/tests/testthat/TestGcov/R/TestCompiled.R
bb07d92257f4979392b05ec30b2e21a4d935a37b
[]
no_license
kirillseva/covr
66f9bbcfd8fc56392dca4dd67f6a993db22803fe
47356df2e7713e7bc9b388c4d656c30eea5238a0
refs/heads/master
2020-12-29T03:06:34.177615
2016-03-31T21:03:34
2016-03-31T21:03:34
32,338,648
0
1
null
2016-03-31T21:03:35
2015-03-16T16:38:15
R
UTF-8
R
false
false
86
r
TestCompiled.R
#' @useDynLib TestGcov simple_ simple <- function(x) { .Call(simple_, x) # nolint }
f084afc0b284205f77d638871f635c7d7999f48d
7b77d8b986b3e75c7b4ab0a97dac98d6b185a700
/user-interface.R
57b7dbfd5453304fc9e1410c92250afe3c7d4ffa
[]
no_license
thuynh12/final-hate-crimes
74428d83bbaccb4c4bfa03c6fed25232216d6630
d80d9b817f27c8e0a64712f6809c354c17e13702
refs/heads/master
2020-03-17T15:35:15.486547
2018-05-31T19:46:52
2018-05-31T19:46:52
133,716,241
0
0
null
null
null
null
UTF-8
R
false
false
11,451
r
user-interface.R
library(shiny) library(leaflet) library(geojson) library(geojsonio) source("analysis.R") source("rahma.kamel.R") ### TRY TO KEEP THE CODE NEAT. MAKE SURE YOUR PROGRAM RUNS BEFORE COMMITTING. ### AVOID MAKING OTHER'S CODE BREAK. ui <- tagList( navbarPage( theme = shinythemes::shinytheme("darkly"), title = "Hate Crimes Across the United States", tabPanel( "Home", sidebarLayout( sidebarPanel( selectInput('slider_year', label = "Select A Year", choices = unique(hate.crimes$year)), width = 2 ), mainPanel( h1("Overview"), p("The United States Federal Bureau of Investigation holds hate crimes to the highest priority of the Civil Rights program. The FBI defines hate crimes as criminal offense against a person or property motivated in whole or in part by an offender's bias against a race, religion, disability, sexual orientation, ethnicity, gender, or gender identity. Hate crimes have distructive impact on communities and families, and the preaching of hatred and intolerance can plant terrorism within the country. The FBI also mentions that hate itself is not a crime, and the FBI must be careful to protect freedom of speech and other civil liberties."), p("The data we have worked with covers information on the amount of hate crimes that happen within the years of 1991 to 2014. We see that there is a major difference in the amount of hate crimes that happened to Muslims and the amount that happened to Catholics. We also looked at major events and how those affected the rates of crime towards minority populations. 
Hate crimes continue to rise in the current political climate as continues research is being done and updated."), h3(textOutput('year_status'), align = 'center'), leafletOutput('overall_map'), strong("Click on a State for exact count of hate crimes."), h3("Resources:"), p(a("FBI's Hate Crime"), href = "https://www.fbi.gov/investigate/civil-rights/hate-crimes") ) ) ), tabPanel( "Mapping Hate Crimes", sidebarLayout( sidebarPanel( h3("Sect Bias and Year"), selectInput( 'select_bias', label = "Select Bias", choices = unique(hate.crimes$bias_motivation) ), selectInput( 'select_year', label = "Select Year", choices = unique(hate.crimes$year) ), width = 2 ), mainPanel( h3("Mapping Hate Crimes In America"), p("The American South has some very intense stereotypes of being more racist and intolerant towards People of Color. This map is to explore the concept and prenotion that Southerners are more racist than the rest of the countries. This mapping shows the distribution of hate crimes by types of bias and year."), h3("Hate Crimes By Bias and Year", align = 'center'), leafletOutput('hate_map'), strong("Click on a State for exact count of hate crimes."), p(""), p("However, you can see that the most hate crimes commited lie outside the South. This may be due to the population and demographic of other states. Some states may have higher populations for different racial groups. In addition, this map does not take account for state population, therefore for California and Texas being the most populous may have higher counts of hate crimes.") ) ) ), tabPanel( "History and Hate Crimes", mainPanel( h1("History and Hate Crimes", align = "center"), p("Analyzing how different historical events have impacted hate crimes and how often they occur has allowed us to draw trend lines and patterns over the years. 
Below we have chosen to analyze trends of hate crimes on Muslims before and after 9/11, hate crimes on LGBTQ+ overtime specifically analyzing 2000 when same sex marriage was passed in Vermont, making it the first state to do so, and finally the correlation of hate crimes against white and black people overtime"), plotOutput("plot_9_11"), p(""), p("The above visualization documents the developement of Anti-Muslim hate crimes over the years. The blue bar represents 2001 which is the year that 9/11 occured. Notably, after 2001 the count of crimes against Muslims increased significantly. This is because people connected an extremist claiming to follow religion to justify his violence when in reality Islam is a very peaceful religion. The data clearly shows a constant increase and trend line forming after 2001."), plotOutput("LGBT"), p(""), p("Hate crimes against the LGBTQ+ community have always been constant. Depending on the year and the political climate crimes will fluctuate averaging around 400 cases a year. The blue bar represents 2000, which is the year that Vermont, was the first state to legalize same sex marriage. The count for that year is notably less than the other years. This could have something to do with the legalization of same sex marriage or it can be an unrelated trend. This data very effectively visualizes the hardships that the LGBTQ+ community has had to go through and creates a pattern that we can work to avoid."), plotOutput("black_white"), p(""), p("Looking at the visualizations, anti-White hate crimes vary and are at times higher than that of anti-Black hate crimes. It is important to note the population accountability. The sample of the White population includes many groups that were marginalized historically in the United States. For instance, many Jewish, Italians and Greeks are taken into account as White. 
Another note to make is that many anti-Black hate crimes are more frequently underreported or are not accounted for in general because of the societal discrimination structures. Moreover, from the years 1991-2014 anti-Black hate crimes are clearly high. This is a crucial point that is being made through this analysis. Ant-Black hate crimes are significantly higher and this is due to many historical and current events that happen day to day in our contemporary society.") ) ), tabPanel( "Religious Hate Crime", h3("Catholic Hate Crimes", align = 'center'), plotOutput('plot_catholic'), h3("Muslim Hate Crimes", align = 'center'), plotOutput('plot_muslim'), h3("Comparing Religious Hate Crimes", align = 'center'), p(" Viewing the Anti-Islamic (Muslem) histogram and the Anti-Catholic histogram, we see that the level of Anti-Islamic hate crimes is skyrocketting much higher than those of the Anti-Catholic hate crimes. The Muslim hate crimes on average are in the hundreds wheraas those of the Catholics are below one hundred on average. Going into the Anti-Islamic trend, we see that it hits an ultimate high right after 2000. This marks an important event of 9/11 that were associated to terrorism acts in the United States. Many people generalized and associated violent people with a violent religion. Hate crimes towards Muslims increased after this because fear that plagued America during this time. Until now we see that there is a higher level of hate crimes towards muslims after this event. Prior to the 9/11 attacks, there was not as many. Another important note in the differences of hate crimes could be due to the fact that many Muslims are more distinguishable than people of other religions (with exceptions). Some Muslim women wear the head scarf or hijab that covers their hair which makes them stand out more and can be an easy target for people to unjustly associate them with the terrorism attacks that happen all over the world. 
Being different has always created a fear in people. In this society, it so happens to be Muslims. The American population comprises of a greater percentage of people from the sects of Christianity than those of Muslims. With the Catholic hate crimes we see that there is a pretty constant trend. They began to increase more or less in 2005. This could be due to religious views changing and moving towards a more liberal society that does not put as much value on religious beliefs. The value of religiousity has changed over time.") ), tabPanel( "General Data Table of Hate Crimes Against Select Minorities", sidebarLayout( sidebarPanel( h3("Selection"), selectInput( 'm.year', label = "Select Year", choices = unique(hate.crimes$year) ), width = 2 ), mainPanel( h3("Hate Crimes Against Selected Minorities"), p("A hate crimes is defined as a crime against an individual based on their background or chracterstics which make that person diverse from the majority. In the United States many are targeted based on such aspects which explains one main focus of our data which is bias motivation. The data table displays diverse groups (based on bias motivation) and the number of people within those groups who have been victimized by prejudice in the United States. The data was gathered from 1991-2014 and focuses on those who are Anti-Lesbian, Gay, Bisexual, or Transgender, Mixed Group (LGBT), Anti-Catholic, Anti-Male Homosexual (Gay), Anti-Female Homosexual (Lesbian), Anti-Islamic (Muslem), Anti-Black or African American. The plot displays a visual of the hate crimes throughout our chosen time period and it clearly shows that Ant-Black or African American bias remains the highest bias motivation throughout every year from 1991-2014. The high numbers could be explained by the median attention given to the group. Regardless of Whether the attention reflects positively or negatively on the group, those who are Anti-African American will react negatively. 
Overall, the data reveals consistently high numbers of opression towards African Americans and the other groups also hold consistent numbers of crimes against them throughout the years"), tableOutput('minority_table'), strong("This is a table summarizing counts of hate crimes commited during a specific year. You can select the year with the drop down menu on the left."), p(""), plotOutput('sum_plot'), p("This graph shows the overall hate crime distribution from 1991 to 2014"), p(""), h3("Resources:"), p(a("History of Hate Crime Tracking in the US"), href = "https://www.cnn.com/2017/01/05/health/hate-crimes-tracking-history-fbi/index.html") ) ) ) ) )
9a153cecdecf79a12b8fbafddcc4a8d40d3c7d52
3055b7865427f5689d3e68907be7960647ae71b6
/R/aggregate_module_summary_plots.R
cc6d54cea455a7fa9fa1987ffe54c7260e264eba
[]
no_license
wondersandy/AMPAD
37d6a6a7ee97412196be0847f89b525f8fc800b1
2803a72e31673f18560ac091d0928fcb3dd75380
refs/heads/master
2022-04-11T01:06:05.791277
2020-01-12T19:13:28
2020-01-12T19:13:28
null
0
0
null
null
null
null
UTF-8
R
false
false
6,141
r
aggregate_module_summary_plots.R
aggregate_module_summary_plots = function(outputFile=FALSE){ foo <- synapser::synTableQuery("select * from syn11932957")$asDataFrame() foo2 <- dplyr::select(foo,GeneID,Module) foo2$Presence <- 1 foo3 <- tidyr::pivot_wider(foo2, id_cols = "GeneID", names_from = "Module", values_from = "Presence") foo3[is.na(foo3)] <- 0 foo3 <- data.frame(foo3,stringsAsFactors=F) foo4 <- dplyr::select(foo3,GeneID,TCXblue,IFGyellow,PHGyellow) resu <- list() if(outputFile){ tiff(filename = 'consensusClusterA.tiff', height = 4, width = 6,units='in',pointsize=14,res=300) UpSetR::upset(foo4,nintersects = NA,show.numbers=F) dev.off() }else{ resu$A<-UpSetR::upset(foo4,nintersects = NA,show.numbers=F) } nUniqueGenesA <- data.frame(Module=c('TCXblue','PHGyellow','IFGyellow'),nGenes=c(979,366,127),stringsAsFactors=F) foo4 <- dplyr::select(foo3,GeneID,DLPFCblue,CBEturquoise,STGblue,PHGturquoise,IFGturquoise,TCXturquoise,FPturquoise) if(outputFile){ tiff(filename = 'consensusClusterB.tiff', height = 4, width = 6,units='in',pointsize=14,res=300) UpSetR::upset(foo4,nsets=7,nintersects = NA,point.size=1,show.numbers = F) dev.off() } else{ resu$B<-UpSetR::upset(foo4,nsets=7,nintersects = NA,point.size=1,show.numbers = F) } nUniqueGenesB <- data.frame(Module=c('CBEturquoise','DLPFCblue','IFGturquoise','PHGturquoise','STGblue','TCXturquoise','FPturquoise'),nGenes=c(593,349,275,209,163,69,40),stringsAsFactors=F) foo4 <- dplyr::select(foo3,GeneID,IFGbrown,STGbrown,DLPFCyellow,TCXgreen,FPyellow,CBEyellow,PHGbrown) if(outputFile){ tiff(filename = 'consensusClusterC.tiff', height = 4, width = 6,units='in',pointsize=14,res=300) UpSetR::upset(foo4,nsets=7,nintersects = NA,point.size=1,show.numbers = F) dev.off() }else{ resu$C<-UpSetR::upset(foo4,nsets=7,nintersects = NA,point.size=1,show.numbers = F) } nUniqueGenesC <- data.frame(Module=c('IFGbrown','FPyellow','STGbrown','DLPFCyellow','TCXgreen','PHGbrown','CBEyellow'),nGenes=c(966,641,233,178,141,139,28),stringsAsFactors=F) foo4 <- 
dplyr::select(foo3,GeneID,DLPFCbrown,STGyellow,PHGgreen,CBEbrown,TCXyellow,IFGblue,FPblue) if(outputFile){ tiff(filename = 'consensusClusterD.tiff', height = 4, width = 6,units='in',pointsize=14,res=300) UpSetR::upset(foo4,nsets=7,nintersects = NA,point.size=1,show.numbers = F) dev.off() }else{ resu$D<-UpSetR::upset(foo4,nsets=7,nintersects = NA,point.size=1,show.numbers = F) } nUniqueGenesD <- data.frame(Module=c('IFGblue','TCXyellow','FPblue','STGyellow','PHGgreen','DLPFCbrown','CBEbrown'),nGenes=c(1148,673,627,344,122,103,56),stringsAsFactors=F) foo4 <- dplyr::select(foo3,GeneID,FPbrown,CBEblue,DLPFCturquoise,TCXbrown,STGturquoise,PHGblue) if(outputFile){ tiff(filename = 'consensusClusterE.tiff', height = 4, width = 6,units='in',pointsize=14,res=300) UpSetR::upset(foo4,nsets=6,nintersects = NA,point.size=1,show.numbers = F) dev.off() } else{ resu$E<-UpSetR::upset(foo4,nsets=6,nintersects = NA,point.size=1,show.numbers = F) } nUniqueGenesE <- data.frame(Module=c('CBEblue','PHGblue','DLPFCturquoise','STGturquoise','TCXbrown','FPbrown'),nGenes=c(1862,951,447,423,358,201),stringsAsFactors=F) nUniqueGenes <- rbind(nUniqueGenesA, nUniqueGenesB, nUniqueGenesC, nUniqueGenesD, nUniqueGenesE) library(dplyr) modSize <- dplyr::group_by(foo2,Module) %>% dplyr::summarise(mSize=sum(Presence)) sumMat1 <- dplyr::left_join(modSize,nUniqueGenes) sumMat1$percentUnique <- sumMat1$nGenes/sumMat1$mSize customDf <- data.frame(moduleName=c('TCXblue', 'IFGyellow', 'PHGyellow', 'DLPFCblue', 'CBEturquoise', 'STGblue', 'PHGturquoise', 'IFGturquoise', 'TCXturquoise', 'FPturquoise', 'IFGbrown', 'STGbrown', 'DLPFCyellow', 'TCXgreen', 'FPyellow', 'CBEyellow', 'PHGbrown', 'DLPFCbrown', 'STGyellow', 'PHGgreen', 'CBEbrown', 'TCXyellow', 'IFGblue', 'FPblue', 'FPbrown', 'CBEblue', 'DLPFCturquoise', 'TCXbrown', 'STGturquoise', 'PHGblue'), Cluster= c(rep('Consensus Cluster A',3), rep('Consensus Cluster B',7), rep('Consensus Cluster C',7), rep('Consensus Cluster D',7), rep('Consensus Cluster E',6)), 
stringsAsFactors=F) sumMat1 <- dplyr::left_join(sumMat1,customDf,by=c('Module'='moduleName')) # cat('% overlap for Consensus Clusters A-C') # print(summary(lm(percentUnique ~ 1,dplyr::filter(sumMat1,Cluster=="Consensus Cluster A" | Cluster=="Consensus Cluster B" | Cluster=="Consensus Cluster C")))) # # cat('% overlap for Consensus Clusters D & E\n') # print(summary(lm(percentUnique ~ 1,dplyr::filter(sumMat1,Cluster=="Consensus Cluster D" | Cluster=="Consensus Cluster E")))) return(resu) }
fc7a5b800348b39510e68541f29de2f20853a3a2
97bcc8873287e1918725271f5bfc28946cc30fd2
/Model/hydrological/HydrologicalModel/CalculateGwHeads.r
5a3d7c15158612665cc2cd013547489f2cb8cb9f
[]
no_license
HYDFKI7/integrated-mk
536a678149f88ea03f81ac3be4955daabe8fb9a6
95ade4863a50a2122cbf54cfaea244aecd94b141
refs/heads/master
2021-06-01T12:41:36.898760
2016-09-05T04:57:22
2016-09-05T04:57:22
null
0
0
null
null
null
null
UTF-8
R
false
false
315
r
CalculateGwHeads.r
CalculateGwHeads = function(Gstorage, gwFitParam) { applyFit = function(fitParam, G) { Glevel = apply(fitParam, 2, function(x) {G*x["scale"] + x["intercept"]}) return(Glevel) } Glevel = mapply(applyFit, gwFitParam, unlist(apply(Gstorage, 2, list), recursive = FALSE)) return(Glevel) }
ef193751e1124c631600fc0058c7b594ba19881a
d4d160c8f13a839e1dcc21ee78310371eec607e8
/cachematrix.R
0fc651f4f734e0c66402702080f0eac56ed2f4d0
[]
no_license
rusek01/ProgrammingAssignment2
bc281f9114e31e270cd6f0c9c058b40b49747155
6cb34fad2f40689f5482d96db1033b4baec90c38
refs/heads/master
2021-01-22T14:05:33.096698
2015-07-26T21:37:26
2015-07-26T21:37:26
39,512,196
0
0
null
2015-07-22T14:53:45
2015-07-22T14:53:45
null
UTF-8
R
false
false
1,225
r
cachematrix.R
## Following functions can be used to offload demanding task of calculating ## matrix inversion ## function below creates list object with four methods which can be used ## to store matrix and its inversion. storing matrix clears inversion, it ## also provides mean of reading stored values makeCacheMatrix <- function(x = matrix()) { m <- NULL set <- function(y) { x <<- y m <<- NULL } get <- function() x setinv <- function(inv) m <<- inv getinv <- function() m list(set = set, get = get, setinv = setinv, getinv = getinv) } ## this function checks if matrix has its inversion already, if so it returns ## it, if now it reads original matrix and sets its inversion cacheSolve <- function(x, ...) { ## Return a matrix that is the inverse of 'x' m <- x$getinv() ## becouse of how makeCacheMatrix works, if !is.null(m) is true we ## know that original matrix didn't change, becouse if we used $set method from makeCacheMatrix ## and thats only way to change original matrix, we set value of m to null if(!is.null(m)) { message("getting cached data") return(m) } d <- x$get() ## code to get matrix m <- solve(d) ## code to invert matrix x$setinv(m) m }
571570202d0d8586fb5de0bcd8f5820916de032c
7fcd66e557198b4b96ea9a964a89bf19efbde910
/R_shiny/ui.R
0ed72a10075a806aec50c955f456e8f0333df0f8
[]
no_license
QimingShi/R
e34877c0ceb34c90773d19cbe873bc817ae9c6d9
e71df6269bd09043667df6130039cef6b59fe118
refs/heads/master
2020-03-21T03:52:16.585494
2018-06-20T19:46:03
2018-06-20T19:46:03
138,078,373
0
0
null
null
null
null
UTF-8
R
false
false
981
r
ui.R
library(shiny) library(leaflet) library(RColorBrewer) library(raster) library(shapefiles) library(rgdal) library(xtermStyle) library(rgdal) library(lattice) library(Cairo) dat = readLines("Data/icd_format.txt", encoding = "UTF-8") vars <- c("Household Income"="Income", "Patient Ratio"="Ratio", "Population"="POP2010", "Median Age"="Median_Age", dat ) vars1 <- c( dat ) ui <- bootstrapPage( tags$style(type = "text/css", "html, body {width:100%;height:100%}"), leafletOutput("map", width = "100%", height = "100%"), absolutePanel(fixed = TRUE, raggable = TRUE,top = 10, right = 10,bottom = "auto", selectInput("color", "Color(X)", vars), selectInput("size", "Circle Size(Y)", vars1), # uiOutput("sliderIn"), checkboxInput("legend", "Show legend", TRUE), plotOutput('plots',height = 250,brush="plot_brush"), textOutput("text_r") ) )
15ab103dd209c0430d4da6aa09b5c10297acd66c
effe14a2cd10c729731f08b501fdb9ff0b065791
/cran/paws.mobile/man/devicefarm_get_device_pool_compatibility.Rd
24c082443cc71c99e6fb574f8ccc9aeae1b6850d
[ "Apache-2.0" ]
permissive
peoplecure/paws
8fccc08d40093bb25e2fdf66dd5e38820f6d335a
89f044704ef832a85a71249ce008f01821b1cf88
refs/heads/master
2020-06-02T16:00:40.294628
2019-06-08T23:00:39
2019-06-08T23:00:39
null
0
0
null
null
null
null
UTF-8
R
false
true
4,003
rd
devicefarm_get_device_pool_compatibility.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/devicefarm_operations.R \name{devicefarm_get_device_pool_compatibility} \alias{devicefarm_get_device_pool_compatibility} \title{Gets information about compatibility with a device pool} \usage{ devicefarm_get_device_pool_compatibility(devicePoolArn, appArn, testType, test, configuration) } \arguments{ \item{devicePoolArn}{[required] The device pool's ARN.} \item{appArn}{The ARN of the app that is associated with the specified device pool.} \item{testType}{The test type for the specified device pool. Allowed values include the following: \itemize{ \item BUILTIN\_FUZZ: The built-in fuzz type. \item BUILTIN\_EXPLORER: For Android, an app explorer that will traverse an Android app, interacting with it and capturing screenshots at the same time. \item APPIUM\_JAVA\_JUNIT: The Appium Java JUnit type. \item APPIUM\_JAVA\_TESTNG: The Appium Java TestNG type. \item APPIUM\_PYTHON: The Appium Python type. \item APPIUM\_NODE: The Appium Node.js type. \item APPIUM\_RUBY: The Appium Ruby type. \item APPIUM\_WEB\_JAVA\_JUNIT: The Appium Java JUnit type for web apps. \item APPIUM\_WEB\_JAVA\_TESTNG: The Appium Java TestNG type for web apps. \item APPIUM\_WEB\_PYTHON: The Appium Python type for web apps. \item APPIUM\_WEB\_NODE: The Appium Node.js type for web apps. \item APPIUM\_WEB\_RUBY: The Appium Ruby type for web apps. \item CALABASH: The Calabash type. \item INSTRUMENTATION: The Instrumentation type. \item UIAUTOMATION: The uiautomation type. \item UIAUTOMATOR: The uiautomator type. \item XCTEST: The XCode test type. \item XCTEST\_UI: The XCode UI test type. }} \item{test}{Information about the uploaded test to be run against the device pool.} \item{configuration}{An object containing information about the settings for a run.} } \description{ Gets information about compatibility with a device pool. 
} \section{Request syntax}{ \preformatted{svc$get_device_pool_compatibility( devicePoolArn = "string", appArn = "string", testType = "BUILTIN_FUZZ"|"BUILTIN_EXPLORER"|"WEB_PERFORMANCE_PROFILE"|"APPIUM_JAVA_JUNIT"|"APPIUM_JAVA_TESTNG"|"APPIUM_PYTHON"|"APPIUM_NODE"|"APPIUM_RUBY"|"APPIUM_WEB_JAVA_JUNIT"|"APPIUM_WEB_JAVA_TESTNG"|"APPIUM_WEB_PYTHON"|"APPIUM_WEB_NODE"|"APPIUM_WEB_RUBY"|"CALABASH"|"INSTRUMENTATION"|"UIAUTOMATION"|"UIAUTOMATOR"|"XCTEST"|"XCTEST_UI"|"REMOTE_ACCESS_RECORD"|"REMOTE_ACCESS_REPLAY", test = list( type = "BUILTIN_FUZZ"|"BUILTIN_EXPLORER"|"WEB_PERFORMANCE_PROFILE"|"APPIUM_JAVA_JUNIT"|"APPIUM_JAVA_TESTNG"|"APPIUM_PYTHON"|"APPIUM_NODE"|"APPIUM_RUBY"|"APPIUM_WEB_JAVA_JUNIT"|"APPIUM_WEB_JAVA_TESTNG"|"APPIUM_WEB_PYTHON"|"APPIUM_WEB_NODE"|"APPIUM_WEB_RUBY"|"CALABASH"|"INSTRUMENTATION"|"UIAUTOMATION"|"UIAUTOMATOR"|"XCTEST"|"XCTEST_UI"|"REMOTE_ACCESS_RECORD"|"REMOTE_ACCESS_REPLAY", testPackageArn = "string", testSpecArn = "string", filter = "string", parameters = list( "string" ) ), configuration = list( extraDataPackageArn = "string", networkProfileArn = "string", locale = "string", location = list( latitude = 123.0, longitude = 123.0 ), vpceConfigurationArns = list( "string" ), customerArtifactPaths = list( iosPaths = list( "string" ), androidPaths = list( "string" ), deviceHostPaths = list( "string" ) ), radios = list( wifi = TRUE|FALSE, bluetooth = TRUE|FALSE, nfc = TRUE|FALSE, gps = TRUE|FALSE ), auxiliaryApps = list( "string" ), billingMethod = "METERED"|"UNMETERED" ) ) } } \examples{ # The following example returns information about the compatibility of a # specific device pool, given its ARN. \donttest{svc$get_device_pool_compatibility( appArn = "arn:aws:devicefarm:us-west-2::app:123-456-EXAMPLE-GUID", devicePoolArn = "arn:aws:devicefarm:us-west-2::devicepool:123-456-EXAMPLE-GUID", testType = "APPIUM_PYTHON" )} } \keyword{internal}
11ed0c1f97d5832fa5cd31af7097dbfae0f136dd
abacca46954a0259b1530d254d62609d084a9a50
/pkg/R/ffdfdply.R
4f890835cec30c400f547e82b23d8f4d370ec693
[]
no_license
nalimilan/ffbase
4afb965d370425dd50d7adaae1848b0d4081ba97
e6a1f2be391e69d4290b7b8f06a9c527ce8af445
refs/heads/master
2021-01-18T06:09:14.035055
2013-11-11T16:09:31
2013-11-11T16:50:18
null
0
0
null
null
null
null
UTF-8
R
false
false
3,616
r
ffdfdply.R
#' Performs a split-apply-combine on an ffdf
#'
#' Performs a split-apply-combine on an ffdf.
#' Splits the x ffdf according to split and applies FUN to the data, stores the result of the FUN in an ffdf.\cr
#' Remark that this function does not actually split the data. In order to reduce the number of times data is put into RAM for situations with a lot
#' of split levels, the function extracts groups of split elements which can be put into RAM according to BATCHBYTES. Please make sure your FUN covers the
#' fact that several split elements can be in one chunk of data on which FUN is applied.\cr
#' Mark also that NA's in the split are not considered as a split on which the FUN will be applied.
#'
#' @example ../examples/ffdfplyr.R
#' @param x an ffdf
#' @param split an ff vector which is part of the ffdf x
#' @param FUN the function to apply to each split. This function needs to return a data.frame
#' @param BATCHBYTES integer scalar limiting the number of bytes to be processed in one chunk
#' @param RECORDBYTES optional integer scalar representing the bytes needed to process one row of x
#' @param trace logical indicating to show on which split the function is computing
#' @param ... other parameters passed on to FUN
#' @return
#' an ffdf
#' @export
#' @seealso \code{\link{grouprunningcumsum}, \link{table}}
ffdfdply <- function(x, split, FUN,
                     BATCHBYTES = getOption("ffbatchbytes"),
                     RECORDBYTES = sum(.rambytes[vmode(x)]),
                     trace = TRUE, ...) {
  force(split)
  splitvmode <- vmode(split)
  if (splitvmode != "integer") {
    stop("split needs to be an ff factor or an integer")
  }
  splitisfactor <- is.factor.ff(split)

  ## Detect how it is best to split the ffdf according to the split value:
  ## group split levels so that each chunk holds at most MAXSIZE rows.
  ## BUG FIX: this assignment was missing (it had been fused into the
  ## comment above during a copy/paste), leaving MAXSIZE undefined and the
  ## two uses below erroring at run time.
  MAXSIZE <- BATCHBYTES / RECORDBYTES
  splitbytable <- table.ff(split, useNA = "no")
  splitbytable <- splitbytable[order(splitbytable, decreasing = TRUE)]
  if (max(splitbytable) > MAXSIZE) {
    warning("single split does not fit into BATCHBYTES")
  }
  ## Assign each split level to a chunk such that cumulative rows per chunk
  ## stay below MAXSIZE.
  tmpsplit <- grouprunningcumsum(x = as.integer(splitbytable), max = MAXSIZE)
  nrsplits <- max(tmpsplit)

  ## Loop over the chunks and apply the function
  allresults <- NULL
  for (idx in seq_len(nrsplits)) {
    tmp <- names(splitbytable)[tmpsplit == idx]
    if (!splitisfactor) {
      # Non-factor splits come back as character names; coerce back to the
      # original ram class (Date or plain integer).
      if (!is.null(ramclass(split)) && ramclass(split) == "Date") {
        tmp <- as.Date(tmp)
      } else {
        tmp <- as.integer(tmp)
      }
    }
    if (trace) {
      message(sprintf("%s, working on split %s/%s", Sys.time(), idx, nrsplits))
    }
    ## Filter the ffdf based on the split group and apply the function
    if (splitisfactor) {
      fltr <- split %in% ff(factor(tmp, levels = names(splitbytable)))
    } else {
      if (!is.null(ramclass(split)) && ramclass(split) == "Date") {
        fltr <- split %in% ff(tmp, vmode = "integer", ramclass = "Date")
      } else {
        fltr <- split %in% ff(tmp, vmode = "integer")
      }
    }
    if (trace) {
      message(sprintf("%s, extracting data in RAM of %s split elements, totalling, %s GB, while max specified data specified using BATCHBYTES is %s GB",
                      Sys.time(), length(tmp),
                      round(RECORDBYTES * sum(fltr) / 2^30, 5),
                      round(BATCHBYTES / 2^30, 5)))
    }
    inram <- ffdfget_columnwise(x, fltr)
    result <- FUN(inram, ...)
    if (!inherits(result, "data.frame")) {
      stop("FUN needs to return a data frame")
    }
    rownames(result) <- NULL
    # Keep row names globally unique across appended chunks.
    # (scalar condition: && instead of the original elementwise &)
    if (!is.null(allresults) && nrow(result) > 0) {
      rownames(result) <- (nrow(allresults) + 1):(nrow(allresults) + nrow(result))
    }
    ## Push the result to an ffdf
    if (nrow(result) > 0) {
      allresults <- ffdfappend(x = allresults, dat = result, recode = FALSE)
    }
  }
  allresults
}
621d835c1226cb08c9c7357c281bfc7714cef74a
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/Surrogate/examples/FixedBinContIT.Rd.R
05437ffed667e7e832592f8daaabff54eea4aa9c
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
1,386
r
FixedBinContIT.Rd.R
# Auto-generated example script (extracted from the package Rd file) for
# Surrogate::FixedBinContIT. Only the library() call below actually runs;
# the example body sits inside the Rd \dontrun{} block and is kept
# commented out (the "##D" prefix) because it is time consuming (>5 sec).
library(Surrogate)

### Name: FixedBinContIT
### Title: Fits (univariate) fixed-effect models to assess surrogacy in the
###   case where the true endpoint is binary and the surrogate endpoint is
###   continuous (based on the Information-Theoretic framework)
### Aliases: FixedBinContIT
### Keywords: plot Information-Theoretic BinCont Multiple-trial setting
###   Information-theoretic framework Trial-level surrogacy
###   Individual-level surrogacy Likelihood Reduction Factor (LRF)
###   Fixed-effect models Binary endpoint Continuous endpoint

### ** Examples

## Not run:
##D # Time consuming (>5sec) code part
##D # Generate data with continuous Surr and True
##D Sim.Data.MTS(N.Total=2000, N.Trial=100, R.Trial.Target=.8,
##D R.Indiv.Target=.8, Seed=123, Model="Full")
##D
##D # Make T binary
##D Data.Observed.MTS$True_Bin <- Data.Observed.MTS$True
##D Data.Observed.MTS$True_Bin[Data.Observed.MTS$True>=0] <- 1
##D Data.Observed.MTS$True_Bin[Data.Observed.MTS$True<0] <- 0
##D
##D # Analyze data
##D Fit <- FixedBinContIT(Dataset = Data.Observed.MTS, Surr = Surr,
##D True = True_Bin, Treat = Treat, Trial.ID = Trial.ID, Pat.ID = Pat.ID,
##D Model = "Full", Number.Bootstraps=50)
##D
##D # Examine results
##D summary(Fit)
##D plot(Fit, Trial.Level = FALSE, Indiv.Level.By.Trial=TRUE)
##D plot(Fit, Trial.Level = TRUE, Indiv.Level.By.Trial=FALSE)
## End(Not run)
51b817fdb899a3c55ccf820cb84ca9b823146cb3
457fce4c5c67741ee54126217e39721faa771b94
/SGL_R_modified/SGL/R/additional.R
03635d2c2c25e78627f345470ac6426773acec31
[]
no_license
ababii/sparse_group_lasso_matlab
b49f360ec6d9c71784a98b6adf8e0a8ba47f9e44
ddeda2f393f0f937b3bd9cc079f7cb3d1f3043cc
refs/heads/master
2022-02-13T09:20:05.211775
2019-07-29T11:52:48
2019-07-29T11:52:48
null
0
0
null
null
null
null
UTF-8
R
false
false
2,238
r
additional.R
# Internal helpers for cross-validated sparse-group-lasso (SGL) fits.

# SGL predict function internal: predict from a cv.SGL object at the
# cross-validation-selected penalty.
#
# Args:
#   obj  list returned by cvSGL.internal() (contains $fit, $lambdas,
#        $lldiff, $llSD and the standardisation in $X.transform).
#   newx numeric vector (one observation) or matrix (rows = observations).
#   s    which lambda to use: "lambda.1se" (default) or "lambda.min".
# Returns: the linear predictor eta for newx at the chosen lambda.
cvSGL.predict.internal <- function(obj, newx, s = "lambda.1se") {
  # Pick the lambda selected by cross-validation.
  l <- getmin(obj$lambdas, obj$lldiff, obj$llSD)
  if (s == "lambda.1se") {
    lam <- l$lambda.1se
  } else {
    lam <- l$lambda.min
  }
  idx <- which(obj$lambdas == lam)
  fit <- obj$fit
  X <- newx
  # Re-apply the centring/scaling that was used when the model was fitted.
  if (!is.null(obj$X.transform)) {
    if (is.matrix(X)) {
      # BUG FIX: the original referenced an undefined `newX` (capital X)
      # here, which errored for matrix input; `X` already holds `newx`.
      X <- t(t(X) - obj$X.transform$X.means)
      X <- t(t(X) / obj$X.transform$X.scale)
    }
    if (is.vector(X)) {
      X <- X - obj$X.transform$X.means
      X <- X / obj$X.transform$X.scale
    }
  }
  intercept <- fit$intercept
  if (is.null(intercept)) {
    intercept <- 0
  }
  # Linear predictor at the chosen lambda.
  if (is.matrix(X)) {
    eta <- X %*% fit$beta[, idx] + intercept
  }
  if (is.vector(X)) {
    eta <- sum(X * fit$beta[, idx]) + intercept
  }
  y.pred <- eta
  return(y.pred)
}

# cvSGL that saves the standardisation transform so predictions can
# standardise new data the same way. Delegates the actual fitting to
# linCrossVal() (only type = "linear" is handled).
cvSGL.internal <- function(data, index = rep(1, ncol(data$x)), type = "linear",
                           maxit = 1000, thresh = 0.001, min.frac = 0.05,
                           nlam = 20, gamma = 0.8, nfold = 10,
                           standardize = TRUE, verbose = FALSE, step = 1,
                           reset = 10, alpha = 0.95, lambdas = NULL,
                           block.cv = FALSE) {
  X.transform <- NULL
  if (standardize == TRUE) {
    # Centre each column, then scale by its Euclidean norm.
    X <- data$x
    means <- apply(X, 2, mean)
    X <- t(t(X) - means)
    var <- apply(X, 2, function(x) (sqrt(sum(x^2))))
    X <- t(t(X) / var)
    data$x <- X
    X.transform <- list(X.scale = var, X.means = means)
  }
  if (type == "linear") {
    if (standardize == TRUE) {
      # Centre the response; the removed mean becomes the intercept.
      intercept <- mean(data$y)
      data$y <- data$y - intercept
    }
    Sol <- linCrossVal(data, index, nfold = nfold, maxit = maxit,
                       thresh = thresh, min.frac = min.frac, nlam = nlam,
                       lambdas = lambdas, gamma = gamma, verbose = verbose,
                       step = step, reset = reset, alpha = alpha,
                       block.cv = block.cv)
    if (standardize == TRUE) {
      Sol$fit <- list(beta = Sol$fit$beta, lambdas = Sol$fit$lambdas,
                      intercept = intercept, step = step)
    }
  }
  Sol <- list(fit = Sol$fit, lldiff = Sol$lldiff, lambdas = Sol$lambdas,
              type = type, llSD = Sol$llSD, X.transform = X.transform)
  class(Sol) <- "cv.SGL"
  return(Sol)
}
b4c784a7972e9fd40d80c91bcf37ee267aab2d48
4680f495ab20b619ddf824584939a1e0356a0ed3
/scripts/solution/create_decks.R
014b8ae8542868d8aac4b23a91b45d864e6aa47c
[]
no_license
Laurigit/flAImme
7ca1de5e4dd82177653872f50e90e58aed5968f7
9d4b0381d4eedc928d88d0774c0376ba9341774b
refs/heads/master
2023-05-24T17:06:58.416499
2023-04-28T08:10:30
2023-04-28T08:10:30
251,082,000
0
0
null
null
null
null
UTF-8
R
false
false
1,360
r
create_decks.R
# create decks
# Example inputs kept from the original author for interactive testing:
# cyclers <- c(1,2,3,4,5,6)
# required_data(c("STG_DECK", "ADM_CYCLER_DECK"))

# Build a "spread" deck table (one row per physical card copy) for the
# requested cyclers, using data.table semantics throughout.
#
# Args:
#   cyclers         integer vector of cycler ids to build decks for.
#   ADM_CYCLER_DECK data.table with columns CYCLER_ID, CARD_ID, Count,
#                   MOVEMENT giving how many copies of each card each
#                   cycler owns.
#   extra_exhaust   optional vector of additional exhaustion-card counts;
#                   NOTE(review): assumed positionally aligned with
#                   `cyclers` — confirm against callers.
#   breakaway_data  optional data.table (columns CYCLER_ID, MOVEMENT) of
#                   cards bid away in a breakaway; bid cards are removed
#                   and replaced by exhaustion cards.
#
# Returns: data.table with columns CYCLER_ID, CARD_ID, Zone, MOVEMENT and
#   a sequential row_id — one row per card copy.
create_decks <- function(cyclers, ADM_CYCLER_DECK, extra_exhaust = NULL, breakaway_data = NULL) {
  # Base deck: every card the selected cyclers own, one row per card type.
  res <- ADM_CYCLER_DECK[CYCLER_ID %in% cyclers, .(CYCLER_ID, CARD_ID, Count, Zone = "Deck", MOVEMENT)]
  if (!is.null(extra_exhaust)) {
    if (sum(extra_exhaust) > 0) {
      # Add extra exhaustion cards (CARD_ID 1, MOVEMENT 2); drop zero rows.
      cyc_exh <- data.table(CYCLER_ID = cyclers, CARD_ID = 1, Count = extra_exhaust, Zone = "Deck", MOVEMENT = 2)[Count > 0]
      res <- rbind(res, cyc_exh)
    }
  }
  if (!is.null(breakaway_data)) {
    # CYCLER_ID, MOVEMENT, bid_count
    # Count how many cards of each movement value were bid per cycler.
    aggr_first <- breakaway_data[, .(bid_count = .N), by = .(MOVEMENT, CYCLER_ID)]
    # Right join onto the deck, then subtract the bid cards from Count.
    join_to_res <- aggr_first[res, on = .(CYCLER_ID, MOVEMENT)]
    join_to_res[!is.na(bid_count), Count := Count - bid_count]
    # add exhaust
    # NOTE(review): this branch reuses `extra_exhaust` for the breakaway
    # cyclers; if breakaway_data is supplied while extra_exhaust is NULL,
    # this data.table() call will fail — confirm callers always pass both.
    ba_cyclers <- aggr_first[, CYCLER_ID]
    cyc_exh <- data.table(CYCLER_ID = ba_cyclers, CARD_ID = 1, Count = extra_exhaust, Zone = "Deck", MOVEMENT = 2)[Count > 0]
    join_to_res[, bid_count := NULL]
    res_temp <- rbind(join_to_res, cyc_exh)
    # aggregate over multiple exhaust rows
    res <- res_temp[, .(Count = sum(Count)), by = .(CYCLER_ID, CARD_ID, Zone, MOVEMENT)]
  }
  # Expand to one row per card copy (rep by Count), then drop the helper
  # columns; row_id numbers the final spread sequentially.
  spread <- res[rep(1:.N,Count)][,index:=1:.N,by=CARD_ID][, index := NULL][, Count := NULL]
  spread[, row_id := seq_len(.N)]
  return(spread)
}
7d9bc84b833caf606862b425e931cb5bc252f30f
e80f2a5a0e13370e52cc97fe42f5c9edcc8eead5
/man/simple_wmap.Rd
593dbc6acbed4216d00f7ccb097e5f87c11cede6
[]
no_license
marlonecobos/rangemap
80cd91c6847338763f793ad7ac66f6fc3a1210eb
1edfc01612a120de25f92cf651e9ca64a4f8535a
refs/heads/master
2022-05-21T11:48:08.929230
2022-04-14T17:51:43
2022-04-14T17:51:43
133,424,345
17
11
null
2020-09-15T03:58:57
2018-05-14T21:36:23
R
UTF-8
R
false
true
720
rd
simple_wmap.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/helpers.R \name{simple_wmap} \alias{simple_wmap} \title{Get a simplified SpatialPolygonsDataFrame of the world} \usage{ simple_wmap(which = "simplest", regions = ".") } \arguments{ \item{which}{(character) name of type of SpatialPolygons to be obtained. Options are: "simple" and "simplest"; default = "simplest".} \item{regions}{(character) name of the country (or region if \code{which} = "simple") for which to obtain the SpatialPolygonsDataFrame.} } \value{ A simplified SpatialPolygonsDataFrame of the world in WGS84 projection. } \description{ Get a simplified SpatialPolygonsDataFrame of the world } \examples{ map <- simple_wmap() }
612792e77272f12d65976896161ed344b1da37dd
3f51f4c1b7d5a881289327df655a6852b9198838
/man/tsview.Rd
c36538bf85da68c142ae769a77ba7501938ea604
[]
no_license
mdijkstracpb/tsview
daf37077991271bcfe14ef6f594382dfa805f738
5f4760c8c3b599614d35cd1ad26ade21ad02a7bd
refs/heads/master
2021-01-11T16:35:06.229076
2017-02-27T09:06:08
2017-02-27T09:06:08
79,892,437
0
0
null
2017-01-24T08:44:43
2017-01-24T08:24:50
null
UTF-8
R
false
true
930
rd
tsview.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tsview-tsview.R \name{tsview} \alias{tsview} \title{Plotting Time Series Objects in Browser} \usage{ tsview(x = ts(matrix(rnorm(150), 30, 5), start = c(1961, 1), frequency = 4), plot.type = "multiple") } \arguments{ \item{x}{time series object, usually inheriting from class "\code{ts}" or "\code{regts}".} \item{plot.type}{for multivariate time series. \code{multiple} displays each series separately (with a common time axis), \code{single} displays all series in the same plot.} } \description{ Method for conveniently viewing objects inheriting from classes "\code{ts}", "\code{regts}" in your favorite web browser. } \examples{ x = ts(matrix(rnorm(150), 30, 5), start = c(1961, 1), frequency = 4) # 5 time series \dontrun{ tsview(x, "single") tsview(x, "multiple") } } \seealso{ \code{\link{tsplot}, \link{ts}, \link{regts}, \link{grepl}} }
897318d871dbfa7b9aebe22a8f1c4796c8687dc7
f208136b3e095cc0abaf5a00057b93fc15088bb8
/R/download_.R
7ed92c4feb994a6c01fbce3dc5fe67ea489c3788
[ "MIT" ]
permissive
AtlasOfLivingAustralia/SoE2021
fae6323bcc6290420015d151ab28a76838726365
4b5d8c24789c35a77866f5874c639da0c0796d61
refs/heads/main
2023-04-23T04:31:21.995125
2021-05-10T06:28:00
2021-05-10T06:28:00
339,227,244
0
0
MIT
2021-05-03T05:15:47
2021-02-15T22:43:10
R
UTF-8
R
false
false
623
r
download_.R
# Shiny download handlers for the SoE app.

# Download handler serving `data` as a CSV named "soe_data.csv".
download_data <- function(data) {
  downloadHandler(
    filename = function() {
      paste("soe_data", ".csv", sep = "")
    },
    content = function(file) {
      write.csv(data, file, row.names = FALSE)
    }
  )
}

# Download handler serving the current plot as "<type>_plot.png".
# Interactive maps ("i_map") are snapshotted with mapview::mapshot();
# everything else is saved with ggplot2::ggsave() (last plot drawn).
download_plot <- function(type) {
  if (type == "i_map") {
    downloadHandler(
      filename = paste0(type, "_plot.png"),
      content = function(file) {
        # BUG FIX: the snapshot must be written to the temp path that
        # Shiny passes in (`file`); the original wrote to a fixed
        # filename in the working directory, so the served download was
        # empty/stale.
        mapshot(df$plot_i_map, file = file)
      }
    )
  } else {
    downloadHandler(
      filename = paste0(type, "_plot.png"),
      content = function(file) {
        ggsave(file, width = 20, height = 15)
      }
    )
  }
}
80a3dd536d2b0dba6bd6a25de5bee2aa2817403b
7cc3a2e4a797a77f4ca1c74b9fc14147a6193cf7
/Code_R/new_frank_touni.R
ae08a0532d9c3ca6cf186d0b2e751a15f24fa73f
[]
no_license
yuqing-li1110/Copula-in-Rainfall-Analysis
f864ec1bffab645e936c377c3b065762f597880a
8517c7ba933f2e6b95aa0de7e955f58de5c85091
refs/heads/master
2022-12-11T05:30:52.616776
2020-08-28T04:46:07
2020-08-28T04:46:07
null
0
0
null
null
null
null
UTF-8
R
false
false
3,599
r
new_frank_touni.R
# Fit a 3-dimensional Frank copula to storm-cell statistics (mmaxR,
# Axismaj, durT) from the LSC tracking results, then resample from the
# fitted multivariate distribution and compare Kendall correlations of
# the original and resampled data.

library("dplyr")
library("copula") # Copula package
library("lcopula")
library("tiger")
library("gsl")

LSC <- read.csv("result_2005_2017_LSC.csv")
# SSC <- read.csv("result_2005_2017_SSC.csv")

## Filter: keep long-lived cells, drop id/date columns, drop NAs and the
## top 5% of each key statistic (outliers).
LSC <- LSC %>% filter(LSC$durT > 1 | LSC$newstart == 0)
LSC <- LSC[, -c(1, 4, 17, 18, 19)]
LSC <- na.omit(LSC)
LSC <- LSC %>%
  filter(mmaxR < quantile(LSC$mmaxR, 0.95),
         Axismaj < quantile(LSC$Axismaj, 0.95),
         vel < quantile(LSC$vel, 0.95))

## Compute Kendall tau for all statistics
# corrKendall = cor(LSC, method = c("kendall"))

## Marginal fits (parameters estimated offline with histfit):
## mmaxR ~ 35 + Gamma(shape, scale); Axismaj and durT ~ log-normal.
X <- LSC$mmaxR - 35
shape_X <- 1.81776244814184
scale_X <- 3.99936970775806
Y <- LSC$Axismaj
mu_Y <- 2.19381480694163
sigma_Y <- 0.456145826153028
Z <- LSC$durT
mu_Z <- 0.813023327672115
sigma_Z <- 0.829317405252584

# Random jitter term so the discrete durations do not tie up the copula
# fit. (FIX: the original used runif(lengths(Z), ...), which only worked
# because runif() takes length(n) when n is a vector; length(Z) is meant.)
aa <- runif(length(Z), max = dlnorm(Z, mu_Z, sigma_Z), min = dlnorm(Z + 1, mu_Z, sigma_Z))
Z <- Z + aa

# XYZ = cbind(X, Y, Z)
# b = cor(XYZ, method = c("kendall"))
# xy 0.3155247, xz 0.4054274, yz 0.3560683
# tau <- 0.5

## Transform margins to uniforms.
## 1. empirical ranks via tiger::to.uniform (used below)
ux <- to.uniform(X) # mmaxR
uy <- to.uniform(Y) # Axismaj
uz <- to.uniform(Z) # durT
U <- cbind(ux, uy, uz) # U(0,1)^d
## check uniform
# hist(ux); hist(uy); hist(uz)

## 2. parametric CDF alternative (kept for reference)
# x <- round(rgamma(100000,shape_X,1/scale_X),1)
# y <- rlnorm(100000, mu_Y, sigma_Y)
# z <- rlnorm(100000, mu_Z, sigma_Z)
# xcdf = pgamma(x, shape_X, 1/scale_X)
# ycdf = plnorm(y, mu_Y,sigma_Y)
# zcdf = plnorm(z, mu_Z,sigma_Z)
# U = cbind(xcdf, ycdf, zcdf)

## Copula fitting (Frank, dim 3, inversion of Kendall's tau).
# f.t <- fitCopula(tCopula(dim = 3), U, method = c("itau"), start = NULL)
summary(f.t <- fitCopula(frankCopula(dim = 3, use.indepC = "FALSE"), U, method = "itau"))
# to.uniform: Estimate 3.268, Std. Error 0.004
# cdf:        Estimate 3.268, Std. Error 0.004

Ut <- cCopula(U, copula = f.t@copula) # conditional (Rosenblatt) copula
splom2(Ut, cex = 0.2)

## Resample from the fitted joint distribution.
# gofC = gofCopula(frankCopula(dim=3, use.indepC="FALSE"), U, method="Sn")
r <- mvdc(copula = frankCopula(f.t@estimate, dim = 3),
          margins = c("gamma", "lnorm", "lnorm"),
          paramMargins = list(list(shape = shape_X, scale = scale_X),
                              list(meanlog = mu_Y, sdlog = sigma_Y),
                              list(meanlog = mu_Z, sdlog = sigma_Z)))
samp <- rMvdc(length(X), r)
x.samp <- samp[, 1]
y.samp <- samp[, 2]
z.samp <- samp[, 3]
# Refit checks recorded by the original author:
# x.para = fitdistr(x.samp, 'gamma')       # shape 1.816 (0.008), rate 0.250 (0.001)
# y.para = fitdistr(y.samp, "log-normal")  # meanlog 2.194 (0.002), sdlog 0.457 (0.001)
# z.para = fitdistr(z.samp, "log-normal")  # meanlog 0.815 (0.003), sdlog 0.827 (0.002)

ux.samp <- to.uniform(x.samp) # mmaxR
uy.samp <- to.uniform(y.samp) # Axismaj
uz.samp <- to.uniform(z.samp) # durT
# BUG FIX: the original built U.samp from the ORIGINAL uniforms
# (cbind(ux, uy, uz)), so the "resample" correlation below was identical
# to the original one by construction. Use the resampled uniforms.
U.samp <- cbind(ux.samp, uy.samp, uz.samp) # U(0,1)^d
# U.samp = U.samp[1:1000,]

pairs(U.samp)
pairs(samp)
K.plot(U)
K.plot(samp)

corr1 <- cor(U, method = c("kendall"))
corr2 <- cor(U.samp, method = c("kendall"))
write.csv(corr1, "frank Kendall correlation of the original data.csv")
write.csv(corr2, "frank Kendall correlation of the resample data.csv")
4fbdff90aab9d27b9b6139f6cbc2822547845b76
15479d42825658129d960589425dfd2e45734414
/makeBndLines.R
453efc083a6391658ba78cd45c294283dd3b63ba
[]
no_license
claretandy/Veg-Precip_WestAfrica
883911bdfc0ae594c46cb3f2632829e390ab1b40
77fb250143131fab150682072fab1dd16e602190
refs/heads/master
2020-04-06T05:26:44.436568
2015-04-28T17:15:18
2015-04-28T17:15:18
34,739,561
0
0
null
null
null
null
UTF-8
R
false
false
2,794
r
makeBndLines.R
# Script to extract a centre line ("skeleton") from a boundary polygon via
# Delaunay triangulation plus a minimum spanning tree, then inspect raster
# boundary polygons. Interactive: capture() uses locator(), and the later
# sections reference globals (rbnd.pol, r2p, myproj, vegThreshold, bndDef)
# created elsewhere in the session.

## densify a 2-col matrix: add n interpolated points between each pair of
## consecutive vertices, column by column.
densify <- function(xy,n=5){
  cbind(dens(xy[,1],n=n),dens(xy[,2],n=n))
}

## densify a vector: return the input values with n linearly interpolated
## values inserted between each consecutive pair.
dens <- function(x,n=5){
  out = rep(NA,1+(length(x)-1)*(n+1))
  ss = seq(1,length(out),by=(n+1))  # slots holding the original values
  out[ss]=x
  for(s in 1:(length(x)-1)){
    # fill the gap between original points s and s+1, endpoints excluded
    out[(1+ss[s]):(ss[s+1]-1)]=seq(x[s],x[s+1],len=(n+2))[-c(1,n+2)]
  }
  out
}

## Compute a simple centre line for the polygon ring xyP (2-col matrix of
## coordinates). Optionally densify first (dense = points to insert per
## segment). Returns list(pts = ordered centre-line points, tree = the MST).
## Stops if the MST diameter does not visit every vertex (non-linear shape).
simplecentre <- function(xyP,dense){
  require(deldir)
  require(splancs)
  require(igraph)
  require(rgeos)
  ### optionally add extra points
  if(!missing(dense)){
    xy = densify(xyP,dense)
  } else {
    xy = xyP
  }
  ### compute triangulation
  d=deldir(xy[,1],xy[,2])
  ### find midpoints of triangle sides
  mids=cbind((d$delsgs[,'x1']+d$delsgs[,'x2'])/2, (d$delsgs[,'y1']+d$delsgs[,'y2'])/2)
  ### get points that are inside the polygon
  sr = SpatialPolygons(list(Polygons(list(Polygon(xyP)),ID=1)))
  ins = over(SpatialPoints(mids),sr)
  ### select the points
  pts = mids[!is.na(ins),]
  # keep only midpoints far from the polygon outline (inner skeleton)
  dPoly = gDistance(as(sr,"SpatialLines"),SpatialPoints(pts),byid=TRUE)
  pts = pts[dPoly > max(dPoly/1.5),]
  ### now build a minimum spanning tree weighted on the distance
  # NOTE(review): `T` here is a variable shadowing TRUE — hazardous but
  # intentional in the original; left unchanged.
  G = graph.adjacency(as.matrix(dist(pts)),weighted=TRUE,mode="upper")
  T = minimum.spanning.tree(G,weighted=TRUE)
  ### get a diameter
  path = get.diameter(T)
  if(length(path)!=vcount(T)){
    stop("Path not linear - try increasing dens parameter")
  }
  # browser()
  ### path should be the sequence of points in order
  list(pts=pts[path,],tree=T)
}

# Extract the coordinate matrix of ring i of the (single) top-level
# polygon inside SpatialPolygons object p.
onering=function(p, i){p@polygons[[1]]@Polygons[[i]]@coords}

# Interactively digitise a line with locator() and wrap it as SpatialLines.
capture = function(){
  p=locator(type="l")
  SpatialLines(list(Lines(list(Line(cbind(p$x,p$y))),ID=1)))
}

# --- interactive demo: digitise a line, buffer it, skeletonise ---
s = capture()
p = gBuffer(s,width=0.2)
plot(p,col="#cdeaff")
plot(s,add=TRUE,lwd=3,col="red")
# scp = simplecentre(onering(p))
# NOTE(review): rbnd.pol must already exist in the session — not defined here.
scp = simplecentre(onering(rbnd.pol, 1))
lines(scp$pts,col="white")

# Get polygons with number of vertices > 45
maxid <- 0
maxval<- 0
for (i in 1:length(r2p@polygons[[1]]@Polygons)){
  nr <- nrow(onering(r2p, i))
  if (nr > 45){
    print(paste(i,nr, sep=" : "))}
  if (nr > maxval){maxval <- nr ; maxid <- i}
}
print(paste("Max ID : ",maxid, "(",maxval,")", sep=""))

# Make a new feature for each polygon ...
ancils <- loadAllAncils(myproj=myproj, nBndClass=1, model="rb5216.4km.std", vegThreshold=vegThreshold, bndDef=bndDef, nBuf=1, overwrite=F)
r2p <- rasterToPolygons(ancils$mycl.f, fun = function(x){x==4}, n=8, dissolve=T)
plist <- vector("list")
for (i in 1:length(r2p@polygons[[1]]@Polygons)){
  plist[[i]] <- Polygons(list(r2p@polygons[[1]]@Polygons[[i]]), paste("s",i,sep=""))
}
plist.sp <- SpatialPolygons(plist, 1:length(plist))
e44262b575ca26f69754ded7a7cff74923b99d44
899420d8106be354a2010f5964fc5802f533294c
/man/drawFeature2sf.Rd
5b526903dc63916dc2f05291bc17755fa9cae9b7
[]
no_license
annakrystalli/sedMaps
4cea5a3e51feb27427d01188b607efe7c40b160c
a93da7c5ba1125f5716cbb60674b80cfb74ad36b
refs/heads/master
2021-06-24T17:26:34.668563
2021-06-19T12:25:27
2021-06-19T12:25:27
149,792,890
1
1
null
2018-09-22T10:16:53
2018-09-21T16:59:44
R
UTF-8
R
false
true
379
rd
drawFeature2sf.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/extract-sf.R \name{drawFeature2sf} \alias{drawFeature2sf} \title{Convert a drawn leaflet feature to sf} \usage{ drawFeature2sf(feature) } \arguments{ \item{feature}{drawn leaflet feature} } \value{ a simple feature object of the leaflet feature } \description{ Convert a drawn leaflet feature to sf }
3d7a0094c8c4e7fb15db4d549b2dcd30607864bc
ca5f11d0358ab203d9468659c1306d1b186eb206
/R/blandr.plot.ggplot.r
86a8765ca65a1dba33a2d8dc926cffbd68fdfa9f
[]
no_license
deepankardatta/blandr
75b3a30b2d961fd3c7b12824ab035943f8c01208
4d5b1a43536cd1fd9021ff5b1736a7534bc14072
refs/heads/v.0.5.3-development
2021-12-14T12:45:38.472889
2020-03-28T07:15:04
2020-03-28T07:15:04
95,990,424
15
9
null
2021-12-06T01:33:16
2017-07-01T22:25:47
R
UTF-8
R
false
false
9,802
r
blandr.plot.ggplot.r
#' @title Bland-Altman plotting function, using ggplot2
#'
#' @description Draws a Bland-Altman plot using data calculated using the other functions, using ggplot2
#'
#' @author Deepankar Datta <deepankardatta@nhs.net>
#'
#' @param statistics.results A list of statistics generated by the blandr.statistics function: see the function's return list to see what variables are passed to this function
#' @param method1name (Optional) Plotting name for 1st method, default "Method 1"
#' @param method2name (Optional) Plotting name for 2nd method, default "Method 2"
#' @param plotTitle (Optional) Title name, default "Bland-Altman plot for comparison of 2 methods"
#' @param ciDisplay (Optional) TRUE/FALSE switch to plot confidence intervals for bias and limits of agreement, default is TRUE
#' @param ciShading (Optional) TRUE/FALSE switch to plot confidence interval shading to plot, default is TRUE
#' @param normalLow (Optional) If there is a normal range, entering a continuous variable will plot a vertical line on the plot to indicate its lower boundary
#' @param normalHigh (Optional) If there is a normal range, entering a continuous variable will plot a vertical line on the plot to indicate its higher boundary
#' @param overlapping (Optional) TRUE/FALSE switch to increase size of plotted point if multiple values using ggplot's geom_count, default=FALSE. Not currently recommended until the graphics can be tweaked to make them better
#' @param x.plot.mode (Optional) Switch to change x-axis from being plotted by means (="means") or by either 1st method (="method1") or 2nd method (="method2"). Default is "means". Anything other than "means" will switch to default mode.
#' @param y.plot.mode (Optional) Switch to change y-axis from being plotted by difference (="difference") or by proportion magnitude of measurements (="proportion"). Default is "difference". Anything other than "proportional" will switch to default mode.
#' @param plotProportionalBias (Optional) TRUE/FALSE switch. Plots a proportional bias line. Default is FALSE.
#' @param plotProportionalBias.se (Optional) TRUE/FALSE switch. If proportional bias line is drawn, switch to plot standard errors. See stat_smooth for details. Default is TRUE.
#' @param assume.differences.are.normal (Optional, not operationally used currently) Assume the difference of means has a normal distribution. Will be used to build further analyses
#'
#' @return ba.plot Returns a ggplot data set that can then be plotted
#'
#' @import ggplot2
#'
#' @examples
#' # Generates two random measurements
#' measurement1 <- rnorm(100)
#' measurement2 <- rnorm(100)
#'
#' # Passes data to the blandr.statistics function to generate Bland-Altman statistics
#' statistics.results <- blandr.statistics( measurement1 , measurement2 )
#'
#' # Generates a ggplot, with no optional arguments
#' blandr.plot.ggplot( statistics.results )
#'
#' # Generates a ggplot, with title changed
#' blandr.plot.ggplot( statistics.results , plotTitle = "Bland-Altman example plot" )
#'
#' # Generates a ggplot, with title changed, and confidence intervals off
#' blandr.plot.ggplot( statistics.results , plotTitle = "Bland-Altman example plot" ,
#' ciDisplay = FALSE , ciShading = FALSE )
#'
#' @export

# NOTE(review): method1name/method2name and assume.differences.are.normal
# are accepted but never referenced in the body — presumably kept for
# interface compatibility with the sibling plotting functions; confirm.
blandr.plot.ggplot <- function ( statistics.results ,
                                 method1name = "Method 1" ,
                                 method2name = "Method 2" ,
                                 plotTitle = "Bland-Altman plot for comparison of 2 methods" ,
                                 ciDisplay = TRUE ,
                                 ciShading = TRUE ,
                                 normalLow = FALSE ,
                                 normalHigh = FALSE ,
                                 overlapping = FALSE ,
                                 x.plot.mode = "means" ,
                                 y.plot.mode = "difference" ,
                                 plotProportionalBias = FALSE ,
                                 plotProportionalBias.se = TRUE ,
                                 assume.differences.are.normal = TRUE
                                 ) {

  # Does a check if ggplot2 is available
  # It should be as it is in the imports section but in CRAN checks some systems don't have it!
  if (!requireNamespace("ggplot2", quietly = TRUE)) {
    stop("Package \"ggplot2\" needed for this function to work. Please install it.",
         call. = FALSE)
  }

  # Selects if x-axis uses means (traditional) or selects one of the methods
  # as the gold standard (non-traditional BA)
  # See Krouwer JS (2008) Why Bland-Altman plots should use X, not (Y+X)/2 when X is a reference method. Statistics in Medicine 27:778-780
  # NOT ENABLED YET — x.plot.mode is currently ignored and means are always used.
  x.axis <- statistics.results$means

  # Selects if uses differences (traditional) or proportions (non-traditional BA)
  if( y.plot.mode == "proportion" ) {
    y.axis <- statistics.results$proportion
  } else {
    y.axis <- statistics.results$differences
  }

  # Constructs the plot.data dataframe
  plot.data <- data.frame( x.axis , y.axis )

  # Rename to allow plotting
  # This was a hangover from an older version so I'm not sure we need it anymore
  # But not really a priority to check and remove now
  colnames(plot.data)[1] <- "x.axis"
  colnames(plot.data)[2] <- "y.axis"

  # Plot using ggplot: points, a centred title, the zero line, the bias
  # line and the upper/lower limits of agreement (bias +/- z * SD).
  ba.plot <- ggplot( plot.data , aes( x = plot.data$x.axis , y = plot.data$y.axis ) ) +
    geom_point() +
    theme(plot.title = element_text(hjust = 0.5)) +
    geom_hline( yintercept = 0 , linetype = 1 ) +  # "0" line
    geom_hline( yintercept = statistics.results$bias , linetype = 2 ) +  # Bias
    geom_hline( yintercept = statistics.results$bias + ( statistics.results$biasStdDev * statistics.results$sig.level.convert.to.z ) , linetype = 2 ) +  # Upper limit of agreement
    geom_hline( yintercept = statistics.results$bias - ( statistics.results$biasStdDev * statistics.results$sig.level.convert.to.z ) , linetype = 2 ) +  # Lower limit of agreement
    ggtitle( plotTitle ) +
    xlab( "Means" )

  # Re-titles the y-axis dependent on which plot option was used
  if ( y.plot.mode == "proportion" ) {
    ba.plot <- ba.plot + ylab( "Difference / Average %" )
  } else {
    ba.plot <- ba.plot + ylab( "Differences" )
  }

  # Drawing confidence intervals (OPTIONAL): dotted lines for the CI of the
  # bias and of each limit of agreement.
  if( ciDisplay == TRUE ) {
    ba.plot <- ba.plot +
      geom_hline( yintercept = statistics.results$biasUpperCI , linetype = 3 ) +  # Bias - upper confidence interval
      geom_hline( yintercept = statistics.results$biasLowerCI , linetype = 3 ) +  # Bias - lower confidence interval
      geom_hline( yintercept = statistics.results$upperLOA_upperCI , linetype = 3 ) +  # Upper limit of agreement - upper confidence interval
      geom_hline( yintercept = statistics.results$upperLOA_lowerCI , linetype = 3 ) +  # Upper limit of agreement - lower confidence interval
      geom_hline( yintercept = statistics.results$lowerLOA_upperCI , linetype = 3 ) +  # Lower limit of agreement - upper confidence interval
      geom_hline( yintercept = statistics.results$lowerLOA_lowerCI , linetype = 3 )  # Lower limit of agreement - lower confidence interval

    # Shading areas for 95% confidence intervals (OPTIONAL)
    # This needs to be nested into the ciDisplay check
    if( ciShading == TRUE ) {
      ba.plot <- ba.plot +
        annotate( "rect", xmin = -Inf , xmax = Inf , ymin = statistics.results$biasLowerCI , ymax = statistics.results$biasUpperCI , fill="blue" , alpha=0.3 ) +  # Bias confidence interval shading
        annotate( "rect", xmin = -Inf , xmax = Inf , ymin = statistics.results$upperLOA_lowerCI , ymax = statistics.results$upperLOA_upperCI , fill="green" , alpha=0.3 ) +  # Upper limits of agreement confidence interval shading
        annotate( "rect", xmin = -Inf , xmax = Inf , ymin = statistics.results$lowerLOA_lowerCI , ymax = statistics.results$lowerLOA_upperCI , fill="red" , alpha=0.3 )  # Lower limits of agreement confidence interval shading
    }
  }
  ### Function has finished drawing of confidence intervals at this line

  # If a normalLow value has been sent, plots this line
  if( normalLow != FALSE ) {
    # Check validity of normalLow value to plot line
    if( is.numeric(normalLow) == TRUE ) {
      ba.plot <- ba.plot + geom_vline( xintercept = normalLow , linetype = 4 , col=6 )
    }
  }

  # If a normalHigh value has been sent, plots this line
  if( normalHigh != FALSE ) {
    # Check validity of normalHigh value to plot line
    if( is.numeric(normalHigh) == TRUE ) {
      ba.plot <- ba.plot + geom_vline( xintercept = normalHigh , linetype = 4 , col=6 )
    }
  }

  # If overlapping=TRUE uses geom_count (adds a size-by-count layer on top
  # of the plain points). See the param description at the top.
  if( overlapping == TRUE ) {
    ba.plot <- ba.plot + geom_count()
  }

  # If plotProportionalBias switch is TRUE, plots a proportional bias line as well
  if( plotProportionalBias == TRUE ) {

    # Check for validity of options passed to the plotProportionalBias.se switch
    # As if we throw an invalid option to ggplot it will just stop with an error
    if( plotProportionalBias.se !=TRUE && plotProportionalBias.se != FALSE) {
      plotProportionalBias.se <- TRUE
    }

    # Plots line
    ba.plot <- ba.plot + ggplot2::geom_smooth( method = 'lm' , se = plotProportionalBias.se )

  } # End of drawing proportional bias line

  # Draws marginal histograms if option selected
  # Idea from http://labrtorian.com/tag/bland-altman/
  # REMOVED AS INTRODUCED SOME INCOMPATIBILITIES DEPENDENT ON USERS R VERSION
  # ALSO MASSIVELY INCREASED PACKAGE SIZE
  # if( marginalHistogram == TRUE ) { ba.plot <- ggMarginal( ba.plot , type="histogram" ) }

  # Return the ggplot2 output
  return(ba.plot)

  #END OF FUNCTION
}
3b0ecb22ee9e0bfbb4255bc1234a8296dae105d2
15ca9daea2d93ee87bc02669f63c90d016d29a60
/data-raw/data_processing.R
461478c72e3d9728470b2b69443c94ba931f859c
[ "CC-BY-4.0" ]
permissive
Global-Health-Engineering/durbanplasticwaste
71160fdb13a9b676d41c57d7feaedfee3f8c94d4
5853f3d5878798fb063cfaaf109ef6c1bc937740
refs/heads/main
2023-08-08T18:34:39.769299
2023-05-09T10:57:02
2023-05-09T10:57:02
604,573,987
0
1
CC-BY-4.0
2023-09-12T07:45:41
2023-02-21T10:52:19
R
UTF-8
R
false
false
3,610
r
data_processing.R
# description -------------------------------------------------------------
# R script to process uploaded raw data into a tidy dataframe.
# Reads the raw litterboom Excel/CSV files, tidies counts, weights and
# sampling locations, and writes the results to data/ and inst/extdata/.

# R packages --------------------------------------------------------------
library(tidyverse)
library(here)
library(readxl)
library(janitor)

# read data ---------------------------------------------------------------
# Raw data: first two rows of the Excel sheet are skipped (header junk).
litterboom <- read_excel("data-raw/Data for R_Raúl.xlsx", skip = 2)
locations <- read_csv("data-raw/litterboom-sample-locations.csv")

# tidy data ---------------------------------------------------------------
# Drop the unnamed index column, normalise column names, attach the (fixed)
# sampling year "2022" to the day.month dates, and treat missing counts as 0.
litterboom_df <- litterboom |>
  select(-...1) |>
  clean_names() |>
  mutate(year = "2022") |>
  unite(col = "date", c("date", "year"), sep = ".") |>
  mutate(date = lubridate::dmy(date)) |>
  relocate(date) |>
  mutate(amount = case_when(
    is.na(amount) == TRUE ~ 0,
    TRUE ~ amount
  ))

## store weights data as separate table
# One row per date/location; distinct() removes the per-brand duplication.
litterboom_weights <- litterboom_df |>
  select(date, location, pet = weight_pet, hdpe_pp = weight_hdpe_pp) |>
  distinct()

## import tidy brand names after exporting excel
## Issue 2: https://github.com/Global-Health-Engineering/durbanplasticwaste22/issues/2
# Export the raw brand names for manual curation in Excel. The curated file
# (tidy-brand.names-rb.xlsx) is then read back below — this is a deliberate
# manual-edit round trip, not a typo in the filenames.
litterboom_df |>
  count(brand, name = "count") |>
  mutate(new_name = NA_character_) |>
  openxlsx::write.xlsx("data-raw/tidy-brand.names.xlsx")

# Curated names: fall back to the original brand where no new name was given.
brand_names <- read_excel("data-raw/tidy-brand.names-rb.xlsx") |>
  select(brand, new_name) |>
  mutate(new_name = case_when(
    is.na(new_name) == TRUE ~ brand,
    TRUE ~ new_name
  ))

# Counts table: swap raw brand names for curated ones and harmonise the
# producer-group and category labels.
litterboom_counts <- litterboom_df |>
  select(-weight_pet, -weight_hdpe_pp) |>
  rename(count = amount) |>
  left_join(brand_names) |>
  relocate(new_name, .before = brand) |>
  select(-brand) |>
  relocate(location, .after = date) |>
  rename(brand = new_name) |>
  mutate(group = case_when(
    group == "OTHER GROUPS" ~ "OTHER",
    group == "The Coca-Cola Company" ~ "Coca Cola Beverages South Africa",
    group == "Coca Cola Company" ~ "Coca Cola Beverages South Africa",
    str_detect(group, "UnID") == TRUE ~ "unidentifiable",
    TRUE ~ group
  )) |>
  mutate(category = case_when(
    category == "skiin" ~ "Skin/Hair Products",
    TRUE ~ category
  ))

## locations data - convert locations from degrees, minutes, seconds to
## decimal degrees
# Coordinates arrive as "D˚ M' S'' direction" strings; strip the symbols,
# split into components and compute dd = D + M/60 + S/3600 (negated for S).
locations <- locations |>
  pivot_longer(cols = latitude:longitude) |>
  mutate(value = str_replace(value, pattern = "˚", replacement = "")) |>
  mutate(value = str_replace(value, pattern = "'", replacement = "")) |>
  mutate(value = str_replace(value, pattern = "''", replacement = "")) |>
  separate(value, into = c("degree", "minutes", "seconds", "direction"), sep = " ") |>
  mutate(across(c(degree:seconds), as.numeric)) |>
  mutate(dd = degree + minutes/60 + seconds/3600) |>
  mutate(dd = case_when(
    direction == "S" ~ -dd,
    TRUE ~ dd
  )) |>
  select(location, name, dd) |>
  pivot_wider(names_from = name, values_from = dd)

# write data --------------------------------------------------------------
# Package data (data/*.rda) plus CSV and XLSX copies under inst/extdata/.
usethis::use_data(litterboom_weights, litterboom_counts, locations,
                  overwrite = TRUE)

write_csv(litterboom_counts, here::here("inst", "extdata", "litterboom_counts.csv"))
write_csv(litterboom_weights, here::here("inst", "extdata", "litterboom_weights.csv"))
write_csv(locations, here::here("inst", "extdata", "locations.csv"))

openxlsx::write.xlsx(litterboom_counts, here::here("inst", "extdata", "litterboom_counts.xlsx"))
openxlsx::write.xlsx(litterboom_weights, here::here("inst", "extdata", "litterboom_weights.xlsx"))
openxlsx::write.xlsx(locations, here::here("inst", "extdata", "locations.xlsx"))
e847701aad2778df9c865d7935cdef10b6ce99d8
1c4fb1e9c330a6ccbcbf0707befa792aeae0f925
/R/data.R
65526a1fccfb16ebc6155ad2623b0b85af4cdbc0
[ "MIT" ]
permissive
ramongss/qualiscapes
e908134d72498f46614377fae6e8ef8f4996ea5d
6eba63bc9cc0659de948237e4c9f8eda94919174
refs/heads/master
2023-04-21T11:30:05.878300
2021-05-13T21:52:26
2021-05-13T21:52:26
363,000,405
5
0
null
null
null
null
UTF-8
R
false
false
433
r
data.R
#' 2019 Qualis CAPES database
#'
#' A database with the preliminary 2019 Qualis CAPES journal/conference
#' quality ratings.
#'
#' @format A data frame with 22,046 rows and 3 variables:
#' \describe{
#'   \item{ISSN_2019}{The ISSN of the Journal/Congress}
#'   \item{TITULO_2019}{The name of the Journal/Congress}
#'   \item{ESTRATO_2019}{The Qualis CAPES rating (stratum) of the Journal/Congress}
#' }
#'
#' @source \url{https://github.com/enoches/Qualis_2019_preliminar}
"qualiscapes"
2e039936c87d91748b7e333c7f21b284b67a00da
5bcb21798e65f99903c5f4f76237d4cf2badb557
/srv_ui_elements/visualize_UI_misc.R
d04eedcae15e016564898c095e3952c5b2b3091f
[]
no_license
EMSL-Computing/FREDA
380f77da60a98bc90c0d0aa0172302d573a4afa9
734f076348203d30dd0f6bf42492cd9d2c918d3f
refs/heads/master
2022-11-06T17:04:00.096746
2022-09-08T23:37:17
2022-09-08T23:37:17
122,237,917
1
1
null
2022-11-03T17:59:22
2018-02-20T18:27:52
HTML
UTF-8
R
false
false
1,099
r
visualize_UI_misc.R
# Miscellaneous UI outputs for the Visualize tab, returned as a single list
# so the caller can splice them into the Shiny server. Each *_icon output
# flips a chevron glyph up/down depending on whether the matching sidebar
# section is currently expanded (i.e. its id is in input$viz_sidebar).
list(
  # warning messages for viztab: concatenated HTML from reactive values
  output$warnings_visualize <- renderUI({
    HTML(paste(revals$warningmessage_visualize, collapse = ""))
  }),
  # icon control for viztab collapsible sections; req() keeps these from
  # rendering unless the Visualize page is active
  output$chooseplots_icon <- renderUI({
    req(input$top_page == 'Visualize')
    if('peakplots' %in% input$viz_sidebar) icon('chevron-up', lib = 'glyphicon') else icon('chevron-down', lib = 'glyphicon')
  }),
  # axis-labels section chevron
  output$axlabs_icon <- renderUI({
    req(input$top_page == 'Visualize')
    if('axlabs' %in% input$viz_sidebar) icon('chevron-up', lib = 'glyphicon') else icon('chevron-down', lib = 'glyphicon')
  }),
  # save/download section chevron
  output$saveplots_icon <- renderUI({
    req(input$top_page == 'Visualize')
    if('downloads' %in% input$viz_sidebar) icon('chevron-up', lib = 'glyphicon') else icon('chevron-down', lib = 'glyphicon')
  }),
  # dynamic plot-options section chevron
  output$dynamic_opts_icon <- renderUI({
    req(input$top_page == 'Visualize')
    if('reactive_plot_opts' %in% input$viz_sidebar) icon('chevron-up', lib = 'glyphicon') else icon('chevron-down', lib = 'glyphicon')
  })
  #
)
fb0cb891da07f264445f32d8ca9518d6a3409d15
7917fc0a7108a994bf39359385fb5728d189c182
/cran/paws.database/man/rds_add_role_to_db_instance.Rd
bf443f007e380c7f3f8b276094e8055b99c941f2
[ "Apache-2.0" ]
permissive
TWarczak/paws
b59300a5c41e374542a80aba223f84e1e2538bec
e70532e3e245286452e97e3286b5decce5c4eb90
refs/heads/main
2023-07-06T21:51:31.572720
2021-08-06T02:08:53
2021-08-06T02:08:53
396,131,582
1
0
NOASSERTION
2021-08-14T21:11:04
2021-08-14T21:11:04
null
UTF-8
R
false
true
1,190
rd
rds_add_role_to_db_instance.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rds_operations.R \name{rds_add_role_to_db_instance} \alias{rds_add_role_to_db_instance} \title{Associates an AWS Identity and Access Management (IAM) role with a DB instance} \usage{ rds_add_role_to_db_instance(DBInstanceIdentifier, RoleArn, FeatureName) } \arguments{ \item{DBInstanceIdentifier}{[required] The name of the DB instance to associate the IAM role with.} \item{RoleArn}{[required] The Amazon Resource Name (ARN) of the IAM role to associate with the DB instance, for example \verb{arn:aws:iam::123456789012:role/AccessRole}.} \item{FeatureName}{[required] The name of the feature for the DB instance that the IAM role is to be associated with. For the list of supported feature names, see DBEngineVersion.} } \value{ An empty list. } \description{ Associates an AWS Identity and Access Management (IAM) role with a DB instance. To add a role to a DB instance, the status of the DB instance must be \code{available}. } \section{Request syntax}{ \preformatted{svc$add_role_to_db_instance( DBInstanceIdentifier = "string", RoleArn = "string", FeatureName = "string" ) } } \keyword{internal}
d5ba6bbff56697b8e360ccbc959b107a3d242298
ca3fa26a219a1695dc8d30f447325148a2f9c6f5
/man/assignDirectory.Rd
80457d1e954cfff936e37d2193e0045a1e314491
[]
no_license
joshbrowning2358/romeHousePrices
fc0c2cca2bdd66c02c655e01ec1fbcf61ba98322
e3568316c7d515605f8d72255c825b4569f7ae61
refs/heads/master
2023-02-18T15:47:17.485520
2015-12-14T05:59:16
2015-12-14T05:59:16
null
0
0
null
null
null
null
UTF-8
R
false
false
596
rd
assignDirectory.Rd
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/assignDirectory.R \name{assignDirectory} \alias{assignDirectory} \title{Get Directory} \usage{ assignDirectory() } \value{ No value is returned, but workingDir and savingDir are written to the global environment. } \description{ This function uses the information at Sys.info() to assign a working directory and saving directory. The names of the two objects created are workingDir and savingDir, and they are assigned to the global environment so as to not require returning/assigning by this function. }
6e270cca049399a4c0abec31c45a92392dcab256
dc1995bd7e6a5320cd454d69c5e730990519fd75
/06-experimental.R
0af95c12437b64bd98e75f989231323e633f42c0
[]
no_license
aswansyahputra/30daychartchallenge
80b3d013350c7f82bf3d6ea8c20b3759d9c5bb59
877b899559c40c2bfa07ebbf5976e60435c7b3a9
refs/heads/master
2023-04-24T11:34:35.318862
2021-04-29T13:54:11
2021-04-29T13:54:11
353,593,761
6
0
null
null
null
null
UTF-8
R
false
false
3,446
r
06-experimental.R
# Treemap of #rstats function classifications across the author's projects.
# Scans a directory of R scripts, classifies the functions used in them via
# the {tidycode} crowdsourced lexicon, and draws a rounded-rect treemap.

# Load packages -----------------------------------------------------------
library(tidyverse)
library(tidycode)
library(treemapify)
library(ggfittext)
library(ggtext)
library(scales)
library(paletteer)

# List path of rscripts within project directories ------------------------
# NOTE(review): placeholder path — must be set to a real directory to run.
rpaths <- list.files(
  path = "use/your/directory/path",
  pattern = "*\\.R$",
  recursive = TRUE,
  full.names = TRUE
)

# Read rscripts as dataframe ----------------------------------------------
# possibly() skips files that fail to parse instead of aborting the map.
rcodes <- map_dfr(rpaths, possibly(read_rfiles, otherwise = NULL))

# Classify function used in rscripts --------------------------------------
# Drop utils files, expand calls, join the crowdsourced classification
# lexicon, and remove stop-functions (common non-informative calls).
rcodes_class <- rcodes %>%
  filter(str_detect(file, "utils", negate = TRUE)) %>%
  unnest_calls(expr) %>%
  inner_join(
    get_classifications(
      lexicon = "crowdsource",
      include_duplicates = TRUE
    )
  ) %>%
  anti_join(get_stopfuncs()) %>%
  select(-args)

# Prepare data for visualization ------------------------------------------
# Recode categories, count per class, and compute treemap tile coordinates
# (treemapify adds xmin/xmax/ymin/ymax) plus labels and text colours.
to_plot <- rcodes_class %>%
  mutate(
    classification = if_else(
      func == "GET",
      "import",
      classification
    ),
    classification = recode(
      classification,
      "data cleaning" = "transformation"
    )
  ) %>%
  count(classification) %>%
  left_join(
    treemapify(
      .,
      area = "n",
      subgroup = "classification",
      xlim = c(0, 10),
      ylim = c(0, 10)
    )
  ) %>%
  mutate(
    pct = n / sum(n),
    label = percent(pct, accuracy = 0.1),
    # light text on small (dark) tiles, dark text otherwise
    txtcolour = case_when(
      pct < 0.1 ~ "#2e8db0",
      TRUE ~ "#e5e5e3"
    )
  )

# Create plot -------------------------------------------------------------
# treemapify:::geom_rrect is an unexported rounded-rect geom (fragile API).
p <- to_plot %>%
  ggplot(aes(xmin = xmin, xmax = xmax, ymin = ymin, ymax = ymax)) +
  treemapify:::geom_rrect(
    aes(fill = pct),
    radius = unit(15, "pt"),
    colour = "#e5e5e3",
    size = 5,
    show.legend = FALSE
  ) +
  # percentage label, grown to fit each tile
  geom_fit_text(
    aes(label = label, colour = txtcolour),
    place = "bottomright",
    family = "Arial Narrow",
    padding.x = unit(4, "mm"),
    padding.y = unit(4, "mm"),
    grow = TRUE
  ) +
  # category name, rotated 90° except for the two widest categories
  geom_fit_text(
    aes(label = classification, colour = txtcolour, angle = if_else(classification %in% c("export", "communication"), 0, 90)),
    min.size = 10,
    place = "topright",
    family = "Arial Narrow",
    fontface = "bold",
    padding.x = unit(4, "mm"),
    padding.y = unit(4, "mm"),
    reflow = TRUE,
    show.legend = FALSE
  ) +
  # title/credits rendered via element_markdown in the caption slot
  labs(
    caption = "<b style='font-size:35pt;color:grey15'>What are these codes for?</b><br>Classification of my #rstats codes within 25 data analysis projects at work<br><br><i style='font-size:10pt;'><br>﹋﹋﹋﹋﹋﹋﹋﹋﹋﹋<br>Data and visualization by Muhammad Aswan Syahputra</i>"
  ) +
  scale_fill_paletteer_c("ggthemes::Blue-Teal") +
  scale_colour_identity() +
  theme_void(base_family = "Arial Narrow") +
  theme(
    plot.background = element_rect(fill = "#e5e5e3", colour = NA),
    panel.background = element_rect(fill = "#e5e5e3", colour = NA),
    plot.caption.position = "plot",
    plot.caption = element_markdown(hjust = 0.5, colour = "gray25", size = rel(1.2), lineheight = 0.8),
    plot.margin = margin(20, 20, 20, 20)
  ) +
  coord_cartesian(clip = "off")

# Save plot ---------------------------------------------------------------
ggsave(
  "outfile/06-experimental.png",
  plot = p,
  width = 8,
  height = 8,
  dpi = 300,
  type = "cairo-png"
)
26f050e94e56f89606cbfcb209ccc2da35ef6deb
13f24d4689ea4420c0e3358a59be38f8c6e2bb2f
/R/univtwinmod.R
4d5bc851ea39cfce4a6dc018cef5c556eeec893d
[]
no_license
deepchocolate/qglavmods
43d360d63b443d1ab76e38e9110278c5fe75445c
e6256adb042fb46fd300f0f57f343e57948b4c0f
refs/heads/master
2023-09-04T01:11:44.246683
2021-10-13T08:12:00
2021-10-13T08:12:00
342,619,944
0
0
null
null
null
null
UTF-8
R
false
false
2,264
r
univtwinmod.R
#' Generate syntax for a univariate twin model
#'
#' Builds a lavaan model string for a univariate twin model with the
#' requested variance components (ACE, ADE, AE, ...). The returned syntax
#' defines latent factors per twin, fixes their cross-twin correlations
#' (A: 0.5/1, C: 1/1, D: 0.25/1, E: 0/0 for DZ/MZ), and derives squared
#' paths, standardized components and heritability (h2).
#'
#' @param measT1 Variable name of measurement in twin one.
#' @param measT2 Variable name of measurement in twin two.
#' @param model A list of variance components (A,C,D,E) and labels, eg list(A='a',C='c',E='e').
#' @param append Any additional syntax appended verbatim at the end.
#' @param varLabels Labels for the derived standardized variance components
#'   (e.g. A -> "add"); used on the left-hand side of the ":=" definitions.
#' @param independentT1 Named list of covariates for twin one: names become
#'   the (group-equal) regression labels, values the predictor expressions.
#' @param independentT2 As \code{independentT1}, for twin two.
#' @return A single character string of lavaan model syntax.
#' @export
univTwinMod <- function (measT1, measT2, model=list(A='a',C='c',E='e'), append='',
                         varLabels=list(A='add',C='com',D='dom',E='uni'),
                         independentT1=list(int='1'), independentT2=list(int='1')) {
  # BUGFIX: the header comment used to be overwritten immediately because the
  # next paste0() call omitted `o`; the comment never reached the output.
  # (Also fixed the "Measurments" typo and removed the unused `corrs` local.)
  o <- '# Measurements are uncorrelated\n'
  o <- paste0(o, measT1, '~~0*', measT2, '\n')
  if (length(independentT1) > 0 || length(independentT2) > 0) {
    o <- paste0(o, '# Regressions\n')
    # Each covariate gets the same label in both groups: c(lab,lab)*predictor.
    r <- c()
    for (lab in names(independentT1)) {
      r <- c(r, paste0('c(', lab, ',', lab, ')*', independentT1[lab]))
    }
    o <- paste0(o, measT1, ' ~ ', paste(r, collapse = ' + '), '\n')
    r <- c()
    for (lab in names(independentT2)) {
      r <- c(r, paste0('c(', lab, ',', lab, ')*', independentT2[lab]))
    }
    o <- paste0(o, measT2, ' ~ ', paste(r, collapse = ' + '), '\n')
  }
  # Latent factors: one per component per twin, equal loadings across groups.
  totalVars <- c()
  for (fac in names(model)) {
    fac1 <- paste0(fac, '1')
    fac2 <- paste0(fac, '2')
    facName <- model[fac]
    totalVars <- c(totalVars, paste0(facName, '2'))
    o <- paste0(o, fac1, ' =~ c(', facName, ',', facName, ')*', measT1, '\n')
    o <- paste0(o, fac2, ' =~ c(', facName, ',', facName, ')*', measT2, '\n')
    # Cross-twin correlations fixed per component (DZ, MZ order).
    if (fac == 'A') {
      o <- paste0(o, fac1, ' ~~ c(0.5,1)*', fac2, '\n')
    }
    if (fac == 'C') {
      o <- paste0(o, fac1, '~~1*', fac2, '\n')
    }
    if (fac == 'D') {
      o <- paste0(o, fac1, ' ~~ c(0.25,1)*', fac2, '\n')
    }
    if (fac == 'E') {
      # Residual variation is uncorrelated between twins
      o <- paste0(o, fac1, '~~0*', fac2, '\n')
    }
    # Derived squared path, e.g. "a2 := a*a".
    o <- paste0(o, facName, '2 := ', facName, '*', facName, '\n')
  }
  # Total phenotypic variance as the sum of the squared paths.
  totVar <- paste0('(', paste0(totalVars, collapse = '+'), ')')
  # Standardized components; A and D together form broad-sense heritability.
  geneFacs <- c()
  for (fac in names(model)) {
    if (fac %in% c('A', 'D')) geneFacs <- c(geneFacs, paste0(model[fac], '2'))
    o <- paste0(o, varLabels[fac], ' := ', model[fac], '2/', totVar, '\n')
  }
  o <- paste0(o, 'h2 := (', paste0(geneFacs, collapse = '+'), ')/', totVar, '\n')
  return(paste0(o, append))
}
75a2d615b535dd2a516aae17b4b8445a7582427e
0bc7b27b4ecdf338211f763915e498afbd076f19
/R/resumenNumericoPonderada.R
faec068c7dd2ba5de29c57d6e29597447db4af34
[]
no_license
cran/RcmdrPlugin.TeachStat
f42fd6b05a5e351d3f77e7204daabeae93bc93f1
702e87f2c3e6e7036a50d547f529f20ea915d369
refs/heads/master
2022-08-01T00:58:27.010966
2022-06-22T11:00:02
2022-06-22T11:00:02
162,720,733
0
0
null
null
null
null
UTF-8
R
false
false
7,786
r
resumenNumericoPonderada.R
# Rcmdr dialog "Numerical summaries - Weighted variables".
# Builds a two-tab tcltk dialog (data / statistics), remembers its state via
# getDialog()/putDialog(), and on OK assembles and executes a W.numSummary()
# call as a string via doItAndPrint(). Relies on many functions from the
# Rcmdr environment (variableListBox, getSelection, ActiveDataSet, ...).
resumenNumericoPonderada <- function(){
  Library("abind")
  Library("e1071")
  # Library("Hmisc")
  # Default dialog state; checkbox states are "0"/"1" strings (tcl values).
  defaults <- list(initial.x=NULL,initial.sg=gettext("<no variable selected>",domain="R-RcmdrPlugin.TeachStat"),
                   initial.sg2=gettext("<no variable selected>",domain="R-RcmdrPlugin.TeachStat"),
                   initial.mean="1", initial.sd="1", initial.se.mean="0", initial.IQR="1",
                   initial.cv="0", initial.quantiles.variable="1",
                   initial.quantiles="0, .25, .5, .75, 1", initial.skewness="0", initial.kurtosis="0", initial.tab=0)
  dialog.values <- getDialog("resumenNumericoPonderada", defaults)
  # NOTE(review): initial.group is not in `defaults`, so this is NULL on a
  # fresh session and appears unused below — confirm before removing.
  initial.group <- dialog.values$initial.group
  initializeDialog(title=gettext("Numerical summaries - Weighted variables",domain="R-RcmdrPlugin.TeachStat"),
                   use.tabs=TRUE, tabs=c("dataTab", "statisticsTab"))
  # Data tab: variables to summarise, weight variable, optional grouping factor.
  xBox <- variableListBox(dataTab, Numeric(), selectmode="multiple",
                          title=gettext("Variables (pick one or more)",domain="R-RcmdrPlugin.TeachStat"),
                          initialSelection=varPosn(dialog.values$initial.x, "numeric"))
  selectVariablePonderacion <- variableComboBox(dataTab, variableList=Numeric(),
                                                initialSelection=dialog.values$initial.sg,
                                                title=gettext("Weight variable",domain="R-RcmdrPlugin.TeachStat"))
  # Disable the group combo box when the active data set has no factors.
  if (length(Factors())!=0){
    mostrar<-"readonly"
  }else {
    mostrar<-"disabled"
  }
  selectGroupComboBox <- variableComboBox(dataTab, variableList=Factors(), state=mostrar,
                                          initialSelection=dialog.values$initial.sg2,
                                          title=gettext("Group (pick one)",domain="R-RcmdrPlugin.TeachStat"))
  # Statistics tab: one checkbox per statistic plus a quantiles entry field.
  checkBoxes(window = statisticsTab, frame="checkBoxFrame",
             boxes=c("mean", "sd", "se.mean", "IQR", "cv","skewness", "kurtosis"),
             initialValues=c(dialog.values$initial.mean, dialog.values$initial.sd,
                             dialog.values$initial.se.mean, dialog.values$initial.IQR,
                             dialog.values$initial.cv,dialog.values$initial.skewness,
                             dialog.values$initial.kurtosis),
             labels=gettext(c("Mean", "Standard Deviation", "Standard Error of Mean",
                              "Interquartile Range", "Coefficient of Variation","Skewness",
                              "Kurtosis"),domain="R-RcmdrPlugin.TeachStat"))
  quantilesVariable <- tclVar(dialog.values$initial.quantiles.variable)
  quantilesFrame <- tkframe(statisticsTab)
  quantilesCheckBox <- tkcheckbutton(quantilesFrame, variable=quantilesVariable,
                                     text=gettext("Quantiles:",domain="R-RcmdrPlugin.TeachStat"))
  quantiles <- tclVar(dialog.values$initial.quantiles)
  quantilesEntry <- ttkentry(quantilesFrame, width="20", textvariable=quantiles)
  # OK handler: validate selections, persist state, build the W.numSummary()
  # call string and run it through the Rcmdr log.
  onOK <- function(){
    tab <- if (as.character(tkselect(notebook)) == dataTab$ID) 0 else 1
    x <- getSelection(xBox)
    pondVar<-getSelection(selectVariablePonderacion)
    g<- getSelection(selectGroupComboBox)
    #doItAndPrint(str(sg2var))
    quants <- tclvalue(quantiles)
    # Read the tcl checkbox variables created by checkBoxes() above.
    meanVar <- tclvalue(meanVariable)
    sdVar <- tclvalue(sdVariable)
    se.meanVar <- tclvalue(se.meanVariable)
    IQRVar <- tclvalue(IQRVariable)
    cvVar <- tclvalue(cvVariable)
    quantsVar <- tclvalue(quantilesVariable)
    skewnessVar <- tclvalue(skewnessVariable)
    kurtosisVar <- tclvalue(kurtosisVariable)
    # Persist current selections so the dialog reopens in the same state.
    putDialog("resumenNumericoPonderada", list(
      initial.x=x,initial.sg=pondVar,initial.sg2=g, initial.mean=meanVar,
      initial.sd=sdVar, initial.se.mean=se.meanVar, initial.IQR=IQRVar,
      initial.cv=cvVar, initial.quantiles.variable=quantsVar,
      initial.quantiles=quants, initial.skewness=skewnessVar,
      initial.kurtosis=kurtosisVar, initial.tab=tab
    ))
    if (length(x) == 0){
      errorCondition(recall=resumenNumericoPonderada,
                     message=gettext("No variable selected",domain="R-RcmdrPlugin.TeachStat"))
      return()
    }
    closeDialog()
    # Parse the quantiles entry: spaces become commas, runs of commas collapse,
    # then split and coerce to numeric.
    quants <- paste(gsub(",+", ",", gsub(" ", ",", quants)), sep="")
    quants <- as.numeric( strsplit(quants,split=",")[[1]])
    posiblesstatistic<-c("mean", "sd", "se(mean)", "IQR", "quantiles", "cv", "skewness", "kurtosis")
    statselegidas<-c(meanVar, sdVar, se.meanVar, IQRVar, quantsVar, cvVar, skewnessVar, kurtosisVar)
    #print(posiblesstatistic)
    #print(statselegidas)
    # Keep only the statistics whose checkbox value is "1".
    stats <- posiblesstatistic[as.logical(as.numeric(statselegidas))]
    if (length(stats) == 0){
      errorCondition(recall=resumenNumericoPonderada,
                     message=gettext("No statistics selected",domain="R-RcmdrPlugin.TeachStat"))
      return()
    }
    # Quantiles (when requested) must be a non-empty numeric vector in [0,1].
    if(((NA %in% quants)||(length( quants[(quants<0)|(quants>1)])!=0) || length(quants)<1)&&(quantsVar==1)){
      errorCondition(recall=resumenNumericoPonderada,
                     message=gettext("Quantiles must be a numeric vector in [0,1]",domain="R-RcmdrPlugin.TeachStat"))
      return()
    }
    if((length(quants)==0)&&(quantsVar==1)){
      errorCondition(recall=resumenNumericoPonderada,
                     message=gettext("Quantiles must be a numeric vector in [0,1]",domain="R-RcmdrPlugin.TeachStat"))
      return()
    }
    activeDataSet <- ActiveDataSet()
    activeDataSet<-get(activeDataSet)
    # NOTE(review): vSeleccionadas appears unused after this point — confirm.
    vSeleccionadas<-subset(activeDataSet,select = x)
    if(pondVar==gettext("<no variable selected>",domain="R-RcmdrPlugin.TeachStat")){variablePonderacion<-NULL} else{variablePonderacion<-activeDataSet[,pondVar]}
    if(g==gettext("<no variable selected>",domain="R-RcmdrPlugin.TeachStat")){factorAgrupacion<-NULL} else{factorAgrupacion<-activeDataSet[,g]}
    ##################### Build the command string R Commander will print/run ###########################################
    .activeDataSet<-ActiveDataSet()
    # Data argument: "<ds>[,c("v")]" for one variable, "<ds>[,c("v1","v2")]" for several.
    if(0 == length(x)) vponderada<-"NULL" else{
      if (length(x) == 1){vponderada<- paste('"', x, '"', sep="")
        vponderada<-paste(.activeDataSet, "[,c(", vponderada, ")]", sep="")
      } else{
        vponderada<-paste("c(", paste('"', x, '"', collapse=", ", sep=""), ")", sep="")
        vponderada <- paste(.activeDataSet, "[,", vponderada, "]", sep="")
      }
    }
    # Statistics argument: quoted names of the selected statistics.
    stadisticas <- paste("c(", paste(c('"mean"', '"sd"', '"se(mean)"', '"IQR"', '"quantiles"', '"cv"', '"skewness"', '"kurtosis"') [c(meanVar, sdVar, se.meanVar, IQRVar, quantsVar, cvVar, skewnessVar, kurtosisVar) == 1], collapse=", "), ")", sep="")
    if(pondVar==gettext("<no variable selected>",domain="R-RcmdrPlugin.TeachStat")){vPonderacion<-"NULL"} else{vPonderacion<-paste(.activeDataSet,"$",pondVar, sep="")}
    if(g==gettext("<no variable selected>",domain="R-RcmdrPlugin.TeachStat")){grupo<-"NULL"} else{grupo<-paste(.activeDataSet,"$",g, sep="")}
    if(0 == length(quants)) cuantiles <-"NULL" else{
      cuantiles <- if (length(quants) == 1) paste(quants , sep="") else paste("c(", paste(quants, collapse=",", sep=""), ")", sep="")
    }
    command<- paste("W.numSummary(data=", vponderada,", statistics =", stadisticas,", quantiles = ",cuantiles,", weights=",vPonderacion,", groups=", grupo,")",sep="" )
    doItAndPrint(command)
    tkfocus(CommanderWindow())
  }
  # Standard Rcmdr OK/Cancel/Help row; reset/apply re-invoke this dialog.
  OKCancelHelp(helpSubject="W.numSummary", reset="resumenNumericoPonderada", apply ="resumenNumericoPonderada")
  # Lay out widgets and finalise the dialog.
  tkgrid(getFrame(xBox),labelRcmdr(dataTab, text=" "),getFrame(selectVariablePonderacion),labelRcmdr(dataTab, text=" "),getFrame(selectGroupComboBox),sticky="nw")
  tkgrid(checkBoxFrame, sticky="nw")
  tkgrid(quantilesCheckBox, quantilesEntry, sticky="w")
  tkgrid(quantilesFrame)
  dialogSuffix(use.tabs=TRUE, grid.buttons=TRUE, tabs=c("dataTab", "statisticsTab"), tab.names=c("Data", "Statistics"))
}
cdc663edf35bfd24d53afff0438a76fb03d0b108
2c7170e80155d784ada407c7bedd7330677dfdcc
/R/lfqRestructure.R
d75d79102fa053417692acd9171256ff56be2d99
[]
no_license
cran/TropFishR
24d5eb2e67763e9fcb7e2de384c793a81398860f
7314b3f27dbc1598c4f90f6644730354714271c1
refs/heads/master
2021-10-06T11:19:43.850654
2021-10-04T07:10:02
2021-10-04T07:10:02
62,435,814
1
0
null
null
null
null
UTF-8
R
false
false
8,292
r
lfqRestructure.R
#' @title Restructuring of length frequency data
#'
#' @description First step of the Electronic LEngth Frequency ANalysis (ELEFAN),
#'    which is restructuring length-frequency data (lfq).
#'    This is done according to a certain protocol, described by many authors (see
#'    Details or References for more information).
#'
#' @param param a list consisting of following parameters:
#' \itemize{
#'   \item \strong{midLengths} midpoints of the length classes
#'   \item \strong{dates} dates of sampling times (class Date)
#'   \item \strong{catch} matrix with catches/counts per length class (row) and
#'      sampling date (column)
#' }
#' @param MA number indicating over how many length classes the moving average
#'    should be performed (default: 5)
#' @param addl.sqrt additional squareroot transformation of positive values
#'    according to Brey et al. (1988) (default: FALSE).
#'    Particularly useful if many observations have a low frequency (<10)
#'
#' @examples
#' # data and plot of catch frequencies
#' data(synLFQ4)
#' plot(synLFQ4, Fname="catch")
#'
#' # restructuring and calculation of ASP
#' synLFQ4 <- lfqRestructure(synLFQ4, MA=11)
#' synLFQ4$ASP
#'
#' # plot of restructured scores and fit of soVBGF growth curves
#' plot(synLFQ4)
#' lfqFitCurves(synLFQ4,
#'   par=list(Linf=80, K=0.5, t_anchor=0.25, C=0.75, ts=0),
#'   draw=TRUE
#' )$fASP
#'
#'
#' @details This function is used prior to fitting of growth curves (e.g. in
#'    \code{\link{ELEFAN}}, \code{\link{ELEFAN_SA}} functions). It restructures a length
#'    frequency data set according to a list of steps to emphasise cohorts in the data.
#'    The steps can be found in various publications, see e.g. Brey et al. (1988) or
#'    Pauly and David (1981). Here, the most recent steps documented in Gayanilo (1997)
#'    are followed.
#'
#' @return A list with the input parameters and following list objects:
#' \itemize{
#'   \item \strong{rcounts}: restructured frequencies,
#'   \item \strong{peaks_mat}: matrix with uniquely numbered positive peaks,
#'   \item \strong{ASP}: available sum of peaks, sum of positive peaks which
#'      could potentially be hit by growth curves. This is calculated as the sum of
#'      maximum values from each run of positive restructured scores,
#'   \item \strong{MA}: moving average used for restructuring.
#' }
#'
#'
#' @references
#' Brey, T., Soriano, M., and Pauly, D. 1988. Electronic length frequency analysis:
#' a revised and expanded user's guide to ELEFAN 0, 1 and 2.
#'
#' Gayanilo, Felimon C. FAO-ICLARM stock assessment tools: reference manual.
#' No. 8. Food & Agriculture Org., 1997.
#'
#' Pauly, D. 1981. The relationship between gill surface area and growth performance in fish:
#' a generalization of von Bertalanffy's theory of growth. \emph{Meeresforsch}. 28:205-211
#'
#' Pauly, D. and N. David, 1981. ELEFAN I, a BASIC program for the objective extraction of
#' growth parameters from length-frequency data. \emph{Meeresforschung}, 28(4):205-211
#'
#' Pauly, D., 1985. On improving operation and use of ELEFAN programs. Part I: Avoiding
#' "drift" of K towards low values. \emph{ICLARM Conf. Proc.}, 13-14
#'
#' Pauly, D., 1987. A review of the ELEFAN system for analysis of length-frequency data in
#' fish and aquatic invertebrates. \emph{ICLARM Conf. Proc.}, (13):7-34
#'
#' Pauly, D. and G. R. Morgan (Eds.), 1987. Length-based methods in fisheries research.
#' (No. 13). WorldFish
#'
#' Pauly, D. and G. Gaschuetz. 1979. A simple method for fitting oscillating length
#' growth data, with a program for pocket calculators. I.C.E.S. CM 1979/6:24.
#' Demersal Fish Cttee, 26 p.
#'
#' Pauly, D. 1984. Fish population dynamics in tropical waters: a manual for use
#' with programmable calculators (Vol. 8). WorldFish.
#'
#' Quenouille, M. H., 1956. Notes on bias in estimation. \emph{Biometrika}, 43:353-360
#'
#' Somers, I. F., 1988. On a seasonally oscillating growth function.
#' ICLARM Fishbyte 6(1): 8-11.
#'
#' Sparre, P., Venema, S.C., 1998. Introduction to tropical fish stock assessment.
#' Part 1. Manual. \emph{FAO Fisheries Technical Paper}, (306.1, Rev. 2): 407 p.
#'
#' Tukey, J., 1958. Bias and confidence in not quite large samples.
#' \emph{Annals of Mathematical Statistics}, 29: 614
#'
#' Tukey, J., 1986. The future of processes of data analysis. In L. V. Jones (Eds.),
#' The Collected Works of John W. Tukey-philosophy and principles of data analysis:
#' 1965-1986 (Vol. 4, pp. 517-549). Monterey, CA, USA: Wadsworth & Brooks/Cole
#'
#' @export
lfqRestructure <- function(param, MA=5, addl.sqrt=FALSE){
  lfq <- param
  # replace NAs in catch
  lfq$catch[which(is.na(lfq$catch))] <- 0
  # an even MA has no symmetric window around a bin
  if(MA%%2 == 0) stop("MA must be an odd integer")
  # Steps refer to Gayanilo (1997) FAO-ICLARM stock assessment tools: reference manual
  rcounts <- 0*lfq$catch
  # Restructure each sampling date (column) independently.
  for(i in seq(ncol(lfq$catch))){
    pm <- (MA-1)/2 # plus minus (half window width)
    # positions of first and last non-zero values; leading/trailing zero bins
    # are excluded from the restructuring and stay 0 in rcounts
    val_first <- min(which(lfq$catch[,i] != 0))
    val_last <- max(which(lfq$catch[,i] != 0))
    val_pos <- seq(val_first, val_last)
    val_string <- lfq$catch[val_pos,i]
    # number of values
    n <- length(val_string)
    AF <- NaN*val_string
    nz <- NaN*val_string
    if(n > 1){
      temp <- seq(val_string)
    }else{
      temp <- 1
    }
    ## Steps A & B - Computation of the moving average
    # AF[j] = count / (MA-wide moving average); window is truncated at the
    # edges but the divisor stays MA, and out-of-range slots count as zeros.
    for(j in temp){
      idx <- (j-pm):(j+pm)
      idx <- idx[which(idx %in% temp)]
      idxn <- idx[-which(idx==j)] # neighbors only
      nz[j] <- sum(val_string[idxn] == 0) + (MA-length(idx)) # number of adjacent zeros
      MA.j <- sum(val_string[idx])/MA
      AF[j] <- val_string[j]/MA.j
    }
    # intermediate step to remove Inf or NA (zero moving averages)
    AF <- replace(AF, which(AF==Inf | is.na(AF)), 0)
    # Calculate mean quotient
    mprime <- mean(AF, na.rm=TRUE)
    ## Step C Divide by mean quotient and subtract 1.0
    Fs <- AF / mprime - 1 # restructured frequencies
    ## Steps D & E - Identify isolated peaks; Adjust for zero frequency
    # Positive scores are down-weighted by 0.5^(number of adjacent zeros) so
    # isolated spikes surrounded by zeros get less weight.
    posFs <- which(Fs > 0)
    if(length(posFs)>0) {Fs[posFs] <- (Fs * 0.5^nz)[posFs]}
    # replace ultimate length bin with zero if negative
    if(sign(Fs[length(Fs)]) == -1){Fs[length(Fs)] <- 0}
    # divide penultimate length bin by 2 if negative
    if(length(sign(Fs[length(Fs)-1])) > 0 && sign(Fs[length(Fs)-1]) == -1){Fs[length(Fs)-1] <- Fs[length(Fs)-1]*0.5}
    ## Step F - Adjust for Fi
    # Scale negatives so positive and negative scores sum to (roughly) zero.
    SPV <- sum(Fs[which(Fs > 0)]) # sum of positive values
    SNV <- sum(Fs[which(Fs < 0)]) # sum of negative values
    # set -1 to 0
    minus1 <- which((1+Fs) < 1e-8 | is.na(Fs))
    if(length(minus1)>0) {Fs[minus1] <- 0}
    # adjust negative numbers
    isneg <- which(Fs < 0)
    Fs[isneg] <- Fs[isneg] * (SPV/-SNV)
    # optional square-root adjustment to emphasize larger length bins with lower counts
    if(addl.sqrt){
      posFs <- which(Fs > 0)
      # NOTE(review): Fs is indexed relative to val_pos, but lfq$catch is
      # indexed here with posFs directly; when val_first > 1 these refer to
      # different rows — confirm whether lfq$catch[val_pos,i][posFs] was meant.
      if(length(posFs)>0) {Fs[posFs] <- Fs[posFs] / sqrt(1+2/lfq$catch[posFs,i])}
      #Fs[posFs] / sqrt(1+2/Fs[posFs])}
    }
    rcounts[val_pos,i] <- Fs
  }
  lfq$rcounts <- rcounts

  # create peak matrix: number each run of positive scores within a column,
  # then offset by column so peak ids are unique across the whole matrix
  prep_mat <- lfq$rcounts
  prep_mat <- ifelse(prep_mat > 0,1,0)
  peaks_mat <- NA*prep_mat
  for(i in seq(ncol(peaks_mat))){
    vec_peaki <- prep_mat[,i]
    runs <- rle(vec_peaki)
    rle_val <- runs$values
    rle_val[which(rle_val == 1)] <- 1:length(rle_val[which(rle_val == 1)])
    peaks_mat[,i] <- rep(rle_val, runs$lengths)
  }
  maxn.peaks <- max(peaks_mat, na.rm=TRUE)
  peaks_mat <- peaks_mat + (prep_mat * maxn.peaks * col(peaks_mat))
  lfq$peaks_mat <- peaks_mat

  # ASP calc: per column, sum the maximum restructured score of each run of
  # positive values; ASP is the total over all sampling dates
  sampASP <- NaN*seq(ncol(rcounts))
  for(i in seq(ncol(rcounts))){
    ## lfq.i <- lfq[i,]
    tmp <- rle(sign(rcounts[,i]))
    start.idx <- c(1, cumsum(tmp$lengths[-length(tmp$lengths)])+1)
    end.idx <- cumsum(tmp$lengths)
    posrun <- which(tmp$values == 1)
    peakval <- NaN*posrun
    if(length(posrun) > 0){
      for(p in seq(length(posrun))){
        peakval[p] <- max(rcounts[start.idx[posrun[p]]:end.idx[posrun[p]], i ])
      }
      sampASP[i] <- sum(peakval)
    }else{
      sampASP[i] <- 0
    }
  }
  ASP <- sum(sampASP)
  lfq$ASP <- ASP
  lfq$MA <- MA
  class(lfq) <- "lfq"
  return(lfq)
}
2b7444311f0bdc5f35cb30411096964960642704
c07e1c72dea0b10cce8c9b85b5ea1c79ce545678
/TableOne.R
b2832dc86e502cbcce043ed30a740e08699cccab
[]
no_license
erickawaguchi/CE4-Survival
059eedae450a28de96a75e04fbeb75e13e744ff6
f96f927cb0ca2f6988199c3c7c002175e21eb0a0
refs/heads/master
2022-04-19T16:20:00.570247
2020-04-15T20:27:16
2020-04-15T20:27:16
null
0
0
null
null
null
null
UTF-8
R
false
false
3,152
r
TableOne.R
# Descriptive tables (Tables 1, 2, 4) for the AREDS/AREDS2 phenotype data
# using the tableone package. Results are printed interactively; the summary()
# calls provide medians/quartiles for the continuous variables.
require(tableone)
fulldata_AREDS<-read.csv("Phenotype_AREDS.csv")
# categorical covariates must be factors for CreateTableOne
fulldata_AREDS$smoke<-as.factor(fulldata_AREDS$smoke)
fulldata_AREDS$status<-as.factor(fulldata_AREDS$status)
#####################################################################
##########                 Table 1                       ############
#####################################################################
# Overall cohort description, then stratified by treatment arm (Trt).
table1_all<-CreateTableOne(data = fulldata_AREDS,vars=c("enrollage","smoke","Sex","SevScaleBL","status"))
table1_all$CatTable
table1_all$ContTable
summary(fulldata_AREDS$enrollage)
summary(fulldata_AREDS$SevScaleBL)
summary(fulldata_AREDS$Y)
table1_all<-CreateTableOne(data = fulldata_AREDS,vars=c("enrollage","smoke","Sex","SevScaleBL","status"),strata="Trt")
table1_all$CatTable
table1_all$ContTable
summary(fulldata_AREDS[which(fulldata_AREDS$Trt==0),]$enrollage)
summary(fulldata_AREDS[which(fulldata_AREDS$Trt==1),]$enrollage)
summary(fulldata_AREDS[which(fulldata_AREDS$Trt==0),]$SevScaleBL)
summary(fulldata_AREDS[which(fulldata_AREDS$Trt==1),]$SevScaleBL)
#####################################################################
##########                 Table 2                       ############
#####################################################################
# target = carrier status of variant rs147106198 (0 = non-carrier, 1 = carrier).
fulldata_AREDS$target<-ifelse(fulldata_AREDS$rs147106198==0,0,1)
fulldata_AREDS$Trt<-as.factor(fulldata_AREDS$Trt)
table1_all<-CreateTableOne(data = fulldata_AREDS,vars=c("enrollage","smoke","Sex","Trt","SevScaleBL"),strata="target")
table1_all$CatTable
table1_all$ContTable
summary(fulldata_AREDS[which(fulldata_AREDS$target==0),]$enrollage)
summary(fulldata_AREDS[which(fulldata_AREDS$target==1),]$enrollage)
summary(fulldata_AREDS[which(fulldata_AREDS$target==0),]$SevScaleBL)
summary(fulldata_AREDS[which(fulldata_AREDS$target==1),]$SevScaleBL)
#####################################################################
##########                 Table 4                       ############
#####################################################################
################# AREDS: AREDS formulation arm
# Restrict to the AREDS-formulation arm (Trt == 1), stratify by carrier status.
AREDS<-fulldata_AREDS[which(fulldata_AREDS$Trt==1),]
table1_all<-CreateTableOne(data = AREDS,vars=c("enrollage","smoke","Sex","SevScaleBL"),strata="target")
table1_all$CatTable
table1_all$ContTable
summary(AREDS[which(AREDS$target==0),]$enrollage)
summary(AREDS[which(AREDS$target==1),]$enrollage)
summary(AREDS[which(AREDS$target==0),]$SevScaleBL)
summary(AREDS[which(AREDS$target==1),]$SevScaleBL)
################# AREDS2: AREDS formulation arm
# Same table for the AREDS2 participants treated with the AREDS formulation.
AREDS2_ctrl<-read.csv("Phenotype_AREDS2_trtAREDS.csv")
AREDS2_ctrl$target<-ifelse(AREDS2_ctrl$rs147106198==0,0,1)
AREDS2_ctrl$smoke<-as.factor(AREDS2_ctrl$smoke)
table1_all<-CreateTableOne(data = AREDS2_ctrl,vars=c("enrollage","smoke","Sex","SevScaleBL"),strata="target")
table1_all$CatTable
table1_all$ContTable
summary(AREDS2_ctrl[which(AREDS2_ctrl$target==0),]$enrollage)
summary(AREDS2_ctrl[which(AREDS2_ctrl$target==1),]$enrollage)
summary(AREDS2_ctrl[which(AREDS2_ctrl$target==0),]$SevScaleBL)
summary(AREDS2_ctrl[which(AREDS2_ctrl$target==1),]$SevScaleBL)
c65b046fe9a9b310730bd543ba8a56a4cd7dafe6
e4f181ea65a44819063e0dcb90604db91946a35b
/09. RMarkdown y Shiny/2. Aplicaciones/Ejercicio_GDP/server.R
923855747624c67405638bc8da6451ed1af9ec17
[]
no_license
1789291/Master-Data-Science
93016732264eef456f61c7d22cd4a64145c226f7
cbe58c9b04c22dff927a82e570d789d9ce849cae
refs/heads/master
2021-09-22T11:20:10.204010
2018-09-09T10:35:52
2018-09-09T10:35:52
null
0
0
null
null
null
null
UTF-8
R
false
false
797
r
server.R
# Ejercicio GDP library(shiny) # Definimos Server shinyServer(function(input, output) { # datos <- reactive({ df_sin_kuw %>% filter(year == input$year) }) output$grafico_scatter <- renderPlot({ ggplot(datos()) + aes(x = gdpPercap, y = lifeExp, size = pop, color = continent) + geom_text(x = 35000, y = 42.5, label = as.character(input$year), size = 20, alpha = .1, color = 'grey60') + geom_point() + scale_y_continuous(limits = c(20, 85)) + scale_x_continuous(limits = c(300, 50000)) + ggtitle("Relación entre GDP per cápita y Esperanza de vida") + labs(x = "GDP per cápita", y = "Esperanza de vida", color = "Continente", size = "Población") + guides(size=FALSE) + theme_minimal() }) })
273abd5a2f5814920ba7a5f3c9348706ab1052c3
349e1979fb70286bd79f43949a24fd163b9d2d3e
/wa_income_location_chart.R
aa4cba2b45a40672d8853464cb4a2a189615f2d6
[]
no_license
maxjj9710/info201_project
2dd4bb09a60508f46a226a70458e3b652f81ad64
addd60008deae0b36699c0aa5d47242124de612f
refs/heads/main
2023-03-18T12:12:13.505035
2021-03-18T05:09:49
2021-03-18T05:09:49
332,921,666
0
0
null
null
null
null
UTF-8
R
false
false
1,672
r
wa_income_location_chart.R
# Group 5: Vriana Gatdula, Rona Guo, Aubrey Jones, Max Wang # INFO 201 A // AE # Exploratory Analysis # Setup ------------------------------------------------------------------------ # Load the necessary packages. library(dplyr) install.packages("ggplot2") library(ggplot2) library(stringr) library(tidyverse) # Loading the relevant dataset. Note: Go to Session > Set Working Direction to # change to this folder to have the file path run properly. income_by_location <- read.csv("income_by_location.csv") # Chart 2 ---------------------------------------------------------------------- wa_income_location <- income_by_location %>% filter(`ID.Race` >= "1", `ID.Race` <= "9") %>% mutate("County" = str_sub(Geography, 1, -11)) %>% select(Race, Year, `Household.Income.by.Race`, County) race_plot_data <- wa_income_location %>% group_by(Race, Year) %>% summarize(mean_race = mean(Household.Income.by.Race)) race_plot <- ggplot(data = race_plot_data) + geom_point(mapping = aes(x = Year, y = `mean_race`, color = Race)) + labs(title = "Mean Household Income by Race in WA Between 2013 and 2018", x = "Year", y = "Household Income") county_plot_data <- wa_income_location %>% group_by(County, Year) %>% summarize(mean_county = mean(Household.Income.by.Race)) county_plot <- ggplot(data = county_plot_data) + geom_point(mapping = aes(x = Year, y = `mean_county`, color = County)) + labs(title = "Mean Household Income by County in WA Between 2013 and 2018", x = "Year", y = "Household Income", color = "County")
524ccfaebb11646e3730b9fee1339e43bf068ab4
cba35da0c6e0cdb38db25266040c338e651a37f3
/plot4.R
732ad01ca02faee8f57ff50ddfd213ce6b59eecf
[]
no_license
ananyamathur1999/ExData_Plotting1
c7e70122ed52623c53dfb8e4181cd99ee768525b
9a76388d734516b4ca2e1c0f399978599babfe49
refs/heads/master
2021-01-09T16:42:09.213441
2020-02-22T17:04:34
2020-02-22T17:04:34
242,375,849
0
0
null
2020-02-22T16:44:11
2020-02-22T16:44:10
null
UTF-8
R
false
false
1,203
r
plot4.R
doc<- read.table("household_power_consumption.txt",sep=";",header=TRUE) subset <- doc[doc$Date %in% c("1/2/2007","2/2/2007") ,] subset$Sub_metering_1<-as.numeric(as.character(subset$Sub_metering_1)) subset$Sub_metering_2<-as.numeric(as.character(subset$Sub_metering_2)) subset$Sub_metering_3<-as.numeric(as.character(subset$Sub_metering_3)) subset$Voltage<- as.numeric(as.character(subset$Voltage)) subset$Global_reactive_power<- as.numeric(as.character(subset$Global_reactive_power)) subset$Global_active_power<-as.numeric(as.character(subset$Global_active_power)) time<- strptime(paste(as.character(subset$Date),as.character(subset$Time)),"%d/%m/%Y %H:%M:%S") par(mfrow=c(2,2)) plot(time,subset$Global_active_power,type="l",ylab="Global Active Power(in kilowatts)" ,xlab='') plot(time,subset$Voltage,type="l",ylab="Voltage" ,xlab="datatime") plot(time,subset$Sub_metering_1,type="h",ylab="Energy sub metering",xlab="") lines(time,subset$Sub_metering_2,col="red") lines(time,subset$Sub_metering_3,col="blue") plot(time,subset$Global_reactive_power,type="h",xlab="datatime",ylab="Global_reactive_power") dev.copy(png,file="plot4.png", width=480 , height=480) dev.off()
e4743af758917630bc8507aa26a3a834db571d58
efc256ce1f0b8c70d3f9a59799032f798649faba
/libraries.R
c4ba9e827568e54478aef00fd8d3e43a9e6d8366
[]
no_license
pawelek9/MOR_project
c17ba7ec882c49b243be7a584c33deb1e182448f
4a3ac69e1f69d67db4327835a4d95ab66d8776a4
refs/heads/master
2022-09-20T22:15:39.602039
2020-05-28T00:47:17
2020-05-28T00:47:17
266,324,243
0
0
null
null
null
null
UTF-8
R
false
false
336
r
libraries.R
##This script will contines libraries #install.packages("dplyr") #install.packages('taRifx') #install.packages('rlist') require('dplyr') require('taRifx') require('rlist') require('purrr') require('timeSeries') require('fPortfolio') require('microbenchmark') require("comprehenr") require('ggplot2') require('tidyr') require('reshape2')
749e9e1d2349eca9172a286fd0a68c262fe3329f
69ef2c8abdc375caf7ca9b1c1808f6a33a66fcd0
/man/Target-class.Rd
27d471c68d6a2cebea66ae349fcdb03a34d7fbd5
[]
no_license
doomhammerhell/test-datamart
452342d3a1c12db6c6a920d50cb273149cee1725
3d4763e6739e6436219c5607e6a8db7cf28059be
refs/heads/main
2023-03-20T15:55:26.480353
2021-03-16T16:33:07
2021-03-16T16:33:07
348,417,434
0
0
null
null
null
null
UTF-8
R
false
false
219
rd
Target-class.Rd
% Generated by roxygen2 (4.0.2): do not edit by hand \docType{class} \name{Target-class} \alias{Target-class} \title{Buildable target} \description{ This is an abstract class for defining buildable targets. }
a967c7b12479229f7bd10b7a70fca1cbe24c0b5c
f721abfc612b92538a6a0e9897fffdc21b247f0f
/prediction2.R
813b24b62463c8be05a3712a635f72eb9bf57587
[]
no_license
pchoengtawee/Kaggle_RedhatProject
d33f6a0603ae6357f76351580925b0beaa3d82a4
f971590eaffed6ab1b64c19244cdf26dc3c50878
refs/heads/master
2020-05-29T08:51:04.440891
2016-09-24T17:46:59
2016-09-24T17:46:59
null
0
0
null
null
null
null
UTF-8
R
false
false
3,277
r
prediction2.R
#set.seed(11111) set.seed(123) load("data_prep.RData") library(caret) library(data.table) library(ROCR) library(OptimalCutpoints) #str(data_type1_tr) data_type1_tr = lapply(data_type1_tr, as.numeric) data_type1_tr = data.frame(data_type1_tr) data_type1_tr$outcome = as.factor(data_type1_tr$outcome) X = c(1:dim(data_type1_tr)[1]) samp = sample(X,15000) data_type1_tr = data_type1_tr[samp,] inTrain <- createDataPartition(data_type1_tr$outcome, p=0.7, list = FALSE) tr_set = data_type1_tr[inTrain,] te_set = data_type1_tr[-inTrain,] # # control <- trainControl(method="repeatedcv", number=10, search="random") # start = Sys.time() # rf_random <- train(outcome~., data=tr_set, method="rf", metric="Accuracy", tuneLength=10, trControl=control) # end= Sys.time() # elapsed = end-start # elapsed # print(rf_random) # plot(rf_random) # do prediction on validation set load("rf_random.RData") pred_rf = predict(rf_random, te_set) confmat = table(pred_rf, te_set$outcome) sens = confmat[1,1]/(confmat[1,1]+confmat[1,2]) spec = confmat[2,2]/(confmat[2,2]+confmat[2,1]) result.pr = predict(rf_random, type="prob", newdata=te_set)[,2] result.pred = prediction(result.pr, te_set$outcome) result.perf = performance(result.pred,"tpr","fpr") result.auc = performance(result.pred, "auc") ss = performance(result.pred, "sens", "spec") idx = which.max(ss@x.values[[1]]+ss@y.values[[1]]) ss@x.values[idx] ss@y.values[idx] plot(result.perf,main="ROC Curve for Random Forest",col=2,lwd=2) abline(a=0,b=1,lwd=2,lty=2,col="gray") # now do the prediction on the test set data_type1_te = lapply(data_type1_te, as.numeric) data_type1_te = data.frame(data_type1_te) str(data_type1_te) predict_te = predict(rf_random, data_type1_te) # now build model for the rest of the types data_rest_tr = lapply(data_rest_tr, as.numeric) data_rest_tr = data.frame(data_rest_tr) data_rest_tr$outcome = as.factor(data_rest_tr$outcome) str(data_rest_tr) set.seed(234) X = c(1:dim(data_rest_tr)[1]) samp = sample(X,15000) data_rest_tr = 
data_rest_tr[samp,] inTrain <- createDataPartition(data_rest_tr$outcome, p=0.7, list = FALSE) tr_set = data_rest_tr[inTrain,] te_set = data_rest_tr[-inTrain,] control <- trainControl(method="repeatedcv", number=10, search="random") start = Sys.time() rf_random2 <- train(outcome~., data=tr_set, method="rf", metric="Accuracy", tuneLength=10, trControl=control) end= Sys.time() print(rf_random2) elapsed = end-start # do prediction on validation set pred_rf = predict(rf_random2, te_set) confmat = table(pred_rf, te_set$outcome) sens = confmat[1,1]/(confmat[1,1]+confmat[1,2]) spec = confmat[2,2]/(confmat[2,2]+confmat[2,1]) # check ROC result.pr = predict(rf_random2, type="prob", newdata=te_set)[,2] result.pred = prediction(result.pr, te_set$outcome) result.perf = performance(result.pred,"tpr","fpr") result.auc = performance(result.pred, "auc") ss = performance(result.pred, "sens", "spec") idx = which.max(ss@x.values[[1]]+ss@y.values[[1]]) ss@x.values[idx] ss@y.values[idx] plot(result.perf,main="ROC Curve for Random Forest",col=2,lwd=2) abline(a=0,b=1,lwd=2,lty=2,col="gray") # now do the prediction on the test set data_rest_te = lapply(data_rest_te, as.numeric) data_rest_te = data.frame(data_rest_te) str(data_rest_te) predict_te = predict(rf_random2, data_rest_te)
a497131f63e5b16ce3fdd16654a36831a04e0c95
98550ab8b21f1d86f5954886911fc01498ef7699
/man/summary.bioconductorRank.Rd
f3d9efb6fb705197dedd0e6c68392b6352f168d2
[]
no_license
lindbrook/packageRank
a68ee94e0ed3621e7f10239f1eb2d12dbb7c6530
a83ebfaa05f6ee82b7e5ae76cf0b8a4c296b4dfb
refs/heads/master
2023-08-04T21:18:01.261280
2023-08-01T22:00:29
2023-08-01T22:00:29
184,319,415
27
1
null
2023-08-01T22:00:20
2019-04-30T19:25:45
R
UTF-8
R
false
true
534
rd
summary.bioconductorRank.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bioconductorRank.R \name{summary.bioconductorRank} \alias{summary.bioconductorRank} \title{Summary method for bioconductorRank().} \usage{ \method{summary}{bioconductorRank}(object, ...) } \arguments{ \item{object}{Object. An object of class "bioconductor_rank" created by \code{bioconductorRank()}} \item{...}{Additional parameters.} } \description{ Summary method for bioconductorRank(). } \note{ This is useful for directly accessing the data frame. }
89d3e6c0d61838fb6d8031ac4acbc5c2ed990f29
1bcd87514ea143f57f5f4b338ad50f2a8d148134
/man/runireg.Rd
ebdc1074e6a08b6157c0ff1778be566d44ffb32e
[]
no_license
cran/bayesm
b491d7f87740082488c8695293f3565b2929f984
8a7211ff5287c42d5bc5cc60406351d97f030bcf
refs/heads/master
2022-12-10T10:51:14.191052
2022-12-02T09:10:02
2022-12-02T09:10:02
17,694,644
19
15
null
null
null
null
UTF-8
R
false
false
2,475
rd
runireg.Rd
\name{runireg} \alias{runireg} \concept{bayes} \concept{regression} \title{IID Sampler for Univariate Regression} \description{ \code{runireg} implements an iid sampler to draw from the posterior of a univariate regression with a conjugate prior. } \usage{runireg(Data, Prior, Mcmc)} \arguments{ \item{Data }{list(y, X)} \item{Prior}{list(betabar, A, nu, ssq)} \item{Mcmc }{list(R, keep, nprint)} } \details{ \subsection{Model and Priors}{ \eqn{y = X\beta + e} with \eqn{e} \eqn{\sim}{~} \eqn{N(0, \sigma^2)} \eqn{\beta} \eqn{\sim}{~} \eqn{N(betabar, \sigma^2*A^{-1})} \cr \eqn{\sigma^2} \eqn{\sim}{~} \eqn{(nu*ssq)/\chi^2_{nu}} } \subsection{Argument Details}{ \emph{\code{Data = list(y, X)}} \tabular{ll}{ \code{y: } \tab \eqn{n x 1} vector of observations \cr \code{X: } \tab \eqn{n x k} design matrix } \emph{\code{Prior = list(betabar, A, nu, ssq)} [optional]} \tabular{ll}{ \code{betabar: } \tab \eqn{k x 1} prior mean (def: 0) \cr \code{A: } \tab \eqn{k x k} prior precision matrix (def: 0.01*I) \cr \code{nu: } \tab d.f. 
parameter for Inverted Chi-square prior (def: 3) \cr \code{ssq: } \tab scale parameter for Inverted Chi-square prior (def: \code{var(y)}) } \emph{\code{Mcmc = list(R, keep, nprint)} [only \code{R} required]} \tabular{ll}{ \code{R: } \tab number of draws \cr \code{keep: } \tab thinning parameter -- keep every \code{keep}th draw (def: 1) \cr \code{nprint: } \tab print the estimated time remaining for every \code{nprint}'th draw (def: 100, set to 0 for no print) } } } \value{ A list containing: \item{betadraw }{ \eqn{R x k} matrix of betadraws } \item{sigmasqdraw }{ \eqn{R x 1} vector of sigma-sq draws} } \author{Peter Rossi, Anderson School, UCLA, \email{perossichi@gmail.com}.} \references{For further discussion, see Chapter 2, \emph{Bayesian Statistics and Marketing} by Rossi, Allenby, and McCulloch.} \seealso{ \code{\link{runiregGibbs}} } \examples{ if(nchar(Sys.getenv("LONG_TEST")) != 0) {R=2000} else {R=10} set.seed(66) n = 200 X = cbind(rep(1,n), runif(n)) beta = c(1,2) sigsq = 0.25 y = X\%*\%beta + rnorm(n,sd=sqrt(sigsq)) out = runireg(Data=list(y=y,X=X), Mcmc=list(R=R)) cat("Summary of beta and Sigmasq draws", fill=TRUE) summary(out$betadraw, tvalues=beta) summary(out$sigmasqdraw, tvalues=sigsq) ## plotting examples if(0){plot(out$betadraw)} } \keyword{regression}
33ad77bc2afb2711ae42f0ead5a30d3de3eec960
1b8da9dd574d6680b7b48cec83f29aa73e217f13
/R/listFormula.R
ae7d5fd1c0312b94ba304c785a264f89651f22cd
[]
no_license
angelgar/voxel
f7d11459b9877e7cc969c049eb212b2d287abfcd
4e8ebbcd82094eaae1a213a33d735bfd20405097
refs/heads/master
2020-07-05T17:51:45.909305
2019-12-20T20:17:10
2019-12-20T20:17:10
66,569,820
9
4
null
2018-04-18T22:33:40
2016-08-25T15:23:46
R
UTF-8
R
false
false
502
r
listFormula.R
#' Create list of Formulas for each voxel #' #' This function is internal. #' This function creates list of formulas that will be passed for analysis. #' @param x Index of voxels to be analyzed #' @param formula covariates to be included in the analysis #' @keywords internal #' @export #' @examples #' #' #' x <- 1 #' fm1 <- "~ x1" #' formula <- listFormula(x, formula = fm1) listFormula <- function(x, formula) { stats::as.formula(paste(x, formula, sep=""), env = parent.frame(n=3)) }
3a31c076272ad361b20feab6f0cb2da9d28ee259
3a42630716521b58a20d5a9445fd3eb1007188aa
/man/HKernElementKAttribute.Rd
00ce7ec6d2742a9847bc9fa0edcc9b9e24d7083b
[ "MIT", "LicenseRef-scancode-other-permissive" ]
permissive
mslegrand/svgR
2a8addde6b1348db34dee3e5145af976008bf8f0
e781c9c0929a0892e4bc6e23e7194fb252833e8c
refs/heads/master
2020-05-22T01:22:16.991851
2020-01-18T03:16:30
2020-01-18T03:16:30
28,827,655
10
0
null
null
null
null
UTF-8
R
false
true
722
rd
HKernElementKAttribute.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/doc_RegAttrPages.R \name{HKernElementKAttribute} \alias{HKernElementKAttribute} \title{k} \description{ Sets an how much to decrease the spacing between the two glyphs in the kerning pair } \section{Available Attribute Values}{ The value is defined as follows: \describe{ \item{<numeric>}{Specifies an reduction in the spacing, relative to the font coordinate system, between the two \emph{glyphs} of the kerning pair. ( Required.)} } } \section{Animatable}{ Not Animatable } \section{Used by the Elements}{ \describe{ \item{\emph{Uncategorized Elements}}{\code{\link[=hkern]{hkern}}, \code{\link[=vkern]{vkern}}} } } \keyword{internal}
703c73024d083c6e4d857f7ac6818b63682c22d2
5e0de3032d5d3de384396a52bee98d894365af8b
/01-twitter-data-collection.r
6e4bbd1bfb548dcf06ada289ffbdd63e632ffa83
[]
no_license
davidpupovac/social-media-workshop
de3d8b758258ddbddac70a7cfea8721689ee00ac
6c88a8e0e0e0505e17ecc061edf2c1bbd0ec8c16
refs/heads/master
2021-01-22T16:11:21.747918
2015-02-17T17:20:16
2015-02-17T17:20:16
null
0
0
null
null
null
null
UTF-8
R
false
false
7,690
r
01-twitter-data-collection.r
################################################################ ## Workshop: Collecting and Analyzing Social Media Data with R ## February 2nd, 2015 ## Script 1: Collecting Twitter data ## Author: Pablo Barbera, NYU, @p_barbera ################################################################ setwd("~/Dropbox/git/social-media-workshop") #I just edited this ## INSTALLING PACKAGES THAT WE WILL USE TODAY doInstall <- TRUE # Change to FALSE if you don't want packages installed. toInstall <- c("ROAuth", "twitteR", "streamR", "ggplot2", "stringr", "tm", "RCurl", "maps", "Rfacebook", "topicmodels", "devtools") ##################################### ### CREATING YOUR OWN OAUTH TOKEN ### ##################################### ## Step 1: go to apps.twitter.com and sign in ## Step 2: click on "Create New App" ## Step 3: fill name, description, and website (it can be anything, even google.com) ## (make sure you leave 'Callback URL' empty) ## Step 4: Agree to user conditions ## Step 5: copy consumer key and consumer secret and paste below library(ROAuth) requestURL <- "https://api.twitter.com/oauth/request_token" accessURL <- "https://api.twitter.com/oauth/access_token" authURL <- "https://api.twitter.com/oauth/authorize" consumerKey <- "XXXXXXXXXXXX" consumerSecret <- "YYYYYYYYYYYYYYYYYYY" my_oauth <- OAuthFactory$new(consumerKey=consumerKey, consumerSecret=consumerSecret, requestURL=requestURL, accessURL=accessURL, authURL=authURL) ## run this line and go to the URL that appears on screen my_oauth$handshake(cainfo = system.file("CurlSSL", "cacert.pem", package = "RCurl")) ## now you can save oauth token for use in future sessions with twitteR or streamR save(my_oauth, file="backup/oauth_token.Rdata") ### NOTE (added February 17, 2015) ### The twitteR package just changed its authentication method ### (streamR remains the same) ### New code to authenticate with twitteR now requires access token and access secret, ### which can be found in 'Keys and Access Tokens' tab in 
apps.twitter.com accessToken = 'ZZZZZZZZZZZZZZ' accessSecret = 'AAAAAAAAAAAAAAAAAA' ## testing that it works library(twitteR) setup_twitter_oauth(consumer_key=consumerKey, consumer_secret=consumerSecret, access_token=accessToken, access_secret=accessSecret) searchTwitter('obama', n=1) ## from a Windows machine: # searchTwitter("obama", cainfo = system.file("CurlSSL", "cacert.pem", package = "RCurl")) ##################################### ### COLLECTING USER INFORMATION ### ##################################### library(twitteR) # profile information user <- getUser('barackobama') # from a Windows machine # user <- getUser('barackobama', cainfo = system.file("CurlSSL", "cacert.pem", package = "RCurl")) user$toDataFrame() # followers user$getFollowers(n=10) # (10 most recent followers) # from a Windows machine # user$getFollowers(n=10, cainfo = system.file("CurlSSL", "cacert.pem", package = "RCurl")) # friends (who they follow) user$getFriends(n=10) # from a Windows machine # user$getFriends(n=10, cainfo = system.file("CurlSSL", "cacert.pem", package = "RCurl")) # see also smappR package (https://github.com/SMAPPNYU/smappR) for additional # functions to download users' data for a large number of users ##################################### ### SEARCH RECENT TWEETS ### ##################################### # basic searches by keywords tweets <- searchTwitter("obama", n=20) # from a Windows machine # tweets <- searchTwitter("obama", n=20, cainfo = system.file("CurlSSL", "cacert.pem", package = "RCurl")) # convert to data frame tweets <- twListToDF(tweets) # but NOTE: limited to most recent ~3000 tweets in the past few days! 
tweets <- searchTwitter("#APSA2014") tweets <- searchTwitter("#PoliSciNSF") tweets <- twListToDF(tweets) tweets$created # from a Windows machine # tweets <- searchTwitter("#APSA2014", cainfo = system.file("CurlSSL", "cacert.pem", package = "RCurl")) # tweets <- searchTwitter("#PoliSciNSF", cainfo = system.file("CurlSSL", "cacert.pem", package = "RCurl")) # tweets <- twListToDF(tweets) # tweets$created ############################################# ### DOWNLOADING RECENT TWEETS FROM A USER ### ############################################# ## Here's how you can capture the most recent tweets (up to 3,200) ## of any given user (in this case, @nytimes) ## you can do this with twitteR timeline <- userTimeline('nytimes', n=20) # from a Windows machine # timeline <- userTimeline('nytimes', n=20, cainfo = system.file("CurlSSL", "cacert.pem", package = "RCurl")) timeline <- twListToDF(timeline) ## but I have written my own function so that I can store the raw JSON data source("functions.r") getTimeline(filename="tweets_nytimes.json", screen_name="nytimes", n=1000, oauth=my_oauth, trim_user="false") # it's stored in disk and I can read it with the 'parseTweets' function in # the streamR package library(streamR) tweets <- parseTweets("tweets_nytimes.json") # see again smappR package (https://github.com/SMAPPNYU/smappR) for more ############################################### ### COLLECTING TWEETS FILTERING BY KEYWORDS ### ############################################### library(streamR) filterStream(file.name="obama_tweets.json", track="obama", timeout=60, oauth=my_oauth) ## Note the options: ## FILE.NAME indicates the file in your disk where the tweets will be downloaded ## TRACK is the keyword(s) mentioned in the tweets we want to capture. 
## TIMEOUT is the number of seconds that the connection will remain open ## OAUTH is the OAuth token we are using ## Once it has finished, we can open it in R as a data frame with the ## "parseTweets" function tweets <- parseTweets("obama_tweets.json") ## This is how we would capture tweets mentioning multiple keywords: filterStream(file.name="political_tweets.json", track=c("obama", "bush", "clinton"), tweets=100, oauth=my_oauth) ############################################### ### COLLECTING TWEETS FILTERING BY LOCATION ### ############################################### ## This second example shows how to collect tweets filtering by location ## instead. In other words, we can set a geographical box and collect ## only the tweets that are coming from that area. ## For example, imagine we want to collect tweets from the United States. ## The way to do it is to find two pairs of coordinates (longitude and latitude) ## that indicate the southwest corner AND the northeast corner. ## (NOTE THE REVERSE ORDER, IT'S NOT LAT, LONG BUT LONG, LAT) ## In the case of the US, it would be approx. (-125,25) and (-66,50) ## How to find the coordinates? I use: http://itouchmap.com/latlong.html filterStream(file.name="tweets_geo.json", locations=c(-125, 25, -66, 50), timeout=60, oauth=my_oauth) ## Note that now we choose a different option, "TIMEOUT", which indicates for ## how many seconds we're going to keep open the connection to Twitter. ## But we could have chosen also tweets=100 instead ## We can do as before and open the tweets in R tweets <- parseTweets("tweets_geo.json") ############################################ ### COLLECTING A RANDOM SAMPLE OF TWEETS ### ############################################ ## It's also possible to collect a random sample of tweets. That's what ## the "sampleStream" function does: sampleStream(file.name="tweets_random.json", timeout=30, oauth=my_oauth) ## Here I'm collecting 30 seconds of tweets ## And once again, to open the tweets in R... 
tweets <- parseTweets("tweets_random.json") ## What are the most common hashtags right now? getCommonHashtags(tweets$text) ## What is the most retweeted tweet? tweets[which.max(tweets$retweet_count),]
e3fc2f3f6de289f68a400ff13e6e2efbdba120ce
859a2bdab8ba9943fffde77a0a930ba877c80fd9
/man/extract_trinucleotide_context.Rd
0ef7d6f4d4b3165cd917a1ebccafc72b244bdff2
[ "MIT" ]
permissive
alkodsi/ctDNAtools
e0ed01de718d3239d58f1bfd312d2d91df9f0f0d
30bab89b85951282d1bbb05c029fc333a139e044
refs/heads/master
2022-03-20T11:47:06.481129
2022-02-20T11:25:48
2022-02-20T11:25:48
208,288,617
30
9
null
null
null
null
UTF-8
R
false
true
1,128
rd
extract_trinucleotide_context.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/extract_trinucleotide_context.R \name{extract_trinucleotide_context} \alias{extract_trinucleotide_context} \title{Extracts the trinucleotide context for a set of mutations} \usage{ extract_trinucleotide_context(mutations, reference, destrand = TRUE) } \arguments{ \item{mutations}{A data frame having the mutations. Should have the columns CHROM, POS, REF, ALT.} \item{reference}{the reference genome in BSgenome format} \item{destrand}{logical, whether to destrand mutations} } \value{ A data frame with two columns having the substitutions and the trinucleotide context } \description{ Extracts the trinucleotide context for a set of mutations } \examples{ \donttest{ data("mutations", package = "ctDNAtools") ## Use human reference genome from BSgenome.Hsapiens.UCSC.hg19 library suppressMessages(library(BSgenome.Hsapiens.UCSC.hg19)) ## with destranding extract_trinucleotide_context(mutations, BSgenome.Hsapiens.UCSC.hg19) ## without destranding extract_trinucleotide_context(mutations, BSgenome.Hsapiens.UCSC.hg19, destrand = FALSE ) } }
ae9abb417ec3db902024152937ee893e7531208b
ba07f5cbc690640115108e4ee07b46ef8340e5fe
/DA3-labs/lab2/code/Ch16_airbnb_prepare_london.R
a32001d020fa3ca48162e0e790273e353bf6eadf
[]
no_license
ozkrleal/london-prediction-r
08a16f4c6b3416d57d3b2cea24b10c797eafed41
f81488a92dae37b7e54074d6ebb76b62f95fbfa7
refs/heads/master
2020-12-20T13:12:59.718332
2020-02-15T00:32:02
2020-02-15T00:32:02
236,085,457
0
0
null
null
null
null
UTF-8
R
false
false
4,558
r
Ch16_airbnb_prepare_london.R
############################################################ # # DATA ANALYSIS TEXTBOOK # MODEL SELECTION # ILLUSTRATION STUDY # Airbnb London 2017 march 05 data # # # WHAT THIS CODES DOES: # Transform variables and filter dataset # Generate new features ############################################################ # IN airbnb_london_workfile.csv # OUT airbnb_london_workfile_adj.csv library(tidyverse) library(skimr) # location folders data_in <- "lab2/data/" data_out <- "lab2/data/" output <- "lab2/output/" # load ggplot theme function source("helper_functions/theme_bg.R") source("helper_functions/da_helper_functions.R") source("lab2/code/Ch14_airbnb_prediction_functions.R") # Import data data <- read_csv(paste(data_in, "airbnb_london_workfile.csv", sep = "")) ##################### ### look at price ### ##################### summary(data$price) data <- data %>% mutate(ln_price = log(price)) data <- data %>% filter(price <=1000) # Squares and further values to create data <- data %>% mutate(n_accommodates2=n_accommodates^2, ln_accommodates=log(n_accommodates) , ln_accommodates2=log(n_accommodates)^2, ln_beds = log(n_beds), ln_number_of_reviews = log(n_number_of_reviews+1) ) # Pool accomodations with 0,1,2,10 bathrooms data <- data %>% mutate(f_bathroom = cut(n_bathrooms, c(0,1,2,10), labels=c(0,1,2), right = F) ) # Pool num of reviews to 3 categories: none, 1-51 and >51 data <- data %>% mutate(f_number_of_reviews = cut(n_number_of_reviews, c(0,1,51,max(data$n_number_of_reviews)), labels=c(0,1,2), right = F)) # Pool and categorize the number of minimum nights: 1,2,3, 3+ data <- data %>% mutate(f_minimum_nights= cut(n_minimum_nights, c(1,2,3,max(data$n_minimum_nights)), labels=c(1,2,3), right = F)) # Change Infinite values with NaNs for (j in 1:ncol(data) ) data.table::set(data, which(is.infinite(data[[j]])), j, NA) #------------------------------------------------------------------------------------------------ # where do we have missing variables now? 
to_filter <- sapply(data, function(x) sum(is.na(x))) to_filter[to_filter > 0] # what to do with missing values? # 1. drop if no target data <- data %>% drop_na(price) # 2. imput when few, not that important data <- data %>% mutate( n_bathrooms = ifelse(is.na(n_bathrooms), median(n_bathrooms, na.rm = T), n_bathrooms), #assume at least 1 bath n_beds = ifelse(is.na(n_beds), n_accommodates, n_beds), #assume n_beds=n_accomodates f_bathroom=ifelse(is.na(f_bathroom),1, f_bathroom), f_minimum_nights=ifelse(is.na(f_minimum_nights),1, f_minimum_nights), f_number_of_reviews=ifelse(is.na(f_number_of_reviews),1, f_number_of_reviews), ln_beds=ifelse(is.na(ln_beds),0, ln_beds), ) # 3. drop columns when many missing not imortant to_drop <- c("usd_cleaning_fee", "p_host_response_rate") data <- data %>% select(-one_of(to_drop)) to_filter <- sapply(data, function(x) sum(is.na(x))) to_filter[to_filter > 0] # 4. Replace missing variables re reviews with zero, when no review + add flags data <- data %>% mutate( flag_days_since=ifelse(is.na(n_days_since),1, 0), n_days_since = ifelse(is.na(n_days_since), median(n_days_since, na.rm = T), n_days_since), flag_review_scores_rating=ifelse(is.na(n_review_scores_rating),1, 0), n_review_scores_rating = ifelse(is.na(n_review_scores_rating), median(n_review_scores_rating, na.rm = T), n_review_scores_rating), flag_reviews_per_month=ifelse(is.na(n_reviews_per_month),1, 0), n_reviews_per_month = ifelse(is.na(n_reviews_per_month), median(n_reviews_per_month, na.rm = T), n_reviews_per_month), flag_n_number_of_reviews=ifelse(n_number_of_reviews==0,1, 0) ) table(data$flag_n_days_since) # redo features # Create variables, measuring the time since: squared, cubic, logs data <- data %>% mutate( ln_days_since = log(n_days_since+1), ln_days_since2 = log(n_days_since+1)^2, ln_days_since3 = log(n_days_since+1)^3 , n_days_since2=n_days_since^2, n_days_since3=n_days_since^3, ln_review_scores_rating = log(n_review_scores_rating), 
ln_days_since=ifelse(is.na(ln_days_since),0, ln_days_since), ln_days_since2=ifelse(is.na(ln_days_since2),0, ln_days_since2), ln_days_since3=ifelse(is.na(ln_days_since3),0, ln_days_since3), ) # Look at data skim(data) # where do we have missing variables now? to_filter <- sapply(data, function(x) sum(is.na(x))) to_filter[to_filter > 0] write_csv(data, paste0(data_out, "airbnb_london_workfile_adj.csv"))
1922d49cde5e32e503148438748f6c4686416c7d
33402080460833242c40f141cbff1b9e1bb5041d
/join-network-spatial-data.R
438b27c11ecb98fc0c2f9c00f1c0ef5b127aa0f9
[]
no_license
dylanbeaudette/network-paper-2020
4e8df3ba646c5518bb5b06a7a37b03a415030b11
59bdabae8b918738cf3e1e0be8dde67b87520a0b
refs/heads/master
2022-05-30T16:30:48.976299
2022-05-12T20:10:53
2022-05-12T20:11:05
251,530,010
0
0
null
null
null
null
UTF-8
R
false
false
5,378
r
join-network-spatial-data.R
## 2020 Soilscapes / Networks ## P. Roudier, D.E. Beaudette, Dion O'Neal library(igraph) library(RColorBrewer) library(sharpshootR) library(aqp) library(rgdal) library(rgeos) library(sp) library(sf) library(raster) library(rasterVis) library(ggplot2) library(stringr) library(dplyr) source('local-functions.R') # load relevant data x <- readRDS('data/component-data.rda') mu <- readRDS('data/spatial-data.rda') g <- readRDS('data/graph.rda') d <- readRDS('data/vertices_df.rda') leg <- readRDS('data/legend.rda') # list of records by cluster number # used to search for map unit component names clust.list <- split(d, d$cluster) # compute cluster membership by map unit # create mu (mukey)-> graph (cluster) look-up table # also computes membership percentage and Shannon H mu.LUT <- lapply(split(x, x$mukey), mu.agg.membership) mu.LUT <- do.call('rbind', mu.LUT) # check: OK head(mu.LUT) ## sanity checks # all clusters should be allocated in the LUT # OK setdiff(unique(mu.LUT$cluster), V(g)$cluster) # spatial data LEFT JOIN network cluster LUT mu <- sp::merge(mu, mu.LUT, by.x='mukey', by.y='mukey', all.x = TRUE) ## TODO: eval via SDA # investigate map units (mukey) that aren't represented in the graph missing.mukey <- setdiff(mu$mukey, x$mukey) saveRDS(missing.mukey, file = 'data/missing-mukey.rds') # filter-out polygons with no assigned cluster # 98% of polygons are assigned a cluster idx <- which(!is.na(mu$cluster)) length(idx) / nrow(mu) mu <- mu[idx, ] write_sf(st_as_sf(mu), 'data/mu-with-cluster-membership.gpkg') # aggregate geometry based on cluster labels # mu.simple <- gUnionCascaded(mu, as.character(mu$cluster)) # mu.simple.spdf <- SpatialPolygonsDataFrame( # mu.simple, # data = data.frame( # ID = sapply(slot(mu.simple, 'polygons'), slot, 'ID') # ), # match.ID = FALSE # ) # aggregate geometry based on cluster labels mu.simple.sf <- mu %>% sf::st_as_sf() %>% dplyr::group_by(cluster) %>% dplyr::summarise() mu.simple.spdf <- as(mu.simple.sf, "Spatial") mu.simple.spdf <- 
spTransform(mu.simple.spdf, CRS(st_crs(mu.simple.sf)$input)) ## viz using raster methods # this assumes projected CRS r <- rasterize(mu, raster(extent(mu), resolution = 90), field = 'cluster') projection(r) <- proj4string(mu) ## kludge for plotting categories # convert to categorical raster r <- as.factor(r) rat <- levels(r)[[1]] # use previously computed legend of unique cluster IDs and colors # note that the raster legend is missing 3 clusters rat$color <- leg$color[match(rat$ID, leg$cluster)] # copy over associated legend entry rat$notes <- leg$notes[match(rat$ID, leg$cluster)] # pack RAT back into raster levels(r) <- rat # sanity-check: do the simplified polygons have the same IDs (cluster number) as raster? # yes e <- sampleRegular(r, 1000, sp = TRUE) e$check <- over(e, mu.simple.spdf)$ID e <- as.data.frame(e) e <- na.omit(e) all(as.character(e$layer) == as.character(e$check)) ## colors suck: pick a new palette, setup so that clusters are arranged via similarity # simple plot in R, colors hard to see png(file='graph-communities-mu-data.png', width=1600, height=1200) levelplot(r, col.regions=levels(r)[[1]]$color, xlab="", ylab="", att='notes', maxpixels=1e5, colorkey=list(space='right', labels=list(cex=1.25))) dev.off() # Simple plot using sf, to try and debug where things go wrong map_sf <- ggplot(data = mu.simple.sf) + geom_sf(aes(fill = as.factor(cluster)), colour = "gray30", lwd = 0.1) + scale_fill_manual( "", values = leg$color, labels = leg$notes ) + theme_bw() mu_parsed_leg <- mu.simple.sf %>% left_join(leg) %>% mutate( leg = str_sub(notes, 4, str_length(notes)), landscape = str_split(leg, pattern = "\\|", simplify = TRUE)[,1], parent_material = str_split(leg, pattern = "\\|", simplify = TRUE)[,2], texture = str_split(leg, pattern = "\\|", simplify = TRUE)[,3], landscape = str_trim(landscape), parent_material = str_trim(parent_material), texture = str_trim(texture), landscape = tolower(landscape), parent_material = tolower(parent_material), texture = 
tolower(texture) ) map_landscape <- mu_parsed_leg %>% group_by(landscape) %>% summarise() %>% ggplot() + geom_sf(data = mu.simple.sf, fill = "gray80", colour = "gray30", lwd = 0.05) + geom_sf(aes(fill = landscape), colour = "gray30", lwd = 0.1) + theme_bw() + facet_wrap(~landscape) map_pm <- mu_parsed_leg %>% group_by(parent_material) %>% summarise() %>% ggplot() + geom_sf(data = mu.simple.sf, fill = "gray80", colour = "gray30", lwd = 0.05) + geom_sf(aes(fill = parent_material), colour = "gray30", lwd = 0.1) + theme_bw() + facet_wrap(~parent_material) map_texture <- mu_parsed_leg %>% group_by(texture) %>% summarise() %>% ggplot() + geom_sf(data = mu.simple.sf, fill = "gray80", colour = "gray30", lwd = 0.05) + geom_sf(aes(fill = texture), colour = "gray30", lwd = 0.1) + theme_bw() + facet_wrap(~texture) ## only useful for a quick preview # writeRaster(r, file='data/mu-polygons-graph-clusters.tif', datatype='INT1U', format='GTiff', options=c("COMPRESS=LZW"), overwrite=TRUE) # save to external formats for map / figure making sf::write_sf(mu.simple.sf, dsn = 'data', layer = 'graph-and-mu-polygons', driver = 'ESRI Shapefile') sf::write_sf(mu.simple.sf, 'data/graph-and-mu-polygons.gpkg')
76b62d5faeba2c75fa52a80fcab20d2c430ee498
6425368575e5e96942cec0eaa07428acd047fb88
/R/old/gen_graphs_ttest/attach_sw_labels/driver_Controls.R
ac669c8d010e8208ea8a11a96e2e16498ed5c1d7
[]
no_license
emanuelepesce/dti_fmri_networks
4ff55dc75055d12e23c7cd9bd995a2e4060f7d3e
25c46f60f417b5b3f63286178ddc964377bf0003
refs/heads/master
2021-01-10T19:53:41.442692
2015-11-11T10:57:59
2015-11-11T10:57:59
42,808,978
0
0
null
null
null
null
UTF-8
R
false
false
1,455
r
driver_Controls.R
#' driver_Controls #' #' Attach 'strong' label to all graph in a directory. #' #' If strong == 1 then the edge belongs to strong ties set. #' This file use the object borda_sw_cut_objects.RData which cointains the #' results of SW cutting procedure, done when mask has been extracted. #' #' Author: Emanuele Pesce rm(list=ls()) source("./../../gen_graphs/sw_labels/attachLabelsSW.R", chdir = T) # -------------------------- Inititialization ---------------------------------- verbose = 1 if(verbose > 0){ print("Initialization..") } path_borda_controls <- "./../../../data/other/borda/borda_matrix_controls.txt" path_borda_sla2 <- "./../../../data/other/borda/borda_matrix_SLA2.txt.txt" path_borda_sla3 <- "./../../../data/other/borda/borda_matrix_SLA3.txt" pathIn_data <- "./../../../data/other/t_test_005/t_test_sw_cut_objects.RData" pathTarget <- "./../../../data/graphs_integration/ttest_005/Controls/" # -------------------------- Running ------------------------------------------- ptm <- proc.time() # get borda matrix # g_controls <- i_adjacencyFromFile(path_borda_controls) # g_sla2 <- i_adjacencyFromFile(path_borda_sla2) # g_sla3 <- i_adjacencyFromFile(path_borda_sla3) # load result objects of cutting procedure in order to retrieve the correct set # of strong ties load(pathIn_data) # get labels labels <- getLabels(RC) applyAttachLabel(pathTarget, pathTarget, labels, labels, labels) time <- proc.time() - ptm print(time)
272544b795cc59121ae2cc36f939a9b7b7214258
a9c969942e38a663babe110e085baf6fc02a2b1a
/src/library/R/install_required_lib.r
4eca4e2ccefbf4d61ffd0dd6049954697611b6af
[ "BSD-3-Clause" ]
permissive
TTSHR/econ-project-R-Ning
034c2f4928d9513fff45daa288a5f432ce85e1e3
ced36462ff33e5b1af43052fe21ad7e9dffc02a0
refs/heads/master
2021-01-01T19:11:18.769125
2015-08-26T14:35:07
2015-08-26T14:35:07
41,430,485
0
0
null
null
null
null
UTF-8
R
false
false
569
r
install_required_lib.r
' The file "install_required_lib.r" checks whether a library can be found in "PATH_OUT_LIBRARY_R" and installs it if this fails. In case of failure, we require an Internet connection. ' source("project_paths.r") cran <- "http://cran.rstudio.com/" lib_name <- commandArgs(trailingOnly = TRUE) .libPaths(PATH_OUT_LIBRARY_R) tryCatch({ library(lib_name, lib=PATH_OUT_LIBRARY_R, character.only=TRUE) }, error = function(e) { install.packages(lib_name, lib=PATH_OUT_LIBRARY_R, repos=cran) library(lib_name, lib=PATH_OUT_LIBRARY_R, character.only=TRUE) })
52f218b89953d13db971b024c31a48f1d242d95c
0f3fa0bc7b1de9c5f6f53bf2d09ad761a100cc20
/Module Exercises/Module 3 - Exercises.R
81df15501d3361a43125ee41d180411dffed9a10
[]
no_license
Mikkelgbc/Tools-for-Analytics-R-Part
36874476de203b10013e3d4d5d59782fd58368e1
5d3824c84fe4235ebf1e849e770b96e53e9e3b73
refs/heads/main
2023-03-07T23:51:57.946396
2021-02-14T17:01:40
2021-02-14T17:01:40
303,754,252
0
0
null
null
null
null
UTF-8
R
false
false
6,071
r
Module 3 - Exercises.R
# Module 3 # 3.10.1 Exercise (group work) # Before you start, it is a good idea to agree on a set of group rules: # Create a shared folder and project for your group. # Agree on a coding convention. # Agree about the rules of how to meet etc. # 3.10.2 Exercise (install packages) # 1. Install the package devtools # 2. Have a look at the documentation for function install_github # 3. Install the package tfa # 3.10.3 Exercise (piping) #Intro head(mtcars) ?mtcars library(tidyverse) mtcars %>% select(cyl,gear,hp,mpg) %>% filter(gear == 4 & cyl == 4) # Task 1 mtcars %>% select(mpg,hp,gear,am,gear) # Task 2 mtcars %>% select(mpg,hp,gear,am,gear) %>% filter(mpg < 20 & gear == 4) # Task 3 mtcars %>% select(mpg,hp,gear,am,gear) %>% filter(mpg < 20 | gear == 4) # Task 4 mtcars %>% filter(mpg < 20 & gear == 4) %>% select(wt,vs) # Task 5 dat <- mtcars dat <- filter(dat,mpg < 20 & gear == 4) dat <- select(dat,wt,vs) dat #3.10.4 Exercise (working dir) #Do from console # Intro dir.create("subfolder", showWarnings = FALSE) write_file("Some text in a file", path = "test1.txt") write_file("Some other text in a file", path = "subfolder/test2.txt") # Taksk 1 read_file("test1.txt") # Task 2 read_file("subfolder/test2.txt") # Task 3 & 4 setwd("subfolder") # done in Q3 read_file("../test1.txt") read_file("test2.txt") # 3.10.5 Exercise (vectors) #Task 1 n <- 100 n * (n+1)/2 # Alternative solution: n <- 100 v <- c(1:100) sum(v) # Task 2 n <- 1000 n * (n+1)/2 # Task 3 n <- 1000 x <- seq(1, n) sum(x) # Answer: b) # Task 4 set.seed(123) v <- sample.int(100,30) v # Answer: It makes 30 numbers between 1 and 100 # Task 5 sum(v) mean(v) sd(v) # Task 6 v[c(1,6,4,15)] # Task 7 v[v>50] # Task 8 v[v > 75 | v < 25] # Task 9 v[v == 43] # Task 10 v[is.na(v)] # Task 11 which(v > 75| v < 25) # 3.10.6 Exercise (matrices) #Intro m1 <- matrix(c(37, 8, 51, NA, 50, 97, 86, NA, 84, 46, 17, 62L), nrow = 3) m1 m2 <- matrix(c(37, 8, 51, NA, 50, 97, 86, NA, 84, 46, 17, 62L), nrow = 3, byrow = TRUE) m2 m3 <- 
matrix(c(37, 8, 51, NA, 50, 97, 86, NA, 84, 46, 17, 62L), ncol = 3) m3 # Task 1 # Question: What is the difference between the three matrices? # Answer: m1 has 3 rows filling one column at a time, m2 has 3 row filling one row at a time and m3 has 3 columns filling one column at a time # Task 2 rowSums(m1,na.rm=TRUE) colSums(m2,na.rm = TRUE) # Task 3 rbind(m1,c(1,2,3,4)) # Task 4 rbind(c(1,2,3,4),m1) # Task 5 cbind(m3,c(1,2,3,4)) # Task 6 m1[2,4] # Task 7 m1[2:3,1:2] # Task 8 m1[3, c(1,3,4)] # Task 9 m1[3,] # Task 10 m2[is.na(m2)] # Task 11 m2[m2 > 50] # 3.10.7 Exercise (data frames) # Intro str(mtcars) glimpse(mtcars) ?mtcars # Task 1 head(mtcars) tail(mtcars) # Task 2 mtcars[,4] mtcars[,"hp"] mtcars$hp # Task 3 data(mtcars) #Resets data mtcars <- rbind(mtcars,c(34, 3, 87, 112, 4.5, 1.515, 167, 1, 1, 5, 3)) rownames(mtcars)[33] <- "Phantom XE" # Task 4 col <- c(NA, "green", "blue", "red", NA, "blue", "green", "blue", "red", "red", "blue", "green", "blue", "blue", "green", "red", "red", NA, NA, "red", "green", "red", "red", NA, "green", NA, "blue", "green", "green","red", "green", "blue", NA) mtcars <- cbind(mtcars,col) class(mtcars$col) # Task 5 mtcars[mtcars$vs==0,] # 3.10.8 Exercise (lists) # Intro lst <- list(45, "Lars", TRUE, 80.5) lst x <- lst[2] x y <- lst[[2]] y # Task 1 # What is the class of the two objects x and y? # Answer: class(x) # List class(y) # character # What is the difference between using one or two brackets? # Answer: One corresponds to the list, while two corresponds to the character (same result) # Task 2 names(lst) <- c("age","Name","Male?","Weight") lst # Task 3 lst$Name #Text lst$height <- 173 # add component lst$name <- list(first = "Lars", last = "Nielsen") # change the name component lst$male <- NULL # remove male component lst # Task 4 lst$name$last # 3.10.9 Exercise (string management) # Intro str1 <- "Business Analytics (BA) refers to the scientific process of transforming data into insight for making better decisions in business." 
str2 <- 'BA can both be seen as the complete decision making process for solving a business problem or as a set of methodologies that enable the creation of business value.' str3 <- c(str1, str2) # vector of strings str3 # The stringr package in tidyverse provides many useful functions for string manipulation. We will consider a few. str4 <- str_c(str1, str2, "As a process it can be characterized by descriptive, predictive, and prescriptive model building using data sources.",sep = " ") # join strings str4 str_c(str3, collapse = " ") # collapse vector to a string str_replace(str2, "BA", "Business Analytics") # replace first occurrence str_replace_all(str2, "the", "a") # replace all occurrences str_remove(str1, " for making better decisions in business") str_detect(str2, "BA") # detect a pattern # Task 1 - Is Business (case sensitive) contained in str1 and str2? str_detect(str1,"Business") str_detect(str2,"Business") # Task 2 - Define a new string that replace BA with Business Analytics in str2 str5 <- str_replace(str2, "BA", "Business Analytics") str5 # Task 3 - In the string from Question 2, remove or as a set of methodologies that enable the creation of business value str_remove(str5, " or as a set of methodologies that enable the creation of business value") str5 # Task 4 - In the string from Question 3, add This course will focus on programming and descriptive analytics. str5 <- str_c(str5, "This course will focus on programming and descriptive analytics.",sep=" ") str5 # Task 5 str5 <- str_replace(str5, "analytics", "business analytics") str5 # Task 6 - Do all calculations in Question 2-5 using pipes. library(tidyverse) str_replace(str2, "BA", "Business Analytics") %>% str_remove(" or as a set of methodologies that enable the creation of business value") %>% str_c("This course will focus on programming and descriptive analytics.",sep=" ") %>% str_replace("analytics", "business analytics")
6d454daf396aa21899c8a0c6f2c30bd0974ff3b2
3f6abec568a7d1804534bc16f4421e817560d122
/Kagglemachinelearning/xgbfunc.R
9452bfe886ae0f7f2275c2e18ac78875d75e3dcd
[]
no_license
HHA123/Examplefiles
adc38a24d887d6a3157f67f5f90eb8adeae698e3
2a65a25ab146020a087b5d36ed3c04fee1acc1e1
refs/heads/master
2020-12-24T15:22:15.659339
2015-09-02T13:24:20
2015-09-02T13:24:20
41,799,286
0
0
null
null
null
null
UTF-8
R
false
false
1,676
r
xgbfunc.R
#xgboost crossvalidation model for kaggle competion:Springleaf library(xgboost); library(verification); xgbfunc <- function(data2){ set.seed(666) data2 <-data.frame(data2) k = 4 #n = floor(dim(data)[1]/k) #cv.err = rep(NA,k) target <- length(names(data2)) #choping up the training set in k subsets with the following indeces #st = (i-1)*n+1#start of subset #ed = i*n #subset = st:ed #index for subset subset <- sample(1:dim(data2)[1],0.3*dim(data2)[1]) cvtest <- data2[subset,] y<- data2[subset,target] cvtest <- as.matrix(cvtest) mode(cvtest) <- "numeric" cvtest <- xgb.DMatrix(cvtest[,-target],label=cvtest[,target]) data2 <- data2[-subset,] cvtrain <- as.matrix(data2) rm(data2) mode(cvtrain) <- "numeric" cvtrain <- xgb.DMatrix(cvtrain[,-target],label=cvtrain[,target]) nround.cv = 150 #best.cv <- xgb.cv(param=param,data=cvtrain,nfold=k,nrounds=nround.cv,prediction=T) #max.auc =which.max(best.cv$dt[,test.auc.mean]) #max.auc = nround.cv for(i in 1:k){ param <- list(objective='binary:logistic',max.depth=7,eta=0.01,eval_metric="auc", subsample=1) max.auc = 650 -50*i best <- xgboost(param=param,data=cvtrain,nrounds=max.auc,verbose=0) pred <- predict(best,cvtest) #cv.err[i] = roc.area(cvtest[,target],pred)$A print(paste("model",i,"auc",roc.area(y,pred)$A,sep="")) #print(max.auc) } #return(best) #print(paste(paste("AUC for subset ",i),cv.err[i],sep=" ")) # save(mod,file=paste("modgbm",i,".rda",sep="")) #} #print(paste("Average AUC ",mean(cv.err))) }
4910850285478504b293f6e6c1e04ebd50d63443
3ee59de4098c0a5087e09569b0aff28186d826a8
/meetup/app.R
3a666ca3955e2d3f5c5fc66f5f2541ada117ce40
[]
no_license
svarn27/shiny-server
89f6a6dc82a7a8c6dff098aa5757d7aee5424b47
e2d7c665b311b663104bcb43fe98fd6c590ba42d
refs/heads/master
2019-08-09T16:22:37.038727
2017-12-04T15:44:43
2017-12-04T15:44:43
66,736,750
0
0
null
null
null
null
UTF-8
R
false
false
450
r
app.R
library(shiny) ui <- fluidPage( h2("Meetup - Data Analysis using R/Shiny"), a("Presentation", href="https://docs.google.com/presentation/d/1v7rt7Mz1Gd7gm2M1YoMjGqhw0cOxHzPH-30CMvr2WZ8/edit?usp=sharing"), br(), a("Graphing Example", href="/example2"),br(), a("Clustering Example", href="/example1"),br(), a("Regression Example", href="/example3") ) server <- function(input,output, session){ } shinyApp(ui=ui, server=server)
0d8d0c9041a6e72c73f3822603bab7caadaf0769
45c6daee8252befbc9c8812ef7b7a87ae4876e89
/Quantative Reasoning/Week 6/Week 6 Code 2.R
a8322cad7797a21bb94ba014f7a31e2d75cdeea8
[]
no_license
mrmightyq/Quantitative-Reasoning-Bayesian-Frequentist-Stats-
327a46796c02ad1cb91bbf2c9094f2a4c9e6b8c6
9d6f9be7ce89cf698135fbaf97d4aa20133bec66
refs/heads/main
2023-06-24T01:37:01.412281
2021-07-18T21:38:27
2021-07-18T21:38:27
null
0
0
null
null
null
null
UTF-8
R
false
false
309
r
Week 6 Code 2.R
#Week 6 Asycn Code library(BayesFactor) chickBayesOut <- anovaBF(weight~feed, data=chickwts) chickBayesOut #Good because odds ratio ## 3:1 not worth mentioning, 3:1-20:1 positive evidence for favored hypothesis ## 20:1 to 150:1 strong evidence ## 150:1 + very strong evidence for favored hypothesis
2b3829034ba3109fd179245af9d28474ea6e3e3e
e3b70a106252542597985a5df1cba35dad9bc27c
/kate_programs_v2.0/k.hbsdist.R
91e57020dd66bc578094d5c07dd26bb7852391ed
[]
no_license
tkangk/tdm
5749d5834264a68286c58a8532af517aec8442d1
5081bff9b7b5793a82d39141a4b0d0de7fd248ac
refs/heads/master
2020-03-24T21:39:12.691526
2018-04-09T21:36:39
2018-04-09T21:36:39
null
0
0
null
null
null
null
UTF-8
R
false
false
5,123
r
k.hbsdist.R
#k.hbsdist.R # Weighted Average Logsum mf.hsls <- HBS.lsLowWeight*mf.hslsl + HBS.lsMidWeight*mf.hslsm + HBS.lsHighWeight*mf.hslsh east2westhill<-as.matrix(array(0,c(numzones,numzones))) east2westhill[ensemble.gw==2,ensemble.gw==1]<-1 westhill2east<-as.matrix(array(0,c(numzones,numzones))) westhill2east[ensemble.gw==1,ensemble.gw==2]<-1 east2westriv<-as.matrix(array(0,c(numzones,numzones))) east2westriv[ensemble.gr==2,ensemble.gr==1]<-1 westriv2east<-as.matrix(array(0,c(numzones,numzones))) westriv2east[ensemble.gr==1,ensemble.gr==2]<-1 ############################################################# # Raw HBS Utility # ############################################################# mf.util <- exp(sweep(HBS.lsCoeff * mf.hsls + HBS.logdistXorwaCoeff * mf.orwa * log (mf.tdist + 1) + HBS.logdistXwaorCoeff * mf.waor * log (mf.tdist + 1) + HBS.logdistXnoXingCoeff * ((mf.orwa + mf.waor)==0) * log (mf.tdist + 1) + HBS.logdistXewWestHillsCoeff * east2westhill * log (mf.tdist + 1) + HBS.logdistXweWestHillsCoeff * westhill2east * log (mf.tdist + 1) + HBS.logdistXewWillRiverCoeff * east2westriv * log (mf.tdist + 1) + HBS.logdistXweWillRiverCoeff * westriv2east * log (mf.tdist + 1) , 2, log (HBS.aerCoeff * ma.aer + HBS.amfCoeff * ma.amf + HBS.conCoeff * ma.con + HBS.eduCoeff * ma.edu + HBS.fsdCoeff * ma.fsd + HBS.govCoeff * ma.gov + HBS.hssCoeff * ma.hss + HBS.mfgCoeff * ma.mfg + HBS.mhtCoeff * ma.mht + HBS.osvCoeff * ma.osv + HBS.pbsCoeff * ma.pbs + HBS.rcsCoeff * ma.rcs + HBS.twuCoeff * ma.twu + HBS.wtCoeff * ma.wt + 1), "+")) ma.utsum <- apply(mf.util,1,sum) mf.utsum <- matrix(ma.utsum,length(ma.utsum),length(ma.utsum)) # Low Income Distribution mf.hbsdtl <- matrix(0,numzones,numzones) mf.hbsdtl[mf.utsum!=0] <- mf.util[mf.utsum!=0]/mf.utsum[mf.utsum!=0] mf.hbsdtl <- sweep(mf.hbsdtl,1,ma.hbsprl,"*") if (mce) { ma.hsldcls <- log(ma.utsum) # save (ma.hbsldcls, file="ma.hbsldcls.dat") # write.table(ma.hbsldcls, sep=",", row.names=F, file="../_mceInputs/nonskims/ma.hbsldcls.csv", 
col.names=c("hbsldcls")) # write.table(ma.hbsprl, sep=",", row.names=F, file="../_mceInputs/nonskims/ma.hbsprl.csv", col.names=c("hbsprl")) } # Middle Income Distribution mf.hbsdtm <- matrix(0,numzones,numzones) mf.hbsdtm[mf.utsum!=0] <- mf.util[mf.utsum!=0]/mf.utsum[mf.utsum!=0] mf.hbsdtm <- sweep(mf.hbsdtm,1,ma.hbsprm,"*") if (mce) { ma.hsmdcls <- log(ma.utsum) # save (ma.hbsmdcls, file="ma.hbsmdcls.dat") # write.table(ma.hbsmdcls, sep=",", row.names=F, file="../_mceInputs/nonskims/ma.hbsmdcls.csv", col.names=c("hbsmdcls")) # write.table(ma.hbsprm, sep=",", row.names=F, file="../_mceInputs/nonskims/ma.hbsprm.csv", col.names=c("hbsprm")) } # High Income Distribution mf.hbsdth <- matrix(0,numzones,numzones) mf.hbsdth[mf.utsum!=0] <- mf.util[mf.utsum!=0]/mf.utsum[mf.utsum!=0] mf.hbsdth <- sweep(mf.hbsdth,1,ma.hbsprh,"*") if (mce) { ma.hshdcls <- log(ma.utsum) # save (ma.hbshdcls, file="ma.hbshdcls.dat") # write.table(ma.hbshdcls, sep=",", row.names=F, file="../_mceInputs/nonskims/ma.hbshdcls.csv", col.names=c("hbshdcls")) # write.table(ma.hbsprh, sep=",", row.names=F, file="../_mceInputs/nonskims/ma.hbsprh.csv", col.names=c("hbsprh")) } if (mce) { purpose_dc <- 'hs' omxFileName <- paste(project.dir,"/_mceInputs/",project,"_",year,"_",alternative,"_dest_choice_",purpose_dc,".omx",sep='') create_omx(omxFileName, numzones, numzones, 7) write_omx(file=omxFileName, matrix=get(paste("ma.",purpose_dc,"ldcls",sep='')), name=paste("ma.",purpose_dc,"ldcls",sep=''), replace=TRUE) write_omx(file=omxFileName, matrix=get(paste("ma.",purpose_dc,"mdcls",sep='')), name=paste("ma.",purpose_dc,"mdcls",sep=''), replace=TRUE) write_omx(file=omxFileName, matrix=get(paste("ma.",purpose_dc,"hdcls",sep='')), name=paste("ma.",purpose_dc,"hdcls",sep=''), replace=TRUE) } ############################################################# # Total HBS Distribution # ############################################################# mf.hbsdt <- mf.hbsdtl + mf.hbsdtm + mf.hbsdth # Remove temporary matrices 
rm(ma.utsum,mf.utsum,mf.util) # 8-district summaries if (file.access("hbsdist.rpt", mode=0) == 0) {system ("rm hbsdist.rpt")} distsum("mf.hbsdt", "HBshop Distribution - Total", "ga", 3, "hbsdist", project, initials) distsum("mf.hbsdtl", "HBshop Distribution - LowInc", "ga", 3, "hbsdist", project, initials) distsum("mf.hbsdtm", "HBshop Distribution - MidInc", "ga", 3, "hbsdist", project, initials) distsum("mf.hbsdth", "HBshop Distribution - HighInc", "ga", 3, "hbsdist", project, initials)