blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b76a1d52903d944f6e89026814e6b38b4e3ebf4f | bd664fa8c415fb7cac646d61c0f6738228a0fdc5 | /code/neon_organism_api_export.R | 7d22128687d1d7ddf6def3b33fb273c92321363b | [] | no_license | sydnerecord/NEONss2019-biodiv | e37617df704719b2dfab6c61c925bc3ae48b303e | fba60146f9a5771d4fb9ccab4d07ea1bfbaec937 | refs/heads/master | 2021-07-04T12:46:50.661197 | 2020-12-22T14:08:40 | 2020-12-22T14:08:40 | 215,184,635 | 4 | 10 | null | 2020-10-01T16:44:21 | 2019-10-15T02:08:15 | R | UTF-8 | R | false | false | 12,778 | r | neon_organism_api_export.R | ## THIS SCRIPT IS NOW OUT OF DATE IN TERMS OF THE WAY TO PULL NEON DATA; KEEPING HERE FOR REFERENCE/BITS OF CODE ##
##
## TITLE: NEON Organismal Data: read in via API, export data
## AUTHOR: Phoebe Zarnetske, Quentin Read
## COLLABORATORS: Sydne Record (Bryn Mawr), Ben Baiser (UFL), Angela Strecker (PSU),
## John M. Grady (MSU/Bryn Mawr), Jonathan Belmaker (Tel Aviv U), Mao-Ning Tuanmu (Academia Sinica),
## Lydia Beaudrot (Rice U), Kate Thibault
## DATA: NEON organismal data: all species, all years, all sites
## PROJECT: "NEON's continental-scale biodiversity"
## DATE: initiated: June 18, 2018; last run:
## This script reads in NEON's organismal data across all available sites,
# computes diversity measures per site and year, and cumulatively,
# and exports those data. The API portion of the script is based on
# QDR's neon_api_grad_lab_rawcode.R available at: https://github.com/NEON-biodiversity/teaching/tree/master/grad_lab
## --- Session setup -------------------------------------------------------
## NOTE(review): rm(list=ls()) and a hard-coded setwd() are discouraged in
## shared scripts (they wipe the caller's workspace and only work on one
## machine); kept as-is because this file documents a historical workflow.
#Clear all existing data
rm(list=ls())
#Close graphics devices
graphics.off()
#set working directory (edit the path to match the machine running this)
setwd("/Volumes/GoogleDrive/My Drive/Research/ScalingUp/NEON_EAGER/Manuscript4_NEON_Organisms") # GD location
#setwd("/Volumes/neon/final_data") # HPCC location
#Install (only if missing) and then load each required package.
#NOTE(review): T is the reassignable shorthand for TRUE; works, but TRUE is
#safer style.
for (package in c("ggplot2", "lme4", "dplyr")) {
if (!require(package, character.only=T, quietly=T)) {
install.packages(package)
library(package, character.only=T)
}
}
# ggplot2 theme: black-and-white base theme at 12 pt, with 10 pt axis text
# and x-axis labels rotated 90 degrees.
theme_set(theme_bw(12))
theme_update(axis.text.x = element_text(size = 10, angle = 90),
axis.text.y = element_text(size = 10))
## Code below from https://github.com/NEON-biodiversity/teaching/tree/master/grad_lab/neon_api_grad_lab_rawcode.R
#### R functions for pulling NEON data from the server ####
##*******************************************************##
## Function to display what data are available
# This function takes a NEON product code `productCode` as an argument, gets a list of the files that are available, and displays a representative set of file names from one site-month combination.
display_neon_filenames <- function(productCode) {
  ## Show a representative file listing for one NEON data product.
  ## Queries the NEON API for every site-month combination of `productCode`
  ## and returns the file-name vector from the combination that offers the
  ## most files (a representative "full" listing).
  require(httr)
  require(jsonlite)
  product_url <- paste0("http://data.neonscience.org/api/v0/products/", productCode)
  avail <- fromJSON(content(GET(product_url), as = 'text'),
                    simplifyDataFrame = TRUE, flatten = TRUE)
  month_urls <- unlist(avail$data$siteCodes$availableDataUrls)
  ## One API call per site-month URL; keep the name vectors in a list.
  name_lists <- lapply(month_urls, function(u) {
    fromJSON(content(GET(u), as = 'text'))$data$files$name
  })
  ## Return the richest listing.
  name_lists[[which.max(lengths(name_lists))]]
}
## Function to pull all data for a given data product
# The first argument, `productCode` is a NEON product code, and the second, `nametag`, is an identifying string that tells the function which CSV to download for each site-month combination. There are usually a lot of metadata files that we aren't interested in for now that go along with the main data file for each site-month combination, and the `nametag` argument tells the function which file is the one that really has the data we want (details below). The `pkg` argument defaults to download the "basic" data package which is usually all we would want. Finally, the `bind` argument defaults to `TRUE` which means return a single data frame, not a list of data frames.
# There are two steps to what the function does: first it queries the API to get a list of URLs of the CSV files available for all site-month combinations for the desired data product. Second it loops through the subset of those URLs that match the `nametag` argument and tries to download them all. If one gives an error, there is a `try()` function built in so that the function will just skip that file instead of quitting.
pull_all_neon_data <- function(productCode, nametag, pkg = 'basic', bind = TRUE) {
  ## Download one CSV per site-month combination for a NEON data product.
  ##
  ## Args:
  ##   productCode: NEON product code, e.g. 'DP1.10072.001'.
  ##   nametag:     string identifying which CSV to take from each site-month
  ##                file listing (e.g. 'pertrapnight').
  ##   pkg:         data package type, 'basic' (default) or 'expanded'.
  ##   bind:        if TRUE (default) return one data frame; otherwise a list.
  ##
  ## Returns: a data frame (bind = TRUE) or a list of data frames. Site-month
  ## combinations whose download fails, or that have no file matching
  ## nametag/pkg, are skipped without raising an error.
  require(httr)
  require(jsonlite)
  require(dplyr)
  # Get list of URLs for all site - month combinations for that data product.
  req <- GET(paste0("http://data.neonscience.org/api/v0/products/", productCode))
  avail <- fromJSON(content(req, as = 'text'), simplifyDataFrame = TRUE, flatten = TRUE)
  urls <- unlist(avail$data$siteCodes$availableDataUrls)
  # Loop through and get the data from each URL.
  res <- list()
  pb <- txtProgressBar(min=0, max=length(urls), style=3)
  count <- 0
  for (i in urls) {
    count <- count + 1
    setTxtProgressBar(pb, count)
    # Get the URLs for the site-month combination
    req_i <- GET(i)
    files_i <- fromJSON(content(req_i, as = 'text'))
    urls_i <- files_i$data$files$url
    # FIX: the regex can match more than one URL (e.g. a data file plus a
    # metadata file that also contains the tag); read.delim() on a vector of
    # length > 1 used to error and silently drop the whole site-month.
    # Take the first match explicitly and skip cleanly when there is none.
    url_match <- grep(paste0('(.*', nametag, '.*', pkg, '.*)'), urls_i, value = TRUE)
    if (length(url_match) == 0) next
    data_i <- try(read.delim(url_match[1], sep = ',', stringsAsFactors = FALSE), TRUE)
    if (!inherits(data_i, 'try-error')) res[[length(res) + 1]] <- data_i
  }
  close(pb)
  # Return as a single data frame or as a list of data frames,
  # depending on what option was selected.
  if (bind) {
    # FIX: bind_rows() (dplyr is already a dependency) tolerates site-months
    # whose CSVs gained or lost columns between releases, filling missing
    # columns with NA; do.call(rbind, ...) errors on any column mismatch.
    bind_rows(res)
  } else {
    res
  }
}
## Function to get spatial information (coordinates) for a site or plot
# The spatial locations and metadata for sites and plots are stored in a different location on NEON's API from the actual data. This function should be called for a single site at a time (`siteID` argument). The second argument, `what`, is a string. The default is `"site"` which will return a single row of a data frame with spatial location for the entire site as a single point. If `what` is set to another string such as `"bird"` it will go through the spatial location data URLs, find all that have `"bird"` in the name, pull the spatial information from them, and return a data frame with one row per bird plot. In either case the data frame has 19 columns (the number of location attributes NEON has listed for each site or plot).
get_site_locations <- function(siteID, what = 'site') {
  ## Fetch spatial metadata for a NEON site or for its child plots.
  ##
  ## siteID: NEON site code.
  ## what:   'site' (default) returns one row for the whole site; any other
  ##         string (e.g. 'bird') returns one row per child location whose
  ##         URL contains that string.
  ## Returns a data frame with 19 location-attribute columns.
  require(httr)
  require(jsonlite)
  require(purrr)
  # Pull the full location record for the site.
  loc_url <- paste0("http://data.neonscience.org/api/v0/locations/", siteID)
  site_loc <- fromJSON(content(GET(loc_url), as = 'text'),
                       simplifyDataFrame = TRUE, flatten = TRUE)
  if (what == 'site') {
    # Whole-site coordinates: the first 19 attributes of the record.
    return(data.frame(site_loc$data[1:19]))
  }
  # Otherwise gather every child-location URL matching `what` and fetch the
  # same 19 attributes for each, one row per plot.
  plot_urls <- grep(what, site_loc$data$locationChildrenUrls, value = TRUE)
  map_dfr(plot_urls, function(plot_url) {
    plot_loc <- fromJSON(content(GET(plot_url), as = 'text'),
                         simplifyDataFrame = TRUE, flatten = TRUE)
    plot_loc[[1]][1:19]
  })
}
#### Download organism data ####
##****************************##
## NEON Level-1 ('DP1') product codes for every taxonomic group used below.
## These constants are the `productCode` arguments for pull_all_neon_data()
## and display_neon_filenames().
# You can look in the data product catalog (http://data.neonscience.org/data-product-catalog) and manually figure out what the product codes are for small mammal trap data and for bird point count data, but I've provided them here. The `DP1` in the code indicates that this is Level 1 data. For Level 1 data, quality controls were run (Level 0 would be `DP0` meaning completely raw data) but the actual values are still raw values measured in the field, not some kind of calculated quantity (Level 2 and higher would be derived values).
# Breeding landbird point counts
# http://data.neonscience.org/data-product-view?dpCode=DP1.10003.001
bird_code <- 'DP1.10003.001'
# Fish electrofishing, gill netting, and fyke netting counts
# http://data.neonscience.org/data-product-view?dpCode=DP1.20107.001
fish_code <- 'DP1.20107.001'
# Aquatic plant, bryophyte, lichen, and macroalgae point counts in wadeable streams
# http://data.neonscience.org/data-product-view?dpCode=DP1.20072.001
aquaplant <- 'DP1.20072.001'
# Ground beetles sampled from pitfall traps
# http://data.neonscience.org/data-product-view?dpCode=DP1.10022.001
beetle_code <- 'DP1.10022.001'
# Macroinvertebrate collection
# http://data.neonscience.org/data-product-view?dpCode=DP1.20120.001
macroinv_code <- 'DP1.20120.001'
# Mosquitoes sampled from CO2 traps
# http://data.neonscience.org/data-product-view?dpCode=DP1.10043.001
mosquito_code <- 'DP1.10043.001'
# Periphyton, seston, and phytoplankton collection
# http://data.neonscience.org/data-product-view?dpCode=DP1.20166.001
periphyton_code <- 'DP1.20166.001'
# Riparian composition and structure
# http://data.neonscience.org/data-product-view?dpCode=DP1.20275.001
riparian_code<-'DP1.20275.001'
# Plant presence and percent cover
# http://data.neonscience.org/data-product-view?dpCode=DP1.10058.001
plant_code<-'DP1.10058.001'
# Small mammal box trapping
# http://data.neonscience.org/data-product-view?dpCode=DP1.10072.001
mammal_code <- 'DP1.10072.001'
# Soil microbe community composition
# http://data.neonscience.org/data-product-view?dpCode=DP1.10081.001
microbe_code<-'DP1.10081.001'
# Ticks sampled using drag cloths
# http://data.neonscience.org/data-product-view?dpCode=DP1.10093.001
tick_code <-'DP1.10093.001'
# Woody plant vegetation structure
# http://data.neonscience.org/data-product-view?dpCode=DP1.10098.001
woody_code<-'DP1.10098.001'
# Zooplankton collection
# http://data.neonscience.org/data-product-view?dpCode=DP1.20219.001
zoop_code<-'DP1.20219.001'
## ---- Small mammal data --------------------------------------------------
# Show which files exist for the small mammal product (one site-month's
# listing; requires an internet connection).
display_neon_filenames(mammal_code)
# Pull the per-trap-night capture records (basic package) for every
# site-month combination into one data frame, plus the per-plot-night
# summaries. Downloading takes several minutes; a progress bar is shown.
mammal_data <- pull_all_neon_data(productCode = mammal_code,
                                  nametag = 'pertrapnight')
mammal_data_plot <- pull_all_neon_data(productCode = mammal_code,
                                       nametag = 'perplotnight')
# Now let's take a look at what is in that data frame . . .
str(mammal_data)
# Most rows are trap-nights where no mammal was captured. Only trap statuses
# 4 and 5 correspond to one or more mammals caught, so keep just those rows
# (grepl('4|5', ...) matches any status string containing a 4 or a 5).
nrow(mammal_data)
table(mammal_data$trapStatus)
mammal_data <- mammal_data %>%
  filter(grepl('4|5', trapStatus))
nrow(mammal_data)
# Make a year column so we can subset by year (collectDate is 'YYYY-MM-DD';
# strptime years are counted from 1900, hence the +1900).
mammal_data$year <- strptime(mammal_data$collectDate, "%Y-%m-%d")$year + 1900
# How many site-by-year datasets are available?
# FIX: bird_data is not created until later in this script, so tabulating it
# here broke a clean top-to-bottom run; only tabulate it when it already
# exists in the session.
if (exists("bird_data")) {
  table(bird_data$siteID, lubridate::year(bird_data$startDate))
}
table(mammal_data$siteID, lubridate::year(mammal_data$collectDate))
# export file to HPCC as final data
write.csv(mammal_data, file="/Volumes/neon/raw_data/organismal_data_june2018/mammal_data.csv", row.names=F)
## ---- Bird data ----------------------------------------------------------
# Show which files exist for the breeding landbird product.
display_neon_filenames(bird_code)
# The point-count file names contain the string 'count'; pull that CSV for
# every site-month combination and stack into one data frame.
bird_data <- pull_all_neon_data(productCode = bird_code,
                                nametag = 'count')
# Let's see what is in that data frame . . .
str(bird_data)
# Species richness per site = number of unique taxa observed there.
bird_richness <- bird_data %>%
  group_by(siteID) %>%
  summarize(richness = length(unique(taxonID)))
# FIX: this export previously wrote bird_data to 'mammal_data.csv',
# clobbering the mammal export above; write it to its own file instead.
write.csv(bird_data, file="/Volumes/neon/raw_data/organismal_data_june2018/bird_data.csv", row.names=F)
|
a8122d3829d85bb80ab603463ff84cf6e490f1ef | e1ebc5da547e60c517aa99c3ebcb9557064b2f54 | /plot3.R | c7bfb60fbf3be943def2dd57e0fd37217f650c09 | [] | no_license | datatony/ExData_Plotting1 | f14152c9d4782b9eb5985a77ce431f0c22d50ac9 | 2479f00bce45ff6f7af215feecf87848f746bc45 | refs/heads/master | 2020-12-29T00:41:35.757966 | 2015-05-09T16:40:04 | 2015-05-09T16:40:04 | 35,334,161 | 0 | 0 | null | 2015-05-09T15:42:02 | 2015-05-09T15:42:02 | null | UTF-8 | R | false | false | 1,414 | r | plot3.R | ##download file from source
## Plot 3 of the Exploratory Data Analysis course project: energy
## sub-metering over 1-2 Feb 2007, written to plot3.png.
## Download the zipped data set into a temp file.
temp <- tempfile()
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", temp)
## Read and clean data.
## FIX: the raw file has a header row and encodes missing values as "?".
## Reading with header = TRUE and na.strings = "?" parses the numeric
## columns directly, instead of importing the header as a data row and
## coercing factor columns with as.numeric(as.character(...)).
dat <- read.table(unz(temp, "household_power_consumption.txt"),
                  sep = ";", header = TRUE, na.strings = "?",
                  stringsAsFactors = FALSE)
unlink(temp)
# Parse dates and keep only the two analysis days.
dat[,1] <- as.Date(dat[,1], "%d/%m/%Y")
dat <- subset(dat, dat[,1] == "2007-02-01" | dat[,1] == "2007-02-02")
# Standardize the column names used below.
colnames(dat) <- c("Date", "Time", "Global_Active_Power", "Global_Reactive_Power", "Voltage", "Global_Intensity", "Sub_Metering_1", "Sub_Metering_2", "Sub_Metering_3")
# Build a combined date-time column (column 10) for the x axis.
datetimes <- paste(dat[,1], dat[,2])
dat[,10] <- datetimes
library(lubridate)
dat[,10] <- ymd_hms(dat[,10])
## Create Plot 3
png(file="plot3.png", width=480, height=480, bg="transparent")
plot(dat[,10], dat[,7], type="l", ylim=c(0,38), xlab="", ylab="Energy sub metering")
# FIX: add the remaining series with lines() instead of re-plotting whole
# figures over each other with par(new = TRUE), which can misalign axes.
lines(dat[,10], dat[,8], col="red")
lines(dat[,10], dat[,9], col="blue")
legend("topright", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col= c("black", "red", "blue"), lwd=1)
dev.off()
|
8e649ac438e4017a6bfe6dde608babeeed24a584 | 7c39da976f28af016e5b1f847e68473c659ea05d | /man/PreprocessViews2-coercion.Rd | 850d37ef7726444bb251115930fff7dfb714b217 | [] | no_license | cancer-genomics/trellis | b389d5e03959f8c6a4ee7f187f7749048e586e03 | 5d90b1c903c09386e239c01c10c0613bbd89bc5f | refs/heads/master | 2023-02-24T05:59:44.877181 | 2023-01-09T20:38:36 | 2023-01-09T20:38:36 | 59,804,763 | 3 | 1 | null | 2023-01-11T05:22:52 | 2016-05-27T04:45:14 | R | UTF-8 | R | false | true | 728 | rd | PreprocessViews2-coercion.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Preprocess-class.R
\docType{methods}
\name{setAs}
\alias{setAs}
\alias{coerce,PreprocessViews2,RangedSummarizedExperiment-method}
\title{Coerce a \code{PreprocessViews2} object to a \code{RangedSummarizedExperiment}}
\arguments{
\item{from}{character string ('PreprocessViews2')}
\item{to}{character string ('RangedSummarizedExperiment')}
}
\value{
a \code{RangedSummarizedExperiment}
}
\description{
This method pulls the assay data from disk through the views object
interface, and then creates a \code{SummarizedExperiment} object
with an assay named 'copy'.
}
\examples{
pviews <- PreprocessViews2()
as(pviews, "RangedSummarizedExperiment")
}
|
178e4589f7ea749c28750dd80994653dee45347e | c756acb3a3d25a279e8e5209ab524d421f2fd236 | /run_analysis.R | 42005724db2771d60d5ef2b56083c68ac0a54437 | [] | no_license | ru-garcia/TidyData | fdc3351ee083a62a8e75301407b8dc2fcb489c5e | 581ce8294b8afc60b547a9fb2e1f8eb6de953b65 | refs/heads/master | 2020-07-08T10:55:29.781638 | 2015-02-23T05:12:01 | 2015-02-23T05:12:01 | 31,172,802 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,302 | r | run_analysis.R | ## Getting and Cleaning Data Course Project
##
## This R script creates the function run_analysis, which imports and merges
## data files in the UCI HAR Dataset directory and creates the two required tidy data files.
##
## To use, run this script in R Studio to create the function, then call the function
## with the path to the UCI HAR Dataset directory on your machine.
##
## Example function call:
## run_analysis("~/Documents/Ruben/Coursera Data Science/Getting And Cleaning Data/CourseProject/UCI HAR Dataset")
##
## Output:
## Two files will be created in the UCI HAR Dataset.
## measures.txt contains only measurements on the mean and standard deviation.
## average_measures.txt contains average of each variable in measures_.txt,
## for each activity and and each subject.
## Build the two required tidy data sets from the UCI HAR Dataset in `dir`.
##
## Side effects: writes 'measures.txt' (mean/std measurements only) and
## 'average_measures.txt' (per subject x group x activity averages) into
## `dir`. Returns nothing useful (the value of the last write.table call).
run_analysis <- function(dir) {
  ## Read in test and training data files
  testSubject <- read.table(paste(dir, "test", "subject_test.txt", sep = "/"), colClasses="factor")
  testActivity <- read.table(paste(dir, "test", "y_test.txt", sep = "/"), colClasses="factor")
  testMeasures <- read.table(paste(dir, "test", "X_test.txt", sep = "/"), colClasses=c(rep("numeric", 561)))
  trainSubject <- read.table(paste(dir, "train", "subject_train.txt", sep = "/"), colClasses="factor")
  trainActivity <- read.table(paste(dir, "train", "y_train.txt", sep = "/"), colClasses="factor")
  trainMeasures <- read.table(paste(dir, "train", "X_train.txt", sep = "/"), colClasses=c(rep("numeric", 561)))
  ## Read in features and activity label data files
  feature <- read.table(paste(dir, "features.txt", sep = "/"))
  activity <- read.table(paste(dir, "activity_labels.txt", sep = "/"))
  ## Rename measurement column names to features
  colnames(testMeasures) <- feature$V2
  colnames(trainMeasures) <- feature$V2
  ## Combine test data files
  test <- cbind("subject" = testSubject$V1,
                "group" = "TEST",
                "activity" = testActivity$V1,
                testMeasures[ , 1:561])
  ## Combine training data files
  train <- cbind("subject" = trainSubject$V1,
                 "group" = "TRAIN",
                 "activity" = trainActivity$V1,
                 trainMeasures[ , 1:561])
  ## Merge test and training data
  allData <- rbind(test, train)
  ## Replace activity codes with descriptive names.
  ## FIX: the activity_labels.txt lookup table was read but never used --
  ## the six labels were hard-coded by repeated assignment. Map codes to
  ## labels through the table so the naming follows the data set itself.
  labels <- as.character(activity$V2)
  codes <- as.character(activity$V1)
  allData$activity <- factor(labels[match(as.character(allData$activity), codes)])
  ## Columns holding mean() or std() measurements (case-insensitive); a
  ## single regex replaces the previous two-column grepl bookkeeping.
  columnList <- which(grepl("mean\\(\\)|std\\(\\)", colnames(allData), ignore.case = TRUE))
  ## Tidy Data Set #1: create file measures.txt containing only
  ## measurements on the mean and standard deviation.
  subset <- allData[ , c(1:3, columnList)]
  write.table(subset, paste(dir, "measures.txt", sep = "/"), row.names = FALSE)
  ## Tidy Data Set #2: create file average_measures.txt containing the
  ## average of each variable in measures.txt, for each activity and subject.
  library(dplyr)
  grouped <- group_by(subset, subject, group, activity)
  ## summarise_all() replaces the deprecated summarise_each()/funs() pair.
  groupedAverage <- summarise_all(grouped, mean)
  ## Prefix every non-grouping column with "avg-" (derived from the column
  ## count instead of the previous hard-coded 4:69 range).
  colnames(groupedAverage)[-(1:3)] <- paste("avg", colnames(groupedAverage)[-(1:3)], sep = "-")
  write.table(groupedAverage, paste(dir, "average_measures.txt", sep = "/"), row.names = FALSE)
}
|
08965038c8accd641c0f76038dc68a1fef610706 | df575dfbb2687921a72b56230f961461e53234ff | /src/pbr_plots.R | e875e6f779a059b91451073a3c9e0a13522bce9e | [] | no_license | ECGen/pb_removal_nets | ae3eb07aa7ddbf13cdff0a39a8ed2a0af1ce275b | 1ce0ed51235230b942fe7e2779da99587db8e54f | refs/heads/master | 2022-03-27T07:32:46.793140 | 2020-01-08T21:23:21 | 2020-01-08T21:23:21 | 13,688,660 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,448 | r | pbr_plots.R | ### PBR one off plots
### MKLau
### 18 Jun 2015
## ---- Observed vs. null modularity ---------------------------------------
## NOTE(review): this script assumes objects/functions such as pbr.08,
## pbr.09, geno.09, se(), chPlot(), meanMat(), unipart(), and sortMat() are
## supplied by ../R/packages.R or a previously loaded workspace -- confirm
## before running top to bottom.
source("../R/packages.R")
# Modularity summary table; rows 4,5,8,9 are kept by position, so the CSV
# row order matters.
coa <- read.csv('../data/pbr_coa.csv')
coa <- coa[c(4,5,8,9),]
# Last '.'-separated token of each row label encodes the aphid treatment:
# 'avg' -> P. betae present (1), 'npb' -> no P. betae (0).
x <- unlist(lapply(strsplit(as.character(coa$X),split='\\.'),function(x) x[length(x)]))
x[x == 'avg'] <- 1;x[x == 'npb'] <- 0
# First character of the label is used as the trace factor below.
trace <- substr(coa$X,1,1)
z <- coa$z
par(cex.lab=1.25,cex.axis=1.25)
interaction.plot(x,trace,z,type='b',pch=c('c','x'),legend=FALSE,xlab=expression(italic('P. betae')),ylab='z-score (Standardized Modularity)',lty=c(1,2))
# Null modularity distribution drawn from the mean (col 5) and sd (col 6)
# of row 4; the dashed line marks the observed modularity (col 4).
null <- rnorm(1000,mean=coa[4,5],sd=coa[4,6])
par(cex.lab=1.25,cex.axis=1.25)
hist(null,xlim=c(0,coa[4,4]+0.05),xlab='modularity',ylab='frequency',main='')
abline(v=coa[4,4],lty=2,col='black',lwd=2)
### see the office window
## ---- Species-level contribution to modularity (CM) ----------------------
# Read every null-model result file for control (C) and exclusion (X),
# keeping column 2 of each CSV, then cbind into one matrix per treatment.
cmSppCavg <- lapply(dir('../data/conModspecies/cmCavg/results/',full.names=TRUE),read.csv)
cmSppXavg <- lapply(dir('../data/conModspecies/cmXavg/results/',full.names=TRUE),read.csv)
names(cmSppCavg) <- dir('../data/conModspecies/cmCavg/results/')
names(cmSppXavg) <- dir('../data/conModspecies/cmXavg/results/')
cmSppCavg <- lapply(cmSppCavg,function(x) x[,2])
cmSppXavg <- lapply(cmSppXavg,function(x) x[,2])
cmSppCavg <- do.call(cbind,cmSppCavg)
cmSppXavg <- do.call(cbind,cmSppXavg)
# Restrict both matrices to the species (columns) present in both treatments.
cmSppCavg <- (cmSppCavg[,sapply(colnames(cmSppCavg),function(x,y) x %in% y,y=colnames(cmSppXavg))])
cmSppXavg <- (cmSppXavg[,sapply(colnames(cmSppXavg),function(x,y) x %in% y,y=colnames(cmSppCavg))])
# z-score of the observed modularity against each species' null column, and
# the one-tailed p (proportion of null values exceeding the observed value).
z.cmSppC <- apply(cmSppCavg,2,function(x,obs) (obs - mean(x)) / sd(x),obs=coa[1,4])
z.cmSppX <- apply(cmSppXavg,2,function(x,obs) (obs - mean(x)) / sd(x),obs=coa[3,4])
p.cmSppC <- apply(cmSppCavg,2,function(x,obs) length(x[x > obs])/length(x),obs=coa[1,4])
p.cmSppX <- apply(cmSppXavg,2,function(x,obs) length(x[x > obs])/length(x),obs=coa[3,4])
# Side-by-side barplots of sorted species CM for the two treatments.
par(mfrow=c(1,2),mai=c(1.02*1.5, 0.82, 0.82, 0.42),cex.axis=0.5)
barplot(sort(z.cmSppC),las=2,ylab='Species CM (Control)')
barplot(sort(z.cmSppX),las=2,ylab='Species CM (Exclusion)')
# Control vs. exclusion scatter with a 1:1 reference line; species names are
# plotted in place of points.
par(mfrow=c(1,1),mai=c(1.02, 0.82, 0.82, 0.42))
axis.min <- floor(min(c(z.cmSppC,z.cmSppX)))
axis.max <- ceiling(max(c(z.cmSppC,z.cmSppX)))
plot(z.cmSppX~z.cmSppC,xlim=c(axis.min,axis.max),ylim=c(axis.min,axis.max),xlab='Species CM (Control)',ylab='Species CM (Exclusion)',pch='')
lines((axis.min:axis.max),(axis.min:axis.max),lwd=0.75)
text(y=z.cmSppX,x=z.cmSppC,labels=names(z.cmSppX),cex=0.75)
# Paired test of the CM difference, then treatment means +/- SE.
t.test(I(z.cmSppX-z.cmSppC))
MU <- c(mean(z.cmSppC),mean(z.cmSppX))
SE <- c(se(z.cmSppC),se(z.cmSppX))
barplot2(MU,plot.ci=TRUE,ci.l= MU-SE,ci.u= MU+SE,names=c('Control','Exclusion'),ylab='Contribution to Modularity (z)')
plot(density(z.cmSppC-z.cmSppX),main='',xlab='Change in Contribution to Modularity (z_C - z_E)')
### Trees
## ---- Tree-level contribution to modularity ------------------------------
# Same workflow as the species-level CM above, but one result file per tree.
cmCavg <- lapply(dir('../data/conMod/cmCavg/results/',full.names=TRUE),read.csv)
cmXavg <- lapply(dir('../data/conMod/cmXavg/results/',full.names=TRUE),read.csv)
names(cmCavg) <- dir('../data/conMod/cmCavg/results/')
names(cmXavg) <- dir('../data/conMod/cmXavg/results/')
cmCavg <- lapply(cmCavg,function(x) x[,2])
cmXavg <- lapply(cmXavg,function(x) x[,2])
cmCavg <- do.call(cbind,cmCavg)
cmXavg <- do.call(cbind,cmXavg)
# Order columns by their numeric file name, then relabel 1..n so the two
# treatments can be matched positionally.
cmCavg <- cmCavg[,order(as.numeric(colnames(cmCavg)))]
cmXavg <- cmXavg[,order(as.numeric(colnames(cmXavg)))]
colnames(cmCavg) <- 1:ncol(cmCavg)
colnames(cmXavg) <- 1:ncol(cmXavg)
# Keep only trees present in both treatments.
cmCavg <- (cmCavg[,sapply(colnames(cmCavg),function(x,y) x %in% y,y=colnames(cmXavg))])
cmXavg <- (cmXavg[,sapply(colnames(cmXavg),function(x,y) x %in% y,y=colnames(cmCavg))])
# z-scores of the observed modularity against each tree's null distribution.
z.cmC <- apply(cmCavg,2,function(x,obs) (obs - mean(x)) / sd(x),obs=coa[1,4])
z.cmX <- apply(cmXavg,2,function(x,obs) (obs - mean(x)) / sd(x),obs=coa[3,4])
par(mfrow=c(1,2),mai=c(1.02*2.5, 0.82, 0.82, 0.42))
barplot(sort(z.cmC),las=2,ylab='Species CM (Control)')
barplot(sort(z.cmX),las=2,ylab='Species CM (Exclusion)')
par(mfrow=c(1,1),mai=c(1.02, 0.82, 0.82, 0.42))
axis.min <- floor(min(c(z.cmC,z.cmX)))
axis.max <- ceiling(max(c(z.cmC,z.cmX)))
plot(z.cmX~z.cmC,xlim=c(axis.min,axis.max),ylim=c(axis.min,axis.max),xlab='Species CM (Control)',ylab='Species CM (Exclusion)',pch=19)
lines((axis.min:axis.max),(axis.min:axis.max),lwd=0.75)
### genotypes?
# Genotype of each retained tree; geno.09 is not defined in this file
# (presumably loaded by ../R/packages.R -- confirm).
cmgeno <- factor(as.character(geno.09$x[as.numeric(colnames(cmCavg))]))
# Stack exclusion then control values for the treatment-by-genotype plot.
z.cm <- c(z.cmX,z.cmC)
trt <- c(rep('X',length(z.cmX)),rep('C',length(z.cmC)))
geno <- factor(c(as.character(cmgeno),as.character(cmgeno)))
# NOTE(review): pb.A is referenced here but is not assigned until further
# down this script -- this section appears to rely on a prior interactive
# run; confirm execution order before sourcing top to bottom.
pb <- unlist(pb.A)
cm.data <- list(c=data.frame(geno=cmgeno,pb=pb.A$c,cm=z.cmC),x=data.frame(geno=cmgeno,pb=pb.A$x,cm=z.cmX))
# Genotype x aphid-abundance GLMs on squared CM, fit per treatment.
anova(glm(I(cm^2) ~ geno * pb,data=cm.data$c),test='Chi')
anova(glm(I(cm^2) ~ geno * pb,data=cm.data$x),test='Chi')
par(cex.lab=1.25,cex.axis=1.25)
interaction.plot(trt,geno,z.cm,type='b')
### cross hair plots
# One color per genotype (the first ch.col assignment is immediately
# overwritten by the rainbow palette).
ch.col <- rep('black',length(z.cmC))
ch.col <- rainbow(max(as.numeric(cmgeno)))[as.numeric(cmgeno)]
ch.pch <- rep(19,length(z.cmC))
chPlot(cbind(z.cmC,z.cmX),f=cmgeno,col=ch.col,pch=ch.pch,xlim=c(0,1.5),ylim=c(0,1.5))
lines(seq(0,1.5,by=0.5),seq(0,1.5,by=0.5),lty=2)
legend('bottomright',legend=levels(cmgeno),col=unique(ch.col),pch=19)
### Does the contribution to modularity of pb vary by genotype?
### Correlate PB abundance, Removal effect, richness and Z
# Mean P. betae abundance across 2008/2009 (column 1 of the pbr matrices)
# and per-tree richness (count of non-zero entries) for each treatment.
pb.A <- list((pbr.08$c[,1]+pbr.09$c[,1])/2,(pbr.08$x[,1]+pbr.09$x[,1])/2)
names(pb.A) <- c('c','x')
richness <- list(apply(pbr.09$c,1,function(x) sum(sign(x))),apply(pbr.09$x,1,function(x) sum(sign(x))))
names(richness) <- c('c','x')
### ANCOVA
abundance <- unlist(pb.A)
Z <- c(z.cmC,z.cmX)
genotype <- unlist(geno.09)
# Rescale Z and abundance by their column maxima.
X <- cbind(Z,A=abundance)
X <- apply(X,2,function(x) x/max(x))
tree <- rep(1:length(z.cmC),2)
summary(lm(I((Z^2)) ~ trt*tree/abundance))
# Repeated-measures setup: one row per tree with control/exclusion CM;
# genotypes 1008 and 1020 are dropped before the rm ANOVA.
z.data <- data.frame(tree=1:length(z.cmC),geno=geno.09$c,pb.a=I(pb.A$c-pb.A$x),cm.c=z.cmC,cm.x=z.cmX)
z.data <- z.data[!(z.data$geno %in% c(1008,1020)),]
z.data <- make.rm(constant=c("tree","pb.a","geno"),repeated=c("cm.c","cm.x"),data=z.data)
summary(aov(repdat~contrasts*geno*pb.a+Error(tree),z.data))
z.data <- data.frame(tree=1:length(z.cmC),geno=geno.09$c,pb.a=I(pb.A$c-pb.A$x),cm.c=z.cmC,cm.x=z.cmX,z.d=I(abs(z.cmC-z.cmX)))
# NOTE(review): the lmer() call below has no data argument and references
# columns (cm, pb) not bound in this scope -- it looks like an abandoned
# exploratory line; confirm before relying on it.
lmer(cm ~ pb * (1 | geno))
### Plots
## ---- Figure 3: genotype CM vs. aphid abundance --------------------------
# Four-step grey palette and display names for the ten genotypes; the index
# vector c(2,4,5,1,...) reorders the factor levels into legend order.
g.pal <- grey(c(0,0.3,0.7,0.95))
g.names <- toupper(as.character(levels(cmgeno))[c(2,4,5,1,8,9,7,10,6,3)])
g.names[g.names == 'COAL 3'] <- 'Coal-3'
# Lookup table: one row per genotype with its plotting symbol and fill.
g.pts <- data.frame(cmgeno = as.character(levels(cmgeno))[c(2,4,5,1,8,9,7,10,6,3)],g.names,
pch = c(21,21,25,24,22,22,23,23,24,25),
col = rep(1,10),
bg = g.pal[c(1,3,2,4,2,3,1,3,2,4)])
# Data subsets: c = control minus genotypes 996/1008/1020, x = exclusion,
# s = only genotypes 996/1008/1020 (control values), d = C-minus-X deltas.
ch.dat <- list(c = data.frame(cmgeno,pb.A$c,z.cmC)[!(cmgeno %in% c('996','1008','1020')),],
x = data.frame(cmgeno,pb.A$x,z.cmX),
s = data.frame(cmgeno,pb.A$c,z.cmC)[cmgeno %in% c('996','1008','1020'),],
d = data.frame(cmgeno,(pb.A$c - pb.A$x),(z.cmX - z.cmC)))
### figure 3
par(mfrow = c(1,1))
# Output device switch: 'pdf', 'png', or anything else for on-screen.
fig3out <- 'png'
if (fig3out == 'pdf'){
pdf('../results/fig3.pdf')
line.lwd <- 3
}else if (fig3out == 'png'){
png('../results/fig3.png',height = 1400,width = 1400,res = 82, pointsize = 34)
line.lwd <- 3
}else{
line.lwd <- 1
}
# Layer 1: exclusion trees (light grey, grey regression line).
chPlot(ch.dat$x[,2:3],f=ch.dat$x[,1],
add = FALSE,
col = rep('lightgrey',nrow(ch.dat$x)),
pch = g.pts[match(ch.dat$x[,'cmgeno'],g.pts$cmgeno),'pch'],
cex = 1.5,
bg = as.character(g.pts[match(ch.dat$x[,'cmgeno'],g.pts$cmgeno),'bg']),
xlim = c(0,75),ylim = c(0,1.5),se = TRUE,
line.lm = TRUE,line.col = 'darkgrey',line.lty = 1,line.lwd = line.lwd,
xlab = expression(italic('Pemphigus betae')~' abundance'),
ylab = 'Tree genotype contribution to modularity (Z)')
# Reference line at the largest genotype mean + SE of exclusion abundance.
abline(v = max(tapply(pb.A$x,cmgeno,mean)+tapply(pb.A$x,cmgeno,se)),col = 'lightgrey',lty = 2,lwd = line.lwd)
# Layer 2: control trees (black, black regression line).
chPlot(ch.dat$c[,2:3],f=ch.dat$c[,1],
add = TRUE,
col = rep('black',nrow(ch.dat$c)),
pch = g.pts[match(ch.dat$c[,'cmgeno'],g.pts$cmgeno),'pch'],
bg = as.character(g.pts[match(ch.dat$c[,'cmgeno'],g.pts$cmgeno),'bg']),
xlim = c(0,75),ylim = c(0,1.5),se = TRUE,
line.lm = TRUE,line.col = 'black',line.lty = 1,line.lwd = line.lwd,
xlab = expression(italic('Pemphigus betae')~' abundance'),
ylab = 'Tree genotype contribution to modularity (Z)',cex=1.5)
# Layer 3: genotypes 996/1008/1020 (red, dashed red regression line).
chPlot(ch.dat$s[,2:3],f=ch.dat$s[,1],
add = TRUE,
col = rep('red',nrow(ch.dat$s)),
pch = g.pts[match(ch.dat$s[,'cmgeno'],g.pts$cmgeno),'pch'],
cex = 1.5,
bg = as.character(g.pts[match(ch.dat$s[,'cmgeno'],g.pts$cmgeno),'bg']),
xlim = c(0,75),ylim = c(0,1.5),se = TRUE,
line.lm = TRUE,line.col = 'red',line.lty = 2,line.lwd = line.lwd,
xlab = '',
ylab = '')
# NOTE(review): the legend assigns grey to 'Present (Resistant)' and red to
# 'Excluded', but the series drawn above use grey for the exclusion data
# (ch.dat$x) and red dashed for the 996/1008/1020 control subset (ch.dat$s)
# -- the last two legend entries look swapped; confirm against the figure.
legend('topright',title = expression(italic('P. betae')),
legend = c('Present','Present (Resistant)','Excluded'),
col=c(1,'grey','red'),lty=c(1,1,2),lwd = line.lwd,bg='white',box.col='black')
# Two-pass genotype legend: first a white box with blank entries, then the
# symbols and names drawn on top of it (bty = 'n').
legend('bottomright',
legend = rep(' ',length(g.pts[,'g.names'])),
lty = 1,lwd = line.lwd,
col = 'black',
bg = 'white')
legend('bottomright',
legend = paste0(rep(' ',length(g.pts[,'g.names'])),g.pts[,'g.names']),
pch = g.pts[,'pch'],
col = g.pts[,'col'],
pt.bg = as.character(g.pts[,'bg']),
bty = 'n')
# Close the file device only when one was opened above.
if (fig3out == 'pdf' | fig3out == 'png'){dev.off()}else{}
### network plots
## ---- Network visualizations and final statistics ------------------------
# Year-averaged tree x species matrices, floored to integer counts.
net.c <- floor(meanMat(pbr.08$c,pbr.09$c))
net.x <- floor(meanMat(pbr.08$x,pbr.09$x))
### unimodal representation of the bipartite networks
## uni <- lapply(list(c08=pbr.08$c,x08=pbr.08$x,c09=pbr.09$c,x09=pbr.09$x),unipart)
uni <- lapply(list(c=net.c,x=net.x),unipart,rm.zero=TRUE,std=FALSE,thresh=0.0001)
## uni <- lapply(list(c08=pbr.08$c,x08=pbr.08$x,c09=pbr.09$c,x09=pbr.09$x),cdNet,alpha=0.001)
# Sort rows and columns by their totals, largest first, then keep the 35
# best-connected nodes for plotting.
uni <- lapply(uni,function(x) x[order(apply(x,1,sum),decreasing=TRUE),order(apply(x,2,sum),decreasing=TRUE)])
uni.sub <- lapply(uni,function(x,n) x[1:n,1:n],n=35)
# Node colors: the P. betae node ('pb') black, everything else dark grey.
uni.col <- lapply(uni,colnames)
for (i in 1:length(uni.col)){
uni.col[[i]][tolower(uni.col[[i]]) == 'pb'] <- 'black';uni.col[[i]][tolower(uni.col[[i]]) != 'black'] <- 'darkgrey'
}
# Eigenvector centrality drives the target (bullseye) layout radius.
cen <- lapply(uni,evcent,rescale=TRUE)
cen.sub <- lapply(uni,function(x,n) x[1:n],n=35)
par(mfrow=c(1,2),mai=c(0,0,0.5,0))
pc <- gplot.target(uni.sub$c,cen.sub$c,gmode='graph',circ.col='darkgrey',circ.lab=FALSE,vertex.col='white',displaylabels=FALSE,edge.col='lightgrey',edge.lwd=0.01,vertex.border='white',main='Aphid Present',vertex.cex=1)
points(pc,col=uni.col$c,pch=19,cex=1.2)
# Re-draw only the pb node on top (cex 1 for 'black', 0 for the rest).
points(pc,col=uni.col$c,pch=19,cex=as.numeric(uni.col$c == 'black'))
# NOTE(review): both panels are titled 'Aphid Present', yet this second
# panel plots the exclusion network -- the title looks like a copy-paste
# slip; confirm the intended label.
px <- gplot.target(uni.sub$x,cen.sub$x,gmode='graph',circ.col='darkgrey',circ.lab=FALSE,vertex.col='white',displaylabels=FALSE,edge.col='lightgrey',edge.lwd=0.01,vertex.border='white',main='Aphid Present',vertex.cex=1)
points(px,col=uni.col$x,pch=19,cex=1.2)
points(px,col=uni.col$x,pch=19,cex=as.numeric(uni.col$x == 'black'))
# Standardized change in degree (exclusion minus control), labeled by node.
deg <- lapply(uni,degree,rescale=TRUE)
d.deg <- deg[[2]] - deg[[1]]
d.deg <- (d.deg - mean(d.deg))/sd(d.deg)
plot(d.deg,pch='')
text(d.deg,labels=colnames(uni[[1]]))
### bipartite representation
# Control network restricted to the species used in the CM analysis, with
# counts below net.thresh zeroed out.
net.thresh <- 2
net <- floor(meanMat(pbr.08$c,pbr.09$c))
net <- as.matrix(net)
net <- net[,na.omit(match(names(z.cmSppC),colnames(net)))] ###match species
net[net < net.thresh] <- 0
rownames(net) <- paste(as.character(geno.09$c),1:nrow(net),sep='_')
# One-mode projections: tree x tree and species x species co-occurrence,
# each rescaled to a 0-1 maximum; weak tree links (< 0.01) removed.
tree.net <- net %*% t(net)
tree.net <- tree.net/max(tree.net)
spp.net <- t(net) %*% net
spp.net <- spp.net/max(spp.net)
tree.net[tree.net < 0.01] <- 0
tree.col <- ch.col
spp.col <- 'red' ###
# Label only the three species with the largest drop in CM (C minus X).
cm.top3 <- names(z.cmSppC)[order(I(z.cmSppC-z.cmSppX))][1:3]
spp.lab <- colnames(net)
spp.lab[spp.lab%in%cm.top3 == FALSE] <- ''
gplot(tree.net,gmode='graph',edge.lwd=tree.net,vertex.col=tree.col)
gplot(spp.net,gmode='graph',edge.lwd=spp.net,vertex.col=spp.col,displaylabels=TRUE,label=spp.lab,edge.col='lightgrey')
# Bipartite web sorted by row/column totals.
plotweb(sortMat(net),text.rot=90,
col.low=tree.col[order(apply(net,1,sum),decreasing=TRUE)],
col.high=spp.col[order(apply(net,2,sum),decreasing=TRUE)],
method='normal',
)
### Final stats 8Feb2016
## 1. Network modularity was
coa[c(2,4),]
## 2. PB increased modularity at the scale of all trees
## randomizing PB leads to community networks that are
## less modular
# Report only species with |z| > 1.5, and their one-tailed p values.
sort(z.cmSppC[abs(z.cmSppC) > 1.5])
sort(z.cmSppX[abs(z.cmSppX) > 1.5])
sort(p.cmSppC[abs(z.cmSppC) > 1.5])
sort(p.cmSppX[abs(z.cmSppX) > 1.5])
## 3. PB impacts tree genotype modularity
anova(glm(I(cm^2) ~ geno * pb,data=cm.data$c),test='F')
anova(glm(I(cm^2) ~ geno * pb,data=cm.data$x),test='F')
|
4b75262821bb20ab7dd0ab507b6fd45947bfb4d4 | 39851ccdf21d02180a5d214ae84082e9c210dd97 | /man/dumpDatabase.Rd | 04ecc6b3fe413575621b675c5afbfce59075878a | [
"MIT"
] | permissive | KWB-R/kwb.db | 61a8b40c56f0d4a3f39ae7d502a9c2971f7fcdc5 | 358aa98df81d5a55188d50825be660d874df4050 | refs/heads/master | 2022-06-22T15:01:24.911765 | 2021-07-07T11:19:45 | 2021-07-07T11:19:45 | 137,391,271 | 0 | 1 | MIT | 2022-06-07T17:00:00 | 2018-06-14T17:55:29 | R | UTF-8 | R | false | true | 1,586 | rd | dumpDatabase.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dumpDatabase.R
\name{dumpDatabase}
\alias{dumpDatabase}
\title{Export Database Tables to CSV Files}
\usage{
dumpDatabase(
db,
pattern = "^tbl",
target_dir = NULL,
create_target_dir = FALSE,
sep = ",",
dec = ".",
as.is = FALSE,
qmethod = "double",
row.names = FALSE,
...
)
}
\arguments{
\item{db}{full path to database or name of ODBC data source}
\item{pattern}{pattern matching names of tables to be exported. Default:
"^tbl", i.e. tables starting with "tbl"}
\item{target_dir}{target directory. By default a new directory is created in
the same directory as mdb resides in. The new directory has the same name
as the database file with dots substituted with underscores}
\item{create_target_dir}{if \code{TRUE}, the target directory \code{target_dir} is
created if it does not exist.}
\item{sep}{passed to \code{\link[utils]{write.table}}}
\item{dec}{passed to \code{\link[utils]{write.table}}}
\item{as.is}{passed to \code{\link[RODBC]{sqlGetResults}}. If \code{TRUE}
(the default is \code{FALSE}), original data types are kept when the table
is read into R. By default the types are converted to appropriate R data
types (e.g. dates are converted from strings to date objects).}
\item{qmethod}{passed to \code{\link[utils]{write.table}}}
\item{row.names}{passed to \code{\link[utils]{write.table}}}
\item{\dots}{further arguments passed to \code{\link[utils]{write.table}}}
}
\description{
Exports all tables of a database of which the names match a given pattern to
csv files.
}
|
2cb7f063cc9a192ee9a4f81a52bf5b5720ae8c05 | f4d86d015400a9972f9b9c38b02913ba4162a50b | /R/limma_diff.R | 3e33fe17fe20399ee94c5f739510519c899973d9 | [] | no_license | aidanmacnamara/epiChoose | 2ba4e6c6348bf763a40edd7e1285098e1bc7a14a | 11cbb78cf063afa767943c8d6e0779f451317550 | refs/heads/master | 2021-12-22T13:12:04.352433 | 2021-12-10T07:33:51 | 2021-12-10T07:33:51 | 83,414,546 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,002 | r | limma_diff.R | #' @title What are the features that are separating 2 groups?
#' @description Fit a limma linear model contrasting two groups of samples
#' ("Cell Lines" vs. "Primary") and return the features that best separate
#' them, ranked by moderated F-statistic.
#' @details Values are log-transformed before fitting; -Inf entries produced
#' by log(0) are reset to 0 so that lmFit() receives finite data.
#' @param dat numeric matrix (e.g. PCA/expression matrix); samples in rows,
#'   features in columns before the internal transposition
#' @param groups list of two vectors of sample labels: groups[[1]] labels the
#'   "Cell Lines" samples, groups[[2]] the "Primary" samples
#' @param thresh maximum number of top-ranked features to return
#' @return data.frame of the top \code{thresh} features as returned by
#'   \code{limma::topTableF}
limma_diff <- function(dat, groups, thresh=5000) {
  # library() stops immediately if limma is missing, unlike require(),
  # which only warns and would let the code fail later with a less
  # informative error.
  library(limma)
  # Log-transform and put samples into columns; log(0) yields -Inf, which
  # is reset to 0 so the linear model fit does not receive infinite values.
  dat <- t(log(dat))
  dat[is.infinite(dat)] <- 0
  c_idx <- groups[[1]]
  p_idx <- groups[[2]]
  colnames(dat) <- c(paste("Cell Lines", c_idx, sep="_"), paste("Primary", p_idx, sep="_"))
  # One row per sample: which group does each column of `dat` belong to?
  samples <- data.frame(
    Group = factor(c(rep("Cell Lines", length(c_idx)), rep("Primary", length(p_idx))))
  )
  rownames(samples) <- colnames(dat)
  # Cell-means design (no intercept), one coefficient per group.
  design <- model.matrix(~0 + samples$Group)
  colnames(design) <- c("Cell_Lines", "Primary")
  # Fit the model and test the Cell_Lines - Primary contrast with
  # empirical-Bayes moderation of the variances.
  fit <- lmFit(dat, design)
  contr <- makeContrasts(diff = Cell_Lines - Primary, levels = design)
  fits <- contrasts.fit(fit, contr)
  ebayes_fits <- eBayes(fits)
  # Top `thresh` features ranked by moderated F-statistic.
  res <- topTableF(ebayes_fits, number = thresh)
  # return(as.numeric(rownames(res)))
  return(res)
}
|
2bbf99dadfa28a7d5d18710389ef63dc32a312ad | 9f0447180a6b23d51f00c2ffff4a61baf01ea82d | /man/ParamsInfo.Rd | 34146da1822c6313a48d1fcb84f8bf52b128ba5d | [] | no_license | aleksandra197/airPL | 7653dfd651e4b78637a38564d773f6844030c31e | 76e60f935f1bfc1e532a39238c5b69549bfc7b23 | refs/heads/master | 2023-02-20T23:16:50.573587 | 2021-01-25T21:36:38 | 2021-01-25T21:36:38 | 332,884,290 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 275 | rd | ParamsInfo.Rd | \name{ParamsInfo}
\alias{ParamsInfo}
\title{ParamsInfo}
\usage{
ParamsInfo()
}
\description{
Displays information about the sensors available at the selected air pollution measuring station.
The information returned by this function is necessary to use the GetData() function.
}
\examples{
ParamsInfo
}
|
e583240d86a81b518388de52cb6caf2279e93d85 | b3a191d2b4c7e3011375135b87f3dd876cf05111 | /man/ConvertModelList.Rd | e20aaf702ef81a0c4f0af8e92186089161d47b5e | [] | no_license | tdhock/PeakSegJoint | 3d712782cf601f4e0478653ebab7e4eea5684d04 | c80a250b00f6e6a60a0c4dcb3d02e98c9d95cebd | refs/heads/master | 2023-04-27T05:14:53.542606 | 2023-04-24T23:53:31 | 2023-04-24T23:53:31 | 33,544,447 | 6 | 3 | null | 2020-02-14T19:26:32 | 2015-04-07T13:22:43 | R | UTF-8 | R | false | false | 778 | rd | ConvertModelList.Rd | \name{ConvertModelList}
\alias{ConvertModelList}
\title{ConvertModelList}
\description{Convert a model list from the non-repetitive format that we get
from the C code to the repetitive format that is more useful for
plotting.}
\usage{ConvertModelList(model.list)}
\arguments{
\item{model.list}{List from PeakSegJointHeuristic(...) or PeakSegJointSeveral(...).}
}
\value{List of data.frames: segments has 1 row for each segment mean,
sample, and model size (peaks, sample.id, sample.group,
chromStart, chromEnd, mean); peaks is the same kind of data.frame
as segments, but with only the second/peak segments; loss has one
row for each model size; modelSelection has one row for each model
size that can be selected, see exactModelSelection.}
\author{Toby Dylan Hocking}
|
1940f9438850fb66926a9147b10d36270415e266 | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/fugeR/R/fugeR.load.R | 18278bdbd9f00101e3f8138822a55e75d79fdbde | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,335 | r | fugeR.load.R | ######################################################################################
# fugeR.load
#
#' Load a fuzzy system.
#'
#' Load a fuzzy system saved into a file with \code{fugeR.save}
#'
#' @param file [\"\"] A character string naming a file.
#'
#' @examples
#' ##
#' ##
#' \dontrun{
#' fis <- fugeR.run (
#' In,
#' Out,
#' generation=100,
#' population=200,
#' elitism=40,
#' verbose=TRUE,
#' threshold=0.5,
#' sensiW=1.0,
#' speciW=1.0,
#' accuW=0.0,
#' rmseW=1.0,
#' maxRules=10,
#' maxVarPerRule=2,
#' labelsMf=2
#' )
#'
#' fugeR.save( fis, file=\'./myFis.R\' )
#'
#' savedFis <- fugeR.load( file=\'./myFis.R\' )
#' }
#'
#' @seealso \code{\link{fugeR.save}}
#'
#' @author Alexandre Bujard, HEIG-VD, Jul'2012
#'
#' @export
######################################################################################
fugeR.load <-
  function(file="") {
    # Validate the argument: an empty string cannot name a file.
    if (file == "") {
      stop("File name can't be empty")
    }
    # Fail early with an explicit message when the file is absent, instead
    # of surfacing dget()'s more cryptic connection error.
    if (!file.exists(file)) {
      stop("Cannot load fuzzy system: file '", file, "' does not exist")
    }
    # dget() re-parses the deparsed representation written by fugeR.save()
    # (which uses dput()) and returns the reconstructed object.
    dget(file)
  }
|
728f86683d60dca9245a0919ce6318673da16189 | fa1a17fca2d7025c4815e3fc6fd7ef6e32f251b2 | /Script_final_PME.R | 993e00c8953adbcf9b9657a61c53627f8f545440 | [] | no_license | JeromeLaurent/R_Hg_PME | 8d7acde2a992d7b1776d6f71aa0e9d3111f160e5 | cac2d62d7b58d9a7b91b34b6fe88c12ea7b29aae | refs/heads/master | 2021-01-23T09:29:40.526565 | 2014-07-18T10:45:23 | 2014-07-18T10:45:23 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 107,929 | r | Script_final_PME.R | ############################
##### SCRIPT FINAL PME #####
############################
rm(list = ls () ) # nettoyage memoire de R
require(MASS)
require(ggplot2)
require(plyr)
require(dplyr)
require(scales)
require(reshape) # pr fction melt, afin chgt format wide -> long
require(reshape2)
require(FactoMineR)
require(gridExtra)
# require(gtable) # alternative pour arranger graphes ensembles
require(missMDA)
require(agricolae)
# require(xtable)
require(gtools)
require(cluster)
require(ade4)
require(FactoClass)
source("Scripts/functions.R")
source("Scripts/data_cleaning.R")
# Reprendre le script et remplacer dat$ind par dat[,'ind'] ?
######################0000000000000########################
#### SOMMAIRE ####
######################0000000000000########################
#o# scatterplot poids fonction taille + projection marginale des distributions
#### pds fction de longueur
scatter <- ggplot(BDD_PME, aes(x = ls_mm, y = pds_g)) +
geom_point(aes(color = Regime_alter, shape = Regime_principal)) +
theme(legend.position = c(1, 1), legend.justification = c(1, 1)) +
scale_x_continuous(limits = c(0, 200)) +
scale_y_continuous(limits = c(0, 100)) +# 2 Hoplias aimara de presque 2 kg qui entrainent le tassement de la majorite du jeu de donnees
guides(shape = FALSE) # Pas de légende pour les formes
#marginal density of x - plot on top
plot_top <- ggplot(BDD_PME, aes(ls_mm, fill=Regime_alter)) +
geom_density(alpha = .5) +
scale_x_continuous(limits = c(0, 200)) + # pour apercu plus detaille de la distrib de la majorite des poissons
theme(legend.position = "none")
#marginal density of y - plot on the right
plot_right <- ggplot(BDD_PME, aes(pds_g, fill=Regime_alter)) +
geom_density(alpha = .5) +
scale_x_continuous(limits = c(0, 50)) + # limites d'apres divers essais. poissons tres legers
coord_flip() +
theme(legend.position = "none")
#arrange the plots together, with appropriate height and width for each row and column
grid.arrange(plot_top, empty, scatter, plot_right, ncol = 2, nrow = 2, widths = c(4, 1), heights = c(1, 4))
# two dimensional density plot ggplot2
dens <- ggplot(BDD, aes(x = ls_mm, y = pds_g)) +
#geom_point() +
stat_density2d(aes(fill=..density..), geom="raster", contour=FALSE, h=c(5,2.5)) +
expand_limits(x = 0, y = 0) +
scale_x_continuous(limits = c(0, 150), expand = c(0, 0)) +
scale_y_continuous(limits = c(0, 50), expand = c(0, 0)) + # 2 Hoplias aimara de presque 2 kg qui entrainent le tassement de la majorite du jeu de donnees
scale_fill_continuous(low = "white", high = "black") +
labs( x = "Longueur standard (mm)", y = "Masse (g)") +
theme_bw()
######################0000000000000########################
#o# Répartition des régimes au niveau des trois stations les plus étudiées (Crique Chien, Crique Nouvelle-france et 3 Sauts)
### Repartition des regimes pr chaque groupe de stations pour BDD_PME generale
p1 <- ggplot(BDD_PME, aes(Groupe_station)) +
geom_bar(aes(fill = Regime_alter), position = "fill")
# repartition des regimes pr chaque groupe de stations (sans prendre en compte le nb d'individus)
p2 <- ggplot(BDD_PME, aes(x = Groupe_station)) +
geom_bar(aes(fill = Regime_alter))
# repartition des regimes pr chaque groupe de stations (en prenant en compte le nb d'individus)
grid.arrange(p1, p2, ncol = 1, nrow = 2, widths = c(1, 1), heights = c(1, 1))
### Repartition des regimes pr chaque groupe de stations pour subset BDD_PME : uniquement les échantillons ayant des éléments traces dosés
p11 <- ggplot(sub_BDD_PME, aes(Groupe_station)) +
geom_bar(aes(fill = Regime_alter), position = "fill")
# repartition des regimes pr chaque groupe de stations (sans prendre en compte le nb d'individus)
p22 <- ggplot(sub_BDD_PME, aes(x = Groupe_station)) +
geom_bar(aes(fill = Regime_alter))
# repartition des regimes pr chaque groupe de stations (en prenant en compte le nb d'individus)
grid.arrange(p11, p22, ncol = 1, nrow = 2, widths = c(1, 1), heights = c(1, 1))
######################0000000000000########################
#o# Répartition des régimes au niveau des toutes les stations
### Repartition des regimes des échantillons ayant du Hg dosés dans les muscles sur chaque station de l'ensemble de la BDD
p10 <- ggplot(BDD.sansNA, aes(Code_Station)) +
geom_bar(aes(fill = Regime_alter), position = "fill") +
theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1)) # graduations de l'axe x écrites verticalement
# repartition des regimes sur chaque station (sans prendre en compte le nb d'individus)
p20 <- ggplot(BDD.sansNA, aes(x = Code_Station)) +
geom_bar(aes(fill = Regime_alter)) +
theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))
# repartition des regimes sur chaque station (en prenant en compte le nb d'individus)
grid.arrange(p10, p20, ncol = 1, nrow = 2, widths = c(1, 1), heights = c(1, 1))
######################0000000000000########################
#o# Répartition des régimes en fonction des pressions anthropiques exercées sur les stations
### Repartition des regimes des échantillons ayant du Hg dosés dans les muscles sur chaque station de l'ensemble de la BDD
Bd <- BDD.sansNA
levels(Bd$Regime_alter) <- sub("^Carnivore_Charognard$", "Carnivore", levels(Bd$Regime_alter))
levels(Bd$Regime_alter) <- sub("^Carnivore_Insectivore$", "Carnivore", levels(Bd$Regime_alter))
levels(Bd$Regime_alter) <- sub("^Carnivore_Scaliphage$", "Carnivore", levels(Bd$Regime_alter))
levels(Bd$Regime_alter) <- sub("^Omnivore_Herbivore$", "Omnivore", levels(Bd$Regime_alter))
levels(Bd$Regime_alter) <- sub("^Omnivore_Piscivore$", "Omnivore", levels(Bd$Regime_alter))
levels(Bd$Regime_alter) <- sub("^Herbivore$", "Herbivore_Phyllophage", levels(Bd$Regime_alter))
p10 <- ggplot(Bd, aes(Pression_anthro2)) +
geom_bar(aes(fill = Regime_alter), position = "fill") +
theme_bw() +
scale_x_discrete(limits = c( "Reference_Trois_Sauts", "Reference", "Agriculture", "Deforestation", "Piste", "Orpaillage_ancien", "Orpaillage_illegal", "Barrage"),
labels = c("Trois Sauts", "Référence", "Agriculture", "Déforestation", "Piste", "Orpaillage \nancien", "Orpaillage \nillégal récent", "Barrage")) +
labs( y = "Proportion",
x = "Pression anthropique") +
scale_fill_manual(name = "Régime trophique",
labels = c("Carnivore Piscivore", "Carnivore (autres)", "Carnivore Invertivore", "Omnivore (autres)", "Omnivore Invertivore", "Détritivore", "Herbivore Périphytophage", "Herbivore Phyllophage", "Informations manquantes"),
values = colo8,
guide = guide_legend(reverse=TRUE))
pdf("Graph/Pression_anthropique/Repartition-regime_pression-anthropique.pdf", width = 9.5, height = 5) # la fction pdf enregistre directement ds le dossier et sous format pdf
print(p10)
dev.off()
# repartition des regimes sur chaque station (sans prendre en compte le nb d'individus)
p20 <- ggplot(BDD.sansNA, aes(x = Pression_anthro2)) +
geom_bar(aes(fill = Regime_alter)) +
theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1)) # graduations de l'axe x écrites verticalement
# repartition des regimes sur chaque station (en prenant en compte le nb d'individus)
grid.arrange(p10, p20, ncol = 1, nrow = 2, widths = c(1, 1), heights = c(1, 1))
######################0000000000000########################
#o# Ensemble de la BDD
### Impact des pressions anthropiques
BD <- select(BDD.sansNA, conc_Hg_muscle_ppm, Pression_anthro2, Regime_principal, Regime_alter, Genre) # Subset plus simple a manipuler
BD <- BD[BD$Pression_anthro2 != "NA",]
means.pression <- aggregate(conc_Hg_muscle_ppm ~ Pression_anthro2, BD, mean)
means.pression$conc_Hg_muscle_ppm <- round(means.pression$conc_Hg_muscle_ppm, digits = 2)
kruskal.test(conc_Hg_muscle_ppm ~ Pression_anthro2, data = BD) # Il existe des differences significatives
comparison <- kruskal(BD$conc_Hg_muscle_ppm, BD$Pression_anthro2, alpha = 0.05, p.adj = "holm")
posthoc <- comparison[['groups']]
p0 <- ggplot(BD, aes(x = Pression_anthro2 , y = conc_Hg_muscle_ppm)) +
geom_boxplot() +
stat_summary(fun.y = mean, colour = "blue", geom = "point",
shape = 18, size = 3,show_guide = FALSE) #+
#geom_text(data = means.pression, aes(label = conc_Hg_muscle_ppm, y = conc_Hg_muscle_ppm + 0.08), color = "blue")
lettpos <- function(BD) boxplot(BD$conc_Hg_muscle_ppm, plot = FALSE)$stats[5,] # determination d'un emplacement > a la "moustache" du boxplot
test <- ddply(BD, .(Pression_anthro2), lettpos) # Obtention de cette information pour chaque facteur (ici, Date)
test_f <- merge(test, posthoc, by.x = "Pression_anthro2", by.y = "trt") # Les 2 tableaux sont reunis par rapport aux valeurs row.names
colnames(test_f)[2] <- "upper"
colnames(test_f)[4] <- "signif"
n_fun <- function(x){return(data.frame(y = -0.1, label = paste0("n = ", length(x))))}
test_f$signif <- as.character(test_f$signif) # au cas ou, pour que l'affichage se produise correctement. Pas forcement utile.
p0 <- p0 + geom_text(aes(Pression_anthro2, upper + 0.1, label = signif), size = 10, data = test_f, vjust = -2, color = "red") +
stat_summary(fun.data = n_fun, geom = "text") +
scale_x_discrete(limits = c( "Reference_Trois_Sauts", "Reference", "Agriculture", "Deforestation", "Piste", "Orpaillage_ancien", "Orpaillage_illegal", "Barrage"),
labels = c("Trois Sauts", "Référence", "Agriculture", "Déforestation", "Piste", "Orpaillage \nancien", "Orpaillage \nillégal récent", "Barrage")) +
labs( y = "[Hg] dans les muscles de poissons, en mg/kg de poids sec",
x = "Pression anthropique", title = "[Hg] dans les muscles de poissons selon les pressions anthropiques exercées sur les stations") +
geom_hline(aes(yintercept = 2.5), color = "red") + theme_bw()
pdf("Graph/Pression_anthropique/Hg-muscle_pression-anthropique.pdf", width = 9, height = 6) # la fction pdf enregistre directement ds le dossier et sous format pdf
print(p0)
dev.off()
setEPS(horizontal = FALSE, onefile = FALSE, paper = "special")
postscript("Graph/Pression_anthropique/Hg-muscle_pression-anthropique.ps", width = 12, height = 9) # la fction postscript enregistre directement ds le dossier et sous format encapsulated posrtscript
print(p0)
dev.off()
win.metafile("Graph/Pression_anthropique/Hg-muscle_pression-anthropique.wmf", width = 12, height = 9) # la fction win.metafile enregistre directement ds le dossier et sous format wmf
# Légends lisibles mais points déformés
print(p0)
dev.off()
png("Graph/Pression_anthropique/Hg-muscle_pression-anthropique.png", width = 12, height = 9, units = 'in', res = 300) # la fction png enregistre directement ds le dossier et sous format png
# Graphiques conservés mais text flou
print(p0)
dev.off()
tiff("Graph/Pression_anthropique/Hg-muscle_pression-anthropique.tif", width = 12, height = 9, units = 'in', res = 300) # la fction png enregistre directement ds le dossier et sous format png
print(p0)
dev.off()
jpeg("Graph/Pression_anthropique/Hg-muscle_pression-anthropique2.jpg", width = 12, height = 9, units = 'in', res = 300) # la fction png enregistre directement ds le dossier et sous format png
print(p0)
dev.off()
## Détail des moyennes de Hg par station pr chaque pression anthropique
means <- aggregate(conc_Hg_muscle_ppm ~ Code_Station + Pression_anthro2, BDD.sansNA, mean)
means$conc_Hg_muscle_ppm <- round(means$conc_Hg_muscle_ppm, digits = 3)
ggplot(BDD.sansNA, aes(x = Code_Station , y = conc_Hg_muscle_ppm)) +
geom_boxplot() +
stat_summary(fun.y = mean, colour = "darkred", geom = "point",
shape = 18, size = 3,show_guide = FALSE) +
# geom_text(data = means, aes(label = conc_Hg_muscle_ppm, y = conc_Hg_muscle_ppm + 0.08), color = "red") +
facet_wrap(~ Pression_anthro2, scales = "free_x") +
theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))
# Modification du nom des facets en remplaçant les niveaux du facteur par des chiffres
means$Pression_anthro2 <- mapvalues(means$Pression_anthro2, from = c("Reference_Trois_Sauts", "Reference", "Agriculture", "Deforestation", "Piste", "Orpaillage_ancien", "Orpaillage_illegal", "Barrage"), to = c(1:8))
p <- ggplot(means, aes(x = conc_Hg_muscle_ppm, y = Code_Station)) +
geom_segment(aes(yend = Code_Station, colour = Pression_anthro2), xend=0, show_guide = FALSE) +
geom_point(size=3, aes(colour = Pression_anthro2), show_guide = FALSE) +
theme_bw() +
labs( x = "[Hg] moyenne dans les muscles de poissons (mg/kg ps)", y = "Nom des stations subissant une même pression anthropique") +
theme(panel.grid.major.y = element_blank(),
axis.text.y = element_text(size = 8)) +
facet_grid(Pression_anthro2 ~ ., scales="free_y", space = "free")
# Dans un sens plus classique
ggplot(means, aes(y = conc_Hg_muscle_ppm, x = Code_Station)) +
geom_segment(aes(xend = Code_Station), yend=0, colour="grey50") +
geom_point(size=3, aes(colour = Pression_anthro2), show_guide = FALSE) +
theme_bw() +
facet_grid(. ~ Pression_anthro2, scales="free_x", space = "free")
pdf("Graph/Pression_anthropique/Moyenne_pression-anthropique.pdf", width = 9, height = 6) # la fction pdf enregistre directement ds le dossier et sous format pdf
print(p)
dev.off()
tapply(BDD.sansNA, "Pression_anthro2")
## Est ce que des espèces communes existent entre les stations soumises à déforestation et orpaillage ?
def <- BDD.sansNA[BDD.sansNA$Pression_anthro2 %in% "Deforestation", ]
orp <- BDD.sansNA[BDD.sansNA$Pression_anthro2 %in% "Orpaillage_illegal" |
BDD.sansNA$Pression_anthro2 %in% "Orpaillage_ancien", ]
sp <- intersect(def$Code, orp$Code)
deforp <- BDD.sansNA[BDD.sansNA$Pression_anthro2 %in% "Orpaillage_illegal" |
BDD.sansNA$Pression_anthro2 %in% "Orpaillage_ancien" |
BDD.sansNA$Pression_anthro2 %in% "Deforestation", ]
ggplot(deforp, aes(x = Code, y = conc_Hg_muscle_ppm, color = Pression_anthro2)) +
geom_boxplot() +
scale_x_discrete(limits = sp)
ftable(xtabs(~ Pression_anthro2 + Code, data = deforp))
#0000000000000#
### Impact des pressions anthropiques sur les régimes principaux
BD.carn <- BD[BD$Regime_principal %in% "Carnivore",]
BD.omni <- BD[BD$Regime_principal %in% "Omnivore",]
# BD.herbi <- BD[BD$Regime_principal %in% "Herbivore",] # Trop peu d'échantillons, qui plus est inégalement répartis
# BD.detri <- BD[BD$Regime_principal %in% "Detritivore",] # Uniquement 16 indiv donc pas assez de données
# Carnivores
means.pression <- aggregate(conc_Hg_muscle_ppm ~ Pression_anthro2, BD.carn, mean)
means.pression$conc_Hg_muscle_ppm <- round(means.pression$conc_Hg_muscle_ppm, digits = 2)
kruskal.test(conc_Hg_muscle_ppm ~ Pression_anthro2, data = BD.carn) # Il existe des differences significatives
comparison <- kruskal(BD.carn$conc_Hg_muscle_ppm, BD.carn$Pression_anthro2, alpha = 0.05, p.adj = "holm")
posthoc <- comparison[['groups']]
posthoc$trt <- gsub(" ","",posthoc$trt) # Tous les espaces apres le nom doivent etre supprimes pour pouvoir merge par la suite
p0 <- ggplot(BD.carn, aes(x = Pression_anthro2 , y = conc_Hg_muscle_ppm)) +
geom_boxplot() +
stat_summary(fun.y = mean, colour = "darkred", geom = "point",
shape = 18, size = 3,show_guide = FALSE) +
geom_text(data = means.pression, aes(label = conc_Hg_muscle_ppm, y = conc_Hg_muscle_ppm + 0.08), color = "blue")
lettpos <- function(BD.carn) boxplot(BD.carn$conc_Hg_muscle_ppm, plot = FALSE)$stats[5,] # determination d'un emplacement > a la "moustache" du boxplot
test <- ddply(BD.carn, .(Pression_anthro2), lettpos) # Obtention de cette information pour chaque facteur (ici, Date)
test_f <- merge(test, posthoc, by.x = "Pression_anthro2", by.y = "trt") # Les 2 tableaux sont reunis par rapport aux valeurs row.names
colnames(test_f)[2] <- "upper"
colnames(test_f)[4] <- "signif"
test_f$signif <- as.character(test_f$signif) # au cas ou, pour que l'affichage se produise correctement. Pas forcement utile.
p0 <- p0 + geom_text(aes(Pression_anthro2, upper + 0.1, label = signif), size = 10, data = test_f, vjust = -2, color = "red") +
scale_x_discrete(limits = c( "Reference_Trois_Sauts", "Reference", "Agriculture", "Deforestation", "Piste", "Orpaillage_ancien", "Orpaillage_illegal", "Barrage"),
labels = c("Trois Sauts", "Référence", "Agriculture", "Déforestation", "Piste", "Orpaillage ancien", "Orpaillage illégal récent", "Barrage")) +
labs( y = "[Hg] dans les muscles de poissons, en mg/kg de poids sec",
x = "Pression anthropique", title = "[Hg] dans les muscles de carnivores selon les pressions anthropiques exercées sur les stations") +
geom_hline(aes(yintercept = 2.5), color = "red")
pdf("Graph/Pression_anthropique/Hg-muscle_pression-anthropique_carnivores.pdf", width = 12, height = 9) # la fction pdf enregistre directement ds le dossier et sous format pdf
print(p0)
dev.off()
# Omnivores
means.pression <- aggregate(conc_Hg_muscle_ppm ~ Pression_anthro2, BD.omni, mean)
means.pression$conc_Hg_muscle_ppm <- round(means.pression$conc_Hg_muscle_ppm, digits = 2)
kruskal.test(conc_Hg_muscle_ppm ~ Pression_anthro2, data = BD.omni) # Il existe des differences significatives
comparison <- kruskal(BD.omni$conc_Hg_muscle_ppm, BD.omni$Pression_anthro2, alpha = 0.05, p.adj = "holm")
posthoc <- comparison[['groups']]
posthoc$trt <- gsub(" ","",posthoc$trt) # Tous les espaces apres le nom doivent etre supprimes pour pouvoir merge par la suite
p0 <- ggplot(BD.omni, aes(x = Pression_anthro2 , y = conc_Hg_muscle_ppm)) +
geom_boxplot() +
stat_summary(fun.y = mean, colour = "darkred", geom = "point",
shape = 18, size = 3,show_guide = FALSE) +
geom_text(data = means.pression, aes(label = conc_Hg_muscle_ppm, y = conc_Hg_muscle_ppm + 0.08), color = "blue")
lettpos <- function(BD.omni) boxplot(BD.omni$conc_Hg_muscle_ppm, plot = FALSE)$stats[5,] # determination d'un emplacement > a la "moustache" du boxplot
test <- ddply(BD.omni, .(Pression_anthro2), lettpos) # Obtention de cette information pour chaque facteur (ici, Date)
test_f <- merge(test, posthoc, by.x = "Pression_anthro2", by.y = "trt") # Les 2 tableaux sont reunis par rapport aux valeurs row.names
colnames(test_f)[2] <- "upper"
colnames(test_f)[4] <- "signif"
test_f$signif <- as.character(test_f$signif) # au cas ou, pour que l'affichage se produise correctement. Pas forcement utile.
p0 <- p0 + geom_text(aes(Pression_anthro2, upper + 0.1, label = signif), size = 10, data = test_f, vjust = -2, color = "red") +
scale_x_discrete(limits = c( "Reference_Trois_Sauts", "Reference", "Agriculture", "Deforestation", "Piste", "Orpaillage_ancien", "Orpaillage_illegal", "Barrage"),
labels = c("Trois Sauts", "Référence", "Agriculture", "Déforestation", "Piste", "Orpaillage ancien", "Orpaillage illégal récent", "Barrage")) +
labs( y = "[Hg] dans les muscles de poissons, en mg/kg de poids sec",
x = "Pression anthropique", title = "[Hg] dans les muscles d'omnivores selon les pressions anthropiques exercées sur les stations") +
geom_hline(aes(yintercept = 2.5), color = "red")
pdf("Graph/Pression_anthropique/Hg-muscle_pression-anthropique_omnivores.pdf", width = 12, height = 9) # la fction pdf enregistre directement ds le dossier et sous format pdf
print(p0)
dev.off()
#0000000000000#
### Impact des pressions anthropiques sur les régimes détaillés
BD.omn.inver <- BD[BD$Regime_alter %in% "Omnivore_Invertivore",]
BD.car.inver <- BD[BD$Regime_alter %in% "Carnivore_Invertivore",]
BD.car.pisc <- BD[BD$Regime_alter %in% "Carnivore_Piscivore",]
# BD.car.insec <- BD[BD$Regime_alter %in% "Carnivore_Insectivore",] # pas de données partout
# Omnivores Invertivores
means.pression <- aggregate(conc_Hg_muscle_ppm ~ Pression_anthro2, BD.omn.inver, mean)
means.pression$conc_Hg_muscle_ppm <- round(means.pression$conc_Hg_muscle_ppm, digits = 2)
kruskal.test(conc_Hg_muscle_ppm ~ Pression_anthro2, data = BD.omn.inver) # Il existe des differences significatives
comparison <- kruskal(BD.omn.inver$conc_Hg_muscle_ppm, BD.omn.inver$Pression_anthro2, alpha = 0.05, p.adj = "holm")
posthoc <- comparison[['groups']]
posthoc$trt <- gsub(" ","",posthoc$trt) # Tous les espaces apres le nom doivent etre supprimes pour pouvoir merge par la suite
p0 <- ggplot(BD.omn.inver, aes(x = Pression_anthro2 , y = conc_Hg_muscle_ppm)) +
geom_boxplot() +
stat_summary(fun.y = mean, colour = "blue", geom = "point",
shape = 18, size = 3,show_guide = FALSE) #+
#geom_text(data = means.pression, aes(label = conc_Hg_muscle_ppm, y = conc_Hg_muscle_ppm + 0.08), color = "blue")
lettpos <- function(BD.omn.inver) boxplot(BD.omn.inver$conc_Hg_muscle_ppm, plot = FALSE)$stats[5,] # determination d'un emplacement > a la "moustache" du boxplot
test <- ddply(BD.omn.inver, .(Pression_anthro2), lettpos) # Obtention de cette information pour chaque facteur (ici, Date)
test_f <- merge(test, posthoc, by.x = "Pression_anthro2", by.y = "trt") # Les 2 tableaux sont reunis par rapport aux valeurs row.names
colnames(test_f)[2] <- "upper"
colnames(test_f)[4] <- "signif"
n_fun <- function(x){return(data.frame(y = 0, label = paste0("n = ", length(x))))}
test_f$signif <- as.character(test_f$signif) # au cas ou, pour que l'affichage se produise correctement. Pas forcement utile.
p0 <- p0 + geom_text(aes(Pression_anthro2, upper + 0.1, label = signif), size = 10, data = test_f, vjust = -2, color = "red") +
stat_summary(fun.data = n_fun, geom = "text") +
scale_x_discrete(limits = c( "Reference_Trois_Sauts", "Reference", "Agriculture", "Deforestation", "Piste", "Orpaillage_ancien", "Orpaillage_illegal", "Barrage"),
labels = c("Trois Sauts", "Référence", "Agriculture", "Déforestation", "Piste", "Orpaillage \nancien", "Orpaillage \nillégal récent", "Barrage")) +
labs( y = "[Hg]muscle (mg/kg ps)",
x = "Pression anthropique") +
geom_hline(aes(yintercept = 2.5), color = "red") + theme_bw()
pdf("Graph/Pression_anthropique/Hg-muscle_pression-anthropique_omnivores-invertivores.pdf", width = 8, height = 4) # la fction pdf enregistre directement ds le dossier et sous format pdf
print(p0)
dev.off()
# Carnivores Invertivores
means.pression <- aggregate(conc_Hg_muscle_ppm ~ Pression_anthro2, BD.car.inver, mean)
means.pression$conc_Hg_muscle_ppm <- round(means.pression$conc_Hg_muscle_ppm, digits = 2)
kruskal.test(conc_Hg_muscle_ppm ~ Pression_anthro2, data = BD.car.inver) # Il existe des differences significatives
comparison <- kruskal(BD.car.inver$conc_Hg_muscle_ppm, BD.car.inver$Pression_anthro2, alpha = 0.05, p.adj = "holm")
posthoc <- comparison[['groups']]
posthoc$trt <- gsub(" ","",posthoc$trt) # Tous les espaces apres le nom doivent etre supprimes pour pouvoir merge par la suite
p0 <- ggplot(BD.car.inver, aes(x = Pression_anthro2 , y = conc_Hg_muscle_ppm)) +
geom_boxplot() +
stat_summary(fun.y = mean, colour = "blue", geom = "point",
shape = 18, size = 3,show_guide = FALSE) #+
#geom_text(data = means.pression, aes(label = conc_Hg_muscle_ppm, y = conc_Hg_muscle_ppm + 0.08), color = "blue")
lettpos <- function(BD.car.inver) boxplot(BD.car.inver$conc_Hg_muscle_ppm, plot = FALSE)$stats[5,] # determination d'un emplacement > a la "moustache" du boxplot
test <- ddply(BD.car.inver, .(Pression_anthro2), lettpos) # Obtention de cette information pour chaque facteur (ici, Date)
test_f <- merge(test, posthoc, by.x = "Pression_anthro2", by.y = "trt") # Les 2 tableaux sont reunis par rapport aux valeurs row.names
colnames(test_f)[2] <- "upper"
colnames(test_f)[4] <- "signif"
n_fun <- function(x){return(data.frame(y = 0, label = paste0("n = ", length(x))))}
test_f$signif <- as.character(test_f$signif) # au cas ou, pour que l'affichage se produise correctement. Pas forcement utile.
p0 <- p0 + geom_text(aes(Pression_anthro2, upper + 0.1, label = signif), size = 10, data = test_f, vjust = -2, color = "red") +
stat_summary(fun.data = n_fun, geom = "text") +
scale_x_discrete(limits = c( "Reference_Trois_Sauts", "Reference", "Agriculture", "Deforestation", "Piste", "Orpaillage_ancien", "Orpaillage_illegal", "Barrage"),
labels = c("Trois Sauts", "Référence", "Agriculture", "Déforestation", "Piste", "Orpaillage \nancien", "Orpaillage \nillégal récent", "Barrage")) +
labs( y = "[Hg]muscle (mg/kg ps)",
x = "Pression anthropique") +
geom_hline(aes(yintercept = 2.5), color = "red") + theme_bw()
pdf("Graph/Pression_anthropique/Hg-muscle_pression-anthropique_carnivores-invertivores.pdf", width = 8, height = 4) # la fction pdf enregistre directement ds le dossier et sous format pdf
print(p0)
dev.off()
# Piscivorous carnivores: same pressure-gradient analysis (Kruskal-Wallis +
# Holm post-hoc letters on muscle [Hg] boxplots), saved as PDF.
means.pression <- aggregate(conc_Hg_muscle_ppm ~ Pression_anthro2, BD.car.pisc, mean)
means.pression$conc_Hg_muscle_ppm <- round(means.pression$conc_Hg_muscle_ppm, digits = 2)
kruskal.test(conc_Hg_muscle_ppm ~ Pression_anthro2, data = BD.car.pisc) # significant differences exist
comparison <- kruskal(BD.car.pisc$conc_Hg_muscle_ppm, BD.car.pisc$Pression_anthro2, alpha = 0.05, p.adj = "holm")
posthoc <- comparison[['groups']]
posthoc$trt <- gsub(" ","",posthoc$trt) # strip spaces after the treatment names so the later merge can match
p0 <- ggplot(BD.car.pisc, aes(x = Pression_anthro2 , y = conc_Hg_muscle_ppm)) +
geom_boxplot() +
geom_hline(aes(yintercept = 2.5), color = "red") +
stat_summary(fun.y = mean, colour = "blue", geom = "point",
shape = 18, size = 3,show_guide = FALSE) #+
#geom_text(data = means.pression, aes(label = conc_Hg_muscle_ppm, y = conc_Hg_muscle_ppm + 0.15), color = "blue")
lettpos <- function(BD.car.pisc) boxplot(BD.car.pisc$conc_Hg_muscle_ppm, plot = FALSE)$stats[5,] # y position just above the boxplot upper whisker (stats[5,])
test <- ddply(BD.car.pisc, .(Pression_anthro2), lettpos) # one whisker position per factor level
test_f <- merge(test, posthoc, by.x = "Pression_anthro2", by.y = "trt") # join whisker positions and post-hoc letters on the pressure level
colnames(test_f)[2] <- "upper"
colnames(test_f)[4] <- "signif"
n_fun <- function(x){return(data.frame(y = 0, label = paste0("n = ", length(x))))} # sample-size label at y = 0
test_f$signif <- as.character(test_f$signif) # just in case, so the letters render correctly; possibly unnecessary
p0 <- p0 + geom_text(aes(Pression_anthro2, upper - 0.6, label = signif), size = 10, data = test_f, vjust = -2, color = "red") +
stat_summary(fun.data = n_fun, geom = "text") +
scale_x_discrete(limits = c("Reference_Trois_Sauts", "Reference", "Agriculture", "Deforestation", "Piste", "Orpaillage_ancien", "Orpaillage_illegal", "Barrage"),
labels = c("Trois Sauts", "Référence", "Agriculture", "Déforestation", "Piste", "Orpaillage \nancien", "Orpaillage \nillégal récent", "Barrage")) +
labs( y = "[Hg]muscle (mg/kg ps)",
x = "Pression anthropique") +
theme_bw()
pdf("Graph/Pression_anthropique/Hg-muscle_pression-anthropique_carnivores-piscivores.pdf", width = 8, height = 4) # pdf() writes the figure straight into the folder
print(p0)
dev.off()
#0000000000000#
### Anthropogenic-pressure effect on muscle [Hg] within the genus Moenkhausia
# NOTE(review): the filter string "Mohenkausia" differs from the spelling
# "Moenkhausia" used in the plot title/filename below — confirm against the
# data; if the data use "Moenkhausia", this subset is empty.
BD.moen <- BD[BD$Genre %in% "Mohenkausia",]
means.pression <- aggregate(conc_Hg_muscle_ppm ~ Pression_anthro2, BD.moen, mean)
means.pression$conc_Hg_muscle_ppm <- round(means.pression$conc_Hg_muscle_ppm, digits = 2)
kruskal.test(conc_Hg_muscle_ppm ~ Pression_anthro2, data = BD.moen) # significant differences exist
comparison <- kruskal(BD.moen$conc_Hg_muscle_ppm, BD.moen$Pression_anthro2, alpha = 0.05, p.adj = "holm")
posthoc <- comparison[['groups']]
posthoc$trt <- gsub(" ","",posthoc$trt) # strip spaces after the treatment names so the later merge can match
p0 <- ggplot(BD.moen, aes(x = Pression_anthro2 , y = conc_Hg_muscle_ppm)) +
geom_boxplot() +
geom_hline(aes(yintercept = 2.5), color = "red") +
stat_summary(fun.y = mean, colour = "darkred", geom = "point",
shape = 18, size = 3,show_guide = FALSE) +
geom_text(data = means.pression, aes(label = conc_Hg_muscle_ppm, y = conc_Hg_muscle_ppm + 0.15), color = "blue")
lettpos <- function(BD.moen) boxplot(BD.moen$conc_Hg_muscle_ppm, plot = FALSE)$stats[5,] # y position just above the boxplot upper whisker (stats[5,])
test <- ddply(BD.moen, .(Pression_anthro2), lettpos) # one whisker position per factor level
test_f <- merge(test, posthoc, by.x = "Pression_anthro2", by.y = "trt") # join whisker positions and post-hoc letters on the pressure level
colnames(test_f)[2] <- "upper"
colnames(test_f)[4] <- "signif"
n_fun <- function(x){return(data.frame(y = 0, label = paste0("n = ", length(x))))} # sample-size label at y = 0
test_f$signif <- as.character(test_f$signif) # just in case, so the letters render correctly; possibly unnecessary
# Only 7 pressure levels here ("Agriculture" absent — presumably no Moenkhausia there; confirm)
p0 <- p0 + geom_text(aes(Pression_anthro2, upper - 0.5, label = signif), size = 10, data = test_f, vjust = -2, color = "red") +
scale_x_discrete(limits = c("Reference_Trois_Sauts", "Reference", "Deforestation", "Piste", "Orpaillage_ancien", "Orpaillage_illegal", "Barrage"),
labels = c("Trois Sauts", "Référence", "Déforestation", "Piste", "Orpaillage ancien", "Orpaillage illégal récent", "Barrage")) +
stat_summary(fun.data = n_fun, geom = "text") +
labs( y = "[Hg] dans les muscles de poissons, en mg/kg de poids sec",
x = "Pression anthropique", title = "[Hg] dans les muscles de Moenkhausia selon les pressions anthropiques exercées sur les stations")
pdf("Graph/Pression_anthropique/Hg-muscle_pression-anthropique_moenkhausia.pdf", width = 12, height = 9) # pdf() writes the figure straight into the folder
print(p0)
dev.off()
######################0000000000000########################
### Hg contamination vs diet and d15N over the whole database
# Muscle [Hg] for individuals with concentrations measured in all organs:
# per-diet means (`df.reg.org`, built elsewhere) with SE error bars over d15N.
pl1 <- ggplot(BDD[!(is.na(BDD$conc_Hg_muscle_ppm)), ], aes(x = d15N, y = conc_Hg_muscle_ppm)) +
# geom_point(data = df.sp.muscle, aes(x = d15N_mean, y = Hg_muscle_mean, fill = Code), show_guide = FALSE) +
geom_point(data = df.reg.org, aes(x = d15N_mean, y = Hg_muscle_mean, color = Regime_alter), size = 4) +
geom_text(data = df.reg.org, aes(x = d15N_mean, y = Hg_muscle_mean, color = Regime_alter, label = c("Piscivore", "Insectivore", "Carnivore Invertivore", "Carnivore", "Omnivore Invertivore", "Omnivore Herbivore", "Détritivore", "Périphytophage", "Herbivore","Phyllophage")), hjust=1.02, vjust=-1, size = 6.5) +
geom_errorbarh(data = df.reg.org, aes(xmin = d15N_mean + d15N_se, xmax = d15N_mean - d15N_se, y = Hg_muscle_mean, x = d15N_mean, colour = Regime_alter), height = .025) +
geom_errorbar(data = df.reg.org, aes(ymin = Hg_muscle_mean - Hg_muscle_se, ymax = Hg_muscle_mean + Hg_muscle_se, x = d15N_mean, y = Hg_muscle_mean, colour = Regime_alter), width = .05) +
scale_color_manual(name = "Régime trophique",
labels = c("Carnivore Piscivore", "Carnivore Insectivore", "Carnivore Invertivore", "Carnivore", "Omnivore Invertivore", "Omnivore Herbivore", "Détritivore", "Herbivore Périphytophage", "Herbivore","Herbivore Phyllophage"),
values = colo) +
ylab("[Hg] dans le muscle de poissons, en mg/kg de poids sec") +
xlab(expression(paste(delta^{15},'N'))) +
theme_bw() +
ggtitle(expression(paste("[Hg] dans le muscle de poissons en fonction de ", delta^{15},"N selon les régimes trophiques")))
pdf("Graph/Hg_isotopie/Hg-muscle_d15N_regime.pdf", width = 12, height = 9)
print(pl1)
dev.off()
# Number of individuals with Hg measured in muscle
xtabs(~ Regime_alter, data = BDD[!(is.na(BDD$conc_Hg_muscle_ppm)) & !(is.na(BDD$d15N)), ])
# Number of individuals with Hg measured in all organs
xtabs(~ Regime_alter, data = BDD[!(is.na(BDD$conc_Hg_muscle_ppm)) & !(is.na(BDD$conc_Hg_branchie_ppm))
& !(is.na(BDD$conc_Hg_foie_ppm)), ])
# Muscle [Hg] for ALL individuals with measured concentrations (not only those
# with every organ dosed): per-diet means + SE over d15N, diets numbered 1-9.
# Standard deviation instead of standard errors; experimental.
# NOTE(review): `%.%` is the chaining operator of early dplyr (pre-0.2),
# replaced by `%>%` in later versions; `se()` is presumably a standard-error
# helper defined earlier in the file — confirm.
df.reg.muscle <- BDD[!(is.na(BDD$conc_Hg_muscle_ppm)) & !(is.na(BDD$d15N)), ] %.% # start from the full database
group_by(Regime_alter) %.% # group by diet
filter(Regime_alter != "Carnivore" & Regime_alter != "Carnivore_Scaliphage" & Regime_alter != "Herbivore") %.% # minor or ill-defined diets removed
summarise(Hg_muscle_mean = mean(na.omit(conc_Hg_muscle_ppm)), d15N_mean = mean(na.omit(d15N)), Hg_muscle_se = se(na.omit(conc_Hg_muscle_ppm)),
d15N_se = se(na.omit(d15N)), d13C_se = se(na.omit(d13C)), d13C_mean = mean(na.omit(d13C))) # summary statistics per diet
df.reg.muscle <- na.omit(df.reg.muscle)
pl <- ggplot(BDD[!(is.na(BDD$conc_Hg_muscle_ppm)) & !(is.na(BDD$d15N)), ], aes(x = d15N, y = conc_Hg_muscle_ppm)) +
# geom_point(data = df.sp.muscle, aes(x = d15N_mean, y = Hg_muscle_mean, fill = Code), show_guide = FALSE) +
geom_point(data = df.reg.muscle, aes(x = d15N_mean, y = Hg_muscle_mean, color = Regime_alter), size = 2) +
geom_text(data = df.reg.muscle, aes(x = d15N_mean, y = Hg_muscle_mean, color = Regime_alter, label = c("1", "2", "3", "4", "5", "6", "7", "8", "9")), hjust = 1.3, vjust = 1.3, size = 6.5) +
geom_errorbarh(data = df.reg.muscle, aes(xmin = d15N_mean + d15N_se, xmax = d15N_mean - d15N_se, y = Hg_muscle_mean, x = d15N_mean, colour = Regime_alter), height = .025) +
geom_errorbar(data = df.reg.muscle, aes(ymin = Hg_muscle_mean - Hg_muscle_se, ymax = Hg_muscle_mean + Hg_muscle_se, x = d15N_mean, y = Hg_muscle_mean, colour = Regime_alter), width = .05) +
#scale_x_continuous(limits = c(7.5, 11.8)) +
scale_color_manual(name = "Régime trophique",
labels = c("1 : Carnivore Piscivore", "2 : Carnivore Insectivore", "3 : Carnivore Invertivore", "4 : Carnivore Charognard", "5 : Omnivore Invertivore", "6 : Omnivore Herbivore", "7 : Détritivore", "8 : Herbivore Périphytophage","9 : Herbivore Phyllophage"),
values = colo2) +
ylab("[Hg] dans le muscle de poissons, en mg/kg de poids sec") +
xlab(expression(paste(delta^{15},'N'))) +
theme_bw() +
ggtitle(expression(paste("[Hg] dans le muscle de poissons en fonction de ", delta^{15},"N selon les régimes trophiques")))
pdf("Graph/Hg_isotopie/Hg-muscle-only_d15N_regime2.pdf", width = 8, height = 6)
print(pl)
dev.off()
# Same plot, but restricted to the most contaminated sites only
# (per-diet summaries come from `df.reg.conta`, built elsewhere).
pl <- ggplot(BDD[!(is.na(BDD$conc_Hg_muscle_ppm)) & !(is.na(BDD$d15N)), ], aes(x = d15N, y = conc_Hg_muscle_ppm)) +
# geom_point(data = df.sp.muscle, aes(x = d15N_mean, y = Hg_muscle_mean, fill = Code), show_guide = FALSE) +
geom_point(data = df.reg.conta, aes(x = d15N_mean, y = Hg_muscle_mean, color = Regime_alter), size = 4) +
geom_text(data = df.reg.conta, aes(x = d15N_mean, y = Hg_muscle_mean, color = Regime_alter, label = Regime_alter), hjust=1.02, vjust=-1, size = 6.5) +
geom_errorbarh(data = df.reg.conta, aes(xmin = d15N_mean + d15N_se, xmax = d15N_mean - d15N_se, y = Hg_muscle_mean, x = d15N_mean, colour = Regime_alter), height = .025) +
geom_errorbar(data = df.reg.conta, aes(ymin = Hg_muscle_mean - Hg_muscle_se, ymax = Hg_muscle_mean + Hg_muscle_se, x = d15N_mean, y = Hg_muscle_mean, colour = Regime_alter), width = .05) +
#scale_color_manual(name = "Régime trophique",
# labels = c("Carnivore Piscivore", "Carnivore Insectivore", "Carnivore Invertivore", "Carnivore", "Omnivore Invertivore", "Omnivore Herbivore", "Détritivore", "Herbivore Périphytophage", "Herbivore","Herbivore Phyllophage"),
# values = colo) +
ylab("[Hg] dans le muscle de poissons, en mg/kg de poids sec") +
xlab(expression(paste(delta^{15},'N'))) +
theme_bw() +
ggtitle(expression(paste("[Hg] dans le muscle de poissons en fonction de ", delta^{15},"N selon les régimes trophiques")))
pdf("Graph/Hg_isotopie/Hg-muscle-conta-only_d15N_regime.pdf", width = 12, height = 9)
print(pl)
dev.off()
# Liver [Hg] for individuals with concentrations measured in all organs:
# same layout as pl1 (per-diet means + SE over d15N).
pl2 <- ggplot( BDD[!(is.na(BDD$conc_Hg_foie_ppm)), ], aes(x = d15N, y = conc_Hg_foie_ppm)) +
# geom_point(aes(color = Regime_alter), alpha = 0.65) +
geom_point(data = df.reg.org, aes(x = d15N_mean, y = Hg_foie_mean, color = Regime_alter), size = 4) +
geom_text(data = df.reg.org, aes(x = d15N_mean, y = Hg_foie_mean, color = Regime_alter, label = c("Piscivore", "Insectivore", "Carnivore Invertivore", "Carnivore", "Omnivore Invertivore", "Omnivore Herbivore", "Détritivore", "Périphytophage", "Herbivore","Phyllophage")), hjust=1.02, vjust=-1, size = 6.5) +
geom_errorbarh(data = df.reg.org, aes(xmin = d15N_mean + d15N_se, xmax = d15N_mean - d15N_se, y = Hg_foie_mean, x = d15N_mean, colour = Regime_alter), height = .05) +
geom_errorbar(data = df.reg.org, aes(ymin = Hg_foie_mean - Hg_foie_se, ymax = Hg_foie_mean + Hg_foie_se, x = d15N_mean, y = Hg_foie_mean, colour = Regime_alter), width = .05) +
scale_color_manual(name = "Régime trophique",
labels = c("Carnivore Piscivore", "Carnivore Insectivore", "Carnivore Invertivore", "Carnivore", "Omnivore Invertivore", "Omnivore Herbivore", "Détritivore", "Herbivore Périphytophage", "Herbivore","Herbivore Phyllophage"),
values = colo) +
ylab("[Hg] dans le foie de poissons, en mg/kg de poids sec") +
xlab(expression(paste(delta^{15},'N'))) +
ggtitle(expression(paste("[Hg] dans le foie de poissons en fonction de ", delta^{15},"N selon les régimes trophiques")))
pdf("Graph/Hg_isotopie/Hg-foie_d15N_regime.pdf", width = 12, height = 9)
print(pl2)
dev.off()
# Gill [Hg] for individuals with concentrations measured in all organs:
# same layout again.
pl3 <- ggplot( BDD[!(is.na(BDD$conc_Hg_branchie_ppm)), ], aes(x = d15N, y = conc_Hg_branchie_ppm)) +
# geom_point(aes(color = Regime_alter), alpha = 0.65) +
geom_point(data = df.reg.org, aes(x = d15N_mean, y = Hg_branchie_mean, color = Regime_alter), size = 4) +
geom_text(data = df.reg.org, aes(x = d15N_mean, y = Hg_branchie_mean, color = Regime_alter, label = c("Piscivore", "Insectivore", "Carnivore Invertivore", "Carnivore", "Omnivore Invertivore", "Omnivore Herbivore", "Détritivore", "Périphytophage", "Herbivore","Phyllophage")), hjust=1.02, vjust=-1, size = 6.5) +
geom_errorbarh(data = df.reg.org, aes(xmin = d15N_mean + d15N_se, xmax = d15N_mean - d15N_se, y = Hg_branchie_mean, x = d15N_mean, colour = Regime_alter), height = .01) +
geom_errorbar(data = df.reg.org, aes(ymin = Hg_branchie_mean - Hg_branchie_se, ymax = Hg_branchie_mean + Hg_branchie_se, x = d15N_mean, y = Hg_branchie_mean, colour = Regime_alter), width = .05) +
scale_color_manual(name = "Régime trophique",
labels = c("Carnivore Piscivore", "Carnivore Insectivore", "Carnivore Invertivore", "Carnivore", "Omnivore Invertivore", "Omnivore Herbivore", "Détritivore", "Herbivore Périphytophage", "Herbivore","Herbivore Phyllophage"),
values = colo) +
ylab("[Hg] dans les branchies de poissons, en mg/kg de poids sec") +
xlab(expression(paste(delta^{15},'N'))) +
ggtitle(expression(paste("[Hg] dans les branchies de poissons en fonction de ", delta^{15},"N selon les régimes trophiques")))
pdf("Graph/Hg_isotopie/Hg-branchies_d15N_regime.pdf", width = 12, height = 9)
print(pl3)
dev.off()
# Combine the three organ plots (muscle/liver/gill) into one figure
ggplot(df, aes(x = Regime_alter, y = value, color = variable)) + geom_point() # muscle is the most concentrated organ except in undefined Carnivores
legend <- g_legend(pl1) # extract the shared legend grob (g_legend: helper presumably defined elsewhere — confirm)
grid.arrange(pl1, pl2, pl3, ncol = 1, nrow = 3) # Basic
# Stack the three plots without their legends, with the single legend alongside
grid.arrange(arrangeGrob(pl1 + theme(legend.position="none"),
pl2 + theme(legend.position="none"),
pl3 + theme(legend.position="none"),
ncol = 1),
legend, ncol = 2, nrow = 1, widths = c(9, 1), heights = c(1, 1))
pdf("Graph/Hg_isotopie/Hg-d15N_regime.pdf", width = 20, height = 15) # pdf() writes the figure straight into the folder
print(grid.arrange(arrangeGrob(pl1 + theme(legend.position="none"),
pl2 + theme(legend.position="none"),
pl3 + theme(legend.position="none"),
ncol = 1),
legend, ncol = 2, nrow = 1, widths = c(9, 1), heights = c(1, 1)))
dev.off()
###### Organotropism: ratios of Hg concentrations between organs
# Cleveland-style dot plot of mean concentration ratios per diet
ggplot(df.ratio.conc, aes(x = value, y = Regime_alter)) +
geom_segment(aes(yend = Regime_alter), xend=0, colour="grey50") +
geom_point(size=3, aes(colour = variable, shape = variable)) +
theme_bw() +
scale_shape_discrete(guide = FALSE) +
scale_color_discrete(name = "Rapport de \nconcentrations", labels =
c("[muscle]/[foie]", "[muscle]/[branchie]", "[foie]/[branchie]" )) +
scale_y_discrete(labels = c("Carnivore \n Piscivore", "Carnivore \n Invertivore", "Omnivore \n Invertivore", "Omnivore \n Herbivore", "Herbivore \n Périphytophage")) +
labs( x = "Rapport des concentrations moyennes mesurées dans les organes", y = "Régime trophique") +
theme(panel.grid.major.y = element_blank())
# Alternative version with boxplots for better readability
ggplot(df.sp.ratio.melt, aes(x = Regime_alt, y = value, color = variable)) +
geom_boxplot() +
scale_x_discrete(limits =c("Carnivore_Piscivore", "Carnivore_Invertivore", "Omnivore_Invertivore", "Omnivore_Herbivore", "Herbivore_Periphytophage"), labels = c("Carnivore \n Piscivore", "Carnivore \n Invertivore", "Omnivore \n Invertivore", "Omnivore \n Herbivore", "Herbivore \n Périphytophage"))
# Final organotropism figure: ratio boxplots colored by diet
organotropism <- ggplot(df.sp.ratio.melt, aes(color = Regime_alt, y = value, x = variable)) +
geom_boxplot() +
scale_color_discrete(name = "Régime trophique", labels =
c("Carnivore Piscivore,\nn = 30", "Carnivore Invertivore, \nn = 81", "Omnivore Invertivore, \nn = 162", "Omnivore Herbivore, \nn = 10", "Herbivore Périphytophage, \nn = 10")) +
scale_x_discrete(name = "Ratios", labels =
c("[muscle]/[foie]", "[muscle]/[branchie]" )) +
ylab("Rapport des concentrations de mercure\nmesurées dans les organes de poissons") +
theme_bw()
df.krusk <- filter(df.sp.ratio, Regime_alt != "Carnivore" & Regime_alt != "Carnivore_Insectivore" & Regime_alt != "Herbivore" & Regime_alt != "Detritivore" & Regime_alt != "Herbivore_Phyllophage") # drop rare diets before the Kruskal comparisons
kruskal.test(muscle.foie ~ Regime_alt, data = df.krusk)
comparison.foie <- kruskal(df.krusk$muscle.foie, df.krusk$Regime_alt, alpha = 0.05, p.adj = "holm")
kruskal.test(muscle.branchie ~ Regime_alt, data = df.krusk)
comparison.branchie <- kruskal(df.krusk$muscle.branchie, df.krusk$Regime_alt, alpha = 0.05, p.adj = "holm")
pdf("Graph/Hg_isotopie/organotropisme.pdf", width = 8, height = 5)
print(organotropism)
dev.off()
#### PCA to explore organotropism in more detail (FactoMineR + cluster pkgs)
# FIX: removed the trailing empty argument in the PCA() call — the original read
# `PCA(df.sp.ratio, ncp = 3, scale.unit = TRUE,)`, a leftover from deleting
# `quali.sup = 5`, which passes an empty (missing) argument.
res.pca <- PCA(df.sp.ratio, ncp = 3, scale.unit = TRUE) # quali.sup = 5,
plotellipses(res.pca,habillage = 5)
res.hcpc <- HCPC(res.pca, method = "ward", nb.clust = 5) # automatic clustering; nb.clust fixes the number of clusters
res.hcpc$desc.var$test.chi2 # variables that best characterise the separation between clusters
res.hcpc$desc.var$category # composition details for each cluster
res.hcpc$desc.ind # most characteristic individuals of each cluster & those farthest from the other clusters
classif = agnes(res.pca$ind$coord,method="ward")
plot(classif,main="Dendrogram",ask=F,which.plots=2,labels=FALSE)
Bd <- res.pca$ind$coord[, 1:3]
# Ward Hierarchical Clustering
d <- dist(Bd, method = "euclidean") # distance matrix
# FIX: hclust's "ward" method was renamed "ward.D" in R >= 3.1.0; passing
# "ward" only still works via a deprecation warning that maps it to "ward.D".
fit <- hclust(d, method = "ward.D")
plot(fit) # display dendrogram
groups <- cutree(fit, k=5) # cut tree into 5 clusters
# draw dendrogram with red borders around the 5 clusters
rect.hclust(fit, k=5, border="red")
#### MCA to explore organotropism (ratios discretised into quantile classes)
Bd <- filter(df.sp.ratio, Regime_alt != "Carnivore" & Regime_alt != "Carnivore_Insectivore" & Regime_alt != "Herbivore" & Regime_alt != "Detritivore" & Regime_alt != "Herbivore_Phyllophage")
# Discretise each ratio into quintiles (quantcut, from gtools) so it can enter the MCA
Bd$muscle.foie <- quantcut(Bd[,'muscle.foie'], q = seq(0, 1, by = 0.2))
Bd$muscle.foie <- as.factor(Bd$muscle.foie)
Bd$muscle.branchie <- quantcut(Bd[,'muscle.branchie'], q = seq(0, 1, by = 0.2))
Bd$muscle.branchie <- as.factor(Bd$muscle.branchie)
Bd$foie.branchie <- quantcut(Bd[,'foie.branchie'], q = seq(0, 1, by = 0.2))
Bd$foie.branchie <- as.factor(Bd$foie.branchie)
# cats <- NULL
cats <- apply(Bd, 2, function(x) nlevels(as.factor(x))) # number of levels per (now categorical) column
mca1 <- MCA(Bd)
# mca1_vars_df <- NULL
mca1_vars_df <- data.frame(mca1$var$coord, Variable = rep(names(cats), cats))
#(rownames(mca1_vars_df[1,])) <- "Chien contaminée"
# data frame with observation coordinates
# mca1_obs_df <- NULL
mca1_obs_df <- data.frame(mca1$ind$coord)
# MCA plot of observations and categories
p <- ggplot(data = mca1_obs_df, aes(x = Dim.1, y = Dim.2)) +
geom_hline(yintercept = 0, colour = "gray70") +
geom_vline(xintercept = 0, colour = "gray70") +
geom_point(colour = "gray50", alpha = 0.7) +
geom_density2d(colour = "gray80") +
geom_text(data = mca1_vars_df,
aes(x = Dim.1, y = Dim.2,
label = rownames(mca1_vars_df), colour = Variable), size = 6.5) +
ggtitle("Analyse des correspondances multiples : organotropisme") +
xlab("Dimension 1. 12,8 % de variance expliquée") +
ylab("Dimension 2. 9,6 % de variance expliquée") +
theme_bw()
#p + scale_colour_discrete(name = "Variable", label = c("Intervalle [Hg] branchie en mg/kg ps", "Intervalle [Hg] foie en mg/kg ps", "Intervalle [Hg] muscle en mg/kg ps", 'Intervalle de d15N', "Régime trophique"))
res.hcpc = HCPC(mca1, nb.clust = 5, graph = FALSE, method = "ward")
res.hcpc$desc.var$test.chi2 # variables that best characterise the separation between clusters
res.hcpc$desc.var$category # composition details for each cluster
res.hcpc$desc.ind # most characteristic individuals of each cluster & those farthest from the other clusters
### MCA for an overall view of mercury contamination
# Select the variables of interest from the whole database
Bd <- select(BDD, d15N, d13C, Regime_alter, conc_Hg_muscle_ppm, conc_Hg_foie_ppm, conc_Hg_branchie_ppm)
Bd <- Bd[Bd$Regime_alter %in% c("Carnivore_Piscivore", "Carnivore_Insectivore", "Carnivore_Invertivore", "Carnivore", "Omnivore_Invertivore", "Omnivore_Herbivore", "Herbivore_Periphytophage", "Herbivore","Herbivore_Phyllophage") ,]
# Collapse the detailed diets into four broad categories
levels(Bd$Regime_alter) <- sub("^Carnivore_Piscivore$", "Piscivore", levels(Bd$Regime_alter))
levels(Bd$Regime_alter) <- sub("^Carnivore_Insectivore$", "Carnivore", levels(Bd$Regime_alter))
levels(Bd$Regime_alter) <- sub("^Carnivore_Invertivore$", "Carnivore", levels(Bd$Regime_alter))
levels(Bd$Regime_alter) <- sub("^Omnivore_Herbivore$", "Omnivore", levels(Bd$Regime_alter))
levels(Bd$Regime_alter) <- sub("^Omnivore_Invertivore$", "Omnivore", levels(Bd$Regime_alter))
levels(Bd$Regime_alter) <- sub("^Herbivore_Periphytophage$", "Herbivore", levels(Bd$Regime_alter))
levels(Bd$Regime_alter) <- sub("^Herbivore_Phyllophage$", "Herbivore", levels(Bd$Regime_alter))
Bd$Regime_alter <- droplevels(Bd$Regime_alter) # Drop unused levels
# Keep only individuals with [Hg] measured in all organs
Bd <- na.omit(Bd)
# Discretise [Hg] per organ and the isotope ratios into quintiles (quantcut, gtools)
Bd$Hg_muscle <- quantcut(Bd[,'conc_Hg_muscle_ppm'], q = seq(0, 1, by = 0.2))
Bd$Hg_muscle <- as.factor(Bd$Hg_muscle)
Bd$Hg_foie <- quantcut(Bd[,'conc_Hg_foie_ppm'], q = seq(0, 1, by = 0.2))
Bd$Hg_foie <- as.factor(Bd$Hg_foie)
Bd$Hg_branchie <- quantcut(Bd[,'conc_Hg_branchie_ppm'], q = seq(0, 1, by = 0.2))
Bd$Hg_branchie <- as.factor(Bd$Hg_branchie)
Bd$N <- quantcut(Bd[,'d15N'], q = seq(0, 1, by = 0.2))
Bd$N <- as.factor(Bd$N)
Bd$C <- quantcut(Bd[,'d13C'], q = seq(0, 1, by = 0.2))
Bd$C <- as.factor(Bd$C)
# Keep Regime_alter, the Hg quintile factors and the d15N quintiles; drop raw
# numeric columns (1-2, 4-6) and the d13C quintiles (11)
Bd2 <- Bd[,c(-4:-6, -1:-2, -11)]
# cats <- NULL
cats <- apply(Bd2, 2, function(x) nlevels(as.factor(x))) # number of levels per column
mca1 <- MCA(Bd2)
# mca1_vars_df <- NULL
mca1_vars_df <- data.frame(mca1$var$coord, Variable = rep(names(cats), cats))
#(rownames(mca1_vars_df[1,])) <- "Chien contaminée"
# data frame with observation coordinates
# mca1_obs_df <- NULL
mca1_obs_df <- data.frame(mca1$ind$coord)
# MCA plot of observations and categories
p <- ggplot(data = mca1_obs_df, aes(x = Dim.1, y = Dim.2)) +
geom_hline(yintercept = 0, colour = "gray70") +
geom_vline(xintercept = 0, colour = "gray70") +
geom_point(colour = "gray50", alpha = 0.7) +
geom_density2d(colour = "gray80") +
geom_text(data = mca1_vars_df,
aes(x = Dim.1, y = Dim.2,
label = rownames(mca1_vars_df), colour = Variable), size = 6.5) +
ggtitle("Analyse des correspondances multiples : contamination mercurielle") +
xlab("Dimension 1. 16,5 % de variance expliquée") +
ylab("Dimension 2. 9,7 % de variance expliquée") +
theme_bw()
p + scale_colour_discrete(name = "Variable", label = c("Intervalle [Hg] branchie en mg/kg ps", "Intervalle [Hg] foie en mg/kg ps", "Intervalle [Hg] muscle en mg/kg ps", 'Intervalle de d15N', "Régime trophique"))
#0000000000000#
### Muscle [Hg] by diet at Chien, Trois Sauts and Nouvelle France station groups:
### Kruskal-Wallis + Holm post-hoc letters drawn on boxplots, saved as PDF.
means <- aggregate(conc_Hg_muscle_ppm ~ Groupe_station, sub_BDD, mean)
# BUG FIX: the original line rounded `means$Se_ppm`, a column that does not
# exist in `means` (which only has Groupe_station and conc_Hg_muscle_ppm) — a
# leftover from a selenium script. Round the Hg means instead, matching the
# pattern used in the diet-guild analyses above.
means$conc_Hg_muscle_ppm <- round(means$conc_Hg_muscle_ppm, digits = 2)
kruskal.test(conc_Hg_muscle_ppm ~ Groupe_station, data = sub_BDD) # significant differences exist
comparison <- kruskal(sub_BDD$conc_Hg_muscle_ppm, sub_BDD$Groupe_station, alpha = 0.05, p.adj = "holm")
posthoc <- comparison[['groups']]
posthoc$trt <- gsub(" ","",posthoc$trt) # strip spaces after the group names so the later merge can match
p0 <- ggplot(sub_BDD, aes(x = Groupe_station , y = conc_Hg_muscle_ppm)) +
geom_boxplot() +
stat_summary(fun.y = mean, colour = "blue", geom = "point",
shape = 18, size = 3,show_guide = FALSE)
lettpos <- function(sub_BDD) boxplot(sub_BDD$conc_Hg_muscle_ppm, plot = FALSE)$stats[5,] # y position just above the boxplot upper whisker
test <- ddply(sub_BDD, .(Groupe_station), lettpos) # one whisker position per station group
test_f <- merge(test, posthoc, by.x = "Groupe_station", by.y = "trt") # join whisker positions and post-hoc letters
colnames(test_f)[2] <- "upper"
colnames(test_f)[4] <- "signif"
n_fun <- function(x){return(data.frame(y = 0, label = paste0("n = ", length(x))))} # sample-size label at y = 0
test_f$signif <- as.character(test_f$signif) # just in case, so the letters render correctly
p0 <- p0 + geom_text(aes(Groupe_station, upper + 0.1, label = signif), size = 10, data = test_f, vjust = -2, color = "red") +
stat_summary(fun.data = n_fun, geom = "text") +
scale_x_discrete(limits = limit_groupes,
labels = label_groupes) +
labs( y = "[Hg] dans les muscles de poissons, en mg/kg de poids sec",
x = "Groupes de stations", title = "[Hg] dans les muscles poissons selon groupes de stations") +
geom_hline(aes(yintercept = 2.5), color = "red") + theme_bw()
pdf("Graph/Hg-muscle_groupe_stations.pdf", width = 9, height = 5)
print(p0)
dev.off()
# NF: comparison between site 3 and site 4
# NOTE(review): `posthoc` here is still the object from the Groupe_station
# analysis above, so merging it by Code_Station likely matches no rows — confirm.
# `test_f$signif` is not drawn on this plot anyway (no geom_text below).
p0 <- ggplot(sub_BDD, aes(x = Code_Station , y = conc_Hg_muscle_ppm)) +
geom_boxplot()
lettpos <- function(sub_BDD) boxplot(sub_BDD$conc_Hg_muscle_ppm, plot = FALSE)$stats[5,] # y position just above the boxplot upper whisker
test <- ddply(sub_BDD, .(Code_Station), lettpos) # one whisker position per station
test_f <- merge(test, posthoc, by.x = "Code_Station", by.y = "trt") # join on station code (see NOTE above)
colnames(test_f)[2] <- "upper"
colnames(test_f)[4] <- "signif"
n_fun <- function(x){return(data.frame(y = 0, label = paste0("n = ", length(x))))} # sample-size label at y = 0
test_f$signif <- as.character(test_f$signif) # just in case, so the letters render correctly
p0 <- p0 + stat_summary(fun.data = n_fun, geom = "text") +
scale_x_discrete(limits = c("NFS3", "NFS4"), labels = c("Site 3", "Site 4")) +
labs( y = "[Hg] (mg/kg ps)") +
geom_hline(aes(yintercept = 2.5), color = "red") + theme_bw() # red line: 2.5 mg/kg dw threshold
### Hg contamination by diet and d15N at Chien, Trois Sauts and Nouvelle France
# Trois Sauts: no isotope analysis performed; no plot.
# Crique Chien
## Uncontaminated zone (per-diet summaries from `df.chien.nonconta`, built elsewhere)
pCnC <- ggplot(BDD_PME[!(is.na(BDD_PME$conc_Hg_muscle_ppm)), ], aes(x = d15N, y = conc_Hg_muscle_ppm)) +
# geom_point(data = df.sp.muscle, aes(x = d15N_mean, y = Hg_muscle_mean, fill = Code), show_guide = FALSE) +
geom_point(data = df.chien.nonconta, aes(x = d15N_mean, y = Hg_muscle_mean, color = Regime_alter), size = 4) +
geom_text(data = df.chien.nonconta, aes(x = d15N_mean, y = Hg_muscle_mean, color = Regime_alter, label = c("Piscivore", "Insectivore", "Carnivore Invertivore", "Charognard", "Omnivore Invertivore", "Détritivore", "Périphytophage")), hjust= 0, vjust=-1, size = 6.5) +
geom_errorbarh(data = df.chien.nonconta, aes(xmin = d15N_mean + d15N_se, xmax = d15N_mean - d15N_se, y = Hg_muscle_mean, x = d15N_mean, colour = Regime_alter), height = .025) +
geom_errorbar(data = df.chien.nonconta, aes(ymin = Hg_muscle_mean - Hg_muscle_se, ymax = Hg_muscle_mean + Hg_muscle_se, x = d15N_mean, y = Hg_muscle_mean, colour = Regime_alter), width = .05) +
scale_color_manual(name = "Régime trophique",
labels = c("Carnivore Piscivore", "Carnivore Insectivore", "Carnivore Invertivore", "Carnivore Charognard", "Omnivore Invertivore", "Détritivore", "Herbivore Périphytophage"),
values = c( "#F8766D", "#D89000", "#A3A500", "#FF62BC", "#E76BF3", "#00B0F6", "#39B600", "#00BFC4", "#00BF7D")) +
ylab("[Hg] dans le muscle de poissons, en mg/kg de poids sec") +
xlab(expression(paste(delta^{15},'N'))) +
ggtitle(expression(paste("[Hg] dans le muscle des poissons de la zone non contaminée de Crique Chien en fonction de ", delta^{15},"N selon les régimes trophiques")))
pdf("Graph/Hg_isotopie/Hg-muscle_d15N_regime_Chien-nonconta.pdf", width = 16.5, height = 9)
print(pCnC)
dev.off()
# sort(table(BDD_PME[!(is.na(BDD_PME$conc_Hg_muscle_ppm)) & !(is.na(BDD_PME$d15N)) & BDD_PME$Groupe_station %in% "Chien_non_conta", ]$Regime_alter),decreasing=TRUE)
## Contaminated zone (per-diet summaries from `df.chien.conta`, built elsewhere)
pCC <- ggplot(BDD_PME[!(is.na(BDD_PME$conc_Hg_muscle_ppm)), ], aes(x = d15N, y = conc_Hg_muscle_ppm)) +
# geom_point(data = df.sp.muscle, aes(x = d15N_mean, y = Hg_muscle_mean, fill = Code), show_guide = FALSE) +
geom_point(data = df.chien.conta, aes(x = d15N_mean, y = Hg_muscle_mean, color = Regime_alter), size = 4) +
geom_text(data = df.chien.conta, aes(x = d15N_mean, y = Hg_muscle_mean, color = Regime_alter, label = c("Piscivore", "Insectivore", "Carnivore Invertivore", "Carnivore", "Omnivore Invertivore", "Omnivore Herbivore", "Détritivore", "Périphytophage")), hjust=1.02, vjust=-1, size = 6.5) +
geom_errorbarh(data = df.chien.conta, aes(xmin = d15N_mean + d15N_se, xmax = d15N_mean - d15N_se, y = Hg_muscle_mean, x = d15N_mean, colour = Regime_alter), height = .025) +
geom_errorbar(data = df.chien.conta, aes(ymin = Hg_muscle_mean - Hg_muscle_se, ymax = Hg_muscle_mean + Hg_muscle_se, x = d15N_mean, y = Hg_muscle_mean, colour = Regime_alter), width = .05) +
scale_color_manual(name = "Régime trophique",
labels = c("Carnivore Piscivore", "Carnivore Insectivore", "Carnivore Invertivore", "Carnivore", "Omnivore Invertivore", "Omnivore Herbivore", "Détritivore", "Herbivore Périphytophage"),
values = c( "#F8766D", "#D89000", "#A3A500", "#FF62BC", "#E76BF3", "#9590FF", "#00B0F6", "#39B600", "#00BFC4", "#00BF7D")) +
ylab("[Hg] dans le muscle de poissons, en mg/kg de poids sec") +
xlab(expression(paste(delta^{15},'N'))) +
ggtitle(expression(paste("[Hg] dans le muscle des poissons de la zone contaminée de Crique Chien en fonction de ", delta^{15},"N selon les régimes trophiques")))
pdf("Graph/Hg_isotopie/Hg-muscle_d15N_regime_Chien-conta.pdf", width = 13, height = 9)
print(pCC)
dev.off()
# sort(table(BDD_PME[!(is.na(BDD_PME$conc_Hg_muscle_ppm)) & !(is.na(BDD_PME$d15N)) & BDD_PME$Groupe_station %in% "Chien_conta", ]$Regime_alter),decreasing=TRUE)
#0000000000000#
# Crique Nouvelle France
## Non-contaminated zone: mean muscle [Hg] vs mean d15N per trophic guild,
## with +/- SE error bars on both axes.
## NOTE(review): the geom_text label vector is hard-coded and assumed to match
## the row order of df.NF.nonconta (built upstream) — verify that ordering.
pNFnC <- ggplot(BDD_PME[!(is.na(BDD_PME$conc_Hg_muscle_ppm)), ], aes(x = d15N, y = conc_Hg_muscle_ppm)) +
# geom_point(data = df.sp.muscle, aes(x = d15N_mean, y = Hg_muscle_mean, fill = Code), show_guide = FALSE) +
geom_point(data = df.NF.nonconta, aes(x = d15N_mean, y = Hg_muscle_mean, color = Regime_alter), size = 4) +
geom_text(data = df.NF.nonconta, aes(x = d15N_mean, y = Hg_muscle_mean, color = Regime_alter, label = c("Piscivore", "Insectivore", "Carnivore Invertivore", "Scaliphage", "Charognard", "Omnivore Invertivore", "Périphytophage", "Herbivore","Phyllophage")), hjust=1.02, vjust=-1, size = 6.5) +
geom_errorbarh(data = df.NF.nonconta, aes(xmin = d15N_mean + d15N_se, xmax = d15N_mean - d15N_se, y = Hg_muscle_mean, x = d15N_mean, colour = Regime_alter), height = .025) +
geom_errorbar(data = df.NF.nonconta, aes(ymin = Hg_muscle_mean - Hg_muscle_se, ymax = Hg_muscle_mean + Hg_muscle_se, x = d15N_mean, y = Hg_muscle_mean, colour = Regime_alter), width = .05) +
scale_color_manual(name = "Régime trophique",
labels = c("Carnivore Piscivore", "Carnivore Insectivore", "Carnivore Invertivore", "Carnivore Scaliphage", "Carnivore Charognard", "Omnivore Invertivore", "Herbivore Périphytophage", "Herbivore","Herbivore Phyllophage"),
values = c( "#F8766D", "#D89000", "#A3A500", "#FF62BC", "#9590FF", "#E76BF3", "#39B600", "#00BFC4", "#00BF7D")) +
ylab("[Hg] dans le muscle de poissons, en mg/kg de poids sec") +
xlab(expression(paste(delta^{15},'N'))) +
ggtitle(expression(paste("[Hg] dans le muscle des poissons de la zone non contaminée de Crique Nouvelle France en fonction de ", delta^{15},"N selon les régimes trophiques")))
pdf("Graph/Hg_isotopie/Hg-muscle_d15N_regime_NF-nonconta.pdf", width = 14, height = 9) # pdf() writes the figure straight to file
print(pNFnC)
dev.off()
# sort(table(BDD_PME[!(is.na(BDD_PME$conc_Hg_muscle_ppm)) & !(is.na(BDD_PME$d15N)) & BDD_PME$Groupe_station %in% "NF_non_conta", ]$Regime_alter),decreasing=TRUE)
## Contaminated zone of Crique Nouvelle France: mean muscle [Hg] vs mean d15N
## per trophic guild, with +/- SE error bars on both axes.
## NOTE(review): the geom_text label vector is hard-coded and assumed to match
## the row order of df.NF.conta (built upstream) — verify that ordering.
pNFC <- ggplot(BDD_PME[!(is.na(BDD_PME$conc_Hg_muscle_ppm)), ], aes(x = d15N, y = conc_Hg_muscle_ppm)) +
# geom_point(data = df.sp.muscle, aes(x = d15N_mean, y = Hg_muscle_mean, fill = Code), show_guide = FALSE) +
geom_point(data = df.NF.conta, aes(x = d15N_mean, y = Hg_muscle_mean, color = Regime_alter), size = 4) +
geom_text(data = df.NF.conta, aes(x = d15N_mean, y = Hg_muscle_mean, color = Regime_alter, label = c("Piscivore", "Carnivore Invertivore", "Omnivore Invertivore", "Périphytophage")), hjust= 0, vjust=-1, size = 6.5) +
geom_errorbarh(data = df.NF.conta, aes(xmin = d15N_mean + d15N_se, xmax = d15N_mean - d15N_se, y = Hg_muscle_mean, x = d15N_mean, colour = Regime_alter), height = .025) +
geom_errorbar(data = df.NF.conta, aes(ymin = Hg_muscle_mean - Hg_muscle_se, ymax = Hg_muscle_mean + Hg_muscle_se, x = d15N_mean, y = Hg_muscle_mean, colour = Regime_alter), width = .05) +
scale_color_manual(name = "Régime trophique",
labels = c("Carnivore Piscivore", "Carnivore Invertivore", "Omnivore Invertivore", "Herbivore Périphytophage"),
values = c("#F8766D", "#A3A500", "#E76BF3", "#39B600")) +
ylab("[Hg] dans le muscle de poissons, en mg/kg de poids sec") +
xlab(expression(paste(delta^{15},'N'))) +
ggtitle(expression(paste("[Hg] dans le muscle des poissons de la zone contaminée de Crique Nouvelle France en fonction de ", delta^{15},"N selon les régimes trophiques")))
pdf("Graph/Hg_isotopie/Hg-muscle_d15N_regime_NF-conta.pdf", width = 13.8, height = 9) # pdf() writes the figure straight to file
print(pNFC)
dev.off()
# sort(table(BDD_PME[!(is.na(BDD_PME$conc_Hg_muscle_ppm)) & !(is.na(BDD_PME$d15N)) & BDD_PME$Groupe_station %in% "NF_conta", ]$Regime_alter),decreasing=TRUE)
# Information on the number of individuals in each condition
# (exploratory views; View() opens an interactive table viewer in RStudio)
ggplot(BDD_PME[!(is.na(BDD_PME$conc_Hg_muscle_ppm)) & !(is.na(BDD_PME$d15N)),], aes(x = Regime_alter, y = conc_Hg_muscle_ppm)) +
geom_point(position="jitter") + facet_wrap(~ Groupe_station)
View(ftable(xtabs(~ Groupe_station + Regime_alter, data = BDD_PME[!(is.na(BDD_PME$conc_Hg_muscle_ppm)) & !(is.na(BDD_PME$d15N)),])))
ftable(xtabs(~ Groupe_station + Regime_principal, data = BDD_PME[!(is.na(BDD_PME$conc_Hg_muscle_ppm)) & !(is.na(BDD_PME$d15N)),]))
######################0000000000000########################
# Trace element analysis: Camopi, Nouvelle France and Trois Sauts
# (operates on sub_BDD_PME, a subset built earlier in the file)
### PCA
# As removed: there is clearly a problem with the initial concentrations
# Se is also problematic: no Trois Sauts sample has a value for it
df.trace.Se <- sub_BDD_PME[,c(53:57, 59:62)]
df.trace.sansSe <- sub_BDD_PME[,c(53:57, 60:62)]
# With imputation, Se clearly separates out. But since one hypothesis was a
# Se-Hg link, which imputation cannot account for, is imputing really relevant?
# All data, without Se
res.pca <- PCA(df.trace.sansSe, scale.unit=TRUE)
nb <- estim_ncpPCA(df.trace.sansSe, ncp.min = 0, ncp.max = 5)
res.impute <- imputePCA(df.trace.sansSe, ncp = 2)
res.acp <- PCA (res.impute$completeObs)
# NOTE(review): the biplot below reads res.pca (the non-imputed PCA); the
# imputed res.acp computed just above is never used here — confirm intent.
## Correlation matrix of the PCA
#mcor <- cor(res.impute$completeObs)
#res1 <- cor.mtest(res.impute$completeObs, 0.95)
#corrplot(mcor, p.mat = res1[[1]], sig.level = 0.05)
#corrplot(mcor, p.mat = res1[[1]], sig.level = 0.05, insig = "pch", method="shade", shade.col=NA, tl.col="black", tl.srt=45, addCoef.col="black", addcolorlabel="no", order="FPC")
##### Plot over all sites
# Now extract variables
#
vPC1 <- res.pca$var$coord[,1]
vPC2 <- res.pca$var$coord[,2]
vlabs <- rownames(res.pca$var$coord)
vPCs <- data.frame(cbind(vPC1,vPC2))
rownames(vPCs) <- vlabs
# NOTE(review): PCs is defined earlier in the file (not visible here); only
# its column names are reused for the variable-coordinate data frame.
colnames(vPCs) <- colnames(PCs)
#
# and plot them
#
pv <- ggplot() + theme(aspect.ratio=1) + theme_bw(base_size = 20)
# no data so there's nothing to plot
# put a faint circle there, as is customary (the unit correlation circle)
angle <- seq(-pi, pi, length = 50)
df <- data.frame(x = sin(angle), y = cos(angle))
pv <- pv + geom_path(aes(x, y), data = df, colour="grey70")
#
# add on arrows and variable labels
# NOTE(review): the explained-variance percentages in the axis labels are
# hard-coded — they must be updated if the input data change.
pv <- pv + geom_text(data=vPCs, aes(x=vPC1,y=vPC2,label= c("Cr", "Co", "Ni", "Cu", "Zn", "Cd", "Pb", "Hg")), size=6) +
xlab("Composante principale 1. 18,4% de variance expliquée") + ylab("Composante principale 2. 16,3% de variance expliquée") +
ggtitle("ACP sur les éléments traces : ensemble des sites") +
geom_hline(yintercept = 0, colour = "gray65") +
geom_vline(xintercept = 0, colour = "gray65")
pv <- pv + geom_segment(data=vPCs, aes(x = 0, y = 0, xend = vPC1*0.9, yend = vPC2*0.9), arrow = arrow(length = unit(1/2, 'picas')), color = "grey30")
pdf("Graph/Elements_traces/PCA_ts-sites.pdf", width = 10, height = 10) # pdf() writes the figure straight to file
print(pv)
dev.off()
# res.MI <- MIPCA(df.elt.trace, scale = TRUE, ncp = 2)
# plot(res.MI) # memory problem?
# Data without Trois Sauts but with Se
df <- sub_BDD_PME[,c(53:57, 59:62)]
df.sans3sauts <- df[!(is.na(df[,'Se_ppm'])),]
res.impute <- imputePCA(df.sans3sauts, ncp = 2)
# NOTE(review): res.impute is computed but the PCA below runs on the raw
# df.sans3sauts subset — confirm whether PCA(res.impute$completeObs) was intended.
res.acp <- PCA(df.sans3sauts)
## Correlation matrix of the PCA
# mcor <- cor(res.impute$completeObs)
# res1 <- cor.mtest(res.impute$completeObs, 0.95)
# corrplot(mcor, p.mat = res1[[1]], sig.level = 0.05)
# corrplot(mcor, p.mat = res1[[1]], sig.level = 0.05, insig = "pch", method="shade", shade.col=NA, tl.col="black", tl.srt=45, addCoef.col="black", addcolorlabel="no", order="FPC")
# Now extract variables
#
vPC1 <- res.acp$var$coord[,1]
vPC2 <- res.acp$var$coord[,2]
vlabs <- rownames(res.acp$var$coord)
vPCs <- data.frame(cbind(vPC1,vPC2))
rownames(vPCs) <- vlabs
# NOTE(review): PCs comes from earlier in the file (not visible here); only
# its column names are reused.
colnames(vPCs) <- colnames(PCs)
#
# and plot them
#
pv <- ggplot() + theme(aspect.ratio=1) + theme_bw(base_size = 20)
# no data so there's nothing to plot
# put a faint circle there, as is customary (the unit correlation circle)
angle <- seq(-pi, pi, length = 50)
df <- data.frame(x = sin(angle), y = cos(angle))
pv <- pv + geom_path(aes(x, y), data = df, colour="grey70")
#
# add on arrows and variable labels
# NOTE(review): explained-variance percentages in the axis labels are
# hard-coded — update them if the input data change.
pv <- pv + geom_text(data=vPCs, aes(x=vPC1,y=vPC2,label= c("Cr", "Co", "Ni", "Cu", "Zn", "Se", "Cd", "Pb", "Hg")), size=6) +
xlab("Composante principale 1. 19,5% de variance expliquée") + ylab("Composante principale 2. 16,1% de variance expliquée") +
ggtitle("ACP sur les éléments traces : Camopi et Saül") +
geom_hline(yintercept = 0, colour = "gray65") +
geom_vline(xintercept = 0, colour = "gray65")
pv <- pv + geom_segment(data=vPCs, aes(x = 0, y = 0, xend = vPC1*0.9, yend = vPC2*0.9), arrow = arrow(length = unit(1/2, 'picas')), color = "grey30")
pdf("Graph/Elements_traces/PCA_Saul-Camopi.pdf", width = 10, height = 10) # pdf() writes the figure straight to file
print(pv)
dev.off()
#0000000000000#
## Se
## Muscle [Se]: boxplots by station group / trophic guild, then a
## Kruskal-Wallis test across station groups with Holm-adjusted post-hoc
## letters drawn above each box.
# ggplot(sub_BDD_PME2, aes(x = Groupe_station, y = Se_ppm, color = Regime_principal)) +
# geom_boxplot() +
# scale_x_discrete(limits = c("Chien_non_conta", "Chien_conta", "NF_non_conta", "NF_conta"),
# labels = c("Chien non contaminée", "Chien contaminée", "Nouvelle France non contaminée", "Nouvelle France contaminée")) +
# scale_color_discrete(name = "Régime trophique",
# labels = c("Carnivore", "Omnivore", "Herbivore")) +
# ylab("[Se] dans le muscle de poissons, en mg/kg de poids sec") +
# xlab("Groupe de stations") +
# ggtitle(expression(paste("[Se] dans le muscle de poissons en fonction des groupes de stations et des régimes trophiques")))
Se <- ggplot(sub_BDD_PME4, aes(x = Groupe_station, y = Se_ppm, color = Regime_alter)) +
  geom_boxplot() +
  scale_x_discrete(limits = c("Chien_non_conta", "Chien_conta", "NF_non_conta", "NF_conta"),
                   labels = c("Chien non contaminée", "Chien contaminée", "Nouvelle France non contaminée", "Nouvelle France contaminée")) +
  scale_color_manual(name = "Régime trophique", labels = c("Carnivore Piscivore", "Carnivore Invertivore", "Omnivore", "Herbivore"), values = color) +
  ylab("[Se] dans le muscle de poissons, en mg/kg de poids sec") +
  xlab("Groupe de stations") +
  ggtitle(expression(paste("[Se] dans le muscle de poissons en fonction des groupes de stations et des régimes trophiques")))
pdf("Graph/Elements_traces/Se.pdf", width = 12, height = 9) # pdf() writes the figure straight to file
print(Se)
dev.off()
# Kruskal-Wallis on [Se] across station groups, then agricolae::kruskal()
# post-hoc with Holm correction to get significance letters per group.
BD <- sub_BDD_PME
means <- aggregate(Se_ppm ~ Groupe_station, BD, mean)
means$Se_ppm <- round(means$Se_ppm, digits = 2)
kruskal.test(Se_ppm ~ Groupe_station, data = BD) # significant differences exist
comparison <- kruskal(BD$Se_ppm, BD$Groupe_station, alpha = 0.05, p.adj = "holm")
posthoc <- comparison[['groups']]
posthoc$trt <- gsub(" ", "", posthoc$trt) # strip trailing spaces so the merge below matches
p0 <- ggplot(BD, aes(x = Groupe_station, y = Se_ppm)) +
  geom_boxplot() +
  # group mean as a blue diamond; `fun` and `show.legend` replace the
  # deprecated `fun.y` / `show_guide` arguments (ggplot2 >= 3.3 / >= 2.0)
  stat_summary(fun = mean, colour = "blue", geom = "point",
               shape = 18, size = 3, show.legend = FALSE) #+
  #geom_text(data = means, aes(label = Se_ppm, y = Se_ppm + 0.08), color = "blue")
lettpos <- function(BD) boxplot(BD$Se_ppm, plot = FALSE)$stats[5,] # y-position above the upper whisker
test <- ddply(BD, .(Groupe_station), lettpos) # one whisker position per station group
test_f <- merge(test, posthoc, by.x = "Groupe_station", by.y = "trt") # attach post-hoc letters
colnames(test_f)[2] <- "upper"
colnames(test_f)[4] <- "signif"
n_fun <- function(x){return(data.frame(y = 0, label = paste0("n = ", length(x))))} # per-group sample size label
test_f$signif <- as.character(test_f$signif) # ensure the letters print as plain text
Se <- p0 + geom_text(aes(Groupe_station, upper + 0.1, label = signif), size = 10, data = test_f, vjust = -2, color = "red") +
  theme_bw() +
  stat_summary(fun.data = n_fun, geom = "text") +
  scale_x_discrete(limits = c("Chien_non_conta", "Chien_conta", "NF_non_conta", "NF_conta"),
                   labels = c("Chien non \ncontaminée", "Chien \ncontaminée", "Nouvelle France \nnon contaminée", "Nouvelle France \ncontaminée")) +
  labs(y = "[Se] dans le muscle de poissons, en mg/kg de poids sec", x = "Groupes de stations")
pdf("Graph/Elements_traces/Se_stations.pdf", width = 5, height = 5)
print(Se)
dev.off()
dcast(sub_BDD_PME4, Groupe_station ~ Regime_alter, length) # counts per group x diet
## Ni
## Muscle [Ni]: boxplots by station group / trophic guild, Kruskal-Wallis
## across the three sites with Holm post-hoc letters, then an MCA on binned
## concentrations.
# ggplot(sub_BDD_PME, aes(x = Groupe_station, y = Ni_ppm, color = Regime_principal)) +
# geom_boxplot() +
# scale_x_discrete(limits = c("Trois_Sauts", "Chien_non_conta", "Chien_conta", "NF_non_conta", "NF_conta"),
# labels = c("Trois Sauts", "Chien non contaminée", "Chien contaminée", "Nouvelle France non contaminée", "Nouvelle France contaminée")) +
# scale_color_discrete(name = "Régime trophique",
# labels = c("Carnivore", "Omnivore", "Détritivore", "Herbivore")) +
# ylab("[Ni] dans le muscle de poissons, en mg/kg de poids sec") +
# xlab("Groupe de stations") +
# ggtitle(expression(paste("[Ni] dans le muscle de poissons en fonction des groupes de stations et des régimes trophiques")))
Ni <- ggplot(sub_BDD_PME4, aes(x = Groupe_station, y = Ni_ppm, color = Regime_alter)) +
  geom_boxplot() +
  scale_x_discrete(limits = limit_groupes,
                   labels = label_groupes) +
  scale_color_manual(name = "Régime trophique", labels = c("Carnivore Piscivore", "Carnivore Invertivore", "Omnivore", "Herbivore"), values = color) +
  ylab("[Ni] dans le muscle de poissons, en mg/kg de poids sec") +
  xlab("Groupe de stations") +
  ggtitle(expression(paste("[Ni] dans le muscle de poissons en fonction des groupes de stations et des régimes trophiques")))
pdf("Graph/Elements_traces/Ni.pdf", width = 13, height = 9) # pdf() writes the figure straight to file
print(Ni)
dev.off()
# Kruskal-Wallis on [Ni] across the three sites (not station groups here).
BD <- sub_BDD_PME
means <- aggregate(Ni_ppm ~ Site, BD, mean)
means$Ni_ppm <- round(means$Ni_ppm, digits = 2)
kruskal.test(Ni_ppm ~ Site, data = BD) # significant differences exist
comparison <- kruskal(BD$Ni_ppm, BD$Site, alpha = 0.05, p.adj = "holm")
posthoc <- comparison[['groups']]
posthoc$trt <- gsub(" ", "", posthoc$trt) # strip trailing spaces so the merge below matches
p0 <- ggplot(BD, aes(x = Site, y = Ni_ppm)) +
  geom_boxplot() +
  # group mean as a blue diamond; `fun` and `show.legend` replace the
  # deprecated `fun.y` / `show_guide` arguments (ggplot2 >= 3.3 / >= 2.0)
  stat_summary(fun = mean, colour = "blue", geom = "point",
               shape = 18, size = 3, show.legend = FALSE) #+
  #geom_text(data = means, aes(label = Ni_ppm, y = Ni_ppm + 0.08), color = "blue")
lettpos <- function(BD) boxplot(BD$Ni_ppm, plot = FALSE)$stats[5,] # y-position above the upper whisker
test <- ddply(BD, .(Site), lettpos) # one whisker position per site
test_f <- merge(test, posthoc, by.x = "Site", by.y = "trt") # attach post-hoc letters
colnames(test_f)[2] <- "upper"
colnames(test_f)[4] <- "signif"
n_fun <- function(x){return(data.frame(y = -0.3, label = paste0("n = ", length(x))))} # per-site sample size label
test_f$signif <- as.character(test_f$signif) # ensure the letters print as plain text
Ni <- p0 + geom_text(aes(Site, upper + 0.1, label = signif), size = 10, data = test_f, vjust = -2, color = "red") +
  scale_x_discrete(limits = c("Trois_Sauts", "Camopi", "Saul"),
                   labels = c("Trois Sauts", "Crique Chien", "Crique \nNouvelle France")) +
  stat_summary(fun.data = n_fun, geom = "text") +
  theme_bw() +
  labs(y = "[Ni] dans le muscle de poissons, en mg/kg de poids sec", x = "Site")
pdf("Graph/Elements_traces/Ni_stations2.pdf", width = 5, height = 5)
print(Ni)
dev.off()
### MCA on Ni
# NOTE(review): elt.trace() appears to be a helper defined earlier in the
# file that bins the element column and leaves an MCA ggplot in `p` — confirm.
Bd <- select(sub_BDD_PME4, Groupe_station, Regime_alter, Ni_ppm)
elt.trace('Ni_ppm')
MCA_Ni <- p + ggtitle("Analyse des correspondances multiples : Ni") +
  xlab("Dimension 1. 17,3 % de variance expliquée") +
  ylab("Dimension 2. 14,8 % de variance expliquée") +
  scale_colour_discrete(name = "Variable", label = c("Intervalle [Ni] en mg/kg ps", "Groupe de stations", "Régime trophique"))
#+ geom_text(label = c ("Chien contaminée", "Chien non contaminée", "NF contaminée", "NF non contaminée", "Trois Sauts", "Carnivore Piscivore", "Carnivore Invertivore", rownames(mca1_vars_df[8:14, ])))
pdf("Graph/Elements_traces/MCA_Ni.pdf", width = 14, height = 9)
print(MCA_Ni)
dev.off()
## Cu
## Muscle [Cu]: boxplots by station group / trophic guild, Kruskal-Wallis
## across station groups with Holm post-hoc letters, then an MCA.
# ggplot(sub_BDD_PME, aes(x = Groupe_station, y = Cu_ppm, color = Regime_principal)) +
# geom_boxplot() +
# scale_x_discrete(limits = c("Trois_Sauts", "Chien_non_conta", "Chien_conta", "NF_non_conta", "NF_conta"),
# labels = c("Trois Sauts", "Chien non contaminée", "Chien contaminée", "Nouvelle France non contaminée", "Nouvelle France contaminée")) +
# scale_color_discrete(name = "Régime trophique",
# labels = c("Carnivore", "Omnivore", "Détritivore", "Herbivore")) +
# ylab("[Cu] dans le muscle de poissons, en mg/kg de poids sec") +
# xlab("Groupe de stations") +
# ggtitle(expression(paste("[Cu] dans le muscle de poissons en fonction des groupes de stations et des régimes trophiques")))
Cu <- ggplot(sub_BDD_PME4, aes(x = Groupe_station, y = Cu_ppm, color = Regime_alter)) +
  geom_boxplot() +
  scale_x_discrete(limits = limit_groupes,
                   labels = label_groupes) +
  scale_color_manual(name = "Régime trophique", labels = c("Carnivore Piscivore", "Carnivore Invertivore", "Omnivore", "Herbivore"), values = color) +
  ylab("[Cu] dans le muscle de poissons, en mg/kg de poids sec") +
  xlab("Groupe de stations") +
  ggtitle(expression(paste("[Cu] dans le muscle de poissons en fonction des groupes de stations et des régimes trophiques")))
pdf("Graph/Elements_traces/Cu.pdf", width = 13, height = 9) # pdf() writes the figure straight to file
print(Cu)
dev.off()
# Kruskal-Wallis on [Cu] across station groups.
BD <- sub_BDD_PME
means <- aggregate(Cu_ppm ~ Groupe_station, BD, mean)
means$Cu_ppm <- round(means$Cu_ppm, digits = 2)
kruskal.test(Cu_ppm ~ Groupe_station, data = BD) # significant differences exist
comparison <- kruskal(BD$Cu_ppm, BD$Groupe_station, alpha = 0.05, p.adj = "holm")
posthoc <- comparison[['groups']]
posthoc$trt <- gsub(" ", "", posthoc$trt) # strip trailing spaces so the merge below matches
p0 <- ggplot(BD, aes(x = Groupe_station, y = Cu_ppm)) +
  geom_boxplot() +
  # group mean as a blue diamond; `fun` and `show.legend` replace the
  # deprecated `fun.y` / `show_guide` arguments (ggplot2 >= 3.3 / >= 2.0)
  stat_summary(fun = mean, colour = "blue", geom = "point",
               shape = 18, size = 3, show.legend = FALSE) #+
  #geom_text(data = means, aes(label = Cu_ppm, y = Cu_ppm + 0.08), color = "blue")
lettpos <- function(BD) boxplot(BD$Cu_ppm, plot = FALSE)$stats[5,] # y-position above the upper whisker
test <- ddply(BD, .(Groupe_station), lettpos) # one whisker position per station group
test_f <- merge(test, posthoc, by.x = "Groupe_station", by.y = "trt") # attach post-hoc letters
colnames(test_f)[2] <- "upper"
colnames(test_f)[4] <- "signif"
test_f$signif <- as.character(test_f$signif) # ensure the letters print as plain text
Cu <- p0 + geom_text(aes(Groupe_station, upper + 0.1, label = signif), size = 10, data = test_f, vjust = -2, color = "red") +
  scale_x_discrete(limits = limit_groupes,
                   labels = label_groupes) +
  theme_bw() +
  labs(y = "[Cu] dans le muscle de poissons, en mg/kg de poids sec", x = "Site", title = "[Cu] dans le muscle de poissons en fonction des groupes de stations")
pdf("Graph/Elements_traces/Cu_stations.pdf", width = 11, height = 7)
print(Cu)
dev.off()
## MCA
# NOTE(review): elt.trace() appears to be a helper defined earlier in the
# file that bins the element column and leaves an MCA ggplot in `p` — confirm.
Bd <- select(sub_BDD_PME4, Groupe_station, Regime_alter, Cu_ppm)
elt.trace('Cu_ppm')
MCA_Cu <- p + ggtitle("Analyse des correspondances multiples : Cu") +
  xlab("Dimension 1. 16,3 % de variance expliquée") +
  ylab("Dimension 2. 15,6 % de variance expliquée") +
  scale_colour_discrete(name = "Variable", label = c("Intervalle [Cu] en mg/kg ps", "Groupe de stations", "Régime trophique"))
#+ geom_text(label = c ("Chien contaminée", "Chien non contaminée", "NF contaminée", "NF non contaminée", "Trois Sauts", "Carnivore Piscivore", "Carnivore Invertivore", rownames(mca1_vars_df[8:14, ])))
pdf("Graph/Elements_traces/MCA_Cu.pdf", width = 14, height = 9)
print(MCA_Cu)
dev.off()
## Zn
## Muscle [Zn]: boxplots by station group / trophic guild, Kruskal-Wallis
## across station groups with Holm post-hoc letters, then an MCA.
# ggplot(sub_BDD_PME, aes(x = Groupe_station, y = Zn_ppm, color = Regime_principal)) +
# geom_boxplot() +
# scale_x_discrete(limits = c("Trois_Sauts", "Chien_non_conta", "Chien_conta", "NF_non_conta", "NF_conta"),
# labels = c("Trois Sauts", "Chien non contaminée", "Chien contaminée", "Nouvelle France non contaminée", "Nouvelle France contaminée")) +
# scale_color_discrete(name = "Régime trophique",
# labels = c("Carnivore", "Omnivore", "Détritivore", "Herbivore")) +
# ylab("[Zn] dans le muscle de poissons, en mg/kg de poids sec") +
# xlab("Groupe de stations") +
# ggtitle(expression(paste("[Zn] dans le muscle de poissons en fonction des groupes de stations et des régimes trophiques")))
Zn <- ggplot(sub_BDD_PME4, aes(x = Groupe_station, y = Zn_ppm, color = Regime_alter)) +
  geom_boxplot() +
  scale_x_discrete(limits = limit_groupes,
                   labels = label_groupes) +
  scale_color_manual(name = "Régime trophique", labels = c("Carnivore Piscivore", "Carnivore Invertivore", "Omnivore", "Herbivore"), values = color) +
  ylab("[Zn] dans le muscle de poissons, en mg/kg de poids sec") +
  xlab("Groupe de stations") +
  ggtitle(expression(paste("[Zn] dans le muscle de poissons en fonction des groupes de stations et des régimes trophiques")))
pdf("Graph/Elements_traces/Zn.pdf", width = 13, height = 9) # pdf() writes the figure straight to file
print(Zn)
dev.off()
# Kruskal-Wallis on [Zn] across station groups.
BD <- sub_BDD_PME
means <- aggregate(Zn_ppm ~ Groupe_station, BD, mean)
means$Zn_ppm <- round(means$Zn_ppm, digits = 2)
kruskal.test(Zn_ppm ~ Groupe_station, data = BD) # significant differences exist
comparison <- kruskal(BD$Zn_ppm, BD$Groupe_station, alpha = 0.05, p.adj = "holm")
posthoc <- comparison[['groups']]
posthoc$trt <- gsub(" ", "", posthoc$trt) # strip trailing spaces so the merge below matches
p0 <- ggplot(BD, aes(x = Groupe_station, y = Zn_ppm)) +
  geom_boxplot() +
  # group mean as a blue diamond; `fun` and `show.legend` replace the
  # deprecated `fun.y` / `show_guide` arguments (ggplot2 >= 3.3 / >= 2.0)
  stat_summary(fun = mean, colour = "blue", geom = "point",
               shape = 18, size = 3, show.legend = FALSE) #+
  #geom_text(data = means, aes(label = Zn_ppm, y = Zn_ppm + 0.08), color = "blue")
lettpos <- function(BD) boxplot(BD$Zn_ppm, plot = FALSE)$stats[5,] # y-position above the upper whisker
test <- ddply(BD, .(Groupe_station), lettpos) # one whisker position per station group
test_f <- merge(test, posthoc, by.x = "Groupe_station", by.y = "trt") # attach post-hoc letters
colnames(test_f)[2] <- "upper"
colnames(test_f)[4] <- "signif"
test_f$signif <- as.character(test_f$signif) # ensure the letters print as plain text
Zn <- p0 + geom_text(aes(Groupe_station, upper + 0.1, label = signif), size = 10, data = test_f, vjust = -2, color = "red") +
  scale_x_discrete(limits = limit_groupes,
                   labels = label_groupes) +
  theme_bw() +
  labs(y = "[Zn] dans le muscle de poissons, en mg/kg de poids sec", x = "Site", title = "[Zn] dans le muscle de poissons en fonction des groupes de stations")
pdf("Graph/Elements_traces/Zn_stations.pdf", width = 11, height = 7)
print(Zn)
dev.off()
## MCA
# NOTE(review): elt.trace() appears to be a helper defined earlier in the
# file that bins the element column and leaves an MCA ggplot in `p` — confirm.
Bd <- select(sub_BDD_PME4, Groupe_station, Regime_alter, Zn_ppm)
elt.trace('Zn_ppm')
MCA_Zn <- p + ggtitle("Analyse des correspondances multiples : Zn") +
  xlab("Dimension 1. 13,6 % de variance expliquée") +
  ylab("Dimension 2. 12 % de variance expliquée") +
  scale_colour_discrete(name = "Variable", label = c("Intervalle [Zn] en mg/kg ps", "Groupe de stations", "Régime trophique"))
pdf("Graph/Elements_traces/MCA_Zn.pdf", width = 14, height = 9)
print(MCA_Zn)
dev.off()
########
# MCA on Zn: bin the continuous [Zn] into quintile classes, run a multiple
# correspondence analysis on the three categorical variables (station group,
# trophic guild, [Zn] class), then cluster on the MCA components.
# NOTE(review): relies on `Bd` as left by the Zn section above.
Bd$elt_qual <- quantcut(Bd[,'Zn_ppm'], q = seq(0, 1, by = 0.2))
Bd$elt_qual <- as.factor(Bd$elt_qual)
Bd2 <- Bd[, -3] # drop the raw numeric column; MCA works on factors only
# Number of categories per variable (used to label MCA coordinates below).
# vapply() keeps each column's type, unlike apply() which coerces the whole
# data frame to a character matrix first.
cats <- vapply(Bd2, function(col) nlevels(as.factor(col)), integer(1))
mca1 <- MCA(Bd2)
# Data frame with variable (category) coordinates. At top level `<<-` and
# `<-` both assign in the global environment, so plain `<-` is used here.
mca1_vars_df <- data.frame(mca1$var$coord, Variable = rep(names(cats), cats))
# data frame with observation coordinates
mca1_obs_df <- data.frame(mca1$ind$coord)
# MCA plot of observations and categories
ggplot(data = mca1_obs_df, aes(x = Dim.1, y = Dim.2)) +
  geom_hline(yintercept = 0, colour = "gray70") +
  geom_vline(xintercept = 0, colour = "gray70") +
  geom_point(colour = "gray50", alpha = 0.7) +
  geom_density2d(colour = "gray80") +
  geom_text(data = mca1_vars_df,
            aes(x = Dim.1, y = Dim.2,
                label = rownames(mca1_vars_df), colour = Variable)) +
  scale_colour_discrete(name = "Variable")
# Clustering on principal components
# http://factominer.free.fr/classical-methods/hierarchical-clustering-on-principal-components.html
# Known issue noted by the original author: does not take the real data set into account (?)
res.hcpc <- HCPC(mca1, method = "ward.D2") # automatic cluster count; pass nb.clust to force one
res.hcpc$desc.var$test.chi2 # variables that best separate the clusters
res.hcpc$desc.var$category # composition of each cluster
res.hcpc$desc.ind # characteristic individuals per cluster, and those farthest from the other clusters
# TODO: how to display the clusters on the plot?
## As
# Boxplots of muscle [As] by station group, coloured by trophic guild,
# written straight to a PDF. Built incrementally for readability.
diet_guild_labels <- c("Carnivore Piscivore", "Carnivore Invertivore", "Omnivore", "Herbivore")
As <- ggplot(sub_BDD_PME4, aes(x = Groupe_station, y = As_ppm, color = Regime_alter))
As <- As + geom_boxplot()
As <- As + scale_x_discrete(limits = limit_groupes, labels = label_groupes)
As <- As + scale_color_manual(name = "Régime trophique", labels = diet_guild_labels, values = color)
As <- As + ylab("[As] dans le muscle de poissons, en mg/kg de poids sec")
As <- As + xlab("Groupe de stations")
As <- As + ggtitle(expression(paste("[As] dans le muscle de poissons en fonction des groupes de stations et des régimes trophiques")))
pdf("Graph/Elements_traces/As.pdf", width = 13, height = 9) # pdf() writes the figure straight to file
print(As)
dev.off()
## Co
## Muscle [Co]: boxplots by station group / trophic guild, Kruskal-Wallis
## across station groups with Holm post-hoc letters, then an MCA.
# ggplot(sub_BDD_PME, aes(x = Groupe_station, y = Co_ppm, color = Regime_principal)) +
# geom_boxplot() +
# scale_x_discrete(limits = c("Trois_Sauts", "Chien_non_conta", "Chien_conta", "NF_non_conta", "NF_conta"),
# labels = c("Trois Sauts", "Chien non contaminée", "Chien contaminée", "Nouvelle France non contaminée", "Nouvelle France contaminée")) +
# scale_color_discrete(name = "Régime trophique",
# labels = c("Carnivore", "Omnivore", "Détritivore", "Herbivore")) +
# ylab("[Co] dans le muscle de poissons, en mg/kg de poids sec") +
# xlab("Groupe de stations") +
# ggtitle(expression(paste("[Co] dans le muscle de poissons en fonction des groupes de stations et des régimes trophiques")))
Co <- ggplot(sub_BDD_PME4, aes(x = Groupe_station, y = Co_ppm, color = Regime_alter)) +
  geom_boxplot() +
  scale_x_discrete(limits = limit_groupes,
                   labels = label_groupes) +
  scale_color_manual(name = "Régime trophique", labels = c("Carnivore Piscivore", "Carnivore Invertivore", "Omnivore", "Herbivore"), values = color) +
  ylab("[Co] dans le muscle de poissons, en mg/kg de poids sec") +
  xlab("Groupe de stations") +
  ggtitle(expression(paste("[Co] dans le muscle de poissons en fonction des groupes de stations et des régimes trophiques")))
pdf("Graph/Elements_traces/Co.pdf", width = 13, height = 9) # pdf() writes the figure straight to file
print(Co)
dev.off()
# Kruskal-Wallis on [Co] across station groups.
BD <- sub_BDD_PME
means <- aggregate(Co_ppm ~ Groupe_station, BD, mean)
means$Co_ppm <- round(means$Co_ppm, digits = 2)
kruskal.test(Co_ppm ~ Groupe_station, data = BD) # significant differences exist
comparison <- kruskal(BD$Co_ppm, BD$Groupe_station, alpha = 0.05, p.adj = "holm")
posthoc <- comparison[['groups']]
posthoc$trt <- gsub(" ", "", posthoc$trt) # strip trailing spaces so the merge below matches
p0 <- ggplot(BD, aes(x = Groupe_station, y = Co_ppm)) +
  geom_boxplot() +
  # group mean as a blue diamond; `fun` and `show.legend` replace the
  # deprecated `fun.y` / `show_guide` arguments (ggplot2 >= 3.3 / >= 2.0)
  stat_summary(fun = mean, colour = "blue", geom = "point",
               shape = 18, size = 3, show.legend = FALSE) #+
  #geom_text(data = means, aes(label = Co_ppm, y = Co_ppm + 0.08), color = "blue")
lettpos <- function(BD) boxplot(BD$Co_ppm, plot = FALSE)$stats[5,] # y-position above the upper whisker
test <- ddply(BD, .(Groupe_station), lettpos) # one whisker position per station group
test_f <- merge(test, posthoc, by.x = "Groupe_station", by.y = "trt") # attach post-hoc letters
colnames(test_f)[2] <- "upper"
colnames(test_f)[4] <- "signif"
test_f$signif <- as.character(test_f$signif) # ensure the letters print as plain text
Co <- p0 + geom_text(aes(Groupe_station, upper + 0.1, label = signif), size = 10, data = test_f, vjust = -2, color = "red") +
  scale_x_discrete(limits = limit_groupes,
                   labels = label_groupes) +
  theme_bw() +
  labs(y = "[Co] dans le muscle de poissons, en mg/kg de poids sec", x = "Site", title = "[Co] dans le muscle de poissons en fonction des groupes de stations")
pdf("Graph/Elements_traces/Co_stations.pdf", width = 11, height = 7)
print(Co)
dev.off()
## MCA
# NOTE(review): elt.trace() appears to be a helper defined earlier in the
# file that bins the element column and leaves an MCA ggplot in `p` — confirm.
Bd <- select(sub_BDD_PME4, Groupe_station, Regime_alter, Co_ppm)
elt.trace('Co_ppm')
MCA_Co <- p + ggtitle("Analyse des correspondances multiples : Co") +
  xlab("Dimension 1. 16,9 % de variance expliquée") +
  ylab("Dimension 2. 14,1 % de variance expliquée") +
  scale_colour_discrete(name = "Variable", label = c("Intervalle [Co] en mg/kg ps", "Groupe de stations", "Régime trophique"))
pdf("Graph/Elements_traces/MCA_Co.pdf", width = 20, height = 9)
print(MCA_Co)
dev.off()
## Cd
## Muscle [Cd]: boxplots by station group / trophic guild with the 0.5 mg/kg
## reference line, then Kruskal-Wallis across station groups with Holm
## post-hoc letters. No MCA: no contamination signal for Cd.
# ggplot(sub_BDD_PME, aes(x = Groupe_station, y = Cd_ppm, color = Regime_principal)) +
# geom_boxplot() + geom_hline(aes(yintercept = 0.5)) +
# scale_x_discrete(limits = c("Trois_Sauts", "Chien_non_conta", "Chien_conta", "NF_non_conta", "NF_conta"),
# labels = c("Trois Sauts", "Chien non contaminée", "Chien contaminée", "Nouvelle France non contaminée", "Nouvelle France contaminée")) +
# scale_color_discrete(name = "Régime trophique",
# labels = c("Carnivore", "Omnivore", "Détritivore", "Herbivore")) +
# ylab("[Cd] dans le muscle de poissons, en mg/kg de poids sec") +
# xlab("Groupe de stations") +
# ggtitle(expression(paste("[Cd] dans le muscle de poissons en fonction des groupes de stations et des régimes trophiques")))
Cd <- ggplot(sub_BDD_PME4, aes(x = Groupe_station, y = Cd_ppm, color = Regime_alter)) +
  geom_boxplot() + geom_hline(aes(yintercept = 0.5)) +
  scale_x_discrete(limits = limit_groupes,
                   labels = label_groupes) +
  scale_color_manual(name = "Régime trophique", labels = c("Carnivore Piscivore", "Carnivore Invertivore", "Omnivore", "Herbivore"), values = color) +
  ylab("[Cd] dans le muscle de poissons, en mg/kg de poids sec") +
  xlab("Groupe de stations") +
  ggtitle(expression(paste("[Cd] dans le muscle de poissons en fonction des groupes de stations et des régimes trophiques")))
pdf("Graph/Elements_traces/Cd.pdf", width = 13, height = 9) # pdf() writes the figure straight to file
print(Cd)
dev.off()
# Kruskal-Wallis on [Cd] across station groups.
BD <- sub_BDD_PME
means <- aggregate(Cd_ppm ~ Groupe_station, BD, mean)
means$Cd_ppm <- round(means$Cd_ppm, digits = 2)
kruskal.test(Cd_ppm ~ Groupe_station, data = BD) # significant differences exist
comparison <- kruskal(BD$Cd_ppm, BD$Groupe_station, alpha = 0.05, p.adj = "holm")
posthoc <- comparison[['groups']]
posthoc$trt <- gsub(" ", "", posthoc$trt) # strip trailing spaces so the merge below matches
p0 <- ggplot(BD, aes(x = Groupe_station, y = Cd_ppm)) +
  geom_boxplot() +
  # group mean as a blue diamond; `fun` and `show.legend` replace the
  # deprecated `fun.y` / `show_guide` arguments (ggplot2 >= 3.3 / >= 2.0)
  stat_summary(fun = mean, colour = "blue", geom = "point",
               shape = 18, size = 3, show.legend = FALSE) #+
  #geom_text(data = means, aes(label = Cd_ppm, y = Cd_ppm + 0.08), color = "blue")
lettpos <- function(BD) boxplot(BD$Cd_ppm, plot = FALSE)$stats[5,] # y-position above the upper whisker
test <- ddply(BD, .(Groupe_station), lettpos) # one whisker position per station group
test_f <- merge(test, posthoc, by.x = "Groupe_station", by.y = "trt") # attach post-hoc letters
colnames(test_f)[2] <- "upper"
colnames(test_f)[4] <- "signif"
test_f$signif <- as.character(test_f$signif) # ensure the letters print as plain text
Cd <- p0 + geom_text(aes(Groupe_station, upper + 0.1, label = signif), size = 10, data = test_f, vjust = -2, color = "red") +
  scale_x_discrete(limits = limit_groupes,
                   labels = label_groupes) +
  theme_bw() +
  labs(y = "[Cd] dans le muscle de poissons, en mg/kg de poids sec", x = "Site", title = "[Cd] dans le muscle de poissons en fonction des groupes de stations")
pdf("Graph/Elements_traces/Cd_stations.pdf", width = 11, height = 7)
print(Cd)
dev.off()
## MCA
# Not useful here: no contamination signal for Cd
#Bd <- select(sub_BDD_PME4, Groupe_station, Regime_alter, Cd_ppm)
#elt.trace('Cd_ppm') + ggtitle("MCA plot of Cd")
## Pb
# Commented-out earlier variant of the plot (kept for reference):
# ggplot(sub_BDD_PME, aes(x = Groupe_station, y = Pb_ppm, color = Regime_principal)) +
#   geom_boxplot() + geom_hline(aes(yintercept = 1.5)) +
#   scale_x_discrete(limits = c("Trois_Sauts", "Chien_non_conta", "Chien_conta", "NF_non_conta", "NF_conta"),
#                    labels = c("Trois Sauts", "Chien non contaminée", "Chien contaminée", "Nouvelle France non contaminée", "Nouvelle France contaminée")) +
#   scale_color_discrete(name = "Régime trophique",
#                        labels = c("Carnivore", "Omnivore", "Détritivore", "Herbivore")) +
#   ylab("[Pb] dans le muscle de poissons, en mg/kg de poids sec") +
#   xlab("Groupe de stations") +
#   ggtitle(expression(paste("[Pb] dans le muscle de poissons en fonction des groupes de stations et des régimes trophiques")))
# Boxplot of muscle Pb by station group and trophic guild; the horizontal
# line at y = 2.5 mg/kg dry weight marks a reference threshold.
Pb <- ggplot(sub_BDD_PME4, aes(x = Groupe_station, y = Pb_ppm, color = Regime_alter)) +
  geom_boxplot() + geom_hline(aes(yintercept = 2.5)) +
  scale_x_discrete(limits = limit_groupes,
                   labels = label_groupes) +
  scale_color_manual(name = "Régime trophique", labels = c("Carnivore Piscivore", "Carnivore Invertivore", "Omnivore", "Herbivore"), values = color) +
  ylab("[Pb] dans le muscle de poissons, en mg/kg de poids sec") +
  xlab("Groupe de stations") +
  ggtitle(expression(paste("[Pb] dans le muscle de poissons en fonction des groupes de stations et des régimes trophiques")))
pdf("Graph/Elements_traces/Pb.pdf", width = 13, height = 9) # pdf() writes the figure straight to the Graph/ folder as a PDF
print(Pb)
dev.off()
# Kruskal-Wallis test of muscle Pb by station group, with agricolae post-hoc
# letters above the whiskers and per-group sample sizes printed below.
BD <- sub_BDD_PME
means <- aggregate(Pb_ppm ~ Groupe_station, BD, mean)
means$Pb_ppm <- round(means$Pb_ppm, digits = 2)
kruskal.test(Pb_ppm ~ Groupe_station, data = BD) # significant differences exist
comparison <- kruskal(BD$Pb_ppm, BD$Groupe_station, alpha = 0.05, p.adj = "holm")
posthoc <- comparison[['groups']]
# agricolae pads group names with trailing spaces; strip them so merge() matches.
posthoc$trt <- gsub(" ", "", posthoc$trt)
p0 <- ggplot(BD, aes(x = Groupe_station, y = Pb_ppm)) +
  geom_boxplot() +
  # `fun =` and `show.legend =` replace the deprecated `fun.y =` / `show_guide =`
  stat_summary(fun = mean, colour = "blue", geom = "point",
               shape = 18, size = 3, show.legend = FALSE) #+
  #geom_text(data = means, aes(label = Pb_ppm, y = Pb_ppm + 0.08), color = "blue")
# Upper-whisker height per group, used to place the significance letters.
lettpos <- function(BD) boxplot(BD$Pb_ppm, plot = FALSE)$stats[5, ]
test <- ddply(BD, .(Groupe_station), lettpos) # one whisker height per factor level
test_f <- merge(test, posthoc, by.x = "Groupe_station", by.y = "trt") # join heights with post-hoc letters
colnames(test_f)[2] <- "upper"
colnames(test_f)[4] <- "signif"
# Label builder printing "n = <group size>" at y = -0.1 under each box.
n_fun <- function(x){return(data.frame(y = -0.1, label = paste0("n = ", length(x))))}
test_f$signif <- as.character(test_f$signif) # make sure letters render as text
Pb <- p0 + geom_text(aes(Groupe_station, upper + 0.1, label = signif), size = 10, data = test_f, vjust = -2, color = "red") +
  scale_x_discrete(limits = limit_groupes,
                   labels = label_groupes) +
  theme_bw() +
  stat_summary(fun.data = n_fun, geom = "text") +
  geom_hline(aes(yintercept = 2.5), color = "red") +
  labs( y = "[Pb] dans le muscle de poissons, en mg/kg de poids sec", x = "Site", title = "[Pb] dans le muscle de poissons en fonction des groupes de stations")
pdf("Graph/Elements_traces/Pb_stations.pdf", width = 9, height = 5) # write figure to Graph/ as PDF
print(Pb)
dev.off()
## MCA
# Multiple correspondence analysis on Pb intervals x station group x diet.
# NOTE(review): `elt.trace()` appears to assign the MCA plot to a global `p`
# used on the next line — confirm against its definition earlier in the file.
Bd <- select(sub_BDD_PME4, Groupe_station, Regime_alter, Pb_ppm)
elt.trace('Pb_ppm')
MCA_Pb <- p + ggtitle("Analyse des correspondances multiples : Pb") +
  xlab("Dimension 1. 17,3 % de variance expliquée") +
  ylab("Dimension 2. 15,5 % de variance expliquée") +
  scale_colour_discrete(name = "Variable", label = c("Intervalle [Pb] en mg/kg ps", "Groupe de stations", "Régime trophique"))
pdf("Graph/Elements_traces/MCA_Pb.pdf", width = 14, height = 9)
print(MCA_Pb)
dev.off()
## Cr
# Commented-out earlier variant of the plot (kept for reference):
# ggplot(sub_BDD_PME, aes(x = Groupe_station, y = Cr_ppm, color = Regime_principal)) +
#   geom_boxplot() +
#   scale_x_discrete(limits = c("Trois_Sauts", "Chien_non_conta", "Chien_conta", "NF_non_conta", "NF_conta"),
#                    labels = c("Trois Sauts", "Chien non contaminée", "Chien contaminée", "Nouvelle France non contaminée", "Nouvelle France contaminée")) +
#   scale_color_discrete(name = "Régime trophique",
#                        labels = c("Carnivore", "Omnivore", "Détritivore", "Herbivore")) +
#   ylab("[Cr] dans le muscle de poissons, en mg/kg de poids sec") +
#   xlab("Groupe de stations") +
#   ggtitle(expression(paste("[Cr] dans le muscle de poissons en fonction des groupes de stations et des régimes trophiques")))
# Boxplot of muscle Cr by station group and trophic guild (no threshold line).
Cr <- ggplot(sub_BDD_PME4, aes(x = Groupe_station, y = Cr_ppm, color = Regime_alter)) +
  geom_boxplot()+
  scale_x_discrete(limits = limit_groupes,
                   labels = label_groupes) +
  scale_color_manual(name = "Régime trophique", labels = c("Carnivore Piscivore", "Carnivore Invertivore", "Omnivore", "Herbivore"), values = color) +
  ylab("[Cr] dans le muscle de poissons, en mg/kg de poids sec") +
  xlab("Groupe de stations") +
  ggtitle(expression(paste("[Cr] dans le muscle de poissons en fonction des groupes de stations et des régimes trophiques")))
pdf("Graph/Elements_traces/Cr.pdf", width = 13, height = 9) # pdf() writes the figure straight to the Graph/ folder as a PDF
print(Cr)
dev.off()
# Kruskal-Wallis test of muscle Cr, this time grouped by Site (not station
# group), with agricolae post-hoc letters and per-site sample sizes.
BD <- sub_BDD_PME
means <- aggregate(Cr_ppm ~ Site, BD, mean)
means$Cr_ppm <- round(means$Cr_ppm, digits = 2)
kruskal.test(Cr_ppm ~ Site, data = BD) # significant differences exist
comparison <- kruskal(BD$Cr_ppm, BD$Site, alpha = 0.05, p.adj = "holm")
posthoc <- comparison[['groups']]
# agricolae pads group names with trailing spaces; strip them so merge() matches.
posthoc$trt <- gsub(" ", "", posthoc$trt)
p0 <- ggplot(BD, aes(x = Site, y = Cr_ppm)) +
  geom_boxplot() +
  # `fun =` and `show.legend =` replace the deprecated `fun.y =` / `show_guide =`
  stat_summary(fun = mean, colour = "blue", geom = "point",
               shape = 18, size = 3, show.legend = FALSE) #+
  #geom_text(data = means, aes(label = Cr_ppm, y = Cr_ppm + 0.08), color = "blue")
# Upper-whisker height per site, used to place the significance letters.
lettpos <- function(BD) boxplot(BD$Cr_ppm, plot = FALSE)$stats[5, ]
test <- ddply(BD, .(Site), lettpos) # one whisker height per factor level
test_f <- merge(test, posthoc, by.x = "Site", by.y = "trt") # join heights with post-hoc letters
colnames(test_f)[2] <- "upper"
colnames(test_f)[4] <- "signif"
# Label builder printing "n = <group size>" at y = -0.2 under each box.
n_fun <- function(x){return(data.frame(y = -0.2, label = paste0("n = ", length(x))))}
test_f$signif <- as.character(test_f$signif) # make sure letters render as text
Cr <- p0 + geom_text(aes(Site, upper + 0.1, label = signif), size = 10, data = test_f, vjust = -2, color = "red") +
  scale_x_discrete(limits = c("Trois_Sauts", "Camopi", "Saul"),
                   labels = c("Trois Sauts", "Crique Chien", "Crique \nNouvelle France")) +
  theme_bw() +
  stat_summary(fun.data = n_fun, geom = "text") +
  labs( y = "[Cr] dans le muscle de poissons, en mg/kg de poids sec", x = "Site")
pdf("Graph/Elements_traces/Cr_stations2.pdf", width = 5, height = 5) # write figure to Graph/ as PDF
print(Cr)
dev.off()
## MCA
# Multiple correspondence analysis on Cr intervals x station group x diet.
# NOTE(review): `elt.trace()` appears to assign the MCA plot to a global `p`
# used on the next line — confirm against its definition earlier in the file.
Bd <- select(sub_BDD_PME4, Groupe_station, Regime_alter, Cr_ppm)
elt.trace('Cr_ppm')
MCA_Cr <- p + ggtitle("Analyse des correspondances multiples : Cr") +
  xlab("Dimension 1. 16,3 % de variance expliquée") +
  ylab("Dimension 2. 11,5 % de variance expliquée") +
  scale_colour_discrete(name = "Variable", label = c("Intervalle [Cr] en mg/kg ps", "Groupe de stations", "Régime trophique"))
pdf("Graph/Elements_traces/MCA_Cr.pdf", width = 20, height = 10)
print(MCA_Cr)
dev.off()
## Hg
# Plot by trophic guild and station (see the Cd/Pb sections above)
### MCA on Hg; rows with missing Hg are dropped before the analysis
Bd <- na.omit(select(sub_BDD_PME4, Groupe_station, Regime_alter, Hg_ppm))
elt.trace('Hg_ppm') + ggtitle("MCA plot of Hg")
|
39a31fe6123c71672ad3a201c67ba195f0196e22 | 8edfa79c1293f4941dc7308e3b40a845b2aab2ca | /plot3.R | b6a356976dda4c522e0c87ea87b07f7342c1a6f7 | [] | no_license | kublankhan75/ExData_Plotting1 | 1103061427d632aa986f7c666e288ebc654798e9 | 1f36e69e72eb836c47d0466452c4882b83aabfd4 | refs/heads/master | 2021-01-17T06:30:45.399472 | 2015-03-04T21:57:20 | 2015-03-04T21:57:20 | 31,571,667 | 0 | 0 | null | 2015-03-03T00:46:53 | 2015-03-03T00:46:53 | null | UTF-8 | R | false | false | 1,138 | r | plot3.R | # plot3.R
# plot3: line chart of the three energy sub-metering series for
# 2007-02-01/02, saved as plot3.png (480x480, transparent background).
# Check for prior existence of subsetted data (lets the plot scripts share
# one expensive read of the raw file within a session)
if(!exists("power.sub")){
  # Read the source data; "?" encodes missing values in this file
  power <- read.table("./data/household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")
  # Format Date and Time variables.  Use as.POSIXct rather than strptime:
  # strptime returns POSIXlt, which is a list-based class that misbehaves
  # when stored as a data-frame column; POSIXct is the safe choice.
  power$Date <- as.Date(power$Date, format = "%d/%m/%Y")
  power$DateTime <- as.POSIXct(paste(power$Date, power$Time), format = "%Y-%m-%d %H:%M:%S")
  # Subset data for only two days
  power.sub <- subset(power, Date == "2007-02-01" | Date == "2007-02-02")
}
# Create line chart and save PNG file
png("plot3.png", width = 480, height = 480, pointsize = 12, bg = "transparent")
plot(power.sub$DateTime, power.sub$Sub_metering_1, type = "l", ylab = "Energy sub metering", xlab = "")
lines(power.sub$DateTime, power.sub$Sub_metering_2, col = "red")
lines(power.sub$DateTime, power.sub$Sub_metering_3, col = "blue")
# Columns 7:9 of the raw file are Sub_metering_1..3; use their names as legend
leg <- colnames(power.sub)[7:9]
colors <- c("black", "red", "blue")
legend(x = "topright", legend = leg, lwd = 1, col = colors)
dev.off()
407e5555409930fabd75bc77786c739b4ad278c9 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/natural/examples/olasso_cv.Rd.R | 30c32c6d5b33bade854c3b0e539febd0b96b8a08 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 274 | r | olasso_cv.Rd.R | library(natural)
### Name: olasso_cv
### Title: Cross-validation for organic lasso
### Aliases: olasso_cv

### ** Examples

# Simulate one sparse regression data set and cross-validate the organic
# lasso on it (make_sparse_model / olasso_cv come from the `natural` package).
set.seed(123)
sim <- make_sparse_model(n = 50, p = 200, alpha = 0.6, rho = 0.6, snr = 2, nsim = 1)
ol_cv <- olasso_cv(x = sim$x, y = sim$y[, 1])
1930216b4b50a575fbbc147497c9ca4c793bd980 | 9262e777f0812773af7c841cd582a63f92d398a4 | /man/coef_marssMLE.Rd | 1ee9cdac612d02674ef39da4dfc254ac3da240c6 | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | nwfsc-timeseries/MARSS | f0124f9ba414a28ecac1f50c4596caaab796fdd2 | a9d662e880cb6d003ddfbd32d2e1231d132c3b7e | refs/heads/master | 2023-06-07T11:50:43.479197 | 2023-06-02T19:20:17 | 2023-06-02T19:20:17 | 438,764,790 | 1 | 2 | NOASSERTION | 2023-06-02T19:17:41 | 2021-12-15T20:32:14 | R | UTF-8 | R | false | false | 2,922 | rd | coef_marssMLE.Rd | \name{coef.marssMLE}
\alias{coef.marssMLE}
\keyword{coremethods}
\title{ Coefficient function for MARSS MLE objects }
\description{
\code{\link{MARSS}()} outputs \code{\link{marssMLE}} objects. \code{coef(object)}, where \code{object} is the output from a \code{\link{MARSS}()} call, will print out the estimated parameters. The default output is a list with values for each parameter, however the output can be altered using the \code{type} argument to output a vector of all the estimated values (\code{type="vector"}) or a list with the full parameter matrix with the estimated and fixed elements (\code{type="matrix"}). For a summary of the parameter estimates with CIs from the estimated Hessian, use \code{tidy(object)}.
}
\usage{
\method{coef}{marssMLE}(object, ..., type = "list", form = NULL, what = "par")
}
\arguments{
\item{object}{ A \code{\link{marssMLE}} object. }
\item{...}{ Other arguments. Not used. }
\item{type}{ What to output. Default is "list". Options are
\itemize{
    \item{ "list" }{ A list of only the estimated values in each matrix. Each model matrix has its own list element.}
\item{ "vector" }{ A vector of all the estimated values in each matrix. }
    \item{ "matrix" }{ A list of the full parameter matrices, with fixed values at their fixed values and the estimated values at their estimated values. Time-varying parameters, including d and c in a marxss form model, are returned as an array with time in the 3rd dimension. }
\item{ parameter name }{ Returns the parameter matrix for that parameter with fixed values at their fixed values and the estimated values at their estimated values. Note, time-varying parameters, including d and c in a marxss form model, are returned as an array with time in the 3rd dimension.}
} }
\item{form}{ This argument can be ignored. By default, the model form specified in the call to \code{\link{MARSS}()} is used to determine how to display the coefficients. This information is in \code{ attr(object$model,"form") }. The default form is \code{"marxss"}; see \code{\link{MARSS.marxss}()}. However, the internal functions convert this to form \code{"marss"}; see \code{\link{MARSS.marss}()}. The marss form of the model is stored (in \code{object$marss}). You can look at the coefficients in marss form by passing in \code{form="marss"}. }
\item{what}{ By default, \code{coef()} shows the parameter estimates. Other options are "par.se", "par.lowCI", "par.upCI", "par.bias", and "start".}
}
\value{
A list of the estimated parameters for each model matrix.
}
\author{
Eli Holmes, NOAA, Seattle, USA.
}
\seealso{
\code{\link[=tidy.marssMLE]{tidy}()}, \code{\link[=print.marssMLE]{print}()}
}
\examples{
dat <- t(harborSeal)
dat <- dat[c(2, 11), ]
fit <- MARSS(dat)
coef(fit)
coef(fit, type = "vector")
coef(fit, type = "matrix")
# to retrieve just the Q matrix
coef(fit, type = "matrix")$Q
} |
d6507c6eb961ff26e2eb000e5a7cbcf8ecce9c30 | 3def32e587e51ac218c0882426cd9277ed568c49 | /tests/testthat/test-parser-missing-sps.R | edfecce4e61387e7006805b631adb8758e2028d6 | [
"MIT"
] | permissive | jacobkap/asciiSetupReader | e94d5f6003a42c8c8411b9b19aec7e67ae84f980 | 8a2620be788b109b7b72327cc23b6b73f7e1ef43 | refs/heads/master | 2023-07-25T14:18:09.646809 | 2023-07-05T19:45:30 | 2023-07-05T19:45:30 | 92,419,358 | 7 | 7 | null | 2017-11-30T00:52:18 | 2017-05-25T15:49:57 | Scheme | UTF-8 | R | false | false | 35,993 | r | test-parser-missing-sps.R | context("test-parser-missing-sps")
# Smoke test: for each fixture, the parsed $missing table has the expected
# number of rows, or is NULL when the setup file declares no missing values.
test_that("Right number of missing values", {
  expect_equal(nrow(sac_parsed_sps$missing), 261)
  expect_equal(nrow(sex_offender_parsed_sps$missing), 18)
  expect_true(is.null(ucr1960_parsed_sps$missing))
  expect_equal(nrow(weimar_parsed_sps$missing), 19)
  expect_true(is.null(acs_parsed_sps$missing))
  expect_true(is.null(nibrs_parsed_sps$missing))
  expect_equal(nrow(parole_parsed_sps$missing), 89)
  expect_equal(nrow(prisoners_parsed_sps$missing), 1800)
  expect_true(is.null(ca_vital_parsed_sps$missing))
  expect_equal(nrow(crosswalk_parsed_sps$missing), 12)
  expect_equal(nrow(ucr1985_parsed_sps$missing), 166)
  expect_equal(nrow(ucr1986_parsed_sps$missing), 160)
  expect_true(is.null(ucr2000_parsed_sps$missing))
  expect_equal(nrow(ncvs_parsed_sps$missing), 80)
  expect_true(is.null(jail_1987_parsed_sps$missing))
  expect_equal(nrow(jail_2010_parsed_sps$missing), 60)
  expect_equal(nrow(corrections_parsed_sps$missing), 7)
  expect_equal(nrow(sadc_parsed_sps$missing), 312)
  expect_true(is.null(well_being_parsed_sps$missing))
  #expect_true(is.null(escolar_parsed_sps$missing))
  expect_true(is.null(health_nutrition_parsed_sps$missing))
  expect_true(is.null(ad_health_parsed_sps$missing))
  expect_equal(nrow(india_human_parsed_sps$missing), 1)
  expect_equal(nrow(census_police_parsed_sps$missing), 74)
  expect_equal(nrow(step_in_parsed_sps$missing), 2)
  expect_equal(nrow(cps_1973_parsed_sps$missing), 235)
  expect_true(is.null(cps_2004_parsed_sps$missing))
  expect_equal(nrow(drug_abuse_parsed_sps$missing), 424)
  expect_equal(nrow(british_crime_teen_parsed_sps$missing), 1119)
  expect_true(is.null(detroit_parsed_sps$missing))
  expect_true(is.null(worry_parsed_sps$missing))
  expect_equal(nrow(cambridge_parsed_sps$missing), 1068)
  expect_true(is.null(guam_parsed_sps$missing))
  expect_true(is.null(china_2002_parsed_sps$missing))
  expect_true(is.null(china_1995_parsed_sps$missing))
  expect_equal(nrow(china_1998_parsed_sps$missing), 47)
  expect_true(is.null(indonesia_parsed_sps$missing))
  expect_equal(nrow(UN_crime_parsed_sps$missing), 357)
  expect_equal(nrow(county_arrest_parsed_sps$missing), 13)
  expect_true(is.null(escolar_2006_parsed_sps$missing))
  expect_true(is.null(mtf_1999_parsed_sps$missing))
  expect_equal(nrow(mtf_2003_parsed_sps$missing), 108)
  expect_equal(nrow(mtf_1990_parsed_sps$missing), 206)
  expect_equal(nrow(mtf_1989_parsed_sps$missing), 224)
  expect_equal(nrow(mtf_2004_parsed_sps$missing), 230)
  expect_equal(nrow(mtf_2002_parsed_sps$missing), 108)
  expect_equal(nrow(mtf_1993_parsed_sps$missing), 206)
  expect_equal(nrow(mtf_1991_parsed_sps$missing), 206)
  expect_equal(nrow(mtf_1992_parsed_sps$missing), 206)
  expect_equal(nrow(mtf_1979_parsed_sps$missing), 544)
})
# CA SEDD 2005: all four missing-value codes belong to the same variable.
test_that("CA SEDD 2005 has right missing values", {
  miss <- ca_sedd_2005_ahal_parsed_sps$missing
  expect_equal(miss$variable, rep("HOSPSTCO", 4))
  expect_equal(miss$values, c("-9999", "-8888", "-6666", "-5555"))
  expect_equal(unique(miss$variable), "HOSPSTCO")
})
# Setup file using MISSING VALUE (no trailing S): spot-check the first and
# last rows of the parsed missing table, including "THRU HIGHEST" ranges.
test_that("missing_value_no_s_parsed_sps has right missing values", {
  expect_equal(head(missing_value_no_s_parsed_sps$missing$variable),
               c("V10", "V12R1", "V12R1",
                 "V12R2", "V12R2", "V12R3"))
  expect_equal(head(missing_value_no_s_parsed_sps$missing$values),
               c("999 THRU HIGHEST", "0", "96 THRU HIGHEST",
                 "0", "96 THRU HIGHEST", "0"))
  expect_equal(tail(missing_value_no_s_parsed_sps$missing$variable),
               c("V4018", "V4019", "V4020",
                 "V4021", "V4022", "V4023"))
  expect_equal(tail(missing_value_no_s_parsed_sps$missing$values),
               c("99 THRU HIGHEST", "99 THRU HIGHEST", "0",
                 "0", "0", "0"))
  expect_equal(head(unique(missing_value_no_s_parsed_sps$missing$variable)),
               c("V10", "V12R1", "V12R2",
                 "V12R3", "V13R1", "V13R2"))
  expect_equal(tail(unique(missing_value_no_s_parsed_sps$missing$variable)),
               c("V4018", "V4019", "V4020",
                 "V4021", "V4022", "V4023"))
})
# Cambridge fixture: head/tail spot-checks plus a lookup of one specific
# variable's (V747) full set of missing codes.
test_that("Cambridge has right missing values", {
  expect_equal(head(cambridge_parsed_sps$missing$variable),
               c("V7", "V8", "V10",
                 "V11", "V12", "V12"))
  expect_equal(head(cambridge_parsed_sps$missing$values),
               c("0", "0", "0",
                 "0", "0", "9"))
  expect_equal(tail(cambridge_parsed_sps$missing$variable),
               c("V876", "V877", "V877",
                 "V878", "V879", "V880"))
  expect_equal(tail(cambridge_parsed_sps$missing$values),
               c("98", "0", "98",
                 "0", "0", "0"))
  expect_equal(cambridge_parsed_sps$missing$values[cambridge_parsed_sps$missing$variable == "V747"],
               c("6", "8", "9"))
  expect_equal(head(unique(cambridge_parsed_sps$missing$variable)),
               c("V7", "V8", "V10",
                 "V11", "V12", "V20"))
  expect_equal(tail(unique(cambridge_parsed_sps$missing$variable)),
               c("V875", "V876", "V877",
                 "V878", "V879", "V880"))
})
# China 1998 fixture: head/tail spot-checks of variables and values.
test_that("China 1998 has right missing values", {
  expect_equal(head(china_1998_parsed_sps$missing$variable),
               c("RELATION", "GENDER", "AGE",
                 "STUDENT", "INCOME88", "RESIDENC"))
  expect_equal(head(china_1998_parsed_sps$missing$values),
               c("9", "9", "999",
                 "9", "9", "9"))
  expect_equal(tail(china_1998_parsed_sps$missing$variable),
               c("IT07T", "IT07M", "IT07E",
                 "IT08T", "IT08M", "IT08E"))
  expect_equal(tail(china_1998_parsed_sps$missing$values),
               c("9", "99999", "99999",
                 "9", "99999", "99999"))
  expect_equal(head(unique(china_1998_parsed_sps$missing$variable)),
               c("RELATION", "GENDER", "AGE",
                 "STUDENT", "INCOME88", "RESIDENC"))
  expect_equal(tail(unique(china_1998_parsed_sps$missing$variable)),
               c("IT07T", "IT07M", "IT07E",
                 "IT08T", "IT08M", "IT08E"))
})
# UN Crime fixture: every variable carries the same three negative codes.
test_that("UN Crime has right missing values", {
  expect_equal(head(UN_crime_parsed_sps$missing$variable),
               c("NNHOM70N", "NNHOM70N", "NNHOM70N",
                 "NNHOM71N", "NNHOM71N", "NNHOM71N"))
  expect_equal(head(UN_crime_parsed_sps$missing$values),
               c("-2", "-3", "-9",
                 "-2", "-3", "-9"))
  expect_equal(tail(UN_crime_parsed_sps$missing$variable),
               c("X5", "X5", "X5",
                 "X6", "X6", "X6"))
  expect_equal(tail(UN_crime_parsed_sps$missing$values),
               c("-2", "-3", "-9",
                 "-2", "-3", "-9"))
  expect_equal(head(unique(UN_crime_parsed_sps$missing$variable)),
               c("NNHOM70N", "NNHOM71N", "NNHOM72N",
                 "NNHOM73N", "NNHOM74N", "NNHOM75N"))
  expect_equal(tail(unique(UN_crime_parsed_sps$missing$variable)),
               c("PSTF745", "X2", "X3",
                 "X4", "X5", "X6"))
})
# County arrest fixture: one all-9s sentinel per variable, width varies.
test_that("County arrest has right missing values", {
  expect_equal(head(county_arrest_parsed_sps$missing$variable),
               c("V7", "V8", "V9",
                 "V10", "V11", "V12"))
  expect_equal(head(county_arrest_parsed_sps$missing$values),
               c("9999999", "9999999", "999999",
                 "99999", "99999", "9999"))
  expect_equal(tail(county_arrest_parsed_sps$missing$variable),
               c("V14", "V15", "V16",
                 "V17", "V18", "V19"))
  expect_equal(tail(county_arrest_parsed_sps$missing$values),
               c("99999", "99999", "99999",
                 "99999", "99999", "9999"))
  expect_equal(head(unique(county_arrest_parsed_sps$missing$variable)),
               c("V7", "V8", "V9",
                 "V10", "V11", "V12"))
  expect_equal(tail(unique(county_arrest_parsed_sps$missing$variable)),
               c("V14", "V15", "V16",
                 "V17", "V18", "V19"))
})
# British Crime Survey (teen) fixture: -7/-8/-9 codes on every variable.
test_that("British Crime Teen has right missing values", {
  expect_equal(head(british_crime_teen_parsed_sps$missing$variable),
               c("TB_CASE", "TB_CASE", "TB_CASE",
                 "AR_CODE", "AR_CODE", "AR_CODE"))
  expect_equal(tail(british_crime_teen_parsed_sps$missing$variable),
               c("T73", "T73", "T73",
                 "T74", "T74", "T74"))
  expect_equal(head(british_crime_teen_parsed_sps$missing$values),
               c("-7", "-8", "-9",
                 "-7", "-8", "-9"))
  expect_equal(tail(british_crime_teen_parsed_sps$missing$values),
               c("-7", "-8", "-9",
                 "-7", "-8", "-9"))
  expect_equal(head(unique(british_crime_teen_parsed_sps$missing$variable)),
               c("TB_CASE", "AR_CODE", "T_SN",
                 "T_SCRN", "BOOSTER", "CARD_28"))
  expect_equal(tail(unique(british_crime_teen_parsed_sps$missing$variable)),
               c("T69", "T70", "T71",
                 "T72", "T73", "T74"))
})
# Drug Abuse fixture: mixes integer (-9) and decimal (-9.0, -5.0) codes.
test_that("Drug Abuse has right missing values", {
  expect_equal(head(drug_abuse_parsed_sps$missing$variable),
               c("ID", "RESPCODE", "SITEID",
                 "DATE", "DEGREE", "YEAR_DEG"))
  expect_equal(tail(drug_abuse_parsed_sps$missing$variable),
               c("DOCLEAD", "EOTDIV", "EOTTOL",
                 "EOTSCO", "EOTOPN", "EOTOPN"))
  expect_equal(head(drug_abuse_parsed_sps$missing$values),
               c("-9", "-9", "-9",
                 "-9", "-9", "-9"))
  expect_equal(tail(drug_abuse_parsed_sps$missing$values),
               c("-9.0", "-9.0", "-9.0",
                 "-9", "-9.0", "-5.0"))
  expect_equal(head(unique(drug_abuse_parsed_sps$missing$variable)),
               c("ID", "RESPCODE", "SITEID",
                 "DATE", "DEGREE", "YEAR_DEG"))
  expect_equal(tail(unique(drug_abuse_parsed_sps$missing$variable)),
               c("DOCSUP", "DOCLEAD", "EOTDIV",
                 "EOTTOL", "EOTSCO", "EOTOPN"))
})
# Step In fixture: exactly two declarations, both using the -99 code.
test_that("Step In has right missing values", {
  miss <- step_in_parsed_sps$missing
  expect_equal(miss$variable, c("NR_DAYS", "CHARGE"))
  expect_equal(miss$values, rep("-99", 2))
})
# CPS 1973 fixture: zero-padded and negative sentinel codes.
test_that("CPS 1973 has right missing values", {
  expect_equal(head(cps_1973_parsed_sps$missing$variable),
               c("V1013", "V1014", "V1020",
                 "V1021", "V1022", "V1029"))
  expect_equal(tail(cps_1973_parsed_sps$missing$variable),
               c("V1261", "V1262", "V1263",
                 "V1264", "V1265", "V1266"))
  expect_equal(head(cps_1973_parsed_sps$missing$values),
               c("0000000", "0000000", "0000000",
                 "0000000", "0000000", "0000000"))
  expect_equal(tail(cps_1973_parsed_sps$missing$values),
               c("-999999", "-999999", "-999999",
                 "-999999", "-999999", "-999999"))
  expect_equal(head(unique(cps_1973_parsed_sps$missing$variable)),
               c("V1013", "V1014", "V1020",
                 "V1021", "V1022", "V1029"))
  expect_equal(tail(unique(cps_1973_parsed_sps$missing$variable)),
               c("V1261", "V1262", "V1263",
                 "V1264", "V1265", "V1266"))
})
# Census of police agencies fixture: 888 codes for subtypes, -9 elsewhere.
test_that("Census Police has right missing values", {
  expect_equal(head(census_police_parsed_sps$missing$variable),
               c("SUBTYPE1", "SUBTYPE2", "Q1A1",
                 "Q1A2", "Q1A3", "Q1A4"))
  expect_equal(tail(census_police_parsed_sps$missing$variable),
               c("Q6E", "Q6F", "Q6G",
                 "Q6H", "Q6I", "Q6_TOT"))
  expect_equal(head(census_police_parsed_sps$missing$values),
               c("888", "888", "-9",
                 "-9", "-9", "-9"))
  expect_equal(tail(census_police_parsed_sps$missing$values),
               c("-9", "-9", "-9",
                 "-9", "-9", "-9"))
  expect_equal(head(unique(census_police_parsed_sps$missing$variable)),
               c("SUBTYPE1", "SUBTYPE2", "Q1A1",
                 "Q1A2", "Q1A3", "Q1A4"))
  expect_equal(tail(unique(census_police_parsed_sps$missing$variable)),
               c("Q6E", "Q6F", "Q6G",
                 "Q6H", "Q6I", "Q6_TOT"))
})
# India human-development fixture: a single missing-value rule (MB21B -> 8).
test_that("India human has right missing values", {
  expect_equal(india_human_parsed_sps$missing$variable,
               "MB21B")
  # The table is known to have one row (see the row-count test above), so
  # compare the whole column directly; the original wrapped it in tail(),
  # which on a longer-than-expected vector would have masked extra rows.
  expect_equal(india_human_parsed_sps$missing$values,
               "8")
})
# Sacramento fixture: head/tail spot-checks of variables and values.
test_that("Sac has right missing values", {
  expect_equal(head(sac_parsed_sps$missing$variable),
               c("TODDATYR", "DATSTAR", "CONSTATE",
                 "Q3JETH", "Q5JSUPDP", "Q6JVIC"))
  expect_equal(head(sac_parsed_sps$missing$values),
               c("9999", "888888", "9",
                 "9", "9", "9"))
  expect_equal(tail(sac_parsed_sps$missing$variable),
               c("Q126PN3", "Q126OTH3", "KAGE",
                 "VERDICT", "DURAT", "DURAT2"))
  expect_equal(tail(sac_parsed_sps$missing$values),
               c("9", "99", "9",
                 "9", "99", "9"))
  expect_equal(head(unique(sac_parsed_sps$missing$variable)),
               c("TODDATYR", "DATSTAR", "CONSTATE",
                 "Q3JETH", "Q5JSUPDP", "Q6JVIC"))
  expect_equal(tail(unique(sac_parsed_sps$missing$variable)),
               c("Q126PN3", "Q126OTH3", "KAGE",
                 "VERDICT", "DURAT", "DURAT2"))
})
# SADC fixture: variables are listed but every missing code is an empty string.
test_that("SADC has right missing values", {
  expect_equal(head(sadc_parsed_sps$missing$variable),
               c("sitecode", "sitetypenum", "year",
                 "survyear", "weight", "stratum"))
  expect_equal(head(sadc_parsed_sps$missing$values),
               c("", "", "",
                 "", "", ""))
  expect_equal(tail(sadc_parsed_sps$missing$variable),
               c("qnsunburn", "qnconcentrating", "qncurrentasthma",
                 "qnwheresleep", "qnspeakenglish", "qntransgender"))
  expect_equal(tail(sadc_parsed_sps$missing$values),
               c("", "", "",
                 "", "", ""))
  expect_equal(head(unique(sadc_parsed_sps$missing$variable)),
               c("sitecode", "sitetypenum", "year",
                 "survyear", "weight", "stratum"))
  expect_equal(tail(unique(sadc_parsed_sps$missing$variable)),
               c("qnsunburn", "qnconcentrating", "qncurrentasthma",
                 "qnwheresleep", "qnspeakenglish", "qntransgender"))
})
# Sex-offender fixture: head/tail spot-checks of variables and values.
test_that("Sex offender has right missing values", {
  expect_equal(head(sex_offender_parsed_sps$missing$variable),
               c("DATE", "Q1", "Q3",
                 "Q4", "Q5", "Q7"))
  expect_equal(head(sex_offender_parsed_sps$missing$values),
               c("8888888", "99", "99",
                 "99", "99", "99"))
  expect_equal(tail(sex_offender_parsed_sps$missing$variable),
               c("Q9E", "Q9F", "Q9G",
                 "Q10", "INDEX", "NEWQ9G"))
  expect_equal(tail(sex_offender_parsed_sps$missing$values),
               c("99", "99", "99",
                 "99", "9", "9"))
  expect_equal(head(unique(sex_offender_parsed_sps$missing$variable)),
               c("DATE", "Q1", "Q3",
                 "Q4", "Q5", "Q7"))
  expect_equal(tail(unique(sex_offender_parsed_sps$missing$variable)),
               c("Q9E", "Q9F", "Q9G",
                 "Q10", "INDEX", "NEWQ9G"))
})
# Weimar fixture: note some codes end in a literal "." ("999999.").
test_that("Weimar has right missing values", {
  expect_equal(head(weimar_parsed_sps$missing$variable),
               c("V5", "V6", "V7",
                 "V8", "V9", "V10"))
  expect_equal(head(weimar_parsed_sps$missing$values),
               c("9999999", "9999999", "999999.",
                 "9999999", "9999999", "9999999"))
  expect_equal(tail(weimar_parsed_sps$missing$variable),
               c("V18", "V19", "V20",
                 "V21", "V22", "V23"))
  expect_equal(tail(weimar_parsed_sps$missing$values),
               c("9999999", "999999.", "9999999",
                 "999999.", "9999999", "999999."))
  expect_equal(head(unique(weimar_parsed_sps$missing$variable)),
               c("V5", "V6", "V7",
                 "V8", "V9", "V10"))
  expect_equal(tail(unique(weimar_parsed_sps$missing$variable)),
               c("V18", "V19", "V20",
                 "V21", "V22", "V23"))
})
# Parole fixture: mixes numeric codes (-8/-9) with labelled codes (DK/NA).
test_that("Parole has right missing values", {
  expect_equal(head(parole_parsed_sps$missing$variable),
               c("TOTBEG", "TOTBEG", "ENDISREL",
                 "ENDISREL", "ENMANREL", "ENMANREL"))
  expect_equal(head(parole_parsed_sps$missing$values),
               c("-8", "-9", "-8" ,
                 "-9", "-8", "-9"))
  expect_equal(tail(parole_parsed_sps$missing$variable),
               c("BOOTIN", "LOCJAIL", "LOCJAILIN",
                 "LOCJAILIN", "OTHPAR", "ENDOFYEAR"))
  expect_equal(tail(parole_parsed_sps$missing$values),
               c("NA", "DK", "DK",
                 "NA", "DK", "DK"))
  expect_equal(head(unique(parole_parsed_sps$missing$variable)),
               c("TOTBEG", "ENDISREL", "ENMANREL",
                 "ENREINST", "OTHEN", "TOTEN"))
  expect_equal(tail(unique(parole_parsed_sps$missing$variable)),
               c("BOOTNUM", "BOOTIN", "LOCJAIL",
                 "LOCJAILIN", "OTHPAR", "ENDOFYEAR"))
})
# Prisoners fixture: each variable repeats the full -9..-1 code sequence.
test_that("Prisoners has right missing values", {
  expect_equal(head(prisoners_parsed_sps$missing$variable),
               c("YEAR", "YEAR", "YEAR",
                 "YEAR", "YEAR", "YEAR"))
  expect_equal(head(prisoners_parsed_sps$missing$values),
               c("-9", "-8", "-7",
                 "-6", "-5", "-4"))
  expect_equal(tail(prisoners_parsed_sps$missing$variable),
               c("HANDLEF", "HANDLEF", "HANDLEF",
                 "HANDLEF", "HANDLEF", "HANDLEF"))
  expect_equal(tail(prisoners_parsed_sps$missing$values),
               c("-6", "-5", "-4",
                 "-3", "-2", "-1"))
  expect_equal(head(unique(prisoners_parsed_sps$missing$variable)),
               c("YEAR", "STATEID", "REGION",
                 "CUSGT1M", "CUSGT1F", "CUSLT1M"))
  expect_equal(tail(unique(prisoners_parsed_sps$missing$variable)),
               c("DTHOTHM", "DTHOTHF", "DTHTOTM",
                 "DTHTOTF", "HANDLEM", "HANDLEF"))
})
# Crosswalk fixture: one code per variable, including an empty-string code.
test_that("Crosswalk has right missing values", {
  expect_equal(head(crosswalk_parsed_sps$missing$variable),
               c("UORI", "UCOUNTY", "UMSA",
                 "UPOPGRP", "UADD5", "CGOVIDNU"))
  expect_equal(head(crosswalk_parsed_sps$missing$values),
               c("", "999", "999",
                 "", "99999", "999999999"))
  expect_equal(tail(crosswalk_parsed_sps$missing$variable),
               c("CGOVTYPE", "FSTATE", "FCOUNTY",
                 "FPLACE", "FMSA", "FCMSA"))
  expect_equal(tail(crosswalk_parsed_sps$missing$values),
               c("99", "99", "999",
                 "999999", "9999", "999"))
  expect_equal(head(unique(crosswalk_parsed_sps$missing$variable)),
               c("UORI", "UCOUNTY", "UMSA",
                 "UPOPGRP", "UADD5", "CGOVIDNU"))
  expect_equal(tail(unique(crosswalk_parsed_sps$missing$variable)),
               c("CGOVTYPE", "FSTATE", "FCOUNTY",
                 "FPLACE", "FMSA", "FCMSA"))
})
# UCR 1985 fixture: some variables carry several codes (e.g. V11: 0, 99999).
test_that("UCR 1985 has right missing values", {
  expect_equal(head(ucr1985_parsed_sps$missing$variable),
               c("V5", "V8", "V10",
                 "V11", "V11", "V12"))
  expect_equal(head(ucr1985_parsed_sps$missing$values),
               c("99", "99", "99",
                 "0", "99999", "0"))
  expect_equal(tail(ucr1985_parsed_sps$missing$variable),
               c("V169", "V170", "V171",
                 "V172", "V173", "V174"))
  expect_equal(tail(ucr1985_parsed_sps$missing$values),
               c("0", "0", "0",
                 "0", "0", "0"))
  expect_equal(head(unique(ucr1985_parsed_sps$missing$variable)),
               c("V5", "V8", "V10",
                 "V11", "V12", "V13"))
  expect_equal(tail(unique(ucr1985_parsed_sps$missing$variable)),
               c("V169", "V170", "V171",
                 "V172", "V173", "V174"))
})
# UCR 1986 fixture: seven-digit zero-padded codes, unlike the 1985 file.
test_that("UCR 1986 has right missing values", {
  expect_equal(head(ucr1986_parsed_sps$missing$variable),
               c("V4", "V5", "V7",
                 "V8", "V9", "V10"))
  expect_equal(head(ucr1986_parsed_sps$missing$values),
               c("0000000", "0000099", "0000000",
                 "0000099", "0000000", "0000099"))
  expect_equal(tail(ucr1986_parsed_sps$missing$variable),
               c("V169", "V170", "V171",
                 "V172", "V173", "V174"))
  expect_equal(tail(ucr1986_parsed_sps$missing$values),
               c("0000000", "0000000", "0000000",
                 "0000000", "0000000", "0000000"))
  expect_equal(head(unique(ucr1986_parsed_sps$missing$variable)),
               c("V4", "V5", "V7",
                 "V8", "V9", "V10"))
  expect_equal(tail(unique(ucr1986_parsed_sps$missing$variable)),
               c("V169", "V170", "V171",
                 "V172", "V173", "V174"))
})
test_that("Jail 2010 has right missing values", {
  # Work directly on the parsed missing-value table.
  miss <- jail_2010_parsed_sps$missing
  expect_equal(head(miss$variable),
               c("NONCITZF", "WEEK", "CONVII10A", "CONVII10AF", "UNCONVII10A", "UNCONVII10AF"))
  expect_equal(head(miss$values), rep("-9", 6))
  expect_equal(tail(miss$variable),
               c("STOLENPROP", "STOLENPROPF", "ESCAPE", "ESCAPEF", "OTHERMAJVIO", "OTHERMAJVIOF"))
  expect_equal(tail(miss$values), rep("-9", 6))
  expect_equal(head(unique(miss$variable)),
               c("NONCITZF", "WEEK", "CONVII10A", "CONVII10AF", "UNCONVII10A", "UNCONVII10AF"))
  expect_equal(tail(unique(miss$variable)),
               c("STOLENPROP", "STOLENPROPF", "ESCAPE", "ESCAPEF", "OTHERMAJVIO", "OTHERMAJVIOF"))
})
test_that("Corrections has right missing values", {
  miss <- corrections_parsed_sps$missing
  expect_equal(head(miss$variable),
               c("EDUCATION", "ADMTYPE", "OFFGENERAL", "SENTLGTH", "OFFDETAIL", "RACE"))
  expect_equal(head(miss$values),
               c("9", "9", "9", "9", "99", "9"))
  expect_equal(tail(miss$variable),
               c("ADMTYPE", "OFFGENERAL", "SENTLGTH", "OFFDETAIL", "RACE", "AGEADMIT"))
  expect_equal(tail(miss$values),
               c("9", "9", "9", "99", "9", "9"))
  expect_equal(head(unique(miss$variable)),
               c("EDUCATION", "ADMTYPE", "OFFGENERAL", "SENTLGTH", "OFFDETAIL", "RACE"))
  expect_equal(tail(unique(miss$variable)),
               c("ADMTYPE", "OFFGENERAL", "SENTLGTH", "OFFDETAIL", "RACE", "AGEADMIT"))
})
test_that("NCVS 1979 has right missing values", {
  miss <- ncvs_parsed_sps$missing
  expect_equal(head(miss$variable),
               c("V2009", "V2010", "V2012", "V2014", "V2016", "V2018"))
  expect_equal(head(miss$values),
               c("9998 thru highest", "8 thru highest", "98 thru highest",
                 "8 thru highest", "8 thru highest", "8 thru highest"))
  expect_equal(tail(miss$variable),
               c("V3048", "V3049", "V3050", "V3051", "V3052", "V3053"))
  expect_equal(tail(miss$values), rep("98 thru highest", 6))
  expect_equal(head(unique(miss$variable)),
               c("V2009", "V2010", "V2012", "V2014", "V2016", "V2018"))
  expect_equal(tail(unique(miss$variable)),
               c("V3048", "V3049", "V3050", "V3051", "V3052", "V3053"))
})
test_that("SHR 1988 has right missing values", {
  miss <- SHR1988_parsed_sps$missing
  expect_equal(head(miss$variable),
               c("V12", "V13", "V25", "V26", "V27", "V28"))
  expect_equal(head(miss$values),
               c("0", "0", "0", "9", "9", "9"))
  expect_equal(tail(miss$variable),
               c("V154", "V154", "V155", "V155", "V156", "V156"))
  expect_equal(tail(miss$values),
               c("98", "99", "98", "99", "8 THRU HI", "7"))
  expect_equal(head(unique(miss$variable)),
               c("V12", "V13", "V25", "V26", "V27", "V28"))
  expect_equal(tail(unique(miss$variable)),
               c("V151", "V152", "V153", "V154", "V155", "V156"))
})
test_that("SHR 1987 has right missing values", {
  # FIX: the fifth expectation previously repeated head(...$variable) verbatim;
  # it now checks head(unique(...)) to mirror the parallel SHR 1988 test. The
  # expected values are unchanged (the first six variables are distinct).
  expect_equal(head(SHR1987_parsed_sps$missing$variable),
               c("V12", "V13", "V25",
                 "V26", "V27", "V28"))
  expect_equal(head(SHR1987_parsed_sps$missing$values),
               c("0", "0", "0",
                 "9", "9", "9"))
  expect_equal(tail(SHR1987_parsed_sps$missing$variable),
               c("V154", "V154", "V155",
                 "V155", "V156", "V156"))
  expect_equal(tail(SHR1987_parsed_sps$missing$values),
               c("98", "99", "98",
                 "99", "8 THRU HI", "7"))
  expect_equal(head(unique(SHR1987_parsed_sps$missing$variable)),
               c("V12", "V13", "V25",
                 "V26", "V27", "V28"))
  expect_equal(tail(unique(SHR1987_parsed_sps$missing$variable)),
               c("V151", "V152", "V153",
                 "V154", "V155", "V156"))
})
test_that("UCR 1985 has right missing values", {
  # NOTE: this test duplicates the name of an earlier "UCR 1985" test in this
  # file. FIX: the final expectation previously omitted unique(), unlike the
  # matching expectation in the earlier copy; it now checks tail(unique(...)).
  # The expected values are unchanged (the last six variables are distinct).
  expect_equal(head(ucr1985_parsed_sps$missing$variable),
               c("V5", "V8", "V10",
                 "V11", "V11", "V12"))
  expect_equal(head(ucr1985_parsed_sps$missing$values),
               c("99", "99", "99",
                 "0", "99999", "0"))
  expect_equal(tail(ucr1985_parsed_sps$missing$variable),
               c("V169", "V170", "V171",
                 "V172", "V173", "V174"))
  expect_equal(tail(ucr1985_parsed_sps$missing$values),
               c("0", "0", "0",
                 "0", "0", "0"))
  expect_equal(head(unique(ucr1985_parsed_sps$missing$variable)),
               c("V5", "V8", "V10",
                 "V11", "V12", "V13"))
  expect_equal(tail(unique(ucr1985_parsed_sps$missing$variable)),
               c("V169", "V170", "V171",
                 "V172", "V173", "V174"))
})
test_that("Dutch election has right missing values", {
  # Work directly on the parsed missing-value table.
  miss <- dutch_election_parsed_sps$missing
  expect_equal(head(miss$variable),
               c("V6", "V6", "V7", "V7", "V8", "V10"))
  expect_equal(head(miss$values),
               c("0000009 THRU HI", "0000000", "0000009 THRU HI",
                 "0000000", "0000000", "0011499 THRU HI"))
  expect_equal(tail(miss$variable),
               c("V763", "V763", "V764", "V764", "V765", "V765"))
  expect_equal(tail(miss$values),
               c("0000098 THRU HI", "0000000", "0000009 THRU HI",
                 "0000000", "0000009 THRU HI", "0000000"))
  expect_equal(head(unique(miss$variable)),
               c("V6", "V7", "V8", "V10", "V30", "V31"))
  expect_equal(tail(unique(miss$variable)),
               c("V760", "V761", "V762", "V763", "V764", "V765"))
})
test_that("Monitoring the Future 2003 has right missing values", {
  miss <- mtf_2003_parsed_sps$missing
  expect_equal(head(miss$variable),
               c("CASEID", "V13", "V16", "V17", "V5", "V1"))
  expect_equal(head(miss$values), rep("-9", 6))
  expect_equal(tail(miss$variable),
               c("V112", "V113", "V114", "V205", "V206", "V207"))
  expect_equal(tail(miss$values), rep("-9", 6))
  expect_equal(head(unique(miss$variable)),
               c("CASEID", "V13", "V16", "V17", "V5", "V1"))
  expect_equal(tail(unique(miss$variable)),
               c("V112", "V113", "V114", "V205", "V206", "V207"))
})
test_that("Monitoring the Future 1990 has right missing values", {
  miss <- mtf_1990_parsed_sps$missing
  expect_equal(head(miss$variable),
               c("V1", "V3", "V4", "V4", "V5", "V5"))
  expect_equal(head(miss$values),
               c("99", "9", "99999", "99999 THRU HIGHEST", "0", "9"))
  expect_equal(tail(miss$variable),
               c("V145", "V145", "V146", "V146", "V147", "V147"))
  expect_equal(tail(miss$values),
               c("0", "9", "0", "9", "0", "9"))
  expect_equal(head(unique(miss$variable)),
               c("V1", "V3", "V4", "V5", "V13", "V16"))
  expect_equal(tail(unique(miss$variable)),
               c("V142", "V143", "V144", "V145", "V146", "V147"))
})
test_that("Monitoring the Future 1989 has right missing values", {
  miss <- mtf_1989_parsed_sps$missing
  expect_equal(head(miss$variable),
               c("V1", "V1", "V3", "V3", "V4", "V4"))
  expect_equal(head(miss$values),
               c("0000099", "0000099 THRU HIGHEST", "0000009",
                 "0000009 THRU HIGHEST", "0099999", "0099999 THRU HIGHEST"))
  expect_equal(tail(miss$variable),
               c("V205", "V205", "V206", "V206", "V207", "V207"))
  expect_equal(tail(miss$values),
               c("0000000", "0000008 THRU HIGHEST", "0000000",
                 "0000008 THRU HIGHEST", "0000000", "0000008 THRU HIGHEST"))
  expect_equal(head(unique(miss$variable)),
               c("V1", "V3", "V4", "V5", "V13", "V16"))
  expect_equal(tail(unique(miss$variable)),
               c("V202", "V203", "V204", "V205", "V206", "V207"))
})
test_that("Monitoring the Future 2004 has right missing values", {
  miss <- mtf_2004_parsed_sps$missing
  expect_equal(head(miss$variable),
               c("V1", "V1", "V3", "V3", "V4", "V4"))
  expect_equal(head(miss$values),
               c("99 THRU HI", "99", "9 THRU HI", "9", "99999 THRU HI", "99999"))
  expect_equal(tail(miss$variable),
               c("V9001", "V9001", "V9002", "V9002", "V9003", "V9003"))
  expect_equal(tail(miss$values),
               c("9998 THRU HI", "9999", "8 THRU HI", "9", "8 THRU HI", "9"))
  expect_equal(head(unique(miss$variable)),
               c("V1", "V3", "V4", "V5", "V13", "V16"))
  expect_equal(tail(unique(miss$variable)),
               c("V205", "V206", "V207", "V9001", "V9002", "V9003"))
})
test_that("Monitoring the Future 2002 has right missing values", {
  # Work directly on the parsed missing-value table.
  miss <- mtf_2002_parsed_sps$missing
  expect_equal(head(miss$variable),
               c("V13", "V16", "V17", "V5", "V1", "V3"))
  expect_equal(head(miss$values), rep("-9", 6))
  expect_equal(tail(miss$variable),
               c("V113", "V114", "V205", "V206", "V207", "CASEID"))
  expect_equal(tail(miss$values), rep("-9", 6))
  expect_equal(head(unique(miss$variable)),
               c("V13", "V16", "V17", "V5", "V1", "V3"))
  expect_equal(tail(unique(miss$variable)),
               c("V113", "V114", "V205", "V206", "V207", "CASEID"))
})
test_that("Monitoring the Future 1993 has right missing values", {
  miss <- mtf_1993_parsed_sps$missing
  expect_equal(head(miss$variable),
               c("V1", "V3", "V4", "V4", "V5", "V5"))
  expect_equal(head(miss$values),
               c("99", "9", "99999", "99999 THRU HIGHEST", "0", "9"))
  expect_equal(tail(miss$variable),
               c("V145", "V145", "V146", "V146", "V147", "V147"))
  expect_equal(tail(miss$values),
               c("0", "9", "0", "9", "0", "9"))
  expect_equal(head(unique(miss$variable)),
               c("V1", "V3", "V4", "V5", "V13", "V16"))
  expect_equal(tail(unique(miss$variable)),
               c("V142", "V143", "V144", "V145", "V146", "V147"))
})
test_that("Monitoring the Future 1991 has right missing values", {
  miss <- mtf_1991_parsed_sps$missing
  expect_equal(head(miss$variable),
               c("V1", "V3", "V4", "V4", "V5", "V5"))
  expect_equal(head(miss$values),
               c("99", "9", "99999", "99999 THRU HIGHEST", "0", "9"))
  expect_equal(tail(miss$variable),
               c("V145", "V145", "V146", "V146", "V147", "V147"))
  expect_equal(tail(miss$values),
               c("0", "9", "0", "9", "0", "9"))
  expect_equal(head(unique(miss$variable)),
               c("V1", "V3", "V4", "V5", "V13", "V16"))
  expect_equal(tail(unique(miss$variable)),
               c("V142", "V143", "V144", "V145", "V146", "V147"))
})
test_that("Monitoring the Future 1992 has right missing values", {
  miss <- mtf_1992_parsed_sps$missing
  expect_equal(head(miss$variable),
               c("V1", "V3", "V4", "V4", "V5", "V5"))
  expect_equal(head(miss$values),
               c("99", "9", "99999", "99999 THRU HIGHEST", "0", "9"))
  expect_equal(tail(miss$variable),
               c("V145", "V145", "V146", "V146", "V147", "V147"))
  expect_equal(tail(miss$values),
               c("0", "9", "0", "9", "0", "9"))
  expect_equal(head(unique(miss$variable)),
               c("V1", "V3", "V4", "V5", "V13", "V16"))
  expect_equal(tail(unique(miss$variable)),
               c("V142", "V143", "V144", "V145", "V146", "V147"))
})
test_that("Monitoring the Future 1979 has right missing values", {
  miss <- mtf_1979_parsed_sps$missing
  expect_equal(head(miss$variable),
               c("V5", "V5", "V13", "V16", "V17", "V4101"))
  expect_equal(head(miss$values),
               c("9 THRU HI", "0", "9 THRU HI", "9 THRU HI", "9 THRU HI", "9 THRU HI"))
  expect_equal(tail(miss$variable),
               c("V4382", "V4382", "V4383", "V4383", "V4384", "V4384"))
  expect_equal(tail(miss$values),
               c("9 THRU HI", "0", "9 THRU HI", "0", "9 THRU HI", "0"))
  expect_equal(head(unique(miss$variable)),
               c("V5", "V13", "V16", "V17", "V4101", "V4102"))
  expect_equal(tail(unique(miss$variable)),
               c("V4379", "V4380", "V4381", "V4382", "V4383", "V4384"))
})
|
e4db3202a7c8997c9775410f038a64209cf8a389 | d9c32002215d0dd67a68d99586c7daafdfb385dc | /R/MCGHD.R | f30c8f5a865db6ef43fc5c59bfafb1da9c4c9c1c | [] | no_license | cran/MixGHD | 1d88b7a2236e2767b21d807289d8c75f2613a2c5 | 15cc4f93efde88be56fdb41ccc3c466983d8c2e5 | refs/heads/master | 2022-07-04T05:40:25.297469 | 2022-05-11T10:50:07 | 2022-05-11T10:50:07 | 22,120,018 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,244 | r | MCGHD.R | MainMCGHD=function(data=NULL, gpar0=NULL, G=2, max.iter=100, eps=1e-2, label=NULL, method="km",nr=NULL){
# --- Body of MainMCGHD (signature on the preceding line): one EM fit of a
# G-component MCGHD model; returns parameters, memberships and criteria.
# Number of variables; used in the parameter counts for BIC/AIC/AIC3 below.
pcol=ncol(data)
# Semi-supervised case: some observations carry known class labels.
if(!is.null(label)){
# Row g of lc = column means of the observations with label == g; used to
# seed the component locations.
lc=apply(data[label==1,],2,mean)
# if(min(label)==0&max(label)==G){
for(i in 2:G){
lc=rbind(lc,apply(data[label==i,],2,mean))
}#}
# Initial membership weights: uniform 1/G, overridden by the known labels.
z = combinewk(weights=matrix(1/G,nrow=nrow(data),ncol=G), label=label)
if (is.null(gpar0)) gpar = rgparC(data=data, g=G, w=z,l=lc)
else{ gpar = gpar0
# User-supplied start: derive each component's eigen decomposition
# (gam = eigenvectors, phi = eigenvalues) from the supplied sigma.
for(i in 1:G){
gpar[[i]]$gam = eigen( gpar0[[i]]$sigma)$vectors
gpar[[i]]$phi = eigen( gpar0[[i]]$sigma)$values}
}
}
else{
# Unsupervised case: initialization controlled by `method` and `nr`.
if (is.null(gpar0)) gpar = rmgpar(g=G,p=ncol(data),data=data, method=method,nr=nr)
else{ gpar = gpar0
for(i in 1:G){
gpar[[i]]$gam = eigen( gpar0[[i]]$sigma)$vectors
gpar[[i]]$phi = eigen( gpar0[[i]]$sigma)$values}
}
}
# EM iterations: three warm-up steps are always run, then iteration continues
# until the log-likelihood criterion getall() drops to eps or max.iter is hit.
loglik = numeric(max.iter)
for (i in 1:3) {
gpar = EMgrstep(data=data, gpar=gpar, v=1, label = label,it=i)
loglik[i] = llik(data, gpar)
}
while ( ( getall(loglik[1:i]) > eps) & (i < (max.iter) ) ) {
i = i+1
gpar = EMgrstep(data=data, gpar=gpar, v=1, label = label,it=i)
loglik[i] = llik(data, gpar)
}
# Drop the unused tail of the preallocated log-likelihood vector.
if(i<max.iter){loglik=loglik[-(i+1:max.iter)]}
# Information criteria. Free-parameter count as coded:
# (G-1) mixing weights + G*(4*p + p*(p-1)/2) component parameters
# -- TODO confirm this count against the MixGHD reference.
BIC=2*loglik[i]-log(nrow(data))*((G-1)+G*(4*pcol+pcol*(pcol-1)/2))
# Posterior membership probabilities and MAP classification.
z=weightsMS(data=data, gpar= gpar)
map=MAPMS(data=data, gpar= gpar, label=label)
ICL=BIC+2*sum(log(apply(z,1,max)))
AIC=2*loglik[i]-2*((G-1)+G*(4*pcol+pcol*(pcol-1)/2))
AIC3=2*loglik[i]-3*((G-1)+G*(4*pcol+pcol*(pcol-1)/2))
# Back-transform the fitted parameters to the reported parameterization.
par=partrue(gpar,G)
val = list(loglik= loglik, gpar=gpar, z=z, map=map,par=par, BIC=BIC,ICL=ICL,AIC=AIC,AIC3=AIC3)
return(val)
}
# Fit mixtures of coalesced generalized hyperbolic distributions (MCGHD) for
# every number of components in `G` and keep the best fit according to a model
# selection criterion.
#
# Arguments:
#   data     - matrix or data frame of observations (coerced with as.matrix).
#   gpar0    - optional list of starting parameters, forwarded to MainMCGHD.
#   G        - vector of candidate numbers of components (e.g. 2:5).
#   max.iter - maximum number of EM iterations per fit.
#   eps      - convergence tolerance on the log-likelihood.
#   label    - optional vector of known class labels for semi-supervised fits.
#   method   - initialization method forwarded to MainMCGHD (default "km").
#   scale    - if TRUE, the data are standardized with scale() before fitting.
#   nr       - number of starts forwarded to MainMCGHD.
#   modelSel - selection criterion: "BIC", "ICL" or "AIC3"; any other value
#              (including the default "AIC") selects by AIC, as before.
#
# Returns a MixGHD object for the winning model; its Index slot holds the
# chosen criterion's value for every G (NA where that fit failed).
MCGHD <- function(data=NULL, gpar0=NULL, G=2, max.iter=100, eps=1e-2, label=NULL, method="km", scale=TRUE, nr=10, modelSel="AIC") {
  data = as.matrix(data)
  if (scale == TRUE) {
    data = scale(data)
  }
  # Basic input validation (unchanged from the original behavior).
  if (is.null(data)) stop('data is null')
  if (nrow(data) == 1) stop('nrow(data) is equal to 1')
  if (any(is.na(data))) stop('No NAs allowed.')
  if (is.null(G)) stop('G is NULL')
  if (max.iter < 1) stop('max.iter is not a positive integer')
  # Resolve the selection criterion; unrecognized values fall back to AIC,
  # mirroring the original if/else chain.
  crit <- if (modelSel %in% c("BIC", "ICL", "AIC3")) modelSel else "AIC"
  # One fit per candidate G. Each fit is wrapped in try() so a single failing
  # G does not abort the whole search -- previously the AIC branch lacked this
  # protection, unlike the BIC/ICL/AIC3 branches.
  crit.all <- matrix(NA, length(G), 1)
  best.crit <- -Inf
  sg <- NA
  model <- NULL
  for (b in seq_along(G)) {
    mo <- try(MainMCGHD(data = data, gpar0 = gpar0, G = G[b], max.iter, eps, label, method, nr = nr), silent = TRUE)
    if (is.list(mo)) {
      crit.all[b] <- mo[[crit]]
      if (mo[[crit]] > best.crit) {
        best.crit <- mo[[crit]]
        sg <- G[b]
        model <- mo
      }
    }
  }
  # Previously a failure of every fit surfaced as "object 'model' not found";
  # report it explicitly instead.
  if (is.null(model)) {
    stop("No MCGHD model could be fitted for any value of G.")
  }
  val <- MixGHD(Index = crit.all, AIC = model$AIC, AIC3 = model$AIC3, BIC = model$BIC, ICL = model$ICL,
                map = model$map, gpar = model$gpar, loglik = model$loglik, z = model$z,
                par = model$par, method = "MCGHD", data = as.data.frame(data), scale = scale)
  cat("The best model (", crit, ") for the range of components used is G = ", sg,
      ".\nThe ", crit, " for this model is ", best.crit, ".", sep = "")
  return(val)
}
|
4628df7b3afe6bc8efc95185200d84e201b38b7f | 47c5a1669bfc7483e3a7ad49809ba75d5bfc382e | /man/getKnownS3generics.Rd | ce69764c546c531024299f717446d2b4b76f4b2d | [] | no_license | tdhock/inlinedocs | 3ea8d46ece49cc9153b4cdea3a39d05de9861d1f | 3519557c0f9ae79ff45a64835206845df7042072 | refs/heads/master | 2023-09-04T11:03:59.266286 | 2023-08-29T23:06:34 | 2023-08-29T23:06:34 | 20,446,785 | 2 | 2 | null | 2019-08-21T19:58:23 | 2014-06-03T14:50:10 | R | UTF-8 | R | false | false | 365 | rd | getKnownS3generics.Rd | \name{getKnownS3generics}
\alias{getKnownS3generics}
\title{getKnownS3generics}
\description{Copied from R-3.0.1, to support getKnownS3generics.}
\usage{getKnownS3generics()}
\author{Toby Dylan Hocking <toby.hocking@r-project.org> [aut, cre], Keith Ponting [aut], Thomas Wutzler [aut], Philippe Grosjean [aut], Markus Müller [aut], R Core Team [ctb, cph]}
|
44a2fc4ba29a833f96d291b038d74e9f00e5bec9 | fc5a777ea963c978ad82ab3e4050d0c66dfe4df8 | /data-raw/md.human.custom.R | 8d6dba7c915b3d14a73554870e7739992578bff3 | [] | no_license | mdozmorov/msigdf | e17379951ca71e65c1cad0c9f397c3a825a639cb | 72c8778b9978bbe60666b3abe827b6ddc44a3c64 | refs/heads/master | 2021-01-19T22:20:44.604682 | 2017-01-17T21:16:48 | 2017-01-17T21:16:48 | 65,923,906 | 0 | 0 | null | 2016-08-17T16:29:26 | 2016-08-17T16:29:26 | null | UTF-8 | R | false | false | 3,633 | r | md.human.custom.R | options(stringsAsFactors = FALSE)
library(annotables)
### Create annotation dataframe
# First column - general category. Second - signature. Third - EntrezIDs
# Empty three-column skeleton; per-signature data frames are built below and
# combined at the end with rbind().
md.human.custom <- data.frame(collection = vector(mode = "character"), geneset = vector(mode = "character"), entrez = vector(mode = "character"))
### PAM50
# NOTE(review): absolute local path -- the script is not portable as written.
mtx <- read.table("/Users/mdozmorov/Documents/Work/GenomeRunner/gwas2bed/tumorportal/data/pam50_centroids.txt")
# Gene symbols are the row names of the centroid matrix.
genes.symbol <- unique(rownames(mtx))
# Symbols absent from annotables::grch38 (inspected to decide the manual
# reassignments below -- presumably updated HGNC symbols; confirm).
genes.notmap <- setdiff(genes.symbol, grch38$symbol)
genes.symbol[ genes.symbol == "CDCA1" ] <- "NUF2"
genes.symbol[ genes.symbol == "KNTC2" ] <- "NDC80"
genes.symbol[ genes.symbol == "ORC6L" ] <- "ORC6"
# Map the (corrected) symbols to Entrez IDs via grch38.
genes.entrez <- unique(grch38$entrez[ grch38$symbol %in% genes.symbol ])
md.human.custom.pam50 <- data.frame(collection = "cancer", geneset = "PAM50", entrez = sort(genes.entrez))
# Drop rows with an NA Entrez ID (complete.cases over the three columns).
md.human.custom.pam50 <- md.human.custom.pam50[ complete.cases(md.human.custom.pam50), ]
### ADME core. Source: [PharmaADME.org](http://pharmaadme.org/)
mtx <- read.table("/Users/mdozmorov/Documents/Work/GenomeRunner/gwas2bed/genes/data/ADME_core.txt")
# Symbols are in the first column (V1) of the gene list file.
genes.symbol <- unique(mtx$V1)
genes.notmap <- setdiff(genes.symbol, grch38$symbol)
genes.entrez <- unique(grch38$entrez[ grch38$symbol %in% genes.symbol ])
md.human.custom.ADMEcore <- data.frame(collection = "pharmacology", geneset = "ADMEcore", entrez = sort(genes.entrez))
md.human.custom.ADMEcore <- md.human.custom.ADMEcore[ complete.cases(md.human.custom.ADMEcore), ]
### ADME extended. Source: [PharmaADME.org](http://pharmaadme.org/)
mtx <- read.table("/Users/mdozmorov/Documents/Work/GenomeRunner/gwas2bed/genes/data/ADME_extended.txt")
genes.symbol <- unique(mtx$V1)
genes.notmap <- setdiff(genes.symbol, grch38$symbol)
# Manual symbol reassignments for entries not found in grch38.
genes.symbol[ genes.symbol == "SULT1C1" ] <- "SULT1C2"
genes.symbol[ genes.symbol == "NOS2A" ] <- "NOS2"
genes.symbol[ genes.symbol == "CYP2D7P1" ] <- "CYP2D7"
genes.entrez <- unique(grch38$entrez[ grch38$symbol %in% genes.symbol ])
md.human.custom.ADMEextended <- data.frame(collection = "pharmacology", geneset = "ADMEextended", entrez = sort(genes.entrez))
md.human.custom.ADMEextended <- md.human.custom.ADMEextended[ complete.cases(md.human.custom.ADMEextended), ]
### ADME related. Source: [PharmaADME.org](http://pharmaadme.org/)
mtx <- read.table("/Users/mdozmorov/Documents/Work/GenomeRunner/gwas2bed/genes/data/ADME_related.txt")
genes.symbol <- unique(mtx$V1)
genes.notmap <- setdiff(genes.symbol, grch38$symbol)
# Manual symbol reassignments for entries not found in grch38.
genes.symbol[ genes.symbol == "ABP1" ] <- "AOC1"
genes.symbol[ genes.symbol == "ARS2" ] <- "SRRT"
genes.symbol[ genes.symbol == "BHLHB5" ] <- "BHLHE22"
genes.symbol[ genes.symbol == "C3orf15" ] <- "MAATS1"
genes.symbol[ genes.symbol == "CCBP2" ] <- "ACKR2"
genes.symbol[ genes.symbol == "CREBL1" ] <- "ATF6B"
genes.symbol[ genes.symbol == "FAM82A" ] <- "RMDN2"
genes.symbol[ genes.symbol == "HNRPA3" ] <- "HNRNPA3"
genes.entrez <- unique(grch38$entrez[ grch38$symbol %in% genes.symbol ])
md.human.custom.ADMErelated <- data.frame(collection = "pharmacology", geneset = "ADMErelated", entrez = sort(genes.entrez))
md.human.custom.ADMErelated <- md.human.custom.ADMErelated[ complete.cases(md.human.custom.ADMErelated), ]
### Age-associated genes, Enrichr
# mtx <- read.table("http://amp.pharm.mssm.edu/Enrichr/geneSetLibrary?mode=text&libraryName=Aging_Perturbations_from_GEO_up")
### Append data
# Combine the signature blocks and export for the package's data/ directory.
md.human.custom <- rbind(md.human.custom.pam50,
                         md.human.custom.ADMEcore,
                         md.human.custom.ADMEextended,
                         md.human.custom.ADMErelated)
save(md.human.custom, file = "data/md.human.custom.rda")
|
9d888b3bcff4f15e7eca6b221229bdc4177bfe14 | 848cd95fd8cbe16f0c5141ff3f3f45abd9deb88b | /man/design.M.Rd | 9a4cf9f3157e905c1b45904cd8f4f1a503eab1c5 | [] | no_license | UMN-BarleyOatSilphium/GSSimTPUpdate | 51d02af36449150f2dd3806572b92619a05636b2 | 17aa82dfa8936b5ea286ab555ac90e65e6c018a1 | refs/heads/master | 2021-05-01T16:39:19.674734 | 2017-04-21T15:25:57 | 2017-04-21T15:25:57 | 58,579,569 | 2 | 2 | null | null | null | null | UTF-8 | R | false | true | 664 | rd | design.M.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/convenience_functions.R
\name{design.M}
\alias{design.M}
\title{M matrix}
\usage{
design.M(n)
}
\arguments{
\item{n}{The number of rows of matrix X}
}
\description{
\description{Creates an M matrix, or the orthogonal projector
}
\references{
Rincent, R., Laloe, D., Nicolas, S., Altmann, T., Brunel, D., Revilla, P.,
Moreau, L. (2012). Maximizing the Reliability of Genomic Selection by
Optimizing the Calibration Set of Reference Individuals: Comparison of
Methods in Two Diverse Groups of Maize Inbreds (Zea mays L.). Genetics,
192(2), 715–728. http://doi.org/10.1534/genetics.112.141473
}
|
2f17d29003e0a20d251e42ca908cc036ae588922 | 590c3f28f3fb89e0cde3abd9cd45ebe927c91a4b | /CommonMind/matrixeqtl.R | f318ff4aeb5108eb6fefa7557129371944288d67 | [] | no_license | ComputationalBiology-CS-CU/Gene-Expression-Analyses | a4f469650ee0c7e9dc89475dde7b705d6c83d974 | 110663c3f8d0128b6e0641588166a0b642f30a15 | refs/heads/master | 2016-08-13T01:45:57.093219 | 2016-01-27T18:33:34 | 2016-01-27T18:33:34 | 50,528,839 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,460 | r | matrixeqtl.R | #### Script adapted from tutorial on matrixEQTL website. FDR filter added
# R script to conduct cis eQTL analysis with MatrixEQTL package
# Necessary to format the input files according to specifications
# given in the website: http://www.bios.unc.edu/research/genomic_software/Matrix_eQTL/
# Argument parameters for script call are as follows:
# 1) genotype
# 2) snp location
# 3) gene expression
# 4) gene location
# 5) Output file name (cis eQTLs)
# 6) covariates (optional)
# 7) Output file name (trans eQTLs; written at the end of the script)
#
# Below are a list of set parameters that can be changed accordingly
args <- commandArgs(TRUE)
# NOTE(review): testModel is never used below; useModel is set to modelLINEAR directly.
testModel = "linear" # Can be "ANOVA", or linear_cross as well
cis_pValueThres = 1 # Set to 0 to ignore SNP and gene locations
trans_pValueThresh = 0 # Set to 0 for cis eQTL only
cisDist = 1e6 # Distance to include cis SNPs
# FDR cutoff applied to the eQTL tables before writing them out.
fdrThreshold = 0.01
# Load package
library("MatrixEQTL")
# Set Model
useModel = modelLINEAR
# Load genotype data
# (space-delimited; the first 5 columns are skipped as annotation).
snps = SlicedData$new();
snps$fileDelimiter = " "
snps$fileOmitCharacters = "NA"
snps$fileSkipRows = 0
snps$fileSkipColumns = 5
snps$fileSliceSize = 2000
snps$LoadFile(args[1])
# Load the gene expression data
gene = SlicedData$new()
gene$fileDelimiter = "\t"
gene$fileOmitCharacters = "NA"
gene$fileSkipRows = 1
gene$fileSkipColumns = 1
gene$fileSliceSize = 2000
gene$LoadFile(args[3])
# Load the covariates data
cvrt = SlicedData$new()
cvrt$fileDelimiter = "\t"
cvrt$fileOmitCharacters = "NA"
cvrt$fileSkipRows = 1
cvrt$fileSkipColumns = 1
cvrt$fileSliceSize = 2000
cvrt$LoadFile(args[6])
# SNP and gene genomic positions, used for the cis/trans split.
snpspos = read.table(args[2], header=TRUE, stringsAsFactors = FALSE)
genepos = read.table(args[4], header=TRUE, stringsAsFactors = FALSE)
# Run the analysis. pvOutputThreshold is the trans threshold (0 here, so only
# cis tests are reported); pvOutputThreshold.cis is the cis threshold.
me = Matrix_eQTL_main(
snps = snps,
gene = gene,
cvrt = cvrt,
output_file_name = NULL,
pvOutputThreshold = trans_pValueThresh,
useModel = useModel,
errorCovariance = numeric(),
verbose = TRUE,
output_file_name.cis = NULL,
pvOutputThreshold.cis = cis_pValueThres,
snpspos = snpspos,
genepos = genepos,
cisDist = cisDist,
pvalue.hist = FALSE,
min.pv.by.genesnp = FALSE,
noFDRsaveMemory = FALSE);
#Plot
plot(me)
# Results
cat('Analysis done in: ', me$time.in.sec, ' seconds', '\n')
cat('Detected local eQTLs:', '\n')
# Column 5 of the eqtls data frame -- presumably the FDR column; verify
# against the MatrixEQTL output format.
final <- me$cis$eqtls[me$cis$eqtls[5] < fdrThreshold ,]
write.table(final,file=args[5],quote=FALSE,sep="\t",row.names=FALSE,col.names=TRUE)
final <- me$trans$eqtls[me$trans$eqtls[5] < fdrThreshold ,]
write.table(final,file=args[7],quote=FALSE,sep="\t",row.names=FALSE,col.names=TRUE)
|
ca294723397da6ed6aa9a4d7b95bb85198b22fe3 | d4cf0bbfad4dfd5d6a01f7ac8852d3f0e1b7d560 | /project_05/stock_data.R | 48b7b717579b74e7f1e3d2b6decc8baa8a64ed52 | [] | no_license | snowdj/math_425 | 216cd98f19f2a3c87343b8735583dd4175099a68 | 9f70c6d88f968abf7280eb0ad5bea9f7cb96087a | refs/heads/master | 2020-03-19T04:44:15.078280 | 2018-04-15T15:40:13 | 2018-04-15T15:40:13 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,564 | r | stock_data.R | library(tidyquant)
# Pull adjusted prices for the S&P 500 index and five stocks, then convert
# them to daily log returns (long format: date, symbol, daily.returns).
stocks <- tq_get(c("^GSPC", "AAPL", "JBLU", "BBY", "GM", "AMZN"))
stocks <- select(stocks, date, symbol, adjusted)
stocks <- group_by(stocks, symbol)
stocks <- tq_transmute(stocks,
                       select = adjusted,
                       mutate_fun = periodReturn,
                       period = "daily",
                       type = "log",
                       col_rename = "daily.returns")
# Per-symbol distribution of daily log returns.
ggplot(stocks, aes(x = daily.returns)) +
  geom_histogram(aes(y = ..density..), color = "white", fill = "#00203b", bins = 50) +
  geom_density(aes(y = ..density..), color = "firebrick2", size = .5) +
  facet_wrap(~ symbol, scales = "free") +
  theme_classic()
# Wide format: one column of returns per symbol.
stocks.wide <- spread(stocks, key = symbol, value = daily.returns)
# AAPL daily returns against the index, with a fitted regression line.
ggplot(stocks.wide, aes(x = `^GSPC`, y = AAPL)) +
  geom_point(alpha = .3, size = 1.5) +
  geom_smooth(method = "lm", se = FALSE, color = "firebrick2") +
  scale_x_continuous(labels = scales::percent) +
  scale_y_continuous(labels = scales::percent) +
  labs(title = "Visualizing Returns Relationship: AAPL vs SPX 500",
       x = "SPX 500") +
  theme_classic()
# Regressions of AAPL daily returns on each of the other series; pander
# renders the model summaries.
aapl.spx <- lm(AAPL ~ `^GSPC`, data = stocks.wide)
pander::pander(summary(aapl.spx))
aapl.jblu <- pander::pander(summary(lm(AAPL ~ JBLU, data = stocks.wide)))
aapl.bby <- pander::pander(summary(lm(AAPL ~ BBY, data = stocks.wide)))
aapl.gm <- pander::pander(summary(lm(AAPL ~ GM, data = stocks.wide)))
aapl.amzn <- pander::pander(summary(lm(AAPL ~ AMZN, data = stocks.wide)))
# Base-R diagnostic plots for the AAPL ~ AMZN fit.
plot(lm(AAPL ~ AMZN, data = stocks.wide))
|
ff1f1a5415eb5126680b1034064ccc46701ec731 | 52d0411931e9af474f9e7ce1002c621cbe7e73f1 | /Misc/emulator.simpact.best.R | 892292add1301bb3d6be877afd23b54b2a817768 | [] | no_license | niyongabirejunior/arteta | fd064e995c8a6450ea0ccde40d5e7e1d8b144a05 | 7932edc12f7fe77048abd5b3be8db92a5f15ff17 | refs/heads/master | 2023-04-20T23:24:36.155888 | 2018-11-16T14:45:33 | 2018-11-16T14:45:33 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,602 | r | emulator.simpact.best.R | #Read the data from the emulator best 274.
# Summary statistics produced per Simpact run; order matches `targets` below.
summaryparameters.best <- c("growth.rate", "inc.men.20.25", "inc.wom.20.25", "prev.men.25.30",
"prev.wom.25.30","prev.men.30.35", "prev.wom.30.35", "ART.cov.men.18.50",
"ART.cov.wom.18.50", "median.wom.18.50.AD")
# Calibration targets, one per summary statistic above.
targets <- c(0.015, 0.016, 0.043, 0.21, 0.47, 0.37, 0.54, 0.33, 0.34, 5)
# NOTE(review): the first assignment is immediately overwritten by the second;
# kept, presumably, to switch between input files by hand. `dirname` is
# assumed to be set earlier in the workflow -- TODO confirm.
file.name.csv <- paste0(dirname, "/","SummaryOutPut-inANDout.df.chunk-BESTEmulator1-274-2017-01-26.csv") # param.varied
file.name.csv <- paste0(dirname, "/","SummaryOutPut-inANDout.df.chunk-BESTEmulatorRERUN1-7-2017-01-27.csv") # param.varied
##file.name.csv <- paste0(dirname, "/","SummaryOutPut-inANDout.df.chunk-Emu222-274-2017-01-19.csv") # param.varied
# Read the output file from running simpact many times.
inputANDoutput.completeReminder <- data.frame(read.csv(file = file.name.csv, header = TRUE))
inputANDoutput.analysis <- inputANDoutput.completeReminder
# Average every column of the selected df per simulation id.
inputANDoutput.analysis <- aggregate(inputANDoutput.analysis, by = list(inputANDoutput.analysis$sim.id), FUN = "mean")
#inputANDoutput.analysis <- subset(inputANDoutput.analysis, select=summaryparameters)
# Label which rows are complete (no NAs in any column).
inputANDoutput.analysis$is.complete <- complete.cases(inputANDoutput.analysis)
sum(inputANDoutput.analysis$is.complete)
# Plotting x-axis limits and number of histogram bars, if need be
# (one entry per summary statistic).
x.lim <-list(c(-0.02,0.04),c(0,0.3),c(0,0.07),c(0,0.9),
c(0.1,0.9),c(0,1),c(0.1,0.9), c(0.1,0.45), c(0.1,0.45),c(3.5,6.5))
n.bars <-c(13,6,13,9,8,10,8,10,8,6)
# Check which of the incidence estimates fall within the CI.
inputANDoutput.analysis$inc.men.ok <- (inputANDoutput.analysis$inc.men.20.25 >0.011 &
inputANDoutput.analysis$inc.men.20.25 <0.025)
inputANDoutput.analysis$inc.wom.ok <- (inputANDoutput.analysis$inc.wom.20.25 >0.033 &
inputANDoutput.analysis$inc.wom.20.25 <0.056)
# Which rows agree on both: men's and women's incidence are within the CI.
inputANDoutput.analysis$inc.complete <- inputANDoutput.analysis$inc.men.ok*inputANDoutput.analysis$inc.wom.ok
inputANDoutput.analysis$met.cat <- inputANDoutput.analysis$is.complete
# Rows that are complete and meet the CI are coded as 3.
inputANDoutput.analysis$met.cat[inputANDoutput.analysis$is.complete==TRUE &
inputANDoutput.analysis$inc.complete==TRUE] <- 3
# Produce pairs plots on simpact parameters that meet the criteria.
inputANDoutput.analysis.p <- inputANDoutput.analysis
inputANDoutput.analysis.p$met.cat[inputANDoutput.analysis.p$met.cat>2] <-
inputANDoutput.analysis.p$sim.id[inputANDoutput.analysis.p$met.cat>2]
# Give those rows different numbers so we see them in the pairs plot
# (hard-coded sim.ids from inspecting this particular run).
inputANDoutput.analysis.p$met.cat[inputANDoutput.analysis.p$met.cat==191] <- 3
inputANDoutput.analysis.p$met.cat[inputANDoutput.analysis.p$met.cat==264] <- 4
inputANDoutput.analysis.p$met.cat[inputANDoutput.analysis.p$met.cat==257] <- 5
inputANDoutput.analysis.p$met.cat[inputANDoutput.analysis.p$met.cat==233] <- 6
inputANDoutput.analysis.p$met.cat[inputANDoutput.analysis.p$met.cat==183] <- 7
inputANDoutput.analysis.p$met.cat[inputANDoutput.analysis.p$met.cat==16] <- 8
inputANDoutput.analysis.p$met.cat[inputANDoutput.analysis.p$met.cat==52] <- 9
# `x.variables` is assumed to come from the surrounding workflow, in groups
# of 5 -- TODO confirm.
for (i in seq(1, length(x.variables), 5)) {#specific for this work
pair.plot <- pairs(inputANDoutput.analysis.p[, x.variables[i:(i+4)]],
col = 1+inputANDoutput.analysis.p$met.cat, pch = 16, cex = 3)
}
# Keep only complete rows for the target-statistic comparison below.
inputANDoutput.analysis <- subset(inputANDoutput.analysis, is.complete == TRUE)
# `z.variables` and `multi.hist` are assumed to come from the surrounding
# workflow (multi.hist is presumably psych::multi.hist) -- TODO confirm.
simpact.z.analysis <- dplyr::select_(inputANDoutput.analysis,.dots=z.variables)
par(mfrow=c(1,1))
multi.hist(simpact.z.analysis)
par(mfrow=c(4,3)) # distribution of statistics
j <- 0
for (i in z.variables){
j <- j + 1
hist(simpact.z.analysis[,i],
main = paste("Dist of ",i, sep = " "), xlab = "simpact.statistic")
# Red dashed line marks the calibration target for this statistic.
abline(v = as.numeric(targets[j]), col="red", lwd=3, lty=2)
}
# Get the squared difference between each run's statistics and the targets.
check.ssd.c <- as.data.frame(t(apply(simpact.z.analysis, 1, function(x)(x-t(targets))^2)))
names(check.ssd.c) <- z.variables
check.ssd.c.plot <- multi.hist(check.ssd.c)
# Same histograms, with limits on the x-axis and a fixed number of hist bars.
par(mfrow=c(4,3)) # distribution of statistics
j <- 0
for (i in z.variables){
j <- j + 1
hist(simpact.z.analysis[,i], n.bars[j], xlim = x.lim[[j]],
main = paste("Dist of ",i, sep = " "), xlab = "simpact.statistic")
abline(v = as.numeric(targets[j]), col="red", lwd=3, lty=2)
}
|
4198c3ac91f10f56883da5fb6e781e73944c2336 | 4483e0c1924204c74d10b30333cd6529ca529d7e | /R/nlm_wheys.R | 7364b7e781203e8ac51f5e7e073ee7e4492bc819 | [] | no_license | BorjaZ/NLMR | e6f35912b9af62644543fe183834b70609c06936 | e0516590cc2adc1a27bb453a9a3dd27dca9037d9 | refs/heads/master | 2021-04-28T18:00:32.276707 | 2018-02-16T13:42:29 | 2018-02-16T13:42:29 | 121,864,946 | 1 | 0 | null | 2018-02-17T15:12:04 | 2018-02-17T15:12:04 | null | UTF-8 | R | false | false | 3,008 | r | nlm_wheys.R | #' nlm_wheys
#'
#' @description Simulates a wheyed neutral landscape model.
#'
#' @details Wheyed landscape models build on landscapes derived from random
#' curdling (\code{nlm_curds()}) by adding "whey" to the "curds". Wheying is
#' an additional step after each curdling recursion, in which previously
#' selected cells that were declared matrix (value == FALSE) are now considered
#' to contain a proportion (\code{q}) of habitat.
#'
#' If \deqn{p_{1} = q_{1} = p_{2} = q_{2} = ... = p_{n} = q_{n}} the model
#' resembles a binary random map.
#'
#' @param p [\code{numerical(x)}]\cr
#' Vector with percentage(s) to fill with curds (fill with Habitat (value ==
#' TRUE)).
#' @param s [\code{numerical(x)}]\cr
#' Vector of successive cutting steps for the blocks (split 1 block into x
#' blocks).
#' @param q [\code{numerical(x)}]\cr
#' Vector with percentage(s) to fill with wheys (fill with Habitat (value ==
#' TRUE)).
#' @param ext [\code{numerical(1)}]\cr
#' Extent of the resulting raster (0,x,0,x).
#'
#' @return raster
#'
#' @examples
#' # simulate wheyed curdling
#' wheyed_curdling <- nlm_wheys(c(0.1, 0.3, 0.6), c(32, 6, 2), c(0.1, 0.05, 0.2))
#'
#' \dontrun{
#' # visualize the NLM
#' util_plot(wheyed_curdling, discrete = TRUE)
#' }
#' @seealso \code{\link{nlm_curds}}
#'
#' @references
#' Szaro, Robert C., and David W. Johnston, eds. Biodiversity in managed
#' landscapes: theory and practice. \emph{Oxford University Press}, USA, 1996.
#'
#' @aliases nlm_wheys
#' @rdname nlm_wheys
#'
#' @importFrom magrittr "%>%"
#'
#' @export
#'
nlm_wheys <- function(p,
s,
q,
ext = 1) {
# Start from a single-cell raster of FALSE. Habitat/matrix are tracked in an
# inverted encoding while looping and the whole raster is flipped at the end;
# this is supposed to be faster than initialising with TRUE.
wheye_raster <- raster::raster(matrix(FALSE, 1, 1))
# Invert the habitat proportions: below we sample the fraction of cells to
# mark TRUE in the inverted encoding.
p <- 1 - p
for (i in seq_along(s)) {
# "tile" the raster into s[i] x s[i] smaller subdivisions per cell
wheye_raster <- raster::disaggregate(wheye_raster, s[i])
# get tibble with cell values and ids
vl <- raster::values(wheye_raster) %>%
tibble::as_tibble() %>%
dplyr::mutate(id = seq_len(raster::ncell(wheye_raster)))
# Curdling step: randomly pick a fraction p[i] of the currently-FALSE cells
# and set them TRUE.
ids <- vl %>%
dplyr::filter(!value) %>%
dplyr::sample_frac(p[i]) %>%
.$id
vl$value[ids] <- TRUE
# overwrite raster values
raster::values(wheye_raster) <- vl$value
# Wheying step: re-read the values and flip a fraction q[i] of the
# currently-TRUE cells back to FALSE.
vl <- raster::values(wheye_raster) %>%
tibble::as_tibble() %>%
dplyr::mutate(id = seq_len(raster::ncell(wheye_raster)))
# select a random fraction q[i] of the TRUE cells
ids <- vl %>%
dplyr::filter(value) %>%
dplyr::sample_frac(q[i]) %>%
.$id
vl$value[ids] <- FALSE
# overwrite raster values
raster::values(wheye_raster) <- vl$value
}
# Invert the raster so that TRUE finally marks habitat.
raster::values(wheye_raster) <- !raster::values(wheye_raster)
# Stretch the raster to the requested square extent (0, ext, 0, ext).
raster::extent(wheye_raster) <- c(
0,
ext,
0,
ext
)
return(wheye_raster)
}
|
38f50874bd3024fea7153d1c4cd0d7cbca5cd1dd | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/biglasso/examples/predict.cv.biglasso.Rd.R | 6f52a9eaf874dd9a5a741e0c708379d7444f07d7 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 625 | r | predict.cv.biglasso.Rd.R | library(biglasso)
### Name: predict.cv.biglasso
### Title: Model predictions based on a fitted 'cv.biglasso' object
### Aliases: predict.cv.biglasso coef.cv.biglasso
### Keywords: models regression
### ** Examples
## predict.cv.biglasso
# Worked example for predict.cv.biglasso / coef.cv.biglasso using the
# colon-cancer data set shipped with biglasso (binary outcome).
data(colon)
X <- colon$X
y <- colon$y
# biglasso operates on big.matrix objects rather than plain matrices.
X.bm <- as.big.matrix(X)
fit <- biglasso(X.bm, y, penalty = 'lasso', family = "binomial")
# Cross-validated fit; the seed fixes the fold assignment for reproducibility.
cvfit <- cv.biglasso(X.bm, y, penalty = 'lasso', family = "binomial", seed = 1234, ncores = 2)
# NOTE(review): this shadows stats::coef() for the rest of the session.
coef <- coef(cvfit)
# Show only the non-zero coefficients selected by the lasso.
coef[which(coef != 0)]
# Predictions at the CV-chosen lambda: probabilities, linear predictor, class.
predict(cvfit, X.bm, type = "response")
predict(cvfit, X.bm, type = "link")
predict(cvfit, X.bm, type = "class")
|
c62643a94153865c60cdc5d68e764f767442c596 | fa139885e57dbd498849ae80edd0fb76f5c72c21 | /nyctaxitest.R | f38761b33044a5a1efc0f022612893e0b86b00c3 | [] | no_license | jreynolds01/spark-mrs-demo | fc500e19342943230e5ed85d25cab7d5a9b3b5a4 | 90df77043ec9c098752a4f4b64bbd03eddbf6dd4 | refs/heads/master | 2021-01-09T05:19:14.590538 | 2016-06-07T18:29:48 | 2016-06-07T18:29:48 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 630 | r | nyctaxitest.R | sparkEnvir <- list(spark.executor.instance = '10',
spark.yarn.executor.memoryOverhead = '8000')
sc <- sparkR.init(
sparkEnvir = sparkEnvir,
sparkPackages = "com.databricks:spark-csv_2.10:1.3.0"
)
sqlContext <- sparkRSQL.init(sc)
# Read the NYC taxi CSV data into a SparkR DataFrame.
#
# path        : location of the CSV data (defaults to the Azure blob container).
# sql_context : SparkR SQL context used for the read. Defaults to the global
#               `sqlContext` initialised above, so existing callers are
#               unaffected, but it can now be supplied explicitly instead of
#               being a hidden global dependency.
# Returns a SparkR DataFrame with the header row used for column names and
# column types inferred from the data.
dataframe_import <- function(path = "wasb://nyctaxidata@alizaidi.blob.core.windows.net/",
                             sql_context = sqlContext) {
  library(SparkR)
  # The previous `path <- file.path(path)` was a no-op on a single argument
  # and has been removed.
  read.df(sql_context, path,
          source = "com.databricks.spark.csv",
          header = "true", inferSchema = "true", delimiter = ",")
}
# full_taxi <- dataframe_import()
|
8c5091eee4f3589bc7086f3f25b86236af5ffd24 | 9a79c6d33fc2776d08c72e71c8ad91aa73df2e10 | /R/dataset.r | baf8671b36896f372dffa17986cd029b662610d1 | [] | no_license | giupo/rdataset | 8e6d1d645e1806bed616f2c9d34fdeac3af65129 | 7786febfadb60bf37343976a0d0d2a0286cca259 | refs/heads/master | 2021-06-07T19:17:06.802211 | 2021-05-14T15:29:23 | 2021-05-14T15:29:23 | 67,530,543 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,867 | r | dataset.r | ## Whenever you have a cyclic dependency, without really having one,
## it's due some generics you are defining in your code:
## that's why I'm writing here this reminder. If you ever encounter a
## cyclic dependency, slap this to R console:
##
## trace(stop, recover)
##
## and then inspect in which frame there's a getGeneric a see which one
## pisses R off.
#' Classe contenitore di dati (serie storiche)
#'
#' @name Dataset-class
#' @rdname Dataset-class
#' @slot data hash::hash containing data
#' @slot url path where data is
#' @export Dataset
#' @exportClass Dataset
#' @importClassesFrom hash hash
#' @importFrom methods new
# requireNamespace("hash", quietly=TRUE)
# S4 generator for the Dataset class: slot `data` holds the stored series in
# a hash::hash, slot `url` records the path/URL the data came from.
Dataset <- methods::setClass( # nolint
"Dataset",
representation(data = "hash", url = "character"))
# Internal constructor helper used by the "initialize" method below.
# Bug fix: the `url` argument was accepted but never stored, so the `url`
# slot of a freshly built Dataset was always left empty; it is now assigned.
.init <- function(x, url = "") { # nolint
  x@data <- hash::hash()
  x@url <- url
  x
}
# S4 initializer for Dataset: delegates to the internal helper .init so that
# all construction logic lives in one place.
methods::setMethod(
"initialize",
signature("Dataset"),
function(.Object, url = "") { # nolint
.init(.Object, url = url)
})
#' Returns the length of the object
#'
#' @name length
#' @param x object to retrieve the length
#' @return the length of the object x
#' @rdname length-methods
#' @docType methods
NULL
#' @rdname length-methods
#' @aliases length,Dataset-method
# length(Dataset): number of objects stored in the internal hash.
methods::setMethod(
"length",
c("Dataset"),
function(x) {
length(x@data)
})
#' Yields the names contained in the Object
#'
#' @rdname names-methods
#' @docType methods
#' @name names
#' @param x object where "names" are from
NULL
#' @rdname names-methods
#' @aliases names,Dataset-method
# names(Dataset): keys of the internal hash, i.e. the names of the stored
# objects.
methods::setMethod(
"names",
c("Dataset"),
function(x) {
hash::keys(x@data)
})
#' shows the object on the stdout
#'
#' @rdname show-methods
#' @docType methods
#' @param object object to printout
#' @name show
NULL
#' @rdname show-methods
#' @aliases show,Dataset-method
# show(Dataset): print a one-line summary, e.g. "Dataset with 3 objects".
# The message is built with base sprintf(), producing exactly the same text
# as the previous whisker template ("Dataset with {{num}} object{{s}}\n").
methods::setMethod(
  "show",
  "Dataset",
  function(object) {
    n_objects <- length(object@data)
    plural_suffix <- if (n_objects == 1) "" else "s"
    cat(sprintf("Dataset with %d object%s\n", n_objects, plural_suffix))
  })
#' Return the `Dataset` as a plain `list` of objects
#'
#' @param x Dataset to convert to a list
#' @param ... forwarded to `as.list` for compatibility with the generic
#' @seealso base::as.list
#' @return a `list` of timeseries (the contents of the internal hash)
#' @export
as.list.Dataset <- function(x, ...) as.list(x@data, ...)
#' Test whether \code{x} is a \code{Dataset}
#'
#' @name is.dataset
#' @export
#' @param x any R object
#' @return \code{TRUE} if \code{x} is (or inherits from) class
#'   \code{Dataset}, \code{FALSE} otherwise
is.dataset <- function(x) {
  inherits(x, "Dataset")
}
#' Build a dataset.
#'
#' @name dataset
#' @title Dataset OOP
#' @rdname Dataset.Rd
#' @export
#' @param ... Either of:
#'   - a string with a URL/path from which to build the Dataset
#'   - a list of objects (e.g. timeseries)
#' @return The requested Dataset.
#' @examples
#' \dontrun{
#' ds <-Dataset()
#' ds <- Dataset('/path/to/something/useful')
#' # given \code{lll} is a list of timeseries
#' ds <- dataset(lll)
#' }
# Thin functional wrapper around the S4 generator `Dataset`; all arguments
# are forwarded unchanged.
dataset <- function(...) {
Dataset(...)
}
#' Apply abs() to every timeseries contained in this Dataset
#'
#' @name abs_ds
#' @param x a Dataset whose series should be passed through abs()
#' @export
#' @return a Dataset with the same names, each timeseries in absolute value
#' @note fix for 31922
abs_ds <- function(x) {
  series_list <- as.list(x)
  # Preserve the series names explicitly across the transformation.
  series_names <- names(series_list)
  series_list <- lapply(series_list, abs)
  names(series_list) <- series_names
  as.dataset(series_list)
}
#' Accesses the timeseries/object based on its name
#'
#' @param x dataset
#' @param name name of the object to extract (split on spaces before lookup)
#' @export
# Bug fix: this exact method used to be registered twice in a row; the second
# setMethod call simply overwrote the first with an identical definition, so
# only one registration is kept.
methods::setMethod(
  "$",
  signature("Dataset"),
  function(x, name) {
    x[[unlist(stringr::str_split(name, " "))]]
  })
|
72c58133fe40edc75456c1ef384d7954ed2c2420 | af4d49456b47fb82ea016753943b7b73cea8253a | /chapter3.R | 31f60a6959cb489e798099781584bab9bbe8a798 | [] | no_license | LarsIndus/local-gaussian-correlation | d95f2b84aeb45e20ddf3f6770d8398d340e3c760 | 8fcb8c7bc3ad88875416f1b5507102f500fa0369 | refs/heads/master | 2021-06-24T16:26:53.212139 | 2020-12-05T11:58:32 | 2020-12-05T11:58:32 | 177,267,581 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,136 | r | chapter3.R | # Setup ----------------------------------------------------------------
library(localgauss)
library(magrittr)
library(ggplot2)
library(MASS)
source("plot_localgauss.R")
SEED <- 42
# LGC for Normal Distribution (Figure 3) --------------------------------------------------------
set.seed(SEED)
n <- 1000
mean_x <- 0
mean_y <- 0
var_x <- 1
var_y <- 1
rho <- -0.7
b <- 1
x <- mvrnorm(n, mu = c(mean_x, mean_y), Sigma = matrix(c(var_x, rho, rho, var_y), ncol = 2))
lg_normal <- localgauss(x = x[, 1], y = x[, 2], gsize = 15, b1 = b, b2 = b, hthresh = 0.01)
plot_localgauss(lg_normal, plot_text = TRUE, plot_points = FALSE)
# LGC for Parabola -------------------------------------------------------------
set.seed(SEED)
n <- 1000
mean_x <- 0
sd_x <- sqrt(1)
mean_epsilon <- 0
sd_epsilon <- sqrt(0.5)
b <- 2
x <- rnorm(n, mean = mean_x, sd = sd_x)
epsilon <- rnorm(n, mean = mean_epsilon, sd = sd_epsilon)
y <- x^2 + epsilon
# Pearson correlation close to zero:
cor(x, y)
lg_parabola <- localgauss(x = x, y = y, b1 = b, b2 = b, gsize = 25, hthresh = 0.005)
plot_localgauss(lg_parabola, plot_text = TRUE, plot_points = FALSE) |
6cd8157a54fa17e7b4af6b21d81131eb5ebb6394 | 549b9ea2de06a6704912f5de95d2e9bca87440ae | /man/Connectance.Rd | 7ce4debcd7061ea5c14fda09e9233247268ace6b | [] | no_license | akeyel/spatialdemography | 38e863ba231c77b5e9d4d1c7357d98929f171de9 | cf0b006d43d0b55c76b55da3027210a4ceee29ef | refs/heads/master | 2016-12-13T05:19:09.151285 | 2016-04-02T03:56:27 | 2016-04-02T03:56:27 | 39,295,792 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 731 | rd | Connectance.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sdhelper.r
\name{Connectance}
\alias{Connectance}
\title{Calculate distances between cells}
\usage{
Connectance(NumPatches, extent)
}
\arguments{
\item{NumPatches}{Number of cells in the landscape, equal to the extent squared}
\item{extent}{Length of one side of the square landscape}
}
\description{
Calculates the Euclidean distance between all cells in a landscape
}
\details{
Designed with a square landscape in mind, but it will work for other configurations.
Basically, cells are added by rows, and if there are not enough cells to complete a row, the row is missing some cells.
}
\author{
Jakob Gerstenlauer, with minor modification by A.C. Keyel
}
|
fa1bae4cd61ee206dd27b4d9c613db371c833dec | a37122475660395c7306c661f8baa33421228a75 | /man/streamSet-cash-getPlot.Rd | b9020ddc3e0b9c6f2ea47b921a25c8c556717fea | [
"Apache-2.0"
] | permissive | eddyrene/PI-Web-API-Client-R | 726b1edbea0a73bf28fe9b2f44259972ddecd718 | 7eb66c08f91e4a1c3a479a5fa37388951b3979b6 | refs/heads/master | 2020-04-17T01:01:27.260251 | 2018-11-14T10:48:46 | 2018-11-14T10:48:46 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 3,468 | rd | streamSet-cash-getPlot.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/streamSetApi.r
\name{streamSet$getPlot}
\alias{streamSet$getPlot}
\title{Returns values of attributes for an element, event frame or attribute over the
specified time range suitable for plotting over the number of intervals (typically
represents pixels).}
\arguments{
\item{webId}{The ID of an element, event frame or attribute, which is the base element or
parent of all the stream attributes.}
\item{categoryName}{Specify that included attributes must have this category. The default
is no category filter.}
\item{endTime}{An optional end time. The default is '*' for element attributes and points.
For event frame attributes, the default is the event frame's end time, or '*' if that is
not set. Note that if endTime is earlier than startTime, the resulting values will be in
time-descending order.}
\item{intervals}{The number of intervals to plot over. Typically, this would be the number
of horizontal pixels in the trend. The default is '24'. For each interval, the data
available is examined and significant values are returned. Each interval can produce up to
5 values if they are unique, the first value in the interval, the last value, the highest
value, the lowest value and at most one exceptional point (bad status or digital state).}
\item{nameFilter}{The name query string used for filtering attributes. The default is no
filter.}
\item{searchFullHierarchy}{Specifies if the search should include attributes nested
further than the immediate attributes of the searchRoot. The default is 'false'.}
\item{selectedFields}{List of fields to be returned in the response, separated by
semicolons (;). If this parameter is not specified, all available fields will be
returned.}
\item{showExcluded}{Specifies if the search should include attributes with the Excluded
property set. The default is 'false'.}
\item{showHidden}{Specifies if the search should include attributes with the Hidden
property set. The default is 'false'.}
\item{sortField}{The field or property of the object used to sort the returned collection.
For better performance, by default no sorting is applied. 'Name' is the only supported
field by which to sort.}
\item{sortOrder}{The order that the returned collection is sorted. The default is
'Ascending'}
\item{startTime}{An optional start time. The default is '*-1d' for element attributes and
points. For event frame attributes, the default is the event frame's start time, or '*-1d'
if that is not set.}
\item{templateName}{Specify that included attributes must be members of this template. The
default is no template filter.}
\item{timeZone}{The time zone in which the time string will be interpreted. This parameter
will be ignored if a time zone is specified in the time string. If no time zone is
specified in either places, the PI Web API server time zone will be used.}
\item{webIdType}{Optional parameter. Used to specify the type of WebID. Useful for URL
brevity and other special cases. Default is the value of the configuration item
"WebIDType".}
}
\value{
Plot values of the streams that meet the specified conditions.
}
\description{
Returns values of attributes for an element, event frame or attribute over the specified
time range suitable for plotting over the number of intervals (typically represents
pixels).
}
|
7465a7a03db5bb5c870d985974f3c171da39b973 | bd0c2f6797bb1bd130f6935b9650cc4cdb99a928 | /exercise01/census_logistic_v2.r | f4d1c36dc7c261f01805de5b4814a887e9d61328 | [] | no_license | rptaheri/exercises | c136ae57cf6ce8b215d8c271aadebb6b638f1ba4 | 93547977cb03169d9fb71947463d0aa0fe5b14e1 | refs/heads/master | 2020-02-26T13:29:37.139420 | 2015-01-26T23:56:48 | 2015-01-26T23:56:48 | 29,747,454 | 0 | 0 | null | 2015-01-23T18:49:25 | 2015-01-23T18:49:24 | null | UTF-8 | R | false | false | 3,076 | r | census_logistic_v2.r | library(gmodels)
library(caret)
library(pROC)
# Logistic-regression analysis of the census income data: clean the data,
# fit and refine a model for P(income > 50k) on a 70% training split, then
# score the 30% validation split at a 0.5 probability cutoff.
# NOTE(review): `records_log` (the CSV path) must be defined before this runs.
records = read.csv(records_log)
# Recode '?' placeholder values to proper missing values.
records$native_country[records$native_country == '?'] <- NA
records$occupation[records$occupation == '?'] <- NA
records$working_class[records$working_class == '?'] <- NA
# New indicator: 1 if country of origin is US, 0 otherwise
# (used in place of the many-level country-of-origin variable).
records$us_flg[records$native_country == 'United-States'] <- 1
records$us_flg[records$native_country != 'United-States'] <- 0
mosaicplot(table(records$working_class, records$over_50k))
table(records$working_class)
# Logically group Never-worked and Without-pay into just Without-pay.
records$working_class[records$working_class == 'Never-worked'] <- 'Without-pay'
table(records$working_class)
# Re-factor the categorical variables so the now-unused '?' and
# 'Never-worked' levels are dropped.
records$occupation <- factor(records$occupation)
records$native_country <- factor(records$native_country)
records$us_flg <- factor(records$us_flg)
records$working_class <- factor(records$working_class)
inTrain = createDataPartition(records$over_50k, p=7/10, list=FALSE)
# split data into training (70%) and validation (30%)
train = records[inTrain,]
validate = records[-inTrain,]
# NOTE(review): attach() can silently mask other objects; glm(..., data =
# train) would be safer.
attach(train)
logit <- glm(over_50k ~ age + capital_loss + capital_gain + education_num + hours_week + us_flg
+ marital_status + occupation + relationship + race +
gender + working_class, family=binomial(logit))
# Refit without capital_gain due to a linear (complete) separation problem.
logit <- glm(over_50k ~ age + capital_loss + education_num + hours_week + us_flg
+ marital_status + occupation + relationship + race +
gender + working_class, family=binomial(logit))
summary(logit) # AIC 22,648
# NOTE(review): Concordance() is not provided by gmodels/caret/pROC loaded
# above -- confirm which package supplies it (e.g. InformationValue).
Concordance(logit) # concordance = 0.886
logit.roc <- roc(logit$y, logit$fitted)
logit.roc # roc = 0.8853
# Drop race: all of its levels are insignificant.
logit <- glm(over_50k ~ age + capital_loss + education_num + hours_week + us_flg
+ marital_status + occupation + relationship +
gender + working_class, family=binomial(logit))
summary(logit) # AIC 22,650
Concordance(logit) # concordance = 0.8859
logit.roc <- roc(logit$y, logit$fitted)
logit.roc # roc = 0.8852
plot(logit.roc)
detach(train)
# Score the validation set with the final model.
score <- as.data.frame(predict(logit, validate, type="response"))
valid_score <- cbind(validate$over_50k,score)
# change names to make it easier to reference them
colnames(valid_score) <- c("over_50k", "predicted_prob")
# Classify at a 0.5 probability cutoff and flag correct classifications.
# NOTE(review): the old comment mentioned "two cutoffs maximized for
# specificity" but only the 0.5 cutoff is implemented below.
valid_score$cutoff50[valid_score$predicted_prob >= 0.5] <- 1
valid_score$cutoff50[valid_score$predicted_prob < 0.5] <- 0
valid_score$classification_50[valid_score$cutoff50 == valid_score$over_50k] <- 1
valid_score$classification_50[valid_score$cutoff50 != valid_score$over_50k] <- 0
colMeans(valid_score, na.rm = TRUE) # classification rate at 50% cutoff is 83.3% in validation
|
480ea0755048ddb4f83e7ca91b24e4c1597f3860 | d57b47733f71cafdf1af9dfacd2e8b859c3acdc0 | /R/gg_desc_lsmeans.R | 1b0bfa67129e4ad0c76153ddf4a7508194952300 | [] | no_license | jfrancoiscollin/ClinReport | 1d66270446f3621eff19552ee713558db7c92f0f | 445bac0b579b23eefb7ccb7753f9c695cd62df0c | refs/heads/master | 2021-07-03T08:29:50.477437 | 2020-09-16T08:47:55 | 2020-09-16T08:47:55 | 171,647,479 | 13 | 2 | null | 2019-02-27T09:00:06 | 2019-02-20T09:58:57 | R | UTF-8 | R | false | false | 4,681 | r | gg_desc_lsmeans.R | # TODO: Add comment
#
# Author: jfcollin
###############################################################################
#' Creates a ggplot object corresponding to a LS Means desc object
#'
#'
#' @param desc Desc object
#' @param title Character The title of the plot
#' @param ylim Numeric of length 2 for setting y axis limits
#' @param xlim Numeric of length 2 for setting x axis limits
#' @param xlab Character Label for x-axis
#' @param ylab Character Label for y-axis
#' @param legend.label Character Label for the legend (used only if x1 and x2 are not NULL in the desc object)
#' @param add.ci Logical. If TRUE it adds bars to the means representing 95\% CI
#' @param add.line Logical. If TRUE it joins the dots with a line (default to TRUE)
#'
#' @description
#' \code{gg_desc_lsmeans}
#' ggplot object is created. It is used internally in function \code{\link{plot.desc}}.
#' It's easier to use this last one.
#'
#' @details
#' It is used internally in function \code{\link{plot.desc}}.
#' It's easier to use this last one.
#'
#' @return
#' A ggplot object.
#'
#' @seealso \code{\link{plot.desc}} \code{\link{report.lsmeans}} \code{\link{gg_desc_quali}} \code{\link{gg_desc_quanti}}
#' @examples
#' \dontshow{
#'
#' library(nlme)
#' library(emmeans)
#'
#' data(datafake)
#' #Removing baseline data in the response, for the model
#'
#'data.mod=droplevels(datafake[datafake$TIMEPOINT!="D0",])
#'
#'mod3=lme(y_numeric~baseline+GROUP+TIMEPOINT+GROUP*TIMEPOINT,
#'random=~1|SUBJID,data=data.mod,na.action=na.omit)
#'
#'test3=emmeans(mod3,~GROUP|TIMEPOINT)
#'
#'tab.mod3=report.lsmeans(lsm=test3,at.row="TIMEPOINT")
#'
#'gg=ClinReport:::gg_desc_lsmeans(tab.mod3,title="LS Means plot example")
#'
#'
#'test4=emmeans(mod3,~GROUP)
#'tab.mod4=report.lsmeans(lsm=test4)
#'
#'gg=ClinReport:::gg_desc_lsmeans(tab.mod4,title="LS Means plot example")
#'
#'gg2=ClinReport:::gg_desc_lsmeans(tab.mod4,title="LS Means plot example",add.ci=TRUE)
#'
#' }
#'
#' @import ggplot2
gg_desc_lsmeans <- function(desc, title = "", ylim = NULL, xlim = NULL,
                            xlab = "", ylab = "", legend.label = "Group",
                            add.ci = FALSE, add.line = TRUE) {
  # Build the ggplot corresponding to an "lsmeans" desc object produced by
  # report.lsmeans(); used internally by plot.desc().
  #
  # Fixes vs. the previous version:
  #  - class(desc) != "desc" replaced by !inherits(desc, "desc"): class() can
  #    return a vector, which made the old comparison fragile.
  #  - T/F defaults replaced by TRUE/FALSE (T and F are reassignable).
  #  - xlim now has a default (NULL) and, like ylim, is applied when supplied;
  #    it was previously documented but silently ignored.
  #  - the xlab argument is now honoured in every branch; it used to be applied
  #    only when both grouping variables were present (default "" preserves the
  #    previous output).
  if (!inherits(desc, "desc")) stop("\n desc should be a desc object")
  if (desc$type.desc != "lsmeans") {
    stop("This function should be used only for lsmeans desc object, see desc$type.desc")
  }

  # Grouping variables and the raw emmeans output used for plotting.
  x1 <- desc$x1
  x2 <- desc$x2
  stat <- desc$raw.output
  emmean <- "emmean"
  contrast <- desc$contrast

  # For contrast objects, dodge the points and shift the grouping variables so
  # the contrast name becomes the colour/grouping dimension.
  position <- "identity"
  if (!is.null(contrast)) {
    if (contrast) {
      position <- position_dodge(width = 0.3)
      if (!is.null(x1) && is.null(x2)) {
        x2 <- x1
        x1 <- desc$contrast.name
      }
      if (is.null(x1)) x1 <- desc$contrast.name
    }
  }

  # Two grouping variables: points coloured by x1 along x2, optionally joined.
  if (!is.null(x1) && !is.null(x2)) {
    gg <- ggplot(stat, aes_(y = as.name(emmean), x = as.name(x2),
                            group = as.name(x1), colour = as.name(x1))) +
      geom_point(position = position) +
      theme_bw() +
      scale_colour_discrete(name = legend.label) +
      theme(plot.background = element_rect(colour = "black", size = 1,
                                           linetype = "solid"),
            legend.position = "bottom",
            title = element_text(size = 10),
            axis.text.x = element_text(angle = 45, hjust = 1)) +
      xlab(xlab) +
      ylab(ylab) +
      labs(title = title)
    if (add.line) {
      gg <- gg + geom_path()
    }
  }

  # One grouping variable: bar chart of the LS means per level of x1.
  if (!is.null(x1) && is.null(x2)) {
    gg <- ggplot(stat, aes_(y = as.name(emmean), x = as.name(x1))) +
      geom_bar(stat = "identity") +
      theme_bw() +
      theme(plot.background = element_rect(colour = "black", size = 1,
                                           linetype = "solid"),
            legend.position = "bottom",
            title = element_text(size = 10),
            axis.text.x = element_text(angle = 45, hjust = 1)) +
      xlab(xlab) +
      ylab(ylab) +
      labs(title = title)
  }

  # No grouping variable: a single bar at x = 1.
  if (is.null(x1) && is.null(x2)) {
    gg <- ggplot(stat, aes_(y = as.name(emmean), x = 1)) +
      geom_bar(stat = "identity") +
      theme_bw() +
      theme(plot.background = element_rect(colour = "black", size = 1,
                                           linetype = "solid"),
            legend.position = "bottom",
            title = element_text(size = 10),
            axis.text.x = element_text(angle = 45, hjust = 1)) +
      xlab(xlab) +
      ylab(ylab) +
      labs(title = title)
  }

  # 95% CI error bars: emmeans returns lower.CL/upper.CL for t-based intervals
  # and asymp.LCL/asymp.UCL for asymptotic ones.
  if (add.ci) {
    if (!is.null(stat$lower.CL)) {
      gg <- gg + geom_errorbar(aes_(ymin = as.name("lower.CL"),
                                    ymax = as.name("upper.CL")),
                               width = 0.15, position = position)
    }
    if (!is.null(stat$asymp.LCL)) {
      gg <- gg + geom_errorbar(aes_(ymin = as.name("asymp.LCL"),
                                    ymax = as.name("asymp.UCL")),
                               width = 0.15, position = position)
    }
  }

  # Optional axis limits.
  if (!is.null(ylim)) {
    gg <- gg + ylim(ylim)
  }
  # Only meaningful when the x axis is continuous/compatible with the limits.
  if (!is.null(xlim)) {
    gg <- gg + xlim(xlim)
  }
  return(gg)
}
|
55c604e2cbaac0c65a60880628c5dae4aa92769a | f1172b6676c4f20034df5b4eca15dbc66c209c9b | /server.R | 97b32f7ce463f5fc9a2bef07a0a4b861e4aac322 | [
"MIT"
] | permissive | agbleze/GA | 7fabb91c15e1a692f416a252455e3ee2bb854c22 | b5dab8ac17227039980b1a9930b423e3a3816f99 | refs/heads/main | 2023-07-18T04:55:14.023131 | 2021-08-23T20:43:52 | 2021-08-23T20:43:52 | 399,218,661 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 17,533 | r | server.R | ### load libraries ######
#library(shiny)
library(readr)
library(forecast)
library(tidyverse)
library(ggplot2)
library(fpp2)
library(lubridate)
library(GGally)
library(dplyr)
library(magrittr)
library(labelled)
library(gtsummary)
library(bfast)
library(ggstatsplot)
library(googleVis)
library(formattable)
library(fontawesome)
# Define server logic required to draw a histogram
shinyServer(function(input, output, session) {
############### Transaction usertype data ###########################
# GA_usertype_transformedData <- read_csv("~/GA_usertype_transformedData.csv")
# usertypedata_mean <- GA_usertype_transformedData%>%
# group_by(Year, Month)%>%
# summarise(across(everything(), mean))%>%
# ungroup()
#
######################## Transaction usertype data reactive ##########################################
# Reactive: monthly-average user-type metrics filtered to the year/month
# currently selected in the transactions UI controls.
# NOTE(review): `usertypedata_mean` is built in the commented-out block above
# (read_csv of GA_usertype_transformedData + group_by/summarise); if it is not
# defined elsewhere, this reactive fails at runtime -- confirm its source.
usertype_data_reactive <- reactive({
# Year and month chosen by the user.
transaction_year <- input$trans_year
transaction_month <- input$trans_month
# Subset the monthly means to the selected period.
usertypedata_mean%>%
filter(Year == transaction_year & Month == transaction_month)
})
###################### Transaction Exponential smoothing ##############################
### partition training and testing dataset
transactions_ts_train <- window(transactions_monthly_ts, end = c(2019, 10))
transaction_ts_test <- window(transactions_monthly_ts, start = c(2019, 11))
## Holt-Winters-- refiting model with optimal values for beta and gamma
ets_zzz_opt <- ets(transactions_ts_train[,4], beta = 0.342, gamma = 0.306, model = "ZZZ")
ets_zzz_opt_fort <- forecast(ets_zzz_opt)
#### fitting damped model --- the default moldel used was A,Ad,N
ets_zzz_damped <- ets(transactions_ts_train[,4], model = "ZZZ", damped = TRUE)
ets_zzz_fort_damped <- forecast(ets_zzz_damped)
##################################### full data ###############################################
full_data <- read_csv("~/full_data.csv")
full_data_ts <- ts(full_data, start = c(2014, 11), frequency = 12)
## Partition data into training and testing set
(train_full_data_forecast <- window(full_data_ts, start = c(2014, 11), end = c(2018, 6)))
(test_full_data_forecast <- window(full_data_ts, start = c(2018, 7)))
####################### full data reactive object ##########################################
full_data_reactive <- reactive({
year_select <- input$yearsel
month_select <- input$monthsel
full_data%>%
filter(Year == year_select & Month == month_select)
})
################# transaction data wrangling for timeseries analysis #######################
GA_transactions_monthly <- read_csv("GA_transactions_monthly.csv")
transactions_monthly <- GA_transactions_monthly
transactions_monthly <- transactions_monthly[, c(2, 3:5)]
transactions_monthly_ts <- ts(transactions_monthly, frequency = 12, start = c(2014, 11))
###################################### REVENUE UI #############################################
# Value box: average monthly revenue from new users for the selected period.
output$newuser_revenue <- renderValueBox({
revenue_selectyear <- input$revenue_selectyear
revenue_selectmonth <- input$revenue_selectmonth
newuser_revenue_display <- usertypedata_mean%>%
filter(Year == revenue_selectyear & Month == revenue_selectmonth)%>%
select(New_Users_Revenue)
valueBox(paste0("$", comma(newuser_revenue_display, digits = 2)),
paste0("New Users ( Monthly Average)" ), width = 6, icon = icon("funnel-dollar"))
})
# Value box: average monthly revenue from returning users for the selected period.
output$returnuser_revenue <- renderValueBox({
revenue_selectyear <- input$revenue_selectyear
revenue_selectmonth <- input$revenue_selectmonth
returninguser_revenue_display <- usertypedata_mean%>%
filter(Year == revenue_selectyear & Month == revenue_selectmonth)%>%
select(Returning_Users_Revenue)
valueBox(paste0("$", comma(returninguser_revenue_display, digits = 2)),
subtitle = "Returning Users ( Monthly Average)", color = "aqua", width = 6, icon = icon("file-invoice-dollar"))
})
# Time-series plot of average monthly revenue (column 16 of full_data_ts).
# NOTE(review): "Mothly" typo in the title string (runtime text, left as-is here).
output$revenue_timeries <- renderPlot({
autoplot(full_data_ts[,16])+ ylab("Average Monthly Revenue") +
ggtitle("Timeseries of Average Mothly Revenue")
})
# Seasonal plot of average monthly revenue, with year labels on both sides.
output$revenue_seasonality <- renderPlot({
(season_revenue_full_data <- ggseasonplot(full_data_ts[,16], year.labels = T,
year.label.left = T, year.label.right = T)+ ylab("Average Monthly Revenue") +
ggtitle(label = "Seasonal Timeseries plot of Average Monthly Revenue")
)
})
## revenue forecast with seasonal naive forecast
output$seasonal_forecast <- renderPlot({
h = as.numeric(input$revenue_horizon_forecast)
revenue_snaive <- snaive(train_full_data_forecast[,16], h = h)
autoplot(train_full_data_forecast[,16], series = "Data") +
autolayer(revenue_snaive, series = "Seasonal Naive", PI = T) + ylab("Average Monthly Revenue") +
guides(colour = guide_legend(title = "Legend", title.position = "top")) +
theme(legend.position = "bottom") + ggtitle("Forecast of revenue using on seasonal mean forecasting")
})
output$revenue_trendSeasonal_forecast <- renderPlot({
(fitfor <- tslm(full_data_ts[, 16] ~ trend + season))
forecast(fitfor) %>%
autoplot() + ylab("Average Monthly Revenue")
})
output$regress_model <- renderPlot({
ggcoefstats(
x = stats::lm(formula = log(full_data_ts[,16] + 1) ~ Avg_ECR + Avg_Users + Avg_bounce_rate + Avg_session_duration +
Avg_sessionPer_user + Avg_Pg_session, data = full_data_ts),
ggtheme = ggplot2::theme_gray(), # changing the default theme
title = "Regression analysis: Predicting the influence of various KPI on Revenue",
)
})
output$revenue_decompose <- renderPlot({
(revdecompose_mult <- decompose(full_data_ts[,16], type = "multiplicative") %>%
autoplot() + ylab("Average Monthly Revenue"))
})
output$revenue_stl_forecast <- renderPlot({
(rev_stlf_meth <- stlf(full_data_ts[,16]) %>%
autoplot() + ylab("Average Monthly Revenue"))
})
# Structural-change detection on the revenue series using BFAST with an
# STL-based decomposition; plots the decomposition and any detected breaks.
output$revenue_change_detect <- renderPlot({
(data_bfast <- bfast::bfast(full_data_ts[,16], decomp = "stl"))
plot(data_bfast)
})
# Monitor the revenue series for structural breaks with bfastmonitor and
# plot the result.
# BUG FIX: the monitoring start year was mistyped as 20014, a date far
# outside the series (which begins Nov 2014), making the monitoring period
# invalid. Corrected to 2014.
# NOTE(review): bfastmonitor usually monitors a period *after* some stable
# history; starting at the first observation may be intentional -- confirm.
output$revenue_bfastmonitor <- renderPlot({
  (data_bfastmonitor <- bfastmonitor(full_data_ts[,16], start = c(2014, 11), plot = T))
})
###################### TRANSACTIONS UI ############################################
output$newuser_transactions <- renderValueBox({
newuser_transactions_display <- usertype_data_reactive()%>%
select(New_users_transactions)
valueBox(value = paste0(comma(newuser_transactions_display, digits = 2)),
subtitle = "New Users (Monthly Average)",
color = "yellow", icon = icon("hand-holding-usd"))
})
output$returnuser_transactions <- renderValueBox({
returnuser_transactions_display <- usertype_data_reactive()%>%
select(Returning_users_transactions)
valueBox(value = paste0(comma(returnuser_transactions_display, digits = 2)),
subtitle = "Returning Users (Monthly Average)", color = "yellow", icon = icon("comments-dollar"))
})
output$transaction_timeseries <- renderPlot({
autoplot(transactions_monthly_ts[,4]) + ylab("Average Monthly Transactions") +
ggtitle("Timeseries of Average Monthly Transactions")
})
output$transaction_decomp <- renderPlot({
(transaction_decomposition <- autoplot(decompose(transactions_monthly_ts[,4])))
})
output$transaction_seasonal <- renderPlot({
ggseasonplot(transactions_monthly_ts[,4], polar = T) + ggtitle("Seasonal Polar plot of Average Monthly Transactions")
})
output$transaction_autocorrelation <- renderPlot({
ggAcf(transactions_monthly_ts[,4]) + ggtitle("Autocorrelation of Average Monthly Transactions")
})
output$transaction_exponential <- renderPlot({
######### ploting timeseries with exponential smoothing and including damped
(autoplot(transactions_ts_train[,4], series = "Average Monthly Transactions data") +
autolayer(ets_zzz_opt_fort$mean, series = "ETS (A,N,N) forecast", PI = T) +
autolayer(ets_zzz_opt_fort$fitted, series = "ETS (A,N,N) fitted values") +
# autolayer(ets_zzz_damp_fort$fitted) +
autolayer(ets_zzz_fort_damped$mean, series = "ETS (A,Ad,N) damped forecast") + ylab("Average Monthly transactions") +
guides(colour = guide_legend(title = "Legend", title.position = "top")) +
theme(legend.position = "bottom") +
ggtitle("Forecasting with Exponential smoothing -- ETS(A,N,N), ETS(A,Ad,N)"))
})
# Residual diagnostics for the tuned ETS model (forecast::checkresiduals).
output$ets_residuals <- renderPlot({
checkresiduals(ets_zzz_opt)
})
############################### Business question #########################################
# Pairwise scatter/correlation matrix (GGally::ggpairs) over the KPI
# columns 12 through 19 of full_data_ts.
output$correlation <- renderPlot({
as.data.frame(full_data_ts[, c(12, 13:19)])%>%
ggpairs()
})
######################## SCENARIO FORECASTING UI #####################################
output$scenario_forecast <- renderPlot({
  ## Scenario forecast of average monthly revenue.
  ## A time-series linear model ("Model 4") regresses log(revenue + 1) on the
  ## web-analytics KPIs (all predictors except Avg_session); revenue is then
  ## forecast under the user-specified KPI scenario and plotted against the
  ## observed series.
  model_tsall_log_butAvgsession <- tslm(
    log(full_data_ts[,16] + 1) ~ Avg_ECR + Avg_Users + Avg_bounce_rate +
      Avg_session_duration + Avg_sessionPer_user + Avg_Pg_session,
    data = full_data_ts)

  # Forecast horizon (number of future periods) chosen in the UI.
  h <- input$horizon_forecast

  # Encode each scenario input as a signed predictor value: "Decrease"
  # scenarios become negative, increases stay positive.
  # Using a single helper removes the copy-paste pattern that previously
  # caused a BUG: the pages-per-session "Increase" branch read
  # input$session_peruser_sce instead of input$pages_persession_sce.
  signed_scenario <- function(direction, value) {
    if (direction == "Decrease") -value else value
  }
  ECR              <- signed_scenario(input$forecast_type_erc, input$erc_sce)
  bounce_rate      <- signed_scenario(input$forecast_type_bounce_rate, input$bounce_rate_sce)
  Users            <- signed_scenario(input$forecast_type_user, input$user_sce)
  session_duration <- signed_scenario(input$forecast_type_session_dur, input$session_dur_sce)
  sessionPer_user  <- signed_scenario(input$forecast_type_session_peruser, input$session_peruser_sce)
  Pg_session       <- signed_scenario(input$forecast_type_pages_persession, input$pages_persession_sce)

  # One row per forecast period, each predictor held at its scenario value.
  new_data <- data.frame(
    Avg_ECR = rep(ECR, h),
    Avg_bounce_rate = rep(bounce_rate, h),
    Avg_Users = rep(Users, h),
    Avg_session_duration = rep(session_duration, h),
    Avg_sessionPer_user = rep(sessionPer_user, h),
    Avg_Pg_session = rep(Pg_session, h)
  )

  ## FORECAST SCENARIO
  # Forecast under the scenario, then back-transform from the log scale.
  # BUG FIX: the model is fit on log(revenue + 1), so the back-transform must
  # be exp(y) - 1; the previous exp(y) overstated every point forecast by 1.
  fcast_scenario <- forecast(model_tsall_log_butAvgsession, newdata = new_data)
  fcast_scenario_dataframe <- data.frame(fcast_scenario)
  fcast_scenario_dataframe_backtransform <- fcast_scenario_dataframe %>%
    mutate(backtransfrom_Point.forcast = exp(Point.Forecast) - 1)

  # Convert to a ts so the forecast can be layered on the observed series.
  # NOTE(review): the forecast start (Aug 2021) is hard-coded -- confirm it
  # still matches the end of full_data_ts when the data are refreshed.
  fcast_scenario_ts <- ts(fcast_scenario_dataframe_backtransform,
                          start = c(2021, 8), frequency = 12)

  # Column 6 is the back-transformed point forecast (after Point.Forecast,
  # Lo/Hi 80 and Lo/Hi 95).
  autoplot(full_data_ts[, 16], series = "Actual Average Monthly Revenue") +
    autolayer(fcast_scenario_ts[,6], series = "Forecasted Average Monthly Revenue", PI = TRUE) +
    ylab("Average Monthly Revenue") +
    ggtitle("Forecast of Average Monthly Revenue based on Scenario") +
    guides(colour = guide_legend(title = "Legend", title.position = "top")) +
    theme(legend.position = "bottom")
})
######################### WEB ANALYTICS UI ###########################################
output$gauge <- renderGvis({
avgsession <- full_data_reactive()%>%
select(Avg_session)
Name_col <- ""
avgsession_display <- cbind(Name_col, avgsession)
googleVis::gvisGauge(avgsession_display, options=list(min=0, max=5000, greenFrom=3000,
greenTo=5000, yellowFrom=1500, yellowTo=3000,
redFrom=0, redTo=1500))
})
output$gauge_users <- renderGvis({
Avgusers <- full_data_reactive()%>%
select(Avg_Users)
Name_col <- ""
avguser_display <- cbind(Name_col, Avgusers)
gvisGauge(avguser_display, options=list(min=0, max=5000, greenFrom=3000,
greenTo=5000, yellowFrom=1500, yellowTo=3000,
redFrom=0, redTo=1500))
})
output$gauge_Pg_session <- renderGvis({
Avgpgsession <- full_data_reactive()%>%
select(Avg_Pg_session)
Name_col <- ""
avgpgsession_display <- cbind(Name_col, Avgpgsession)
gvisGauge(avgpgsession_display, options=list(min=0, max=10, greenFrom=5,
greenTo=10, yellowFrom=3, yellowTo=6,
redFrom=0, redTo=3))
})
output$gauge_session_duration <- renderGvis({
Avgsession_duration <- full_data_reactive()%>%
select(Avg_session_duration)
Name_col <- ""
avgsession_duration_display <- cbind(Name_col, Avgsession_duration)
gvisGauge(avgsession_duration_display, options=list(min=0, max=300, greenFrom= 200,
greenTo= 300, yellowFrom=100, yellowTo= 200,
redFrom=0, redTo=100))
})
output$gauge_revenue <- renderGvis({
Avgrevenue <- full_data_reactive()%>%
select(Avg_revenue)
Name_col <- ""
avgrevenue_display <- cbind(Name_col, Avgrevenue)
gvisGauge(avgrevenue_display, options=list(min=0, max=20000, greenFrom=15000,
greenTo=20000, yellowFrom=7000, yellowTo=15000,
redFrom=0, redTo= 7000))
})
output$gauge_bouncerate <- renderGvis({
Avgbouncerate <- full_data_reactive()%>%
select(Avg_bounce_rate)
Name_col <- ""
avgbouncerate_display <- cbind(Name_col, Avgbouncerate)
gvisGauge(avgbouncerate_display, options=list(min=0, max=60, greenFrom= 40,
greenTo= 60, yellowFrom= 20, yellowTo= 40,
redFrom=0, redTo= 20))
})
output$gauge_sessionPer_user <- renderGvis({
Avgsessionper_user <- full_data_reactive()%>%
select(Avg_sessionPer_user)
Name_col <- ""
avgsessionper_user_display <- cbind(Name_col, Avgsessionper_user)
gvisGauge(avgsessionper_user_display, options=list(min=0, max=2, greenFrom= 1.2,
greenTo=2, yellowFrom=0.5, yellowTo= 1.2,
redFrom=0, redTo=0.5))
})
output$gauge_erc <- renderGvis({
Avgecr <- full_data_reactive()%>%
select(Avg_ECR)
Name_col <- ""
avgecr_display <- cbind(Name_col, Avgecr)
gvisGauge(avgecr_display, options=list(min=0, max= 5, greenFrom= 2.7,
greenTo= 5, yellowFrom= 1.0, yellowTo=2.7,
redFrom=0, redTo= 1.0))
})
################ ANIMATIONS #########
observe(addHoverAnim(session, "returnuser_revenue", "rubberBand"))
observe(addHoverAnim(session, "newuser_revenue", "rubberBand"))
observe(addHoverAnim(session, "revenue_timeries", "swing"))
# observe(addHoverAnim(session, "seas", "pulse"))
observe(addHoverAnim(session, "revenue_decompose", "pulse"))
observe(addHoverAnim(session, "revenue_bfastmonitor", "pulse"))
observe(addHoverAnim(session, "revenue_change_detect", "pulse"))
observe(addHoverAnim(session, "revenue_stl_forecast", "pulse"))
observe(addHoverAnim(session, "revenue_trendSeasonal_forecast", "pulse"))
observe(addHoverAnim(session, "revenue_seasonality", "pulse"))
observe(addHoverAnim(session, "revenue_seasonality", "pulse"))
observe(addHoverAnim(session, "regress_model", "pulse"))
observe(addHoverAnim(session, "seasonal_forecast", "pulse"))
})
|
a1c754930f438e8d9b82ac1965067616086a4ce9 | b62236109f1e8d01739e6cc8ec3d62fd046a1784 | /dataFromBook DataVISWithR/scripts/timeseries_seasonalsubseries_inc.r | bf8577190fd27b48c416b0266640c4935aa36154 | [] | no_license | shahidnawazkhan/Machine-Learning-Book | 049670a3b9d74b11e619428ee11468f08ffee595 | f152f36a8b7dbabe30d2c1b6de10788cbd1fa5e8 | refs/heads/master | 2023-08-22T02:48:33.039710 | 2021-10-19T14:59:49 | 2021-10-19T14:59:49 | 395,435,421 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 951 | r | timeseries_seasonalsubseries_inc.r | pdf_file<-"pdf/timeseries_seasonalsubseries_inc.pdf"
cairo_pdf(bg="grey98", pdf_file,width=8,height=8)
par(omi=c(1,0,1,0.5),mai=c(2,0.80,0,0.5),family="Lato Light",las=1)
# Import data and prepare chart
source("scripts/inc_datadesign_dbconnect.r")
sql<-"select left(week, 4) year, substr(week, 6, 2) month, avg(chicken_soup) chicken_soup from google_trends group by left(week, 4), substr(week, 6, 2) order by year, month"
myData<-dbGetQuery(con,sql)
attach(myData)
y<-ts(chicken_soup,frequency =12,start=c(2004,1))
# Create chart
monthplot(y,axes=F,box=F,type="h",lwd=3,col="darkred",ylab="Normalized Search Activity")
axis(2,col=par("bg"),col.ticks="grey81",lwd.ticks=0.5,tck=-0.025)
# Titling
mtext("Google trend for 'chicken soup'",3,line=2,adj=0,cex=2.0,family="Lato Black",outer=T)
mtext("Jan 2004 to Feb 2012",3,line=0,adj=0,cex=1.5,font=3,outer=T)
mtext("Source: www.google.com/trends",1,line=3,adj=1.0,cex=0.95,font=3,outer=T)
dev.off() |
f1e5c73cfc2df6b35df27478c937b78a2a4e975b | 3578f8aed6fd510830bf79aa4213c4d6c30f48cc | /man/fs_di.Rd | ec02dab632241092018d8176d9f65b7e23bf978e | [
"MIT"
] | permissive | diazrenata/sadspace | 01d772eff7be14acb9c89c6774f6aba25d3dbebd | e8a1f105320dd99a8af7f94aaa8b644298fc8f3c | refs/heads/master | 2021-01-03T21:27:03.940129 | 2020-04-26T18:49:57 | 2020-04-26T18:49:57 | 240,241,563 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 286 | rd | fs_di.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sample_fs.R
\name{fs_di}
\alias{fs_di}
\title{Add diversity indices}
\usage{
fs_di(fs_samples)
}
\arguments{
\item{fs_samples}{fs df}
}
\value{
A data frame of the fs samples summarized to diversity indices.
}
\description{
Add diversity indices
}
|
cc25698bb4a38e444c8a8a35d99c2e2fb9b05a26 | c0e397f20fc5070e859cf2fe181767549be790e3 | /man/Mesh.Rd | 9da5443186b60360726d5f520f2d0d3241deb9e4 | [] | no_license | mayunlong89/RISmed | 8c3ef9245105ce914d3203020022f3daa088ba24 | f61c84dc295681bb22d514d834ff4fa421cf7920 | refs/heads/master | 2023-08-19T08:04:07.559181 | 2021-10-03T06:53:26 | 2021-10-03T06:53:26 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 961 | rd | Mesh.Rd | \name{Mesh}
\alias{Mesh}
\title{
Extracts \code{Mesh} headings from \code{Medline} object.
}
\description{
Extractor for the \code{Mesh} headings of a \code{Medline} object.
}
\usage{
Mesh(object)
}
\arguments{
\item{object}{instance of class \code{Medline}}
}
\value{List by Pubmed article. Each list contains a data frame with \code{Heading} and \code{Type}. The \code{Heading} is a MeSH Term and \code{Type} is either a \code{Descriptor} or a \code{Qualifier} of a Descriptor term. Qualifiers of a Descriptor immediately follow the Descriptor term in the data frame. When MeSH headings have not been included with a MEDLINE record, the list will contain \code{NAs} (see details).
}
\seealso{\code{\link{Medline}}}
\details{In Process and publisher-supplied records lack MeSH terms. See the MeSH home page
\url{https://www.nlm.nih.gov/mesh/meshhome.html} for additional information about MeSH.
Note that more recent records may lack MeSH headings.
}
|
b28091f731e7400c8033f414034e3d184b0d90d3 | 3b4d121df48e0b09c3a8b7b95154a7ab2b29a40b | /softChange/pg/size.r | 4ccf8ceb9716669e0d2a37de11601a240f24b2c0 | [] | no_license | hackerlank/hacking | 998e11061584ca4acad56312d7dafad51de0722c | 873f30cf1489c7856842b1a861c817f0f1963ed4 | refs/heads/master | 2020-06-12T19:03:47.656164 | 2016-07-06T15:48:51 | 2016-07-06T15:48:51 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 570 | r | size.r | #!/usr/bin/Rscript
library(RdbiPgSQL)
conn <- dbConnect(PgSQL(), host="localhost", dbname="ruby", user="dmg", password="patito32")
#res <- dbSendQuery(conn, "select date, avg(churn) from (
#select extract(year from datecomm) * 12 + extract(month from datecomm) as date, sumadd-sumrem as churn from commitsum natural join metadata
#) as rip group by date;");
#
res <- dbSendQuery(conn, "select extract(year from datecomm) * 12 + extract(month from datecomm) as date, sumadd-sumrem as churn from commitsum natural join metadata ;");
bydate <- dbGetResult(res)
|
73951df60c9cbd86fb2ae46dabc605912560bcc9 | 1e4c9ffd2979c793edf1312b220abeecb3d1a25b | /man/computeMethylationProfile.Rd | aba16dd751b757992b15862f4d5432189e344e33 | [] | no_license | nrzabet/DMRcaller | e21e2919f191fbb9b5db4a8a91be15526c288c22 | 6191d9e68dee6e0a2daedc07485bdc939e84e14e | refs/heads/master | 2020-03-11T13:32:07.018510 | 2019-02-15T18:16:10 | 2019-02-15T18:16:10 | 130,027,757 | 5 | 3 | null | null | null | null | UTF-8 | R | false | false | 2,209 | rd | computeMethylationProfile.Rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/profile.R
\name{computeMethylationProfile}
\alias{computeMethylationProfile}
\title{Compute methylation profile}
\usage{
computeMethylationProfile(methylationData, region,
windowSize = floor(width(region)/500), context = "CG")
}
\arguments{
\item{methylationData}{the methylation data stored as a \code{\link{GRanges}}
object with four metadata columns (see \code{\link{methylationDataList}}).}
\item{region}{a \code{\link{GRanges}} object with the regions in which to
compute the DMRs.}
\item{windowSize}{a \code{numeric} value indicating the size of the window in
which methylation is averaged.}
\item{context}{the context in which the DMRs are computed (\code{"CG"},
\code{"CHG"} or \code{"CHH"}).}
}
\value{
a \code{\link{GRanges}} object with equal sized tiles of the
\code{region}. The object consists of the following metadata
\describe{
\item{sumReadsM}{the number of methylated reads.}
\item{sumReadsN}{the total number of reads.}
\item{Proportion}{the proportion of methylated reads.}
\item{context}{the context (\code{"CG"}, \code{"CHG"} or \code{"CHH"}).}
}
}
\description{
This function computes the low resolution profiles for the bisulfite
sequencing data.
}
\examples{
# load the methylation data
data(methylationDataList)
# the region where to compute the profile
region <- GRanges(seqnames = Rle("Chr3"), ranges = IRanges(1,1E6))
# compute low resolution profile in 20 Kb windows
lowResProfileWTCHH <- computeMethylationProfile(methylationDataList[["WT"]],
region, windowSize = 20000, context = "CHH")
\dontrun{
# compute low resolution profile in 10 Kb windows
lowResProfileWTCG <- computeMethylationProfile(methylationDataList[["WT"]],
region, windowSize = 10000, context = "CG")
lowResProfileMet13CG <- computeMethylationProfile(
methylationDataList[["met1-3"]], region,
windowSize = 10000, context = "CG")
}
}
\author{
Nicolae Radu Zabet and Jonathan Michael Foonlan Tsang
}
\seealso{
\code{\link{plotMethylationProfileFromData}},
\code{\link{plotMethylationProfile}}, \code{\link{methylationDataList}}
}
|
f8b3a4da6762e08fd8c26dfff0b0e874b540f184 | 4f6639f66fc05559d38b46483cd26cdd1037d0f1 | /r/rproject2hrace/src/env.r | 8abde899d2dc310c59c82c4ec57ea34c7e7a6279 | [] | no_license | renc/coding_exercises | fb1bda9f4951ea7501dcdf253e82e35257c26352 | 2e531f548c92e066a473c7685be24776ff85196c | refs/heads/master | 2023-09-01T19:38:11.319703 | 2023-08-28T13:22:46 | 2023-08-28T13:22:46 | 16,608,891 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 498 | r | env.r |
# Rolling-window / date parameters used by the model scripts.
row.win <- 50
start.date <- 50
period <- 1
# 1-based column indices into the race-record data.
# NOTE(review): the names suggest finishing position, horse, jockey, trainer,
# load, weight, interval, odds and race id -- confirm against the data layout.
finishing.position.ix <- 1
horse.ix <- 4
jockey.ix <- 5
trainer.ix <- 6
load.ix <- 7
weight.ix <- 8
interval.ix <- 15
odd.ix <- 16
race.ix <- 19
# Look-back interval sizes for horse-level features (largest to smallest).
roll.back.horse.interval.1 <- 100 #300 # renc, original code use 300/200/100 but should be 100/50/20 from the comments
roll.back.horse.interval.2 <- 50 #200
roll.back.horse.interval.3 <- 20 #100
# Look-back interval sizes for jockey-level features.
roll.back.jockey.interval.1 <- 100
roll.back.jockey.interval.2 <- 50
roll.back.jockey.interval.3 <- 20
|
dbc241d84c33c3f7490caeca43e00689790667d7 | c03726589af4d711b9d010e742ff5285ec42a76d | /plot.mc.C.r | 70d5887a6f67d9431eca05797de300774c9cc126 | [] | no_license | EmilyKlein/KPFM2 | abdda7d656104aa92628605478845f0ed27862ba | 7bcc660332d4f02a2dba828dfeee7025cd3b84da | refs/heads/master | 2022-01-31T15:40:35.618126 | 2022-01-18T22:14:37 | 2022-01-18T22:14:37 | 225,436,222 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,522 | r | plot.mc.C.r | plot.mc.C<-function(mc.object, quants = c(0.2, 0.5, 0.8), plot.trials = TRUE, annual.plots = TRUE, connector = "sum", page.layout = c(4, 4), Cylimits = c(0, 3), color.tags = NULL)
{
# auxiliary function to plot abundances from Monte Carlo trials
# George Watters
# code last edited 19 July 2006
#
if(!is.null(mc.object$color.tags)&&max(mc.object$color.tags>15)){
stop("FAULT -- software not designed to deal with plotting more than 15 colors in a single panel.\nIf you're sure you want to do this we can easily edit the color table.")
}
if(!is.null(quants)&&length(quants) > 3){stop("FAULT: Sorry, you can only plot 3 quantiles.")}
#
OPTION6 <- FALSE
if(!is.null(mc.object$call$fishing.option) && mc.object$call$fishing.option==6){OPTION6<-TRUE}
#
ntrials <- mc.object$setup$ntrials
nssmus <- mc.object$setup$nssmus
nyears <- mc.object$setup$nyears
nseasons <- mc.object$setup$nseasons
ntimes <- mc.object$setup$ntimes
#
# get the desired data as determined by the arguments annual.plots and connector
# then standardize these data as appropriate
#
if(annual.plots){
season.vector <- rep(1:nseasons, length.out = ntimes)
year.vector <- rep(1:(ntimes/nseasons),each = nseasons)
time.label <- "year"
if(is.character(connector)){
plot.time <- unique(year.vector)
tt.data2 <- array(0,dim=c(length(unique(year.vector)),nssmus,ntrials))
for(j in 1:ntrials){
for(i in 1:nssmus){
tt.denom <- as.vector(tapply(mc.object$fishery$allocation[,i,j],list(year.vector),sum))
tt.y <- as.vector(tapply(mc.object$fishery$catch[,i,j],list(year.vector),sum))
tt.data2[,i,j] <- tt.y/tt.denom
}
}
title.prefix <- "sumC/sumAC"
}
if(is.numeric(connector)){
# first check that the connector is a feasible season
if(connector > nseasons){stop("FAULT: connector season > nseasons")}
keepers <- (season.vector == connector)
plot.time <- year.vector[keepers]
tt.data2 <- array(0,dim=c(length(unique(year.vector)),nssmus,ntrials))
for(j in 1:ntrials){
for(i in 1:nssmus){
tt.denom <- mc.object$fishery$allocation[(1:ntimes)[keepers],i,j]
tt.y <- mc.object$fishery$catch[(1:ntimes)[keepers],i,j]
tt.data2[,i,j] <- tt.y/tt.denom
}
}
title.prefix <- paste("C",connector,"/AC",connector,sep="")
}
}
else {
plot.time <- 1:ntimes
tt.data2 <- array(0,dim=c(ntimes,nssmus,ntrials))
for(j in 1:ntrials){
tt.denom <- mc.object$fishery$allocation[,,j]
tt.y <- mc.object$fishery$catch[,,j]
tt.data2[,,j] <- tt.y/tt.denom
}
title.prefix <- "C/AC"
time.label<-"season"
}
tt.data2[is.nan(tt.data2)]<-NA
#
# now compute quantiles
if(is.null(quants)){
quants <- rep(NA, 3)
plot.trials <- TRUE
}
if(!is.na(quants[2])) {
ttmed <- apply(tt.data2, 2, FUN = function(x, prob)
{
apply(x, 1, quantile, probs = prob, na.rm = T)
}
, prob = quants[2])
}
if(!is.na(quants[1])) {
ttlow <- apply(tt.data2, 2, FUN = function(x, prob)
{
apply(x, 1, quantile, probs = prob, na.rm = T)
}
, prob = quants[1])
}
if(!is.na(quants[3])) {
tthigh <- apply(tt.data2, 2, FUN = function(x, prob)
{
apply(x, 1, quantile, probs = prob, na.rm = T)
}
, prob = quants[3])
}
title.suffix <- paste("quantiles = ", deparse(quants), sep="")
title.string <- paste(title.prefix, title.suffix, sep = " -- ")
red.width <- ifelse(plot.trials,2,1)
#
# set up the color table
# now actually turn the color.tags into colors that are interpretable by the plot functions
# black, blue, green, yellow, magenta, orange, cyan, lightgoldenrod, blueviolet, springgreen, gray47, aquamarine3, orange4, purple, yellow4
if(!is.null(mc.object$color.tags)){
tt.colors <- colors()[c(24,26,254,652,450,498,68,410,31,610,200,11,502,547,656)]
tt.colors <- tt.colors[match(mc.object$color.tags,1:15)]
}
else{
tt.colors <- rep("black", ntrials)
}
#
# now do the plotting
windows()
origpar <- par(no.readonly=TRUE)
#par(oma = c(0, 0, 2, 0), mfrow = page.layout)
par(oma = c(4, 2, 4, 3), mar=c(4,4,1,0)+0.1, mgp=c(2,0.75,0), xpd=FALSE, mfrow = page.layout)
panel.count <- 1
left.col.panels <- seq(from=1,to=page.layout[1]*page.layout[2],by=page.layout[2])
bottom.panels <- (1:(page.layout[1]*page.layout[2]))[max(left.col.panels):((page.layout[1]*page.layout[2]))]
for(i in 1:nssmus) {
if(panel.count > (page.layout[1] * page.layout[2])) {
panel.count <- 1
par(origpar)
windows()
#par(oma = c(0, 0, 2, 0), mfrow = page.layout)
par(oma = c(4, 2, 4, 3), mar=c(4,4,1,0)+0.1, mgp=c(2,0.75,0), xpd=FALSE, mfrow = page.layout)
}
if(is.element(panel.count,left.col.panels)){ylabel<-"relative catch"}else{ylabel<-""}
if(is.element(panel.count,bottom.panels)){xlabel<-time.label}else{xlabel<-""}
if(!all(is.na(tt.data2[, i, 1]))) {
plot(plot.time, tt.data2[, i, 1], type = "n", ylim = Cylimits, ylab = ylabel,
xlab = xlabel,axes=FALSE)
box()
axis(1, cex.axis=0.8)
axis(2, cex.axis=0.8)
if(plot.trials){
for(j in 1:ntrials) {
lines(plot.time, tt.data2[, i, j], col = tt.colors[j])
if(OPTION6){points(plot.time, tt.data2[, i, j], col = tt.colors[j],pch=16,cex=0.75)}
}
}
if(!is.na(quants[2])){
lines(plot.time, ttmed[, i], col = "red", lwd = red.width, lty = 1)
if(OPTION6){points(plot.time, ttmed[, i], col = "red", pch=16, cex=0.75)}
}
if(!is.na(quants[1])) {
lines(plot.time, ttlow[, i], col = "red", lwd = red.width, lty = 2)
if(OPTION6){points(plot.time, ttlow[, i], col = "red", pch=16, cex=0.75)}
}
if(!is.na(quants[3])) {
lines(plot.time, tthigh[, i], col = "red", lwd = red.width, lty = 2)
if(OPTION6){points(plot.time, tthigh[, i], col = "red", pch=16, cex=0.75)}
}
}
else {
plot(range(plot.time), Cylimits, type = "n", ylab = ylabel, xlab = xlabel,axes=FALSE)
box()
axis(1, cex.axis=0.8)
axis(2, cex.axis=0.8)
}
title(main=paste("SSMU ", i, sep = ""), line = 0.5, outer = FALSE, cex.main = 0.9)
panel.count <- panel.count + 1
if(panel.count > (page.layout[1] * page.layout[2])) {
mtext(title.string, line = 1, cex = 0.75, outer = TRUE)
}
}
mtext(title.string, line = 1, cex = 0.75, outer = TRUE)
}
|
767fc3b11c4d18b4b26878c6840b508c517dbef2 | d1525206f07392821e1c6611f8c70d3dbf686c7a | /man/perms.Rd | 3361562fb357091dd992a76208364188c4609c74 | [
"MIT"
] | permissive | mattwarkentin/sandbox | 8319143142f5069050d45275cb89a944111194da | 7b0cff18583b2ff14e9ce57373205ddf37a80c4b | refs/heads/master | 2020-04-18T09:41:49.185540 | 2020-01-24T21:14:43 | 2020-01-24T21:14:43 | 167,443,475 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,559 | rd | perms.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/perm-test-funs.R
\name{perms}
\alias{perms}
\title{Permutation Resampling}
\usage{
perms(
data = NULL,
...,
strata = NULL,
times = 25,
apparent = FALSE,
seed = NULL
)
}
\arguments{
\item{data}{A data frame.}
\item{...}{Column names in \code{data} to permute/shuffle; or one of the \code{\link[tidyselect]{select_helpers}}.}
\item{strata}{A discrete varible for stratified permutations.}
\item{times}{Number of permutations.}
\item{apparent}{A \code{logical}. Should a copy of the input \code{data} be returned?}
\item{seed}{A numeric value used to set the RNG seed for reproducible permutations.}
}
\value{
A data frame (\code{\link{tibble}}) where each row is a permuted version of the input data. The returned data frame has the added class \code{perms} which can be used by the \code{summary} generic for S3 methods dispatch.
}
\description{
A function for generating permuted datasets; where one can permute as many columns as desired. Stratified (i.e. group-based) shuffling can be achieved by specifying a column name for the \code{strata} argument. See details for a more complete description and guidance on usage.
}
\details{
This function was motivated by the \code{rsample} package which allows straightforward implementation of several common resampling methods (e.g. boostrap, K-fold crossvalidation). While the internal mechanisms of this function are quite different, the goal is to provide a function that works like rsample for permuted data. This function works well with the pipe. See \code{\link{magrittr}} for more details.
After using \code{perms}, one can compute permutation-based P-values or other statistics using any function, including custom functions, in a concise manner. The syntax and usage of this function is motivated by the \code{tidy eval} principles. Thus, you specify both the names of the columns to permute and the stratitfying variable as bare column names, not quoted names. The default number of permutations is aligned with the default number of bootstraps for \code{rsample::bootstraps}.
This function allows for easy integration with \code{\link[purrr]{map}} functions for functional programming. See the examples for a use-case. Also, consider the using \code{\link[furrr]{future_map}} equivalents for parallel computations.
}
\examples{
iris \%>\%
perms(Sepal.Length)
iris \%>\%
perms(Sepal.Width, Sepal.Length) \%>\%
dplyr::mutate(cor = purrr::map_dbl(data, ~with(., cor(Sepal.Width, Sepal.Length))))
}
|
802f58da4a714c24202cc3702e7309b8354ccd28 | fe41c9601e66865451ee464e937a1d3bfe3f507e | /cachematrix.R | 731e4a6067d9dfca583c1ad583ecca24a14b8bd5 | [] | no_license | ecrvanberkel/ProgrammingAssignment2 | 0e88e94c8a8d6da45bcfa153579dc50d56a287d3 | bacc43c04f1567439c58fe1064a1afeb1ab9d95d | refs/heads/master | 2021-01-12T20:56:01.678618 | 2014-05-21T11:54:22 | 2014-05-21T11:54:22 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,103 | r | cachematrix.R | ## These functions are able to compute the inverse of a matrics and
## store it to cache. The next time an inverse is to be computed,
## it is first checked whether the inverse already exists and it is
## only recomputed if the inverse does not yet exist.
## In makeCacheMatrix, a list is created which is used for inserting
## and outputting a matrix and its corresponding inverse.
makeCacheMatrix <- function(x = matrix()) {
m <- NULL # set m equal to zero in the function environment
## Define the set function
set <- function(y) {
x <<- y # set x equal to y in the parent environment
m <<- NULL # set m equal to NULL in the parent environment
}
## Define the get function
get <- function() x
## Define the setinverse function which takes the solve function as
## its argument and sets 'm' equal to 'solve' in the parent environment
setinverse <- function(solve) m <<- solve
## Define the getinverse function
getinverse <- function() m
## List the outcomes
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## This function chechks whether the inverse of a matrix
## already exists in cache. If so, it loads the results.
## If the inverse does not exist yet, it is computed.
cacheSolve <- function(x, ...) {
# set m equal to the inverse matrix if it exists
m <- x$getinverse()
# If m is not NULL, than use the cached data
if(!is.null(m)) {
message("getting cached data")
return(m) # returns 'm' and makes sure that
# the rest of the function is not
# evaluated
}
# if m equals NULL than the inverse is to be calculated
data <- x$get() # assign the matrix to the variable 'data'
m <- solve(data, ...) # calculate the inverse matrix and
# assign it to 'm'
x$setinverse(m) # assign the resulting matrix 'm' to
# the 'setinverse' element of 'x'
m # return 'm'
}
|
48078d6436ae3a1a6b6c18978ade5feffb684a3d | 984852db807b748035294db8843d570a336ca2b2 | /Lab/Lab2.2.R | d9c20697e82be4b78146a53cabc63bd90f77f21c | [] | no_license | xiaoyaoyang/Applied-Data-Science | f6502b5bcfe0f08f5b87ddfe63c4082cea661c0e | 4031345ca5e82bbb9902fcc40aac0c70737afccd | refs/heads/master | 2021-01-02T22:45:45.150783 | 2014-10-27T20:50:13 | 2014-10-27T20:50:13 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 213 | r | Lab2.2.R | \documentclass{article}
\begin{document}
@BOOK{barberBRML2012,
author = {Barber, D.},
title= {{Bayesian Reasoning and Machine Learning}},
publisher = {{Cambridge University Press}},
year = 2012}
\end{document} |
b2db90a48f3576483b4248806b3ce0304a8f9cd2 | 9721b23133259c14b670297ea36a6e55a6040e1b | /man/ia_cast.Rd | 5e7471aae392cabde5038d5159284ba028a623d4 | [] | no_license | ryantimpe/ipfitr | 8261d959cb0e79f746af0ec3ba00fdf213229442 | 272448090af95c63092471b7d01997db041cf3e3 | refs/heads/master | 2021-10-12T00:04:45.603694 | 2019-01-30T21:26:45 | 2019-01-30T21:26:45 | 103,159,300 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,307 | rd | ia_cast.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ia_cast.R
\name{ia_cast}
\alias{ia_cast}
\title{Use one vector to fore- or backcast another vector}
\usage{
ia_cast(target, cast_source, cast_source_metric = "level",
direction = "forward", base_index = NULL)
}
\arguments{
\item{target}{Vector of values to be fore- or backcast.}
\item{cast_source}{Vector of values or growth rates to be applied to \code{target} as a cast. \code{cast_source[1]} aligns with \code{target[1]}. Must be same length as \code{target}}
\item{cast_source_metric}{'level' to apply growth rates calculated from the values in \code{cast_source}. 'growth_rate' to use the values in \code{cast_source} as growth rates directly.}
\item{direction}{Use 'forward' to forecast from \code{base_index} or 'backward' to backcast from \code{base_index}, preserving original values in opposite direction. Use 'both' to cast in both directions from \code{base_index}.}
\item{base_index}{Optional index to begin cast in \code{target}. Otherwise first/last non-NA value will be used}
}
\value{
An array of the same length as \code{target} with fore- or backcast values.
}
\description{
Use one vector to fore- or backcast another vector
}
\examples{
ia_cast(c(10:20, rep(NA, 5)), c(1:16))
ia_cast(c(rep(NA, 5), 10:20), c(1:16), direction = "backward", base_index = 10)
}
|
83ea8f293cf27b8e1e674892773b42a73fb640dd | a5e49e9b3e7892ce476bab528cde3f686d5a5e3d | /R/reg.5Plot.R | 7b7da712898a89ec36facc0aec8b938897fe59d2 | [] | no_license | cran/lessR | a7af34480e88c5b9bf102ab45fa6464a22ffbe3b | 562f60e6688622d8b8cede7f8d73d790d0b55e27 | refs/heads/master | 2023-05-29T07:57:09.544619 | 2023-05-14T20:20:02 | 2023-05-14T20:20:02 | 17,697,039 | 6 | 3 | null | null | null | null | UTF-8 | R | false | false | 10,554 | r | reg.5Plot.R | .reg5Plot <-
# Internal lessR helper (leading-dot name keeps it out of the exported API):
# renders the graphics for a fitted regression model `lm.out`.
#   - 0 or 1 predictor (or a numeric + factor ANCOVA pair): a scatterplot
#     with the least-squares line(s); optionally confidence/prediction bands,
#     residual segments, or a bubble plot for low-cardinality integer data
#   - 2+ predictors (non-ANCOVA): a scatterplot matrix via .plt.mat()
# Relies on many package-internal helpers (.RSadj, .plt.marg, .plt.bck,
# .axes, .axlabs, .get_fill, .plt.by.legend, .is.integer, .maketrans, .fmt,
# .plt.mat, .showfile) and on lessR-managed options() for themes/colors --
# their exact semantics are defined elsewhere in the package.
# Returns (invisibly) a list: plot count `i`, titles `ttl`, ANCOVA line
# equations `txeqs`, and the categorical/continuous variable names.
function(lm.out, n_res_rows=NULL, n_pred_rows=NULL,
         scatter_coef=FALSE, X1_new=NULL, ancova,
         numeric.all, in.data.frame, c.int, p.int, plot_errors=FALSE,
         digits_d, n_cat, pdf=FALSE, width=5, height=5, manage.gr=FALSE,
         quiet, ...) {
  nm <- all.vars(lm.out$terms) # names of vars in the model
  n.vars <- length(nm)
  n.pred <- n.vars - 1L
  n.obs <- nrow(lm.out$model)
  n.keep <- nrow(lm.out$model)
  # intercept and first slope of the fitted model
  b0 <- lm.out$coefficients[1]
  b1 <- lm.out$coefficients[2]
  if (is.null(n_pred_rows))
    n_pred_rows <- ifelse (n.keep < 25, n.keep, 4)
  if (n_pred_rows == "all") n_pred_rows <- n.keep # no preds with n_pred_rows=0
  # pdf graphics option
  if (pdf) {
    pdf_file <- "RegScatterplot.pdf"
    if (n.pred > 1) pdf_file <- "RegScatterMatrix.pdf"
    pdf(file=pdf_file, width=width, height=height)
  }
  # keep track of the plot in this routine
  plt.i <- 0L
  plt.title <- character(length=0)
  # for ANCOVA, identify which of the two predictors is the factor (x.cat)
  # and which is numeric (x.cont); a single scatterplot is drawn in that case
  x.cat <- 0
  x.cont <- 0
  do.sp <- ifelse (n.pred < 2, TRUE, FALSE)
  if (ancova) {
    if (is.numeric(lm.out$model[,nm[2]]) && is.factor(lm.out$model[,nm[3]])) {
      x.cat <- 3
      x.cont <- 2
      do.sp <- TRUE
    }
    if (is.numeric(lm.out$model[,nm[3]]) && is.factor(lm.out$model[,nm[2]])) {
      x.cat <- 2
      x.cont <- 3
      do.sp <- TRUE
    }
    plot_errors <- FALSE
    lvl <- levels(lm.out$model[,nm[x.cat]])
  }
  txeqs <- NULL
  # ----------------------------------------------------
  # scatterplot, if one (or no) pred variables or ancova
  if (do.sp) {
    if (n.pred %in% 1:2) {
      if (!ancova)
        x.values <- lm.out$model[,nm[2]]
      else
        x.values <- lm.out$model[,nm[x.cont]]
    }
    else if (n.pred == 0) { # null model
      # no predictor: plot the response against the observation index
      x.values <- 1:n.obs
      nm[2] <- "Index"
      x.lab <- nm[2]
    }
    y.values <- lm.out$model[,nm[1]]
    # confidence/prediction bands only when intervals were supplied and the
    # predictor is a single numeric variable (no new-X prediction, no ANCOVA)
    do.predint <- ifelse (n_pred_rows==0 || !is.null(X1_new) || is.null(p.int)
                          || ancova, FALSE, TRUE)
    if (n.pred > 0)
      if (is.factor(lm.out$model[,nm[2]])) do.predint <- FALSE
    # title
    if (!do.predint || !is.numeric(x.values)) {
      ctitle <- "Scatterplot"
      if (is.numeric(x.values)) {
        if (n.pred == 0)
          ctitle <- paste(ctitle, "and Null Model")
        else
          ctitle <- paste(ctitle, "and Least-Squares Line")
        if (ancova)
          ctitle <- paste(ctitle, "s", sep="")
      }
      else if (is.factor(x.values) && n.pred==1 && nlevels(x.values)==2) {
        ctitle <- paste(ctitle, "and Least-Squares Line")
      }
      y.min <- min(lm.out$model[,nm[1]])
      y.max <- max(lm.out$model[,nm[1]])
    }
    else {
      # widen the y-range so the prediction band fits on the plot
      ctitle <- "Reg Line, Confidence & Prediction Intervals"
      y.min <- min(p.int$lwr)
      y.max <- max(max(p.int$upr), max(lm.out$model[,nm[1]]) )
    }
    plt.i <- plt.i + 1L
    plt.title[plt.i] <- gsub(pattern="\n", replacement=" ", x=ctitle)
    # scale for regular R or RStudio
    axis_cex <- 0.76
    radius <- 0.22
    adj <- .RSadj(radius, axis_cex, lab_cex=getOption("lab_cex"))
    radius <- adj$radius
    size.lab <- getOption("lab_cex")
    cex.txt <- getOption("axis_cex")
    # size of points
    size.pt <- ifelse (.Platform$OS == "windows", 0.85, 0.70)
    # set margins
    # NOTE: `lm` and `rm` below locally shadow the base functions of the same
    # names; harmless here but worth knowing when reading this section
    max.width <- strwidth(as.character(max(pretty(y.values))), units="inches")
    margs <- .plt.marg(max.width, y.lab=nm[1], x.lab=nm[2], main=NULL, sub=NULL)
    lm <- margs$lm
    tm <- margs$tm
    rm <- margs$rm
    bm <- margs$bm
    if (ancova) {
      # extra right margin to make room for the by-level legend
      big.nm <- max(nchar(lvl))
      if (big.nm > 6) rm <- rm + (.05 * (big.nm - 6))
      rm <- rm + .30 + (.65 * getOption("axis_cex")) # better if axis_y_cex
    }
    par(bg=getOption("window_fill"))
    orig.params <- par(no.readonly=TRUE)
    on.exit(par(orig.params))  # restore graphics parameters on exit
    par(mai=c(bm, lm, tm, rm))
    plot(x.values, y.values, type="n", axes=FALSE, ann=FALSE)
    usr <- par("usr")
    if (is.factor(x.values)) {
      x.lvl <- levels(x.values)
      axT1 <- 1:length(x.lvl) # mark category values
    }
    else {
      x.lvl <- NULL
      axT1 <- axTicks(1) # else numeric, so all the ticks
    }
    .plt.bck(usr, axT1, axTicks(2))
    .axes(x.lvl, NULL, axT1, axTicks(2))
    theme <- getOption("theme")
    if (!ancova) {
      .axlabs(x.lab=nm[2], y.lab=nm[1], main.lab=NULL, sub.lab=NULL)
      fill <- getOption("pt_fill")
      color <- getOption("pt_color")
    }
    else {
      # one fill/color per level of the categorical variable
      .axlabs(x.lab=nm[x.cont], y.lab=nm[1], main.lab=NULL, sub.lab=NULL)
      clr <- .get_fill(theme)
      fill <- getColors(clr, n=length(lvl))
      color <- getColors(clr, n=length(lvl))
    }
    # Plot legend for ancova
    if (ancova) {
      pts_trans <- 0
      shp <- 21
      fill_bg <- "transparent"
      options(byname = nm[x.cat])
      .plt.by.legend(lvl, color, fill, shp, pts_trans, fill_bg, usr)
    }
    # Plot points
    # -----------
    # treat the data as "discrete" (bubble plot) only when both variables are
    # integer-valued and at least one has few unique values
    # NOTE(review): the n_cat parameter is shadowed by the hard-coded 10 here
    # -- confirm that is intentional
    ux <- length(unique(x.values))
    uy <- length(unique(y.values))
    n_cat <- 10
    discrete <- ifelse (ux>n_cat && uy>n_cat || !.is.integer(x.values) ||
                        !.is.integer(y.values), FALSE, TRUE)
    if (!discrete) {
      n.iter <- ifelse(ancova, nlevels(lm.out$model[,nm[x.cat]]), 1)
      for (i in 1:n.iter) { # iter > 1 only for ancova, levels of cat var
        if (!ancova)
          ind <- 1:length(x.values)
        else
          ind <- which(lm.out$model[,nm[x.cat]] == lvl[i])
        points(x.values[ind], y.values[ind],
               pch=21, col=color[i], bg=fill[i], cex=size.pt)
      } # end 1:n.iter
    } # end !discrete
    if (discrete) { # bubble plot, not for ancova
      mytbl <- table(x.values, y.values) # get the counts, all x-y combinations
      n.count <- nrow(mytbl) * ncol(mytbl)
      count <- integer(length=n.count)
      # melt the table of counts to a data frame with xx, yy, count
      xx <- integer(length=n.count)
      yy <- integer(length=n.count)
      k <- 0
      for (i in 1:nrow(mytbl)) {
        for (j in 1:ncol(mytbl)) {
          if (mytbl[i,j] != 0) { # 0 plots to a single pixel, so remove
            k <- k + 1
            count[k] <- mytbl[i,j]
            xx[k] <- as.numeric(rownames(mytbl)[i]) # rownames are factors
            yy[k] <- as.numeric(colnames(mytbl)[j])
          }
        }
      }
      cords <- data.frame(xx, yy, count)
      # bubble area scaled by count^0.6, then counts < 5 are blanked so only
      # larger bubbles get a printed count label
      power <- 0.6
      sz <- cords[,3]**power # radius unscaled
      radius <- 0.18
      symbols(cords$xx, cords$yy, circles=sz, inches=radius,
              bg=.maketrans(fill, 110), fg=color, add=TRUE, ...)
      q.ind <- 1:nrow(cords) # all bubbles get text
      for (i in 1:nrow(cords)) if (cords[i,3] < 5) cords[i,3] <- NA
      text(cords[q.ind,1], cords[q.ind,2], cords[q.ind,3], cex=0.8)
    } # end bubble plot
    # Plot Line
    # ---------
    if (n.pred == 0) {
      # null model: horizontal line at the intercept
      m <- lm.out$coefficients[1] # mean of Y
      mv <- rep(m, n.obs)
      names(mv) <- NULL
      lines(x.values, mv, lwd=0.75)
    }
    else if (n.pred == 1) {
      if (!is.factor(x.values)) {
        abline(b0, b1, col=getOption("segment_color"), lwd=1)
      }
      else if (nlevels(x.values)==2) {
        # two-level factor: connect the two group fits with a segment
        y0 <- b0 + (b1*0)
        y1 <- b0 + (b1*1)
        abline(v=1, col="gray60", lwd=.5)
        abline(v=2, col="gray60", lwd=.5)
        abline(h=y0, col="gray60", lwd=.5)
        abline(h=y1, col="gray60", lwd=.5)
        segments(y0=y0, y1=y1, x0=1, x1=2, col="black", lwd=1.5)
      }
    }
    else if (ancova) {
      # one parallel regression line per level of the factor; also build the
      # per-level equation strings returned in txeqs
      coefs <- lm.out$coefficients
      n.lvl <- nlevels(lm.out$model[,nm[x.cat]])
      b.cont <- ifelse (x.cont == 2, coefs[2], coefs[1+n.lvl])
      if (x.cat == 2)
        b.cat <- coefs[2:n.lvl]
      else
        b.cat <- coefs[3:(1+(n.lvl))]
      tx <- character(length = 0)
      for (i.coef in 0:length(b.cat)) {
        if (i.coef == 0)
          b00 <- b0
        else
          b00 <- b0 + b.cat[i.coef]
        abline(b00, b.cont, col=fill[i.coef+1], lwd=1.5)
        tx[length(tx)+1] <- paste("Level ",lvl[i.coef+1], ": y^_", nm[1],
          " = ", .fmt(b00, digits_d), " + ", .fmt(b.cont, digits_d),
          "(x_", nm[x.cont], ")", sep="")
      }
      txeqs <- tx
    }
    # Plot Errors
    # -----------
    # vertical segments from each fitted value to its observed value
    if (plot_errors) {
      theme <- getOption("theme")
      red <- rgb(130,40,35, maxColorValue=255)
      pe.clr <- ifelse (theme %in% c("gray", "white"), "gray58", red)
      segments(y0=lm.out$fitted.values, y1=lm.out$model[,1],
               x0=x.values, x1=x.values, col=pe.clr, lwd=1)
    }
    # Plot Intervals
    # --------------
    # draw the confidence (c.int) and prediction (p.int) band boundaries,
    # then shade each band with a polygon
    if (!is.factor(x.values) && do.predint) {
      col.ci <- getOption("segment_color")
      col.pi <- "gray30"
      lines(x.values, c.int$lwr, col=col.ci, lwd=0.75)
      lines(x.values, c.int$upr, col=col.ci, lwd=0.75)
      lines(x.values, p.int$lwr, col=col.pi, lwd=1)
      lines(x.values, p.int$upr, col=col.pi, lwd=1)
      len <- length(x.values)
      xx <- c( c(x.values[1],x.values,x.values[len]),
               rev(c(x.values[1],x.values,x.values[len])) )
      yy <- c( c(min(c.int$upr),c.int$upr,min(c.int$upr)),
               rev(c(min(c.int$lwr),c.int$lwr,min(c.int$lwr))) )
      polygon(xx, yy, col=getOption("se_fill"), border="transparent")
      yy <- c( c(min(p.int$upr),p.int$upr,min(p.int$upr)),
               rev(c(min(p.int$lwr),p.int$lwr,min(p.int$lwr))) )
      polygon(xx, yy, col=getOption("ellipse_fill"), border="transparent")
    }
  } # end do.sp, a single scatterplot
  else { # scatterplot matrix for multiple regression
    if (numeric.all && in.data.frame) {
      plt.i <- plt.i + 1L
      plt.title[plt.i] <- "ScatterPlot Matrix"
      panel_fill <- getOption("panel_fill")
      window_fill <- getOption("window_fill")
      bckg <- ifelse(panel_fill=="transparent", window_fill, panel_fill)
      .plt.mat(lm.out$model[c(nm)], fit="lm", col.bg=bckg,
               pt.size=TRUE, size.miss=TRUE)
    }
    else if (!quiet) {
      # explain why no matrix was produced, then close any open device
      cat("\n>>> No scatterplot matrix reported because not all variables are ")
      if (!in.data.frame) cat("in the data frame.\n")
      if (!numeric.all) cat("numeric.\n")
      if (dev.cur() > 1) dev.off() # 1 is the null device
    }
  }
  if (pdf) {
    # close the pdf device and report the written file
    if (dev.cur() > 1) {
      dev.off()
      if (n.pred==1 || ancova)
        .showfile(pdf_file, "scatterplot")
      else
        .showfile(pdf_file, "scatterplot matrix")
      cat("\n\n")
    }
  }
  # just generated plot
  return(invisible(list(i=plt.i, ttl=plt.title, txeqs=txeqs,
                        cat=nm[x.cat], cont=nm[x.cont])))
}
|
bbab6ccfa17d77d4964f563cccf1b6b359caff88 | 42289239f68be6a947a917bb88cd77443cc4e762 | /functions/create.energy.time.sequence.R | 3ed63807bfb75bb71db070df286ea854eb512023 | [] | no_license | Ness2/ecogenie | 7802e8f8b2227ea5ccffbe56b25a1aefdf6c031d | 004bf5d61294e82d0d8c8d3bd590f26e5ff3dadf | refs/heads/master | 2021-04-06T08:21:09.876443 | 2018-05-01T13:56:50 | 2018-05-01T13:56:50 | 125,255,976 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,331 | r | create.energy.time.sequence.R | #####################################################################################
#####################################################################################
#
# create energy time sequence
# - builds one time-stamp sequence per "energy-year"
# - an energy-year starts on April 1st and ends on March 31st of the next year
# - returns a named list of data frames (names like "Y_2012_2013"), each with a
#   single POSIXct column `iteration_stamp`
#
# Fixes relative to the original version:
# - result containers are preallocated with vector()/character() instead of the
#   mistaken list(length = n) idiom (which creates a one-element named list)
# - iteration uses seq_along() (safe if the year range were ever empty)
# - the magrittr pipe was removed, so the function no longer fails when
#   magrittr/dplyr is not attached
# - the progress bar is closed via on.exit(), so it is cleaned up even if
#   seq()/as.POSIXct() errors part-way through
#
#####################################################################################
create.energy.time.sequence <- function(yearStart = "2012", yearEnd = "2017", monthStart = "04", monthEnd = "03",
                                        dayStart = "01", dayEnd = "31", hourStart = "00", hourEnd = "23",
                                        minStart = "00", minEnd = "59", secondStart = "00", secondEnd = "59",
                                        by = "min", format = "%Y-%m-%d %H:%M", tz = "UTC",
                                        ...) { # WARNING: by = "sec" may cause computation errors
  yearIndex <- as.numeric(yearStart):as.numeric(yearEnd) # energy-year starting years
  year.names <- character(length(yearIndex))             # list names, e.g. "Y_2012_2013"
  year.seqs <- vector("list", length(yearIndex))         # one data frame per energy-year
  pb <- txtProgressBar(min = 0, max = length(yearIndex), style = 3) # progress bar
  on.exit(close(pb), add = TRUE)                         # always close the progress bar
  for (i in seq_along(yearIndex)) {
    yearIndexEnd <- yearIndex[[i]] + 1 # an energy-year ends in the year after the starting year
    year.names[[i]] <- paste("Y", yearIndex[[i]], yearIndexEnd, sep = "_")
    # start / end of this energy-year, assembled from the date and time parts
    seq.start <- as.POSIXct(paste(paste(yearIndex[[i]], monthStart, dayStart, sep = "-"),
                                  paste(hourStart, minStart, secondStart, sep = ":"),
                                  sep = " "),
                            tz = tz)
    seq.end <- as.POSIXct(paste(paste(yearIndexEnd, monthEnd, dayEnd, sep = "-"),
                                paste(hourEnd, minEnd, secondEnd, sep = ":"),
                                sep = " "),
                          tz = tz)
    stamps <- seq(seq.start, seq.end, by = by)
    # round-trip through format()/as.POSIXct() truncates the stamps to the
    # precision of `format` (the default "%Y-%m-%d %H:%M" drops seconds)
    stamps <- as.POSIXct(format.POSIXct(stamps, format = format, tz = tz), tz = tz)
    year.seqs[[i]] <- data.frame(iteration_stamp = stamps)
    setTxtProgressBar(pb, i) # set progress bar position
  }
  names(year.seqs) <- year.names # give names to list
  year.seqs # output the created energy time sequence
} # end of function
|
6b24d8107868bfe8ef77574c168895b090d569f5 | 461669c8e2b2aaeb5823dcf07e0e6a0bfe38c77d | /exemplos/09-exemplo-arvore-pt1.R | 6b17f5b099139d8da28e74f99679d81545fe2f2c | [
"MIT"
] | permissive | curso-r/202104-intro-ml | aba675b6af96bd9efb74b29fdb7d8df33de08050 | 8971b4bdfc6da9a9fc4424fab07dd9090643ce1b | refs/heads/master | 2023-04-29T06:20:19.317100 | 2021-05-21T01:28:06 | 2021-05-21T01:28:06 | 315,781,009 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 724 | r | 09-exemplo-arvore-pt1.R | library(tidymodels)
# Teaching example: grow a (deliberately unpruned) classification tree on a
# tiny 6-row toy diabetes data set and display it. Column names are
# Portuguese: Pressão = blood pressure, Glicose = glucose, Diabetes = outcome
# ("sim"/"nao" = yes/no).
library(tidyverse)
library(rpart)
library(rpart.plot)
# hand-built toy data; Diabetes is converted to a factor so it can be used as
# a classification target
dados <- tribble(
  ~Pressão, ~Glicose, ~Diabetes,
  "hipertensao" ,92 , "nao",
  'normal' ,130 , "sim",
  "normal" ,130 , "nao",
  "normal" ,55 , "nao",
  "hipertensao" ,220 , "sim",
  "normal" ,195 , "sim"
) %>%
  mutate(
    Diabetes = as.factor(Diabetes)
  )
# tree specification with very permissive settings (min_n = 1, negative
# cost_complexity, deep max depth) so the tree can grow to fit the tiny
# data set -- intended to illustrate overfitting
diabetes_tree_model <- decision_tree(
  min_n = 1,
  cost_complexity = -1,
  tree_depth = 20
) %>%
  set_mode("classification") %>%
  set_engine("rpart")
# fit the tree predicting Diabetes from all other columns
credit_tree_fit <- fit(
  diabetes_tree_model,
  Diabetes ~.,
  data = dados
)
# draw the fitted rpart tree, then inspect its complexity-parameter table
rpart.plot(credit_tree_fit$fit, roundint=FALSE, cex = 4)
cp <- as.data.frame(credit_tree_fit$fit$cptable)
cp
|
9122a3527b7132a0c85ac7bc5bac0d7d0b9f26c3 | dd9631162faded93831096c8459c9133a3a4a507 | /man/furrr_options.Rd | 2973a5e96745e5d55c898b07f432d11c515ce2aa | [
"MIT"
] | permissive | DavisVaughan/furrr | c6eae607aaad6b1fcb33d2f9812f893ddad9bc19 | aa124bc06d9fc3e18ea047cb90d270ebeb8d9698 | refs/heads/main | 2023-08-26T09:58:43.261373 | 2022-08-15T19:35:39 | 2022-08-15T19:35:39 | 129,414,651 | 698 | 44 | NOASSERTION | 2023-01-17T03:37:54 | 2018-04-13T14:38:15 | R | UTF-8 | R | false | true | 5,857 | rd | furrr_options.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/furrr-options.R
\name{furrr_options}
\alias{furrr_options}
\title{Options to fine tune furrr}
\usage{
furrr_options(
...,
stdout = TRUE,
conditions = "condition",
globals = TRUE,
packages = NULL,
seed = FALSE,
scheduling = 1,
chunk_size = NULL,
prefix = NULL
)
}
\arguments{
\item{...}{These dots are reserved for future extensibility and must
be empty.}
\item{stdout}{A logical.
\itemize{
\item If \code{TRUE}, standard output of the underlying futures is relayed as soon
as possible.
\item If \code{FALSE}, output is silenced by sinking it to the null device.
}}
\item{conditions}{A character string of conditions classes to be relayed.
The default is to relay all conditions, including messages and warnings.
Errors are always relayed. To not relay any conditions (besides errors),
use \code{conditions = character()}. To selectively ignore specific classes,
use \code{conditions = structure("condition", exclude = "message")}.}
\item{globals}{A logical, a character vector, a named list, or \code{NULL} for
controlling how globals are handled. For details, see the
\verb{Global variables} section below.}
\item{packages}{A character vector, or \code{NULL}. If supplied, this specifies
packages that are guaranteed to be attached in the R environment where the
future is evaluated.}
\item{seed}{A logical, an integer of length \code{1} or \code{7}, a list of
\code{length(.x)} with pre-generated random seeds, or \code{NULL}. For details, see
the \verb{Reproducible random number generation (RNG)} section below.}
\item{scheduling}{A single integer, logical, or \code{Inf}. This argument
controls the average number of futures ("chunks") per worker.
\itemize{
\item If \code{0}, then a single future is used to process all elements of \code{.x}.
\item If \code{1} or \code{TRUE}, then one future per worker is used.
\item If \code{2}, then each worker will process two futures (provided there
are enough elements in \code{.x}).
\item If \code{Inf} or \code{FALSE}, then one future per element of \code{.x} is used.
}
This argument is only used if \code{chunk_size} is \code{NULL}.}
\item{chunk_size}{A single integer, \code{Inf}, or \code{NULL}. This argument
controls the average number of elements per future (\code{"chunk"}). If \code{Inf},
then all elements are processed in a single future. If \code{NULL}, then
\code{scheduling} is used instead to determine how \code{.x} is chunked.}
\item{prefix}{A single character string, or \code{NULL}. If a character string,
then each future is assigned a label as \code{{prefix}-{chunk-id}}. If \code{NULL},
no labels are used.}
}
\description{
These options fine tune furrr functions, such as \code{\link[=future_map]{future_map()}}. They
are either used by furrr directly, or are passed on to \code{\link[future:future]{future::future()}}.
}
\section{Global variables}{
\code{globals} controls how globals are identified, similar to the \code{globals}
argument of \code{\link[future:future]{future::future()}}. Since all function calls use the same set of
globals, furrr gathers globals upfront (once), which is more efficient than
if it was done for each future independently.
\itemize{
\item If \code{TRUE} or \code{NULL}, then globals are automatically identified and
gathered.
\item If a character vector of names is specified, then those globals are
gathered.
\item If a named list, then those globals are used as is.
\item In all cases, \code{.f} and any \code{...} arguments are automatically passed as
globals to each future created, as they are always needed.
}
}
\section{Reproducible random number generation (RNG)}{
Unless \code{seed = FALSE}, furrr functions are guaranteed to generate
the exact same sequence of random numbers \emph{given the same initial
seed / RNG state} regardless of the type of futures and scheduling
("chunking") strategy.
Setting \code{seed = NULL} is equivalent to \code{seed = FALSE}, except that the
\code{future.rng.onMisuse} option is not consulted to potentially monitor the
future for faulty random number usage. See the \code{seed} argument of
\code{\link[future:future]{future::future()}} for more details.
RNG reproducibility is achieved by pre-generating the random seeds for all
iterations (over \code{.x}) by using L'Ecuyer-CMRG RNG streams. In each
iteration, these seeds are set before calling \code{.f(.x[[i]], ...)}.
\emph{Note, for large \code{length(.x)} this may introduce a large overhead.}
A fixed \code{seed} may be given as an integer vector, either as a full
L'Ecuyer-CMRG RNG seed of length \code{7}, or as a seed of length \code{1} that
will be used to generate a full L'Ecuyer-CMRG seed.
If \code{seed = TRUE}, then \code{.Random.seed} is returned if it holds a
L'Ecuyer-CMRG RNG seed, otherwise one is created randomly.
If \code{seed = NA}, a L'Ecuyer-CMRG RNG seed is randomly created.
If none of the function calls \code{.f(.x[[i]], ...)} use random number
generation, then \code{seed = FALSE} may be used.
In addition to the above, it is possible to specify a pre-generated
sequence of RNG seeds as a list such that \code{length(seed) == length(.x)} and
where each element is an integer seed that can be assigned to \code{.Random.seed}.
Use this alternative with caution. \emph{Note that \code{as.list(seq_along(.x))} is
not a valid set of such \code{.Random.seed} values.}
In all cases but \code{seed = FALSE}, after a furrr function returns, the RNG
state of the calling R process is guaranteed to be "forwarded one step" from
the RNG state before the call. This is true regardless of the future
strategy / scheduling used. This is done in order to guarantee that an R
script calling \code{future_map()} multiple times should be numerically
reproducible given the same initial seed.
}
\examples{
furrr_options()
}
|
dc03a193ce003e40707826ef3f1d7b7e2f7186c0 | 8f21792a43ea9d59275e2afdef59dbcc0cec1410 | /EJERCICIO CALCULO PRIMA DAÑOS PROPIOS manuel_delpino.R | 0f11ab080e4bd07ab94b9f8832e423c5845dd20f | [] | no_license | Manueldelpino/Pricing-y-Tarificacion | 8d026d6600fdd032c28964b61215c4c5f59d6195 | f534205dc8c059abb4879073421b3b7e380e7585 | refs/heads/master | 2020-06-23T15:41:04.369984 | 2019-07-24T15:55:11 | 2019-07-24T15:55:11 | 198,666,805 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,964 | r | EJERCICIO CALCULO PRIMA DAÑOS PROPIOS manuel_delpino.R | ### EJERCICIO CALCULO PRIMA DAÑOS PROPIOS - Manuel del Pino Guerrero
##### 1. Library initialization block: install (if missing) and load packages #####
if(!require("zoo")){
  install.packages("zoo")
  library("zoo")
}
if(!require("caTools")){
  install.packages("caTools")
  library("caTools")
}
if(!require("ROCR")){
  install.packages("ROCR")
  library("ROCR")
}
if(!require("dplyr")){
  install.packages("dplyr")
  library("dplyr")
}
## -------------------------------------------------------------------------
##### 2. Parameters and initial data sets needed for the exercise #####
# NOTE(review): absolute setwd() path makes this script machine-specific
setwd("C:/Users/Knowhow/Desktop/CUNEF/PRICING Y TARIFICACION")
# policies file (version 1)
Polizas=read.csv2('Policy_v1.csv')
# policies file (version 2)
Polizas2=read.csv2('Policy_v2.csv')
# claims file
Siniestros = read.csv2('Claims_v1.csv')
# quick descriptive look at the claims data
str(Siniestros)
summary(Siniestros)
hist(Siniestros$Costes)
# NOTE(review): svd() on a numeric vector is likely a typo for sd() -- confirm
svd(Siniestros$Costes)
table(Siniestros$TipoSin)
##### 4. Descriptive analysis of the policies #####
str(Polizas2)
summary(Polizas2)
hist(Polizas$Valor)
hist(Polizas$Potencia)
hist(Polizas$Peso)
table(Polizas$Forma_Pago)
table(Polizas$Antiguedad)
table(Polizas$Sexo)
table(Polizas$Edad,Polizas$Comb) # where Comb is the fuel type (Combustible)
#Polizas$start_date2<-as.Date(Polizas$start_date)
#Polizas$Expuestos=diff.Date(Polizas$end_date,Polizas$start_date)/365
# 5. Premium-calculation process for Own Damage ("Daños Propios") claims
SINI_RCC=Siniestros[Siniestros$TipoSin %in% c("Daños Propios"),]
Costes=aggregate(SINI_RCC$Costes, by = list(SINI_RCC$ID_POL), mean) # mean claim cost per policy
Numero=aggregate(sign(SINI_RCC$Costes), by = list(SINI_RCC$ID_POL), sum) # claim count per policy
Costes <- data.frame(ID_POL=Costes$Group.1, Costes=Costes$x)
Numer <- data.frame(ID_POL=Numero$Group.1,Nsini=Numero$x)
SINI_RCC=merge(SINI_RCC,Costes)
SINI_RCC=merge(SINI_RCC,Numer)
summary(SINI_RCC) # aggregated costs and claim counts are now attached
hist(SINI_RCC$Costes)
# 6. Own Damage severity model (claim COST model, Gamma GLM)
RCC2 <- merge(SINI_RCC, Polizas2, by = "ID_POL")
RCC2 <- RCC2 %>% filter(Costes>0) # the Gamma family requires strictly positive costs
modelo2_C1=glm(Costes~Edad_FMT,data=RCC2,family=Gamma)
summary(modelo2_C1)
modelo2_C4=glm(Costes~Carnet_FMT+Forma_Pago,data=RCC2,
               family=Gamma)
summary(modelo2_C4)
Polizas2 <- Polizas2 %>% mutate(PredCorpDamage=predict(modelo2_C1,newdata = Polizas2,type = "response"))
# 7. Claim FREQUENCY model (number of claims, Poisson GLM)
RCCF <- merge(Polizas2,SINI_RCC, by = "ID_POL",all.x=TRUE)
RCCF[is.na(RCCF$Nsini),"Nsini"]<-0 # policies without claims get a count of 0
RCCF <- RCCF %>% filter(Nsini>=0)
summary(RCCF)
summary(SINI_RCC)
ModeloN_C1=glm(Nsini~Edad_FMT+Valor_FMT+Sexo+Comb+
                 Potencia_FMT+Peso_FMT+Bonus_RC,data=RCCF,
               family=poisson(link = "log"))
summary(ModeloN_C1)
Polizas2 <- Polizas2 %>% mutate(PredCorpDamageFRQ=predict(ModeloN_C1,newdata = Polizas2,type = "response"))
# pure premium = expected severity * expected frequency
Polizas2 <- Polizas2 %>% mutate(Prima=(PredCorpDamage * PredCorpDamageFRQ))
summary(Polizas2$Prima)
## 8. OWN-DAMAGE PREMIUM MODEL (the target variable of this GLM is Prima)
ModeloN_prm=glm(Prima~Edad_FMT+Valor_FMT+Sexo+Comb+
                  Potencia_FMT+Peso_FMT+Bonus_RC,data=Polizas2,
                family=Gamma(link = "log"))
Polizas2 <- Polizas2 %>% mutate(PredPrimaDaños=predict(ModeloN_prm,newdata = Polizas2,type = "response"))
summary(ModeloN_prm)
summary(Polizas2)
# With the cost, frequency, and own-damage models built, the columns
## Prima and PredPrimaDaños now exist in Polizas2; they were named this way to carry out the calculations required in this exercise.
## Finally, Polizas2 is filtered with the chosen values to obtain the Own Damage premium values.
675e39e159692c509f63b4b24701cd4067206452 | 97558a08d71b43814c2e8884d40cd13fed3efab3 | /R/show_vinagre.R | a3925b2541e584ca6f1b23342120d529b06f1375 | [
"MIT"
] | permissive | sambold/bagR | 3cde66501497d3da08209d34d4cfdc19c9ea5a2f | eba539bd5aedcb766921a00183592d3d196d8f18 | refs/heads/master | 2020-09-25T08:25:55.671750 | 2020-09-08T20:45:36 | 2020-09-08T20:45:36 | 225,961,937 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 283 | r | show_vinagre.R | #' show_vinagre:
#'
#' @description Opens Vinagre (the GNOME remote-desktop/VNC viewer) connected
#'     to a local session.
#' @param vinagre_port Numeric, port on which the Vinagre/VNC instance runs
#'     (default 5901, the usual first VNC display).
#' @export
#'
show_vinagre <- function(vinagre_port=5901){
    # show browser (password: secret); sprintf() replaces the previous
    # glue::glue() call so the function needs no package beyond base R
    system(sprintf("vinagre 127.0.0.1:%s", vinagre_port))
}
9323ad63ef3223825472a0f19ef59e74f8488a8e | 246154d146ecfd6b6dd579200a8b65c4eba6b7ba | /man/unadj.Rd | f575a0061386bac85f3dcc81c5f8873c1f609420 | [] | no_license | cran/subtee | 310c2d481959e69237401ef303ecf0720e7d0247 | 92c86b0a4334580fb5010a78701d566b5c735080 | refs/heads/master | 2022-05-07T11:55:45.240047 | 2022-03-22T13:10:05 | 2022-03-22T13:10:05 | 162,720,131 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,914 | rd | unadj.Rd | \name{unadj}
\alias{unadj}
\title{
Treatment effect estimation based on marginal subgroup models.
}
\description{
Unadjusted estimation of treatment effects in subgroups.
Fits separate (marginal) models for each candidate subgroup, i.e. including
the subgroup as a main effect and interaction with treatment for each model.
}
\usage{
unadj(resp, trt, subgr, covars = NULL, data,
fitfunc = c("lm", "glm", "glm.nb", "survreg", "coxph", "rlm"),
event, exposure, level = 0.1, ...)
}
\arguments{
\item{resp}{
Character giving the name of the response variable. The variable can be
either defined in the global environment or in the data-set \code{data}
specified below. For interactive use it is also possible to use unquoted
names (i.e. \code{unadj(resp,...)} instead of \code{unadj("resp",...)}),
avoid this for non-interactive use of the function.
}
\item{trt}{
Character giving the name of the treatment variable. The variable can
be either defined in the global environment or in the data-set
\code{data} specified below. Note that the treatment variable itself
needs to be defined as a numeric variable, with control coded as 0, and
treatment coded as 1. For interactive use it is also possible to use unquoted
names (as for the resp argument. see above).
}
\item{subgr}{
Character vector giving the variable names in \code{data} to use as
subgroup identifiers. Note that the subgroup variables in \code{data}
need to be numeric 0-1 variables.
}
\item{covars}{
Formula, specifying additional (prognostic) covariates to be included
in the models (need to be available in \code{data}). It is crucial for
the model averaging approach to include the important prognostic
covariates (in particular if the corresponding prognostic covariate
also defines a subgroup; otherwise models/subgroup might get
upweighted just because the variable has prognostic value, but not
because the treatment effect is modified).
}
\item{data}{
Data frame containing the variables referenced in \code{resp},
\code{trt}, \code{subgr} and \code{covars}
(and possibly \code{event} and \code{exposure}).
}
\item{fitfunc}{
Model fitting functions. Currently one of \code{'lm'}, \code{'glm'},
\code{'glm.nb'}, \code{'survreg'}, \code{'coxph'} or \code{'rlm'}.
}
\item{event}{
Character giving the name of the event variable. Has to be specified
when using fit functions \code{'survreg'} and \code{'coxph'}. The variable can be
either defined in the global environment or in the data-set \code{data}.
}
\item{exposure}{
Character giving the name of the exposure variable, needed for
negative binomial regression, when using fit functions
\code{'glm.nb'}. This is typically the time each patient is exposed to the drug.
The fitted model uses the call
\code{glm.nb(.~.+offset(log(exposure)))}. The variable
needs to be defined either in the global environment or in the data-set \code{data}.
}
\item{level}{
Significance level for confidence intervals will be calculated for
treatment effect estimates.
}
\item{\dots}{
other arguments passed to the model fitting function.
}
}
\details{
In the simple linear case (e.g when using fitfunc \code{\link{lm}}) for each of the \eqn{P} candidate subgroups the fitted model is of the form
\deqn{M_p : y_i \sim N(\mu_i^{(p)}, \sigma_p^2), i=1,...,n}{M_p : y_i ~ N(\mu_i^(p), \sigma_p^2), i=1,...,n}
where \deqn{\mu_i^{(p)} =
\alpha_p + \beta_p z_i +
(\gamma_p + \delta_p z_i) s_{pi} + \sum_{k = 1}^{K} \tau_k x_{ik}
}
where \eqn{s_i} denotes the subgroup indicators (the column vectors of \code{subgr}), \eqn{z_i} is the treatment indicator (from \code{trt}) and \eqn{x{.1}, ..., x{.K}} are additional covariates as specified in \code{covars}.
For other fitting functions the models are of similar form, including prognostic and predictive effects of subgroups.
A treatment effect (on the scale determined by \code{fitfunc}) for the candidate subgroups is estimated as \eqn{\hat{\beta} + \hat{\delta_p}} and a treatment effect estimate for the complement is given by \eqn{\hat{\beta}}.
Note that choosing subgroups based on these unadjusted treatment effect estimates may lead to overoptimistic conclusions in regards to the treatment effect in that subgroup. Naive estimates do not consider model
selection uncertainty and will often suffer from selection bias.
}
\value{
A list (object of class \code{subtee}). The most important entries are (i) \code{fitmods} containing all
fitted subgroup models and the overall model (ii) \code{trtEff}
containing the treatment effect estimates and CI for subgroup and subgroup
complements. (iii) \code{trtEffDiff} containing the differences in
treatment effect estimates (subgroup vs complement) and CI.
}
\references{
Ballarini, N. Thomas, M., Rosenkranz, K. and Bornkamp, B. (2021) "{subtee}: An {R} Package for Subgroup Treatment Effect Estimation in Clinical Trials"
Journal of Statistical Software, 99, 14, 1-17,
doi: 10.18637/jss.v099.i14
Thomas, M., and Bornkamp, B. (2017) "Comparing Approaches to Treatment
Effect Estimation for Subgroups in Early Phase Clinical Trials."
Statistics in Biopharmaceutical Research, 9, 160-171,
doi: 10.1080/19466315.2016.1251490
Bornkamp, B., Ohlssen, D., Magnusson, B. P., and Schmidli, H. (2017)
"Model averaging for treatment effect estimation in subgroups."
Pharmaceutical Statistics, 16, 133-142,
doi: 10.1002/pst.1796
Raftery, A. E. (1995) "Bayesian model selection in social research."
Sociological Methodology, 25, 111-163.
}
\seealso{
\code{\link{summary.subtee}}, \code{\link{plot.subtee}},
\code{\link{lm}}, \code{\link{glm}}, \code{\link{glm.nb}},
\code{\link{survreg}}, \code{\link{coxph}}
}
\examples{
## toy example calls using the simulated datnorm data-set without
## treatment and subgroup effect, see ?datnorm for details
data(datnorm)
head(datnorm)
## first need to create candidate subgroups (if not already defined in data-set)
## here generate candidate subgroups manually (need to be numeric 0-1 variables)
groups <- data.frame(labvalL.5=as.numeric(datnorm$labvalue < 0.5),
regUS=as.numeric(datnorm$region == "US"),
hgtL175=as.numeric(datnorm$height < 175))
fitdat <- cbind(datnorm, groups) # bind subgroup variables to main data
## subgroups of interest
subgr <- c("labvalL.5", "regUS", "hgtL175")
res <- unadj(resp = "y", trt = "treat", subgr = subgr, data = fitdat,
covars = ~ x1 + x2, fitfunc = "lm")
summary(res)
plot(res)
## generate candidate subgroups using the subbuild function
## semi-automatically i.e. some groups specified directly (height and
## smoker), for region and labvalue subbuild generates subgroups (see
## ?subbuild).
cand.groups <- subbuild(datnorm, height < 175, smoker == 1, region, labvalue)
head(cand.groups)
fitdat <- cbind(datnorm, cand.groups)
subgr <- colnames(cand.groups)
res <- unadj(resp = "y", trt = "treat", subgr = subgr, data = fitdat,
covars = ~ x1 + x2, fitfunc = "lm")
summary(res)
plot(res)
## toy example call for binary data on simulated datbin data-set
data(datbin)
cand.groups <- subbuild(datbin, height < 175, smoker == 1, region, labvalue)
fitdat <- cbind(datbin, cand.groups)
subgr <- colnames(cand.groups)
res <- unadj(resp = "y", trt = "treat", subgr = subgr, data = fitdat,
covars = ~ x1 + x2, fitfunc = "glm",
family = binomial(link = "logit"))
## scale of the treatment effect estimate: difference on log-odds scale
summary(res)
plot(res)
## toy example call for parametric and semi-parametric survival data on
## datsurv data-set
data(datsurv)
cand.groups <- subbuild(datsurv, height < 175, smoker == 1, region, labvalue)
fitdat <- cbind(datsurv, cand.groups)
subgr <- colnames(cand.groups)
res.survreg <- unadj(resp = "y", trt = "treat", subgr = subgr, data = fitdat,
covars = ~ x1 + x2,
fitfunc = "survreg", event = "event", dist = "exponential")
## parametric survival model (here exponential distribution)
## scale of treatment effect estimate: log scale (see ?survreg for details)
summary(res.survreg)
plot(res.survreg)
res.cox <- unadj(resp = "y", trt = "treat", subgr = subgr, data = fitdat,
covars = ~ x1 + x2, fitfunc = "coxph", event = "event")
## scale of treatment effect estimate: difference in log-hazard rate
summary(res.cox)
plot(res.cox)
## toy example call overdispersed count data on datcount data-set
data(datcount)
cand.groups <- subbuild(datcount, height < 175, smoker == 1, region, labvalue)
fitdat <- cbind(datcount, cand.groups)
subgr <- colnames(cand.groups)
res <- unadj(resp = "y", trt = "treat", subgr = subgr, data = fitdat,
covars = ~ x1 + x2, fitfunc = "glm.nb", exposure = "exposure")
## scale of treatment effect estimate: difference on log scale
summary(res)
plot(res)
}
\keyword{ models}
|
bc82435a5d2d094ea1d23c7131d432a966358b06 | b4aa624f520353afa3c82111d274b7d68b0c112a | /ARIMA model code.R | 9ae6dc87374d0acd9ab81983ea349f770e191673 | [] | no_license | jiaxijiaxi410/time-series-Rcode | b5eefa856d71d3dc59f7624254a0fa1d73e0f2d5 | d0f34b790c2e0ce10d1f219797ce881c59d52374 | refs/heads/main | 2023-04-04T22:27:32.478136 | 2021-04-03T19:10:06 | 2021-04-03T19:10:06 | 354,373,228 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,057 | r | ARIMA model code.R | rm(all)
# Time-series tutorial: simulate simple ARIMA processes, inspect their
# ACF/PACF, fit models, and forecast. Requires the 'forecast' package
# (auto.arima, forecast).
library(forecast)
## We will use R to simulate and plot some data from simple ARIMA models.
## Use the following R code to generate data from an AR(1) model with
## phi1 = 0.6 and sigma^2 = 1.
# The process starts with y0 = 0.
set.seed(42)  # fixed seed so the simulated series are reproducible
y <- ts(numeric(500))
e <- rnorm(500)
for(i in 2:500)
  y[i] <- 0.6*y[i-1] + e[i]
# Produce a time plot for the series.
plot(y)
acf(y)
pacf(y)
# How does the plot change as you change phi1? (same shocks e, larger phi1)
y2 <- ts(numeric(500))
for(i in 2:500)
  y2[i] <- 0.9*y2[i-1] + e[i]
plot(y2)
acf(y2)
pacf(y2)
# Compare the two graphs side by side.
par(mfrow=c(2,1))
plot(y)
plot(y2)
# Generate data from an MA(1) model with theta1 = 0.6 and sigma^2 = 1.
y3 <- ts(numeric(500))
for(i in 2:500)
  y3[i] <- 0.6*e[i-1] + e[i]
# Produce a time plot for the series. How does it change with theta1?
plot(y3)
y4 <- ts(numeric(500))
for(i in 2:500)
  y4[i] <- 0.95*e[i-1] + e[i]
par(mfrow=c(2,1))
plot(y3)
plot(y4)
# Generate data from an ARMA(1,1) model with phi1 = 0.6, theta1 = 0.6, sigma^2 = 1.
y5 <- ts(numeric(500))
for(i in 2:500)
  y5[i] <- 0.6*y5[i-1]+0.6*e[i-1] + e[i]
# Generate data from an AR(2) model with phi1 = -0.8 and phi2 = 0.3 and
# sigma^2 = 1. These parameters violate the AR(2) stationarity condition
# (phi2 - phi1 = 1.1 > 1), so the series is non-stationary as intended.
# (FIX: the original code used 0.3*y[i-1] - 0.8*y[i-2], which is stationary
# and contradicted the stated intent of showing a non-stationary series.)
y6 <- ts(numeric(500))
for(i in 3:500)
  y6[i] <- -0.8*y6[i-1] + 0.3*y6[i-2] + e[i]
# Graph the latter two series and compare them.
par(mfrow=c(2,1))
plot(y5)
plot(y6)
# Fit an AR(1) to y and check the estimated coefficients.
fit<-arima(y,order=c(1,0,0))
summary(fit)
# Now try fitting an AR(2) on y. How does it perform?
# auto.arima() searches over candidate models and returns the best ARIMA fit.
auto.arima(y)
# Try auto.arima on y2-y6.
# Look at the residuals.
# The ACF plot of the residuals from the fitted model shows all correlations
# within the threshold limits, indicating the residuals behave like white noise.
res<-residuals(fit)
acf(res)
pacf(res)
# A portmanteau test returns a large p-value, also suggesting the residuals
# are white noise.
Box.test(res)
# Forecast 20 steps ahead with 80% and 95% prediction intervals.
plot(forecast(fit, h=20, level=c(80, 95) ))
|
e8c2525f37aedb916a1ef9d4f4242e4ffa430347 | eac759ea418d8522b239cd420039c5047f34b546 | /R/size.comparing_probs.mcnemar.R | 1971646138403dc5d39c6f97ae1d2064cbf526f1 | [] | no_license | cran/OPDOE | 1dac02018283dcbe1ad0cd8b85736baf930718f4 | cc4f73c76e7a3655ddbc7fc8be069d150182f15d | refs/heads/master | 2021-05-15T01:55:50.474710 | 2018-03-17T21:49:18 | 2018-03-17T21:49:18 | 17,681,194 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 550 | r | size.comparing_probs.mcnemar.R | size.comparing_probs.mcnemar <-
function(p1=NULL, p2=NULL, delta=NULL,
         alpha, beta,
         alternative=c("two.sided","one.sided")){
  ## Sample size for comparing two paired proportions (McNemar's test).
  ##
  ## p1, p2      : marginal success probabilities; if both are NULL the
  ##               worst case p1 = 0.5 is used, and a missing p2 is
  ##               reconstructed as p1 - delta.
  ## delta       : difference p1 - p2 (needed when p2 is not supplied).
  ## alpha, beta : type-I and type-II error rates.
  ## alternative : "two.sided" (default) or "one.sided".
  ##
  ## Returns the required number of pairs, rounded up.
  # BUG FIX: with the default, `alternative` is a length-2 vector, and
  # `if (alternative == "two.sided")` is an error in R >= 4.2.  match.arg()
  # collapses the default to "two.sided" (the old first-element semantics)
  # and still accepts explicit / partially matched values.
  alternative <- match.arg(alternative)
  # Scalar conditions use && (short-circuit) rather than elementwise &.
  if (is.null(p1) && is.null(p2)) {
    h <- p1 <- 0.5
  }
  if (is.null(p2)) {
    p2 <- p1 - delta
    h <- p1 + p2 - 2*p1*p2   # probability of a discordant pair
  }
  if (alternative == "two.sided") {
    q1 <- qnorm(1 - alpha/2)
  } else {
    q1 <- qnorm(1 - alpha)
  }
  q2 <- qnorm(1 - beta)
  # When both probabilities are known, (re)derive h and delta from them.
  if (!is.null(p1) && !is.null(p2)) {
    h <- p1 + p2 - 2*p1*p2
    delta <- p1 - p2
  }
  n <- (q1*h + q2*sqrt(h^2 - (3 + h)*delta^2/4))/(h*delta^2)
  ceiling(n)
}
|
2642dd0eace03b73b66c9ad0bcfcfa6915280b09 | 6b04bdd75e32f2c290638b067f8c58ec1bdbe4a0 | /mvn_bernoulli_AntMAN.R | 76f1c7eaba263a635e5c1f7bef9876e09c1061c2 | [] | no_license | prisong97/AntMAN_submission | 3f3bc910cc82e10cabd983db23d907ebf58ae3e0 | c732dc771ad4f2a9857c3f0b7b21a6aacb55eb88 | refs/heads/master | 2023-03-27T19:15:37.375061 | 2021-04-04T03:21:35 | 2021-04-04T03:21:35 | 354,310,913 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,002 | r | mvn_bernoulli_AntMAN.R | ########################################################
# Fit AntMAN's multivariate Bernoulli mixture (latent class) model to the
# carcinoma data via the run_benchmarks() helper.
library("devtools")
# NOTE(review): load_all() points at a machine-specific AntMAN checkout;
# adjust the path (or use library(AntMAN)) on other machines.
load_all('/Users/Priscilla/Desktop/antman/AntMAN/AntMAN')
# load external file defining run_benchmarks()
source("run_experiment_AntMAN.R")
########################################################
# load the data
data(carcinoma, package="AntMAN")
y_mvb <- carcinoma
n <- dim(y_mvb)[1]   # number of observations
d <- dim(y_mvb)[2]   # number of binary items
# specify the kernel and priors: uniform Beta(1,1) priors on each of the
# d Bernoulli success probabilities
mixture_mvb_params <- AM_mix_hyperparams_multiber(a0=rep(1,d),b0= rep(1,d))
weights_prior <- AM_mix_weights_prior_gamma(init=5, a=1, b=1)
components_prior <- AM_mix_components_prior_negbin(R=1, init_P=0.1,a_P=1,b_P =1)
# specify the MCMC parameters: 255k iterations, 5k burn-in, keep every 50th
mcmc_params <- AM_mcmc_parameters(niter=255000, burnin=5000, thin=50,
                                  verbose=1)
# run the multivariate Bernoulli model, repeated over 10 benchmark iterations
fit <- run_benchmarks("group", y_mvb, initial_clustering_ = NULL, mixture_mvb_params, components_prior, weights_prior, mcmc_params, thinning_interval = 50, no_of_iterations = 10, to_plot = FALSE, is_lca = TRUE)
14cd0091380639808db14486260aa86ad8a661c5 | daa8a987518a0251c4788c8ccc707b5d059f371e | /R/font.R | 3671f20ced86a62dea8e820b8f98a08e70de1770 | [] | no_license | ajrominger/sfiR | e74de40aa0028ba4d5962022ff869f27f066fd7b | e0b4e9757cc809ff7eca6157fb2a9a6f76555821 | refs/heads/master | 2020-03-19T00:50:56.958574 | 2018-06-01T23:19:54 | 2018-06-01T23:19:54 | 135,505,481 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 249 | r | font.R | extrafont::ttf_import(pattern = 'cmunbmo.ttf')
# Register imported fonts with R's PDF device and render a small test plot
# to check that the 'CMU Bright' family is usable.
extrafont::loadfonts(quiet = TRUE)
pdf('test.pdf', family = 'CMU Bright', width = 3.7, height = 6.5)
plot(1, xlab = 'Long ass name', ylab = 'Other stuff')
text(1.3, 1.3, labels = 'fooFOOfoo')
dev.off()
8af48b723d6097b85728f8b850b9f158f2f6ad5d | 5b62e239de22b36aa5f827e0e0fe7c732cecd551 | /marked/man/mixed.model.admb.Rd | 0d91d1c58b014652ff89af021a8f1a2aeadcc6f7 | [] | no_license | jlaake/marked | 46b3b2327f5ac6625a47f1da53f8fc9a1c2e7eac | 5ef47016f36056dc51e4e1638630447bec333c69 | refs/heads/master | 2021-09-08T11:22:28.162273 | 2021-09-06T21:26:43 | 2021-09-06T21:26:43 | 2,009,632 | 7 | 10 | null | 2019-09-16T15:52:25 | 2011-07-06T23:59:24 | R | UTF-8 | R | false | true | 2,351 | rd | mixed.model.admb.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mixed.model.r
\name{mixed.model.admb}
\alias{mixed.model.admb}
\alias{mixed.model}
\alias{mixed.model.dat}
\alias{reindex}
\title{Mixed effect model contstruction}
\usage{
mixed.model.admb(formula,data)
mixed.model(formula,data,indices=FALSE)
mixed.model.dat(x,con,idonly,n)
reindex(x,id)
}
\arguments{
\item{formula}{formula for mixed effect mode in the form used in lme4; ~fixed +(re1|g1) +...+(ren|gn)}
\item{data}{dataframe used to construct the design matrices from the formula}
\item{x}{list structure created by mixed.model.admb}
\item{con}{connection to data file which contents will be appended}
\item{id}{vector of factor values used to split the data up by individual capture history}
\item{idonly}{TRUE, if random effects not crossed}
\item{n}{number of capture history records}
\item{indices}{if TRUE, outputs structure with indices into dm for random effects}
}
\value{
mixed.model.admb returns a list with elements re.dm, a combined design matrix for all of the random effects; and
re.indices, matrix of indices into a single vector of random effects to be applied to the
design matrix location.
mixed.model returns a list (re.list) with an element for each random effect structure. The contents
are a standard design matrix (re.dm) if indices==FALSE and a re.dm and re.indices which matches the
structure of mixed.model.admb. mixed.model will be more useful with R than ADMB.
}
\description{
Functions that develop structures needed for a mixed effect model
}
\details{
mixed.model.admb - creates design matrices and supporting index matrices
for use of mixed model in ADMB
mixed.model - creates design matrices and supporting index matrices
in an alternate list format that is not as easily used in ADMB
mixed.model.dat - writes to data file (con) for fixed and random effect structures
reindex - creates indices for random effects that are specific to the individual capture
history; it takes re.indices, splits them by id and creates
a ragged array by id (used.indices) with the unique values for that id. index.counts is the number
of indices per id to read in ragged array. It then changes re.indices to be an index
to the indices within the id from 1 to the number of indices within the id.
}
\author{
Jeff Laake
}
|
c37163975f8a82f884e0c8a94a8a83b8e6524155 | a8e4f7ae7f9e695c50bba0998193a4dd42cfd508 | /Titanic case study/titanic_101.R | 861f6865f153aaa90b54f26db341cf065f16ba86 | [] | no_license | 106035007/Text-Mining | fe3c77a3c198c32ade8c4b8ff59f5b00912aec94 | abca47596e8de7181ab6edc965c3aabb7287d3cb | refs/heads/master | 2020-09-11T15:24:26.073308 | 2020-03-24T14:46:41 | 2020-03-24T14:46:41 | 222,110,050 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,443 | r | titanic_101.R | # http://www.networkx.nl/programming/titanic-machine-learning-from-disaster-part-1/
# https://www.kaggle.com/c/titanic
# https://trevorstephens.com/kaggle-titanic-tutorial/r-part-1-booting-up/
# https://trevorstephens.com/kaggle-titanic-tutorial/r-part-3-decision-trees/
# How to do the Titanic Kaggle competition in R - Part 1
# https://www.youtube.com/watch?v=Zx2TguRHrJE
# Part 1:
# Data Exploration and basic Model Building
rm(list=ls())
#setwd('../DataScience/Titanic case study/')
train <- read.csv("train.csv")
test <- read.csv("test.csv")
str(train)
str(test)
head(train,2)
head(test,2)
# The training set has 891 observations and 12 variables and the testing set has 418 observations and 11 variables.
# The traning set has 1 extra varible. Check which which one we are missing. I know we could see that in
#a very small dataset like this, but if its larger we want two compare them.
colnames_check <- colnames(train) %in% colnames(test)
colnames(train[colnames_check==FALSE])
table(train$Survived)
prop.table(table(train$Survived))
table(train$Sex, train$Survived)
prop.table(table(train$Sex, train$Survived),margin = 1)
#train$Child <- 0
#train$Child[train$Age < 18] <- 1
#aggregate(Survived ~ Child + Sex, data=train, FUN=sum)
#aggregate(Survived ~ Child + Sex, data=train, FUN=length)
#aggregate(Survived ~ Child + Sex, data=train, FUN=function(x) {sum(x)/length(x)})
# Model Building
# First prediction – All Female Survived
test_female <- test
test_female$Survived <- 0
test_female$Survived[test_female$Sex == "female"] <- 1
# Create a data frame with two columns: PassengerId & Survived and write the solution away to a csv file.
my_solution <- data.frame(PassengerId = test_female$PassengerId, Survived = test_female$Survived)
# write.csv(my_solution, file = "all_female_survive.csv", row.names = FALSE)
# Clean up the dataset
colSums(is.na(train))
colSums(is.na(test))
# To tackle the missing values I’m going to predict the missing values with the full data set.
# First we need to combine the test and training set together.
train2 <- train
test2 <- test
test2$Survived <- NA
full <- rbind(train2, test2)
#
# First we tackle the missing Fare, because this is only one value. Let see in wich row it’s missing.
full[!complete.cases(full$Fare),]
# As we can see the passenger on row 1044 has an NA Fare value. Let’s replace it with the median fare value.
full$Fare[1044] <- median(full$Fare, na.rm = TRUE)
# We make a prediction of a passengers Age using the other variables and a decision tree model.
# This time we give method = “anova” since you are predicting a continuous variable.
library(rpart)
predicted_age <- rpart(Age ~ Pclass + Sex + SibSp + Parch + Fare + Embarked,
data = full[!is.na(full$Age),], method = "anova")
full$Age[is.na(full$Age)] <- predict(predicted_age, full[is.na(full$Age),])
# split back to original data set
train2 <- full[1:891,]
test2 <- full[892:1309,]
# Build a Decision Tree with rpart
my_dt1 <- rpart(Survived ~ Pclass + Sex + Age + SibSp + Parch + Fare + Embarked,
data = train2,
method = "class")
plot(my_dt1)
text(my_dt1)
# Load in the packages to create a fancified visualized version of your tree.
library(rattle)
library(rpart.plot)
library(RColorBrewer)
fancyRpartPlot(my_dt1)
# The root node, at the top, shows 62% of passengers die, while 38% survive.
# The number above these proportions indicates the way that the node is voting
# (recall we decided at this top level that everyone would die, or be coded as zero) and
# the number below indicates the proportion of the population that resides in this node, or bucket
# (here at the top level it is everyone, 100%).
# If the passenger was male, only 19% survive, so the bucket votes that everyone here
# (65% of passengers) perish, while the female bucket votes in the opposite manner, most of them survive
# as we saw before.
# Move to the right side: Given sex = female, then if Pclass >= 2.5, then death ratio is 50%.
# If Pclass<2.5, then survival ratio is 95%.
# NOTE(review): prediction below uses the ORIGINAL `test` (with missing Age /
# Fare), not the imputed `test2` -- rpart handles this via surrogate splits,
# but confirm this is intentional.
Prediction <- predict(my_dt1, test, type = "class")
submit <- data.frame(PassengerId = test$PassengerId, Survived = Prediction)
# write.csv(submit, file = "myfirstdtree.csv", row.names = FALSE)
# Categorical casting: convert Pclass and the outcome to factors, and fill
# the two blank Embarked values with the most common port ("S").
str(full)
full$Pclass <- as.factor(full$Pclass)
table(full$Embarked)
full[full$Embarked == '', ]
full[full$Embarked == '', 'Embarked'] <- 'S'
table(full$Embarked)
# split back to original data set
train2 <- full[1:891,]
test2 <- full[892:1309,]
train2$Survived <- as.factor(train2$Survived)
test2$Survived
test2$Survived[is.na(test2$Survived)] <- 0
# Random forest model -------------------------------------------------------
# NOTE: of the libraries loaded below only randomForest is actually used in
# this section; the others are loaded for the wider tutorial.
library(rsample)      # data splitting
library(randomForest) # basic implementation
library(ranger)       # a faster implementation of randomForest
library(caret)        # an aggregator package for performing many machine learning models
library(h2o)          # an extremely fast java-based platform
survival.equation <- 'Survived ~ Pclass + Sex + Age + SibSp + Parch + Fare + Embarked'
survival.formula <- as.formula(survival.equation)
# BUG FIX: the number-of-trees argument of randomForest() is `ntree`; the
# original `mtree = 500` was a typo, silently swallowed by `...`, so the
# model was fit with the default number of trees.
model1_rf <- randomForest(survival.formula, data = train2, ntree = 500, mtry = 3,
                          nodesize = 0.01 * nrow(train2))
#
feature.equation <- 'Pclass + Sex + Age + SibSp + Parch + Fare + Embarked'
#
# Predict on the (imputed) test set and assemble the submission frame.
# NOTE(review): Kaggle expects the id column to be named "PassengerId";
# the column here inherits the variable name "PassengerID" -- confirm before
# submitting.
Survived <- predict(model1_rf, newdata = test2)
PassengerID <- test2$PassengerId
output <- as.data.frame(PassengerID)
output$Survived <- Survived
591f36510fe164c638de7c1f3a1bb31413678736 | f7f61a04ae1f0b30c1c11083aa5424558da06df3 | /bk/S/clang/mlhfit/masaomi_mlh/mlh_fit/mlh_fit_kidera/result/16N_d_b.r | 11f4afbb0e4301a788eeb109efcfff2b88d594d4 | [] | no_license | gezhuang0717/ms2 | ac789867a621ad96cd789ccc74ae837348a97758 | 65fbeb43623dfc1b68d27a0cbffdd9c80065145b | refs/heads/master | 2020-12-03T08:04:57.084395 | 2017-06-28T10:02:02 | 2017-06-28T10:02:02 | 95,652,410 | 0 | 0 | null | null | null | null | SHIFT_JIS | R | false | false | 777 | r | 16N_d_b.r | output file : result/16N_d_b.r result/16N_d_b.s
Data file name : data/16N_d.dat
Number of points of data = 244
Number of parameters = 2
Number of free parameters = 2
Fitting region : 153 -> 231
Initial value of free parameters
AAI( 1) = 0.8638910000D+00
AAI( 2) = 0.3823500000D-01
** 二回微分係数も入れる **
Fitting region(ch) : 153 --> 231
Fitting region (arb.) : 152.000000000000 --> 230.000000000000
Free parameters
AA( 1) = 0.1311452943D+02 +- 0.2808649828D+01
AA( 2) = -0.2008402314D-01 +- 0.1450360968D-01
chisq = 75.0704245894942
reduced chisq = 0.974940579084341
|
6b687eb7e034e1288a407df1a34920ee03e0f27b | 029f359113e3b40674bc9220c00406b60c302e42 | /support/readme2_va2_replication.R | 17371f05374ed6638ee3dffaa4f9cdf2b0ccfdf3 | [] | no_license | leiqi/readme-software | f713d195e30c84ae363074a85b2bd7ee70d8546c | 49ec05b56b3bfe1f6024a1bfa4ce12cfd6f69a2a | refs/heads/master | 2020-03-28T17:58:27.573173 | 2018-05-19T02:32:57 | 2018-05-19T02:32:57 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 20,253 | r | readme2_va2_replication.R | va2 <-
function(termMatrix,seed, nsymps=2, ...) {
# This is the function which runs all of the VA2 clusters and merges them into a single "ensemble" estimate.
# Takes the term matrix which contains
# Run every VA2 ensemble member (plus SVM and multinomial-logit baselines) on
# the same term matrix and average the VA2 members' category-proportion
# estimates into an "ensemble" row.  Returns a list with the coefficient
# table (one row per algorithm), per-algorithm run times, and the RNG seed.
# Each member is wrapped in try() so a single failure does not abort the run.
ensemble_VA2<-function(termMatrix,seed, nsymptoms=2){
# Measure running time
startTimeOrg <- proc.time()
ensembleUnits<-c("va2Stacked","va2BiasCorrection","va2NoiseMinimization","va2CentroidPrediction","va2NmPrediction","ensemble")
timeList<-data.frame(algorithm=ensembleUnits,elapsedTime=NA)
#log <- file("readme2.log", "w")
#out <- file("results.csv", "w")
# Draw a seed from the RNG state when none was supplied; the seed is
# returned so the run stays reproducible.
if (is.na(seed)) seed<-getSeed()
set.seed(seed)
#cat("The random seed used in this run is", seed, "\n", file=log)
#cat(paste(RNGkind()[1]), " is used to generate random numbers\n", file=log)
data<-getData(termMatrix)
#cat("Training Size(",paste(data$categoryLabels,collapse=","),")\n",file=log)
#cat(paste(data$size,collapse=","),"\n",file=log)
#cat("Term Matrix Test Size,",dim(data$test)[1],",",dim(data$test)[2],"\n",file=log)
#cat("Running va2stacked\n",file=log)
startTime <- proc.time()
# Member 1: stacked regression estimator (also used as the base for the
# bias-correction and noise-minimization members below).
coefBase<-coefva2Stacked<-try(va2Stacked(data, nsymptoms=nsymptoms), T)
timeList[1,2]<-round((proc.time()-startTime)[3])
coefList<-data.frame(array(dim=c(6,length(data$categoryLabels)+1)))
colnames(coefList)<-c("algorithm",data$categoryLabels)
coefList$algorithm<-ensembleUnits
coefList[1,names(coefva2Stacked)]<-coefva2Stacked
# The seed is reset before every member so each one sees the same RNG
# stream regardless of how much randomness the previous member consumed.
set.seed(seed)
#cat("Running va2BiasCorrection\n",file=log)
startTime <- proc.time()
coefva2BiasCorrection<-try(va2BiasCorrection(data,coefBase), T)
timeList[2,2]<-round((proc.time()-startTime)[3])
coefList[2,names(coefva2BiasCorrection)]<-coefva2BiasCorrection
set.seed(seed)
#cat("Running va2NoiseMinimization\n",file=log)
startTime <- proc.time()
coefva2NoiseMinimization<-try(va2NoiseMinimization(data,coefBase), T)
timeList[3,2]<-round((proc.time()-startTime)[3])
coefList[3,names(coefva2NoiseMinimization)]<-coefva2NoiseMinimization
set.seed(seed)
#cat("Running va2CentroidPrediction\n",file=log)
startTime <- proc.time()
# Member 4: classify-and-count with a (cosine) centroid classifier.
coefva2CentroidPrediction<-try(va2CentroidPrediction(data), T); coefva2CentroidPrediction <- try(table(coefva2CentroidPrediction)/sum(table(coefva2CentroidPrediction)), T)
timeList[4,2]<-round((proc.time()-startTime)[3])
coefList[4,names(coefva2CentroidPrediction)]<-coefva2CentroidPrediction
set.seed(seed)
#cat("Running va2NmPrediction\n",file=log)
startTime <- proc.time()
# Member 5: classify-and-count with the nearest-mean (euclidean) classifier.
coefva2NmPrediction <- centroidClassifier(training=data$train[,-1],trainingLabels=data$train[,1], test=data$test[,-1],distanceMetric="euclidean")
coefva2NmPrediction <- table(coefva2NmPrediction)/sum(table(coefva2NmPrediction))
coefList[5,names(coefva2NmPrediction)]<- coefva2NmPrediction
# Ensemble row: the plain mean across the five members (range kept only
# for optional logging).
coefEnsembleRange<-try(apply(coefList[-6,-1],2,function(x) max(x,na.rm=T)-min(x,na.rm=T)), T)
coefEnsemble<-apply(coefList[-6,-1],2,mean,na.rm=T)
#logInfo(out,cbind(names(coefEnsemble),coefEnsemble,coefEnsembleRange))
coefList[6,-1]<-coefEnsemble
#logInfo(log,coefList,header=paste("Coefs(",paste(names(coefEnsemble),collapse=","),")",sep=""))
timeList[6,2]<-round((proc.time()-startTimeOrg)[3])
# Baselines (not part of the ensemble mean): SVM and multinomial logit,
# appended as extra rows of the coefficient / timing tables.
#cat("Running SVM\n",file=log)
startTime <- proc.time()
coefSVMprediction<-try(svmPrediction(data), T)
timeListSVM <-data.frame(algorithm=c("svm"),elapsedTime=NA)
timeListSVM[1,2]<-round((proc.time()-startTime)[3])
coefListSVM<-data.frame(array(dim=c(1,length(data$categoryLabels)+1)))
colnames(coefListSVM)<-c("algorithm",data$categoryLabels)
coefListSVM$algorithm <- c("svm")
coefListSVM[1,names(coefSVMprediction)] <- coefSVMprediction
coefList <- rbind(coefList, coefListSVM)
timeList <- rbind(timeList, timeListSVM)
#cat("Running Multinomial Logit\n", file=log)
startTime <- proc.time()
coefMNLprediction <- try(mnlPrediction(data), T)
timeListMNL <- data.frame(algorithm=c("maxentMNL"), elapsedTime=NA)
timeListMNL[1,2] <- round((proc.time()-startTime)[3])
coefListMNL <-data.frame(array(dim=c(1,length(data$categoryLabels)+1)))
colnames(coefListMNL)<-c("algorithm",data$categoryLabels)
coefListMNL$algorithm <- c("maxentMNL")
coefListMNL[1,names(coefMNLprediction)] <- coefMNLprediction
coefList <- rbind(coefList, coefListMNL)
timeList <- rbind(timeList, timeListMNL)
#logInfo(log,timeList,header="Runtimes")
#close(out)
#close(log)
list(functionName="va2",coefs=coefList,runTimes=timeList, seed=seed)
}
# --- Thin wrappers, one per ensemble member --------------------------------
# Each takes the list built by getData() and returns a named vector of
# estimated category proportions.
va2NoiseMinimization<-function(data,coef){ colMeans(getTopMinSSE(coef,getDist(data),topN=200,simN=5000,density=25)) }
va2BiasCorrection<-function(data,coef) { adjustCoef(applyBiasCorrectionFactor(getDist(data),coef)) }
# NOTE(review): the default `nsymptoms=nsymptoms` is self-referential and only
# works because the caller (ensemble_VA2) always supplies the argument.
va2Stacked<-function(data,Ntry=3, nsymptoms=nsymptoms){ rowMeans(replicate(Ntry,calculateRegression(getDist(data, nsymptoms=nsymptoms))),na.rm=TRUE) }
va2CentroidPrediction<-function(data) centroidClassifier(training = data$train[,-1], trainingLabels = as.matrix(data$train[,1]), test = data$test[,-1])
#va2CentroidPrediction<-function(data) predictUsingConfusionMatrixMultiIteration(data,centroidClassifier)
va2NmPrediction<-function(data) predictUsingConfusionMatrixMultiIteration(data, nearestMeanClassifier)
svmPrediction <- function(data) svmclassifier(data, "linear")
mnlPrediction <- function(data) mnlclassifier(data)
# library2 is presumably the readme package's lazy package loader -- here it
# makes quadprog available for quad.constrain() below.
library2(quadprog, loadin = F)
normalizeColumnsToProbability <-function(aMatrix) sweep(data.matrix(aMatrix),2,margin.table(data.matrix(aMatrix),2),"/")
repmat<-function(x,n) matrix(rep(x,n),nrow=n,byrow=T)
adjustCoef<-function(coef){ coef[coef<0]<-0.001; prop.table(coef)}
# Write a matrix/data frame to a connection as comma-separated rows, with an
# optional header line.  Used only by the (currently commented-out) logging.
logInfo<-function(log,logData,header=NULL){
if (!is.null(header)) cat(header,"\n",file=log)
apply(logData,1,function(x) cat(paste(x,collapse=","),"\n",file=log))
}
# Derive a reproducible integer seed by sampling one value from the current
# RNG state (dropping the two bookkeeping entries at the head of .Random.seed).
getSeed<-function() {
sample(.Random.seed[-c(1:2)],1)
}
# Constrained least squares of Y on the columns of X: coefficients are
# restricted to the simplex (each in [0, 1], summing to 1), solved as a
# quadratic program via quadprog::solve.QP.  W is an optional weight matrix
# (generalized least squares); the scalar default 1 means ordinary LS.
# Returns the coefficient vector named by the columns of X.
quad.constrain<-function(Y, X, W=1)
{
p<-dim(X)[1]
q<-dim(X)[2]
# ind column 1 flags which coefficients are free (all of them here);
# column 2 receives the fitted values at the end.
ind <- matrix(0,q,2)
rownames(ind) <- colnames(X)
ind[,1] <- 1
const<-1
Y.new<-Y
# Box constraints: 0 <= beta <= 1.
lbd <- rep(0,q)
ubd <- rep(1,q)
X0 <- X[,ind[,1]==1]
# Quadratic (Dmat) and linear (dvec) terms of the QP; include W when a
# weight matrix was supplied.
if(is.null(dim(W))){
Dmat<-t(X0)%*%X0
dvec<-(Y.new)%*%X0
} else {
Dmat<-t(X0)%*%W%*%X0
dvec<-(Y.new)%*%W%*%X0
}
q0<-ncol(X0)
# Constraint matrix: the first column is the sum-to-one equality (meq=1);
# the remaining columns encode the lower/upper bound inequalities.
Amat<-matrix(0, q0,q0*2+1)
Amat[,1]<-rep(1,q0)
Amat[,2:(q0+1)]<-diag(1,q0)
Amat[,(q0+2):(2*q0+1)]<-diag(-1,q0)
bvec<-c(const, lbd[ind[,1]==1], -ubd[ind[,1]==1])
res<-quadprog::solve.QP(Dmat,dvec, Amat, bvec, meq=1)$solution
ind[(ind[,1]==1),2] <- res
ind[,2]
}
# Split a term matrix (columns: id, TRUTH, TRAININGSET, then word indicators)
# into training/test sets, dropping word columns that are constant within
# either set and documents with no remaining words.  Returns a list with the
# two sets, test-set size, per-category training counts, and category labels.
getData<-function(termMatrix){
# Drop word columns that are all-0 or all-1 within the training rows ...
tColSums<-colSums(termMatrix[termMatrix[,3]==1,4:ncol(termMatrix)])
colsToRemove<-c(3+which(tColSums==0 | tColSums==nrow(termMatrix[termMatrix[,3]==1,])))
if (length(colsToRemove)>0) termMatrix<-termMatrix[,-colsToRemove]
# ... and then within the test rows.
tColSums<-colSums(termMatrix[termMatrix[,3]==0,4:ncol(termMatrix)])
colsToRemove<-c(3+which(tColSums==0 | tColSums==nrow(termMatrix[termMatrix[,3]==0,])))
if (length(colsToRemove)>0) termMatrix<-termMatrix[,-colsToRemove]
# Drop documents left with no words at all.
rowsToRemove<-which(rowSums(termMatrix[,-c(1,2,3)])==0)
if (length(rowsToRemove)>0) termMatrix<-termMatrix[-rowsToRemove,]
trainingSet <- termMatrix[termMatrix$TRAININGSET == 1, ]
testSet <- termMatrix[termMatrix$TRAININGSET == 0, ]
# Keep TRUTH as column 1; drop the id and TRAININGSET columns.
trainingSet <- trainingSet[, -c(1,3)]
testSet <- testSet[, -c(1,3)]
size<-table(trainingSet[,1])
categoryLabels<-names(size)
#print(head(trainingSet)[,1:10])
list(train=trainingSet,test=testSet,testSize=nrow(testSet),size=size,categoryLabels=categoryLabels)
}
# Sampling weight for each word column: the product of its marginal level
# proportions in the pooled data, zeroed out for columns that are constant
# in either set (these carry no information).  Normalized to sum to one.
getProb<-function(testSet,trainingSet){
temp<-rbind(testSet, trainingSet)
prob.wt<-apply(temp[,-1],2,function(x) prod(prop.table(table(x))))
prob.wt[colSums(testSet[,-1]) == 0]<-0
prob.wt[colSums(trainingSet[,-1])== 0]<-0
prob.wt[colSums(testSet[,-1]) == nrow(testSet)]<-0
prob.wt[colSums(trainingSet[,-1])== nrow(trainingSet)]<-0
prop.table(prob.wt)
}
# Distribution of two-word profiles (encoded as 2*w1 + w2 in 0..3) per
# category (training, column-normalized) and overall (test).
trainDist<-function(wmat) normalizeColumnsToProbability(table(2*wmat[,2]+wmat[,3], by=wmat[,1]))
testDist<-function(wmat) prop.table(table(2*wmat[,1]+wmat[,2]))
# Draw indexSize random word-column subsets of size n (weighted by getProb);
# returned as an n x indexSize index matrix into the training columns.
getIndexList<-function(testSet,trainingSet,indexSize,n=2) {
prob=getProb(testSet,trainingSet)
nLast<-ncol(trainingSet)
array(replicate(indexSize,sample(2:nLast, n, prob=prob)),c(n,indexSize))
}
# Build the stacked word-profile distributions used by the regression-style
# estimators: for stackN random word subsets of size nsymptoms, compute the
# test-set profile distribution (stacked into `test`) and the per-category
# training profile distributions (stacked into `train`), drop zero-variance
# rows, and return them together with a diagonal inverse-variance weight
# matrix W.
getDist<-function(data, stackN=500, nsymptoms=7){
#stackN=max(stackN,round(log(ncol(data$train,2)))
trainingSet<-data$train # Create training set
testSet<-data$test # save test set
size<-data$size # Get the size of each category in the training set
k<-length(size) # Number of categories
indexList<-getIndexList(testSet,trainingSet,stackN, n=nsymptoms) # stackN subsets of n words form the word-stem profiles
# Zero templates; the anonymous functions below sub-assign into local
# copies, so every subset starts from a fresh all-zero template.
d<-rep(0,2^nsymptoms)
names(d)<- 0:(2^nsymptoms-1)
test<-matrix(0,(2^nsymptoms)*stackN,1)
test[,1]<-c(apply(indexList,2,function(x) { z<-testDist(testSet[,x]);d[names(z)]<-z;d}))
d<-matrix(0,2^nsymptoms,k)
rownames(d)<-0:(2^nsymptoms-1)
train<-apply(indexList,2,function(x) {z<-trainDist(trainingSet[,c(1,x)]);d[rownames(z),]<-z;list(d)})
train<-do.call("rbind",do.call("rbind",train))
colnames(train)<-data$categoryLabels
# Binomial sampling variance of each stacked row; rows with zero variance
# carry no information and are removed.
variance<-rowSums(train*(1-train)/matrix(rep(size,nrow(train)),ncol=k,byrow=T))
train<-train[variance>0,]
test<-test[variance>0]
variance<-variance[variance>0]
W<-diag(1/variance)
list(testDist=test,trainDist=train, W=W, size=data$size, testSize=data$testSize,categoryLabels=data$categoryLabels)
}
# Gram-Schmidt orthogonalization of the ROWS of a square, non-singular
# matrix: row i has its projections onto rows 1..(i-1) removed.  If
# `orthnorm` supplies row indices, those rows are additionally scaled to
# unit length.
gram.schmidt<-function(mat, orthnorm=NULL)
{
if (det(mat)==0) stop("mat is not full rank")
omat<-mat
p<-dim(omat)[1]
for (i in 2:p){
temp <- rep(0,p)
# NOTE(review): t(omat[j,])%*%omat[i,] yields a 1x1 matrix, and its
# product/division with vectors relies on R treating 1x1 arrays as
# scalars here -- verify on the targeted R version.
for (j in 1:(i-1))
temp <- temp+t(omat[j,])%*%omat[i,]/(t(omat[j,])%*%omat[j,])*omat[j,]
omat[i,] <- omat[i,]-temp
}
if (!is.null(orthnorm)) {
oind<-orthnorm
for ( i in oind)
omat[i,] <- omat[i,]/(t(omat[i,]%*%omat[i,]))^0.5
}
return(omat)
}
# One constrained-regression estimate of the category proportions from a
# getDist() object; falls back to NAs if the QP returns nothing.
calculateRegression<-function(dist) {
result<-adjustCoef(quad.constrain(dist$testDist,dist$trainDist,dist$W))
if (is.null(result)) result<-rep(NA,length(dist$categoryLabels))
result
}
# Single Dirichlet(alpha) draw via normalized independent Gamma variates.
rdirichlet_va2<-function (alpha) prop.table(rgamma(length(alpha),alpha))
# Noise-minimization step: simulate simN candidate proportion vectors from a
# Dirichlet centered on `coef` (concentration = density*coef), score each by
# its sampling-variance-adjusted sum of squared errors against the stacked
# test distribution, and return the topN lowest-scoring candidates (rows).
getTopMinSSE<-function(coef,dist,topN=100,simN=10000,density=25){
adjustedSSE<-rep(0,simN)
# Column 1 is a candidate id so each apply() iteration knows where to
# record its score.
pList<-cbind(c(1:simN),t(replicate(simN,rdirichlet_va2(density*coef))))
colnames(pList)<-c("id",dist$categoryLabels)
train<-dist$trainDist
test<-dist$testDist
N<-dist$testSize
size<-dist$size
var<-train*(1-train)*repmat((size-1)/size,nrow(train))
M<-nrow(var)
k<-ncol(var)
# NOTE: the apply() is used purely for its side effect -- scores are
# written into adjustedSSE via <<-; the returned z is discarded.
z<-apply(pList,1,function(p) {
varTotal<-repmat(p[-1]*(N*p[-1]+size)/(N*size),M)*var
err<-sum((test-train%*%p[-1])^2)-sum(varTotal)
adjustedSSE[p[1]]<<-err
return(NULL)
}
)
pList[order(adjustedSSE),-1][1:topN,]
}
# Apply an analytic measurement-error (attenuation) correction to the
# stacked-regression coefficients: A spans the contrasts orthogonal to the
# mean direction (via gram.schmidt), EE holds the estimated sampling
# variance of the stacked training distributions, and the correction factor
# inverts the implied attenuation.  Returns the corrected, named vector.
applyBiasCorrectionFactor<-function(dist,coef){
train<-dist$trainDist
size<-dist$size
k<-length(size)
# Orthonormal basis whose first row is the constant (mean) direction.
G<-diag(1, k)
G[1,]<-rep(1/k,k)
G<-as.matrix(gram.schmidt(G, orthnorm=2:k))
if (k==2) A<-t(as.matrix(G[2:k,])) else A<-G[2:k,]
I<-matrix(0,k,k)
diag(I)<-1
# Diagonal of the error covariance: binomial variance of each stacked
# training proportion given the per-category training counts.
EE<-matrix(0,nrow=k,ncol=k)
diag(EE)<-colSums(train*(1-train)/matrix(rep(size,nrow(train)),nrow=nrow(train),byrow=T))
XX<-t(train)%*%train
factor<-solve(I-t(A)%*%solve(A%*% XX %*% t(A) + A%*% EE %*% t(A)) %*%A%*%(EE))
coef<-c(factor%*%coef)
names(coef)<-dist$categoryLabels
coef
}
# Average the confusion-matrix-corrected proportion estimate over several
# independent cross-validation iterations.
predictUsingConfusionMatrixMultiIteration <- function(data, classifierFunction, numberOfCrossValidations=10, numberOfIterations=10) {
colMeans(t(replicate(numberOfIterations,predictUsingConfusionMatrixSingleIteration(data, classifierFunction, numberOfCrossValidations))),na.rm=TRUE)
}
# One iteration: estimate the classifier's confusion matrix by cross-
# validation on the training set, get its raw classify-and-count proportions
# on the test set, and correct the latter with the former.  Errors in the
# correction step are logged and yield an all-NA result.
predictUsingConfusionMatrixSingleIteration <- function(data, classifierFunction, numberOfCrossValidations=10) {
confusionMatrix<-getConfusionMatrix(data,classifierFunction,data$categoryLabels)
naivePredictions<-getNaivePredictions(data, classifierFunction,data$categoryLabels)
prediction<-tryCatch(correctNaivePredictionsUsingConfusionMatrix(confusionMatrix, c(naivePredictions)),error=function(e) cat(paste(e,collapse=","),"\n"))
if (is.null(prediction)) prediction<-rep(NA,length(data$categoryLabels))
prediction
}
# Classify-and-count baseline with an SVM (e1071): fit on the training set,
# predict test labels, and return the predicted label proportions as a
# vector named by all category labels (unpredicted categories get 0 after
# the logical template is coerced to numeric).
svmclassifier <- function(data, kern){
# Fit SVM
svm_out <- e1071::svm(x=data$train[,2:length(data$train[1,])], y=as.factor(data$train[,1]), kernel=kern)
#print(svm_out)
# Predict on the test set
pred_out <- predict(svm_out, newdata=data$test[,2:length(data$test[1,])])
#print(pred_out)
prop_out <- table(pred_out)/sum(table(pred_out))
out_vec <- vector(length=length(data$categoryLabels))
names(out_vec) <- data$categoryLabels
out_vec[names(prop_out)] <- prop_out
return(out_vec)
}
# Multinomial-logit baseline: cross-validated multinomial glmnet at
# lambda.1se; test-set class probabilities are summed per category and
# renormalized into proportions.  (The original maxent implementation is
# kept below, commented out, for reference.)
mnlclassifier <- function(data){
## load mnl library
#library2(maxent, loadin = F)
# Fit MNL classifier
#max_out <- maxent::maxent(feature_matrix=data$train[,2:length(data$train[1,])],
#code_vector=as.factor(data$train[,1]))
#max_pred <- predict(max_out, feature_matrix=data$test[,2:length(data$test[1,])])
#Fit MNL classifier
max_pred <- predict( glmnet::cv.glmnet(x=as.matrix(data$train[,2:length(data$train[1,])]),
y=as.factor(data$train[,1]), nfolds = 5,
family = "multinomial"),
as.matrix(data$test[,2:length(data$test[1,])]), type = "response", s = "lambda.1se")[,,1]
# Predict on the test set
label_val <- colnames(max_pred)
proportions <- apply(max_pred, 2, function(x) sum(as.numeric(x)))/sum(as.numeric(max_pred))
proportions <- proportions/sum(proportions)
# Proportions
names(proportions) <- label_val
out_vec <- vector(length=length(data$categoryLabels))
names(out_vec) <- data$categoryLabels
out_vec[names(proportions)] <- proportions
return(out_vec)
}
# Invert the misclassification process: solve for the true proportions that,
# pushed through the confusion matrix, best reproduce the naive predicted
# proportions (constrained to the simplex).
correctNaivePredictionsUsingConfusionMatrix <- function(confusionMatrix, naivePredictions) {
quad.constrain(naivePredictions, confusionMatrix, W=1)
}
# Estimate the classifier's column-normalized confusion matrix (predicted x
# true) via k-fold cross-validation on the training set.  The identity
# diagonal seeds each column so a category with no held-out documents still
# yields a valid probability column.
getConfusionMatrix<-function(data, classifierFunction, categoryLabels, numberOfCrossValidations=10){
crossValLabels <- getCrossValidationIndices(data$train$TRUTH, numberOfCrossValidations)
numberOfCategories<-length(categoryLabels)
confusionMatrix <- matrix(0, numberOfCategories, numberOfCategories, dimnames=list(categoryLabels,categoryLabels))
diag(confusionMatrix) <- 1
for (cv in 1:numberOfCrossValidations) {
singleCrossValidationResults<-getSingleCrossValidationResults(data,crossValLabels,cv,classifierFunction)
if (!is.null(singleCrossValidationResults)) confusionMatrix[rownames(singleCrossValidationResults),colnames(singleCrossValidationResults)] <- confusionMatrix[rownames(singleCrossValidationResults),colnames(singleCrossValidationResults)] + singleCrossValidationResults
}
normalizeColumnsToProbability(confusionMatrix)
}
# Raw classify-and-count: train on the full training set, predict the test
# set, and return predicted-label proportions over all category labels
# (categories never predicted get 0).
getNaivePredictions<-function(data, classifierFunction,categoryLabels){
naivePredictionsRaw <- prop.table(table(classifierFunction(data.matrix(data$train[,-1]), data$train$TRUTH, test=as.matrix(data$test[,-1]))))
naivePredictions<-rep(0,length(categoryLabels))
names(naivePredictions)<- categoryLabels
naivePredictions[names(naivePredictionsRaw)]<-naivePredictionsRaw
naivePredictions
}
# One cross-validation fold: hold out the rows labeled `cv`, train the
# classifier on the rest, and return the predicted-vs-true count table for
# the held-out rows (NULL when the fold is empty).
getSingleCrossValidationResults<-function(data, crossValLabels, cv,classifierFunction){
booleanTrainingLabels<-crossValLabels!=cv
trainingSetMat = data.matrix(data$train[,-1])
cvTestSet = trainingSetMat[!booleanTrainingLabels,]
if (nrow(cvTestSet) == 0) return(NULL)
cvTrainingSet = trainingSetMat[booleanTrainingLabels,]
cvTruthLabels = data$train$TRUTH[booleanTrainingLabels]
predictions <- classifierFunction(cvTrainingSet, cvTruthLabels, cvTestSet)
data.matrix(table(pred = predictions, true = data$train$TRUTH[!booleanTrainingLabels]))
}
# Per-category mean feature vector: sum the rows of `training` within each
# label and divide each group's sum by the group's size.
getCentroids <- function(training, truthLabels) {
  groupSums <- rowsum(training, truthLabels)
  groupSizes <- table(truthLabels)[rownames(groupSums)]
  sweep(groupSums, 1, groupSizes, "/")
}
# Pairwise SQUARED euclidean distances between the rows of x and the rows of
# c, via the expansion |x - c|^2 = |x|^2 + |c|^2 - 2 x.c .
euclideanDistance <- function(x, c) {
  nX <- nrow(x)
  nC <- nrow(c)
  sqNormsX <- matrix(rowSums(x * x), nX, nC)
  sqNormsC <- matrix(rowSums(c * c), nX, nC, byrow = TRUE)
  sqNormsX + sqNormsC - 2 * (x %*% t(c))
}
# Classify each test row by its nearest class centroid.
# With the (default) "cosine" metric, centroid and test rows are
# L2-normalised so that the negated dot product orders candidates like
# cosine distance; ties between equally-near centroids are broken at random.
# Any other metric falls through to squared-Euclidean distance.
# Returns a character vector of predicted class labels, one per test row.
centroidClassifier<-function(training,trainingLabels, test,distanceMetric="cosine"){
  centroids<-as.matrix( getCentroids(training, trainingLabels) )
  # one row per test observation, one column per class centroid
  distances<-matrix(0,nrow(test),nrow(centroids))
  if (distanceMetric=="cosine") {
    # unit-normalise rows; negated cosine similarity then acts as a distance
    centroids <- centroids / sqrt(rowSums(centroids*centroids))
    test<-test/sqrt(rowSums(test*test))
    #distances<- -1* test %*% t(centroids)
    distances<- -1* t( centroids %*% t(test) )
    # zero-norm rows produce NaN after normalisation; push them far away
    distances[is.nan(distances)]<-100000
    # rep(..., 2) guards sample()'s scalar behaviour when only one centroid
    # attains the minimum; ties are then resolved uniformly at random
    predictions<-apply(distances,1,function(x) sample(rep(which(x==min(x)),2),1))
    rownames(centroids)[predictions]
  } else{
    # (no-op at this point: distances is still the zero matrix)
    distances[is.nan(distances)]<-100000
    # NOTE(review): f2n() is defined elsewhere in this file; it appears to
    # coerce a row to numeric -- confirm against its definition
    predictions <- apply(test, 1, function(da){ myD <- rowSums( (matrix(rep(f2n(da), times = nrow(centroids)),
    ncol = ncol(centroids), byrow =T) - centroids )^2 );
    which_min <- sample(which(myD == min(myD)), 1) } )
    rownames(centroids)[predictions]
  }
}
# Convenience wrapper: centroid classification with squared-Euclidean distance
nearestMeanClassifier<-function(training,truthLabels, test) centroidClassifier(training,truthLabels, test,distanceMetric="euclidean")
# Assign each training row a fold index in 1..numberOfCrossValidations,
# stratified by class: within every class the fold sizes differ by at most
# one, and both fold labels and their assignment to rows are randomised
# (output depends on the RNG state).
getCrossValidationIndices <- function(truthLabels, numberOfCrossValidations=10) {
  crossValidationIndicesForAllTrainingRows = matrix(0, length(truthLabels), 1)
  for (categoryLabel in unique(truthLabels)) {
    indicesOfACategory = which(truthLabels == categoryLabel)
    categorySize<-length(indicesOfACategory)
    # replicate() stacks shuffled 1..K blocks, truncated to the class size,
    # so folds within a class are as balanced as possible
    uniformCrossValidationIndices=c(replicate(ceiling(categorySize/numberOfCrossValidations),sample(1:numberOfCrossValidations, numberOfCrossValidations)))[1:categorySize]
    # NOTE(review): sample(v, n) on a length-1 index vector v samples from
    # 1:v instead of v -- confirm no class can have exactly one member
    crossValidationIndicesForAllTrainingRows[sample(indicesOfACategory,categorySize)] = uniformCrossValidationIndices
  }
  crossValidationIndicesForAllTrainingRows
}
testResult<-try(ensemble_VA2(termMatrix,seed,nsymptoms=nsymps), T)
if (is.null(testResult)) {
y<-termMatrix$TRUTH
trainingIndices<-which(termMatrix$TRAININGSET==1)
categoryLabels<-sort(unique(y[trainingIndices]))
coefList<-data.frame(array(dim=c(1,length(categoryLabels)+1)))
testIndices<- which(termMatrix$TRAININGSET==0)
colnames(coefList)<-c("algorithm",categoryLabels)
coefList[1,2]<--1
coefList[1,3]<--1
coefList[1,1]<-"va2-error"
timeList<-data.frame(algorithm=c("va2-error"),elapsedTime=-1)
return(list(functionName="va2-error", coefs=coefList,runTimes=timeList, seed=NA))
} else return(testResult$coefs[6,-1])
}
|
c74967f0f5d9059d661d5b5d5c049a8eaa1fbd0a | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/ccaPP/man/ccaPP-package.Rd | ba3e53c95ba12805c8b4edc3c8c0378268b39de2 | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 711 | rd | ccaPP-package.Rd | \name{ccaPP-package}
\alias{ccaPP-package}
\alias{ccaPP}
\docType{package}
\title{
\packageTitle{ccaPP}
}
\description{
\packageDescription{ccaPP}
}
\details{
The DESCRIPTION file:
\packageDESCRIPTION{ccaPP}
\packageIndices{ccaPP}
}
\author{
\packageAuthor{ccaPP}
Maintainer: \packageMaintainer{ccaPP}
}
\references{
A. Alfons, C. Croux and P. Filzmoser (2016) Robust maximum association between
data sets: The \R Package \pkg{ccaPP}. \emph{Austrian Journal of Statistics},
\bold{45}(1), 71--79.
A. Alfons, C. Croux and P. Filzmoser (2016) Robust maximum association
estimators. \emph{Journal of the American Statistical Association}. DOI
10.1080/01621459.2016.1148609. In press.
}
\keyword{package}
|
cc529d1704f24f3ea90febec7875f26711f3e1a7 | 7c39da976f28af016e5b1f847e68473c659ea05d | /tests/testthat/test_segmentation.R | 01bfa6bf8185f8d446af7867eaec2f83d520817a | [] | no_license | cancer-genomics/trellis | b389d5e03959f8c6a4ee7f187f7749048e586e03 | 5d90b1c903c09386e239c01c10c0613bbd89bc5f | refs/heads/master | 2023-02-24T05:59:44.877181 | 2023-01-09T20:38:36 | 2023-01-09T20:38:36 | 59,804,763 | 3 | 1 | null | 2023-01-11T05:22:52 | 2016-05-27T04:45:14 | R | UTF-8 | R | false | false | 833 | r | test_segmentation.R | context("Segmentation")
# Regression test: segmentBins() must not emit duplicated segments.
test_that("duplicate segments", {
  library(trellis)
  library(svbams)
  library(svfilters.hg19)
  library(Rsamtools)
  # locate the example BAM shipped with svbams
  path <- system.file("extdata", package="svbams", mustWork=TRUE)
  data(bins1kb)
  data(germline_filters, package="svfilters.hg19")
  # restrict to chr3 and thin to every 5th 1kb bin to keep the test fast
  bins <- keepSeqlevels(bins1kb, "chr3", pruning.mode="coarse")
  bins <- bins[seq(1, length(bins), by=5)]
  bam.file <- file.path(path, "cgov10t.bam")
  bview <- BamViews(bamRanges=bins, bamPaths=bam.file)
  # raw counts -> normalised counts -> GC-corrected log ratios
  bins$cnt <- binnedCounts(bview)
  bins$std_cnt <- binNormalize(bins)
  set.seed(123)
  gc.adj <- binGCCorrect(bins)
  # NOTE(review): ad-hoc -0.6 offset of the GC-corrected values; the
  # original author flagged it ("why?") -- confirm before changing
  gc.adj <- gc.adj - 0.6 ## why?
  bins$log_ratio <- gc.adj
  seg.params <- SegmentParam()
  bins$adjusted <- bins$log_ratio
  ##trace(segmentBins, browser)
  g <- segmentBins(bins, seg.params)
  # the segmentation output must contain no duplicated ranges
  expect_true(!any(duplicated(g)))
})
|
1c744da205bb7c7dc69d1666a7600a0dd4aa6bce | 7e0191ea7e31539200b1c6bebd35972f2f8bd7c9 | /run_analysis.R | c47e38e0b22484c25807dfcc976782144f2dc443 | [] | no_license | OzturkArda/Coursera_CleaningData_ProjectAssignment | a634b52194561663abf23abca27a536609e3c5c6 | 991ee0e50832ad8c841d4027acbc450c5de1e3c0 | refs/heads/master | 2021-01-10T15:59:33.904526 | 2016-01-31T20:16:25 | 2016-01-31T20:16:25 | 50,789,756 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,705 | r | run_analysis.R | # Author : Arda OZTURK
# Subject : Getting and Cleaning Data Course Project
# Date : 28.01.2016
#
##############################################################################
#
#This script is created to fulfill the tasks listed below:
#
#Task 1:
#Merge the training and the test sets to create one data set.
#
#Task 2:
#Extract only the measurements on the mean and standard deviation for each
#measurement
#
#Task 3:
#Use descriptive activity names to name the activities in the data set
#
#Task 4:
#Appropriately labels the data set with descriptive variable names
#
#Task 5:
#Creates a second, independent tidy data set with the average of each variable
#for each activity and each subject (From the data set in step 4)
#
###############################################################################
#Fecth required libraries
library(plyr)
# Task 1
#==============================================================================
#Files are downloaded and extracted (from zip file) manually
#Create datas set for training files
xTrain <- read.table("train/X_train.txt")
yTrain <- read.table("train/y_train.txt")
subjectTrain <- read.table("train/subject_train.txt")
#Create data sets for training files
xTest <- read.table("test/X_test.txt")
yTest <- read.table("test/y_test.txt")
subjectTest <- read.table("test/subject_test.txt")
#bind x data
xData <- rbind(xTrain, xTest)
#bind y data
yData <- rbind(yTrain, yTest)
#bind subject data
subjectData <- rbind(subjectTrain, subjectTest)
# Task 2
#==============================================================================
#read features for mapping
features <- read.table("features.txt")
# get only columns with mean() or std() in their names
mean_and_std_features <- grep("-(mean|std)\\(\\)", features[, 2])
#read only related columns
xData <- xData[, mean_and_std_features]
# correct column name
names(xData) <- features[mean_and_std_features, 2]
# Task 3
#==============================================================================
#read activities for mapping
activities <- read.table("activity_labels.txt")
# update values with correct activity names
yData[, 1] <- activities[yData[, 1], 2]
# correct column name
names(yData) <- "activity"
# Task 4
#==============================================================================
# correct column name
names(subjectData) <- "subject"
#bind all data
allData <- cbind(xData, yData, subjectData)
# Task 5
#==============================================================================
averagesData <- ddply(allData, .(subject, activity), function(x) colMeans(x[, 1:66]))
#generate output
write.table(averagesData, "averages_data.txt", row.name=FALSE)
|
5f9ded0d42e589d8c1705695094d3b47d8ab10ca | 8213941f015535abc4eda6aabaad02a6f31736de | /Github_Run_Code.R | bc02f0d6731697ebe0fbe2a0bc3f7ef8d6402aba | [] | no_license | mayank221/TABA-Assignment | 515c40c820f322011fedb607c87b5452e47fc6b1 | 30822abd1a306e2dc4480a8a81eaaa0cf35a26c9 | refs/heads/master | 2020-06-25T11:02:57.629882 | 2019-07-28T19:51:16 | 2019-07-28T19:51:16 | 199,291,247 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 153 | r | Github_Run_Code.R | library(shiny)
# Install/load the package dependencies defined by the remote script
source("https://raw.githubusercontent.com/mayank221/TABA-Assignment/master/Dependency.R")
# Launch the Shiny app directly from the GitHub repository (owner "mayank221")
runGitHub("TABA-Assignment","mayank221")
|
8b2a0498208684ea6a91841b967644e7eb8e6839 | 91be731811b205de722f4a7b960133638be48713 | /R/markerQualityControl.R | 8e8eb95a2319ee02e48568e935e1e28decd6cdcf | [] | no_license | TanerArslan/SubCellBarCode | caa47f2b50120eea93abe3a1542a7f866389e41d | fb5b25d6c56cda4cc21cd13bfe0580ca6c800d2c | refs/heads/master | 2021-06-08T22:32:18.314935 | 2021-06-05T10:47:08 | 2021-06-05T10:47:08 | 172,898,704 | 5 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,930 | r | markerQualityControl.R | #' @title Evaluate the quality of the marker proteins
#' @description Given the proteomics data, quality of the overlapped
#' marker proteins are evaluated by correlating replicates of fractions.
#' @param coveredProteins character; list of marker proteins, gene
#' symbols, that are covered in 3365 marker proteins.
#' @param protein.data data.frame; fractionated proteomics data,
#' rownames are gene symbols associated protein.
#'@export
#'@examples {
#'
#'df <- loadData(SubCellBarCode::hcc827Ctrl)
#'
#'c.prots <- calculateCoveredProtein(rownames(df), markerProteins[,1])
#'
#'r.markers <- markerQualityControl(c.prots[1:5], df)
#'}
#' @import ggplot2
#' @import gridExtra
#' @importFrom stats cor
#' @return robustMarkers
markerQualityControl <- function(coveredProteins, protein.data){

    ## ---- Replicate-wise correlation marker QC --------------------------
    ## Correlate each marker protein's fractionation profile in replicate A
    ## against replicate B; irreproducible markers (Pearson < 0.8) are
    ## removed before the sample-wise check.
    m.prot.df <- protein.data[coveredProteins, ]
    m.prot.df <- m.prot.df[complete.cases(m.prot.df), ]

    if (nrow(m.prot.df) < 1)
        stop('Make sure your inputs are correct. Type ?markerQualityControl')

    fracs.df.A <- m.prot.df[, grepl("\\.A\\.", colnames(m.prot.df))]
    fracs.df.B <- m.prot.df[, grepl("\\.B\\.", colnames(m.prot.df))]

    ## NA_real_ template replaces as.numeric(""), which raised an
    ## "NAs introduced by coercion" warning on every call
    cor.reps.pearson <- vapply(rownames(fracs.df.A),
        function(x) {cor(unlist(fracs.df.A[x, ]), unlist(fracs.df.B[x, ]),
                            method="pearson")},
        NA_real_)

    replicate.df <- data.frame(Protein = names(cor.reps.pearson),
                        Correlation = cor.reps.pearson)

    ## density of replicate correlations; dashed line marks the 0.8 cutoff
    p1 <- ggplot(replicate.df, aes(x = Correlation)) +
        geom_density(alpha = .7, fill = "deepskyblue") +
        theme_minimal() +
        theme(text = element_text(size = 14),
            plot.title = element_text(hjust = 0.5, size = 14),
            axis.text.x = element_text(face = "bold", color="black"),
            axis.text.y = element_text(face = "bold", color="black")) +
        geom_vline(xintercept = 0.8, linetype="dashed", color = "red") +
        labs(title = "Replicate-Wise Correlation Marker QC",
            tag = "A",
            y = "Density",
            x = "Pearson Corr.")

    ## remove replicate-wise irreproducible marker proteins
    rep.prots <- names(cor.reps.pearson[cor.reps.pearson < 0.8 ])
    message("Number of removed replicate-wise proteins: ", length(rep.prots))

    ## ---- Sample-wise correlation marker QC -----------------------------
    ## Compare each surviving protein's profile against the reference marker
    ## profile (SubCellBarCode::markerProteins) in both replicates.
    prot.names <- setdiff(rownames(m.prot.df), rep.prots)
    markerProteins <- SubCellBarCode::markerProteins[prot.names,][,3:7]
    m.prot.df <- m.prot.df[prot.names,]

    ## helper: per-protein correlation between the observed profile (df)
    ## and the reference marker profile (marker.df)
    prot.cor <- function(df, marker.df, cor.method = c("spearman", "pearson")){
        unlist(lapply(rownames(m.prot.df), function(x){
            p.cor <- cor(t(df[x,]), t(marker.df[x,]), method = cor.method)
            names(p.cor) <- x
            p.cor
        }))}

    pearson.corA <- prot.cor(df = m.prot.df[grepl("\\.A\\.",
                                colnames(m.prot.df))],
                        marker.df = markerProteins,
                        cor.method = "pearson")
    pearson.corB <- prot.cor(df = m.prot.df[grepl("\\.B\\.",
                                colnames(m.prot.df))],
                        marker.df = markerProteins,
                        cor.method = "pearson")

    pearson.cor <- data.frame(RepA = pearson.corA, RepB = pearson.corB)
    pearson.cor$MinP.Cor <- apply(pearson.cor, 1, min)

    spear.corA <- prot.cor(df = m.prot.df[grepl("\\.A\\.",
                                colnames(m.prot.df))],
                        marker.df = markerProteins,
                        cor.method = "spearman")
    spear.corB <- prot.cor(df = m.prot.df[grepl("\\.B\\.",
                                colnames(m.prot.df))],
                        marker.df = markerProteins,
                        cor.method = "spearman")

    ## BUG FIX: RepB previously re-used spear.corA, so the Spearman filter
    ## ignored replicate B entirely; it now uses spear.corB as intended.
    spear.cor <- data.frame(RepA = spear.corA, RepB = spear.corB)
    spear.cor$MinS.cor <- apply(spear.cor, 1, min)

    ## per protein: worst-replicate Pearson and Spearman correlations
    df <- data.frame(Protein = rownames(spear.cor),
                Pearson = pearson.cor$MinP.Cor,
                Spearman = spear.cor$MinS.cor)
    cols <- SubCellBarCode::markerProteins[prot.names,][8]
    Color <- cols$Colour

    ## scatter of the two metrics; dashed lines mark the QC cutoffs
    p2 <- ggplot(df, aes(x = Pearson, y = Spearman)) +
        geom_point(colour = Color, size = 2) +
        geom_hline(yintercept = 0.6, linetype="dashed", color = "red") +
        geom_vline(xintercept = 0.8, linetype="dashed", color = "red") +
        labs(title = "Sample-Wise Correlation Marker QC",
            tag = "B",
            y = "Spearman Corr.",
            x = "Pearson Corr.") +
        theme_minimal() +
        theme(text = element_text(size = 14),
            plot.title = element_text(hjust = 0.5, color = "black", size = 14),
            axis.text.x = element_text(face = "bold", color="black"),
            axis.text.y = element_text(face = "bold", color="black"))

    ## drop proteins failing either threshold
    ## (0.599 keeps values that round to 0.6, mirroring the original cutoff)
    sample.removed.prot <- df[df$Pearson < 0.8 | df$Spearman < 0.599,]
    sample.removed.prot <- as.character(sample.removed.prot$Protein)
    message("Number of removed sample-wise proteins: ",
            length(sample.removed.prot))

    robustMarkerProteins <- setdiff(prot.names, sample.removed.prot)
    message("Number of total removed marker proteins: ",
            length(sample.removed.prot) + length(rep.prots))

    grid.arrange(p1, p2, ncol=2)

    ## ---- Compartment-wise coverage check after marker QC ---------------
    ## warn if any subcellular compartment retains < 20% of its reference
    ## markers (compartment.size = reference marker counts per compartment)
    compartment.size <- c(358, 351, 252, 174, 192, 121, 231, 198, 242,
                        132, 220, 215, 341, 69, 269)
    compartments <- c("S1", "S2", "S3", "S4", "N1", "N2", "N3", "N4",
                    "C1", "C2", "C3", "C4", "C5", "M1", "M2")

    r.marker.df <- SubCellBarCode::markerProteins[robustMarkerProteins, ]

    coverageCompWise <- lapply(seq_len(length(compartments)), function(x){
        temp.df <- r.marker.df[r.marker.df$Compartments == compartments[x], ]
        values <- list(Compartments = compartments[x],
            ProteinCoverage = 100 * ((dim(temp.df)[1]) /compartment.size[x]))
    })

    r.cov.df <- as.data.frame(do.call("rbind", coverageCompWise))
    non.enriched.loc <- r.cov.df[r.cov.df$ProteinCoverage < 20, ]

    if(nrow(non.enriched.loc) == 1){
        warning("There is not enough enrichment at: ",
                as.character(non.enriched.loc$Compartments),
                "\nWe recommend you to perform the fractionation, again.")
    }else if(nrow(non.enriched.loc) > 1){
        comp <- paste(as.character(non.enriched.loc$Compartments),
                collapse = ",")
        warning("There are not enough enrichments at: ",
                comp, "\nWe recommend you to perform the fractionation.")
    }

    return(robustMarkerProteins)
}
|
f1ede1f0fdaa81ca6d617879704232199edf27c5 | ae9970a483fa3023732ab726f12354f4d2aa9911 | /GIS pH analysis (all crops) v4.R | d3c8ae007628c224b3f24610205383807ed4a73a | [] | no_license | aj-sykes92/pH-optimisation-arable | 1460fb00c89de3c6e0c96724edfb28a4ea0788e0 | 09ba5d0355b1b20aa0dae855efeceabe433dad2b | refs/heads/master | 2023-01-21T12:40:16.379472 | 2020-12-05T15:51:30 | 2020-12-05T15:51:30 | 211,346,121 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 30,497 | r | GIS pH analysis (all crops) v4.R | # packages
library(raster)
library(tidyverse)
library(sp)
library(soiltexture)
data_repo <- "DEFRA Clean Growth Project/pH Optimisation/Extension for publication"
UK <- find_onedrive(dir = data_repo, path = "GIS data/DA shapefile/GBR_adm_shp/GBR_adm1.shp") %>% shapefile()
# read in soil raster data and stack
Soil_stack <- stack(#find_onedrive(dir = data_repo, path = "GIS data/SoilGrids 5km/Soil pH/Fixed/PHIHOX_M_sl4_5km_ll.tif"), # pH
find_onedrive(dir = data_repo, path = "GIS data/SoilGrids 5km/Sand content/Fixed/SNDPPT_M_sl4_5km_ll.tif"), # sand %
find_onedrive(dir = data_repo, path = "GIS data/SoilGrids 5km/Silt content/Fixed/SLTPPT_M_sl4_5km_ll.tif"), # silt %
find_onedrive(dir = data_repo, path = "GIS data/SoilGrids 5km/Clay content/Fixed/CLYPPT_M_sl4_5km_ll.tif"), # clay %
#find_onedrive(dir = data_repo, path = "GIS data/SoilGrids 5km/OC tonnes per ha/Fixed/OCSTHA_M_30cm_5km_ll.tif"), # OC tonnes per ha
find_onedrive(dir = data_repo, path = "GIS data/SoilGrids 5km/Bulk density/Fixed/BLDFIE_M_sl4_5km_ll.tif"))
# read in crop area raster data and stack
readdir <- find_onedrive(dir = data_repo, path = "GIS data/MapSPAM data/Physical area")
file.names <- dir(readdir, pattern =".tif")
Crop_area_stack <- raster::stack()
for(i in 1:length(file.names)){
readpath <- paste(readdir, file.names[i], sep="/") # aggregate strings to create filepath
x <- raster(readpath) # read raster
Crop_area_stack <- addLayer(Crop_area_stack, x)
rm(x)
print(file.names[i])
}
# read in crop yield raster data and stack
readdir <- find_onedrive(dir = data_repo, path = "GIS data/MapSPAM data/Yield")
file.names <- dir(readdir, pattern =".tif")
Crop_yield_stack <- raster::stack()
for(i in 1:length(file.names)){
readpath <- paste(readdir, file.names[i], sep="/") # aggregate strings to create filepath
x <- raster(readpath) # read raster
Crop_yield_stack <- addLayer(Crop_yield_stack, x)
rm(x)
print(file.names[i])
}
rm(readdir, file.names, readpath, i)
# add area layer
Area <- Soil_stack[[1]] %>% area()
Soil_stack <- addLayer(Soil_stack, Area)
rm(Area)
# consolidate
Master_stack <- stack(Soil_stack, Crop_area_stack, Crop_yield_stack)
rm(Soil_stack, Crop_area_stack, Crop_yield_stack)
# mask out to UK only
Master_stack <- Master_stack %>% crop(UK) %>% mask(UK)
# read data created using CLC raster as base —
# pasture area and yield data (created in Pasture preprocessing scripts)
# land use specific SOC stocks and pH (created in [Soil preprocessing.R])
# fraction of land use type under mineral soils (i.e. not under peat), created in [Soil preprocessing.R]
CLC_stack <- stack(find_onedrive(dir = "GIS data repository", path = "Created rasters/UK-crop-SOC-tonnes-ha-10km-CLC-SG250-WGS84.tif"),
find_onedrive(dir = "GIS data repository", path = "Created rasters/UK-crop-pH-10km-CLC-SG250-WGS84.tif"),
find_onedrive(dir = "GIS data repository", path = "Created rasters/UK-pasture-SOC-tonnes-ha-10km-CLC-SG250-WGS84.tif"),
find_onedrive(dir = "GIS data repository", path = "Created rasters/UK-pasture-pH-10km-CLC-SG250-WGS84.tif"),
find_onedrive(dir = "GIS data repository", path = "Created rasters/UK-crop-fraction-not-under-peat-10km-CLC-based-WGS84.tif"),
find_onedrive(dir = "GIS data repository", path = "Created rasters/UK-pasture-fraction-not-under-peat-10km-CLC-based-WGS84.tif"),
find_onedrive(dir = "GIS data repository", path = "Created rasters/UK-pasture-area-10km-CLC-based-WGS84-lowland-workable.tif"),
find_onedrive(dir = "GIS data repository", path = "Created rasters/UK-pasture-yield-RB209-10km.tif"))
# resample
CLC_stack <- CLC_stack %>% resample(Master_stack)
# consolidate
Master_stack <- stack(CLC_stack, Master_stack)
rm(CLC_stack)
# convert to dataframe
Dat_main <- Master_stack %>% as.data.frame(xy = T) %>% drop_na(SNDPPT_M_sl4_5km_ll) # nothing special about sand % except it has full land area coverage — NA = sea or water body
# we have zeros, >0s and NAs in the area data, NAs and >0s only in the yield data
Dat_main <- Dat_main %>%
tbl_df %>%
rename(x = x,
y = y,
OC_crop = UK.crop.SOC.tonnes.ha.10km.CLC.SG250.WGS84,
OC_pasture = UK.pasture.SOC.tonnes.ha.10km.CLC.SG250.WGS84,
pH_crop = UK.crop.pH.10km.CLC.SG250.WGS84,
pH_pasture = UK.pasture.pH.10km.CLC.SG250.WGS84,
Min_frac_crop = UK.crop.fraction.not.under.peat.10km.CLC.based.WGS84,
Min_frac_pasture = UK.pasture.fraction.not.under.peat.10km.CLC.based.WGS84,
phys_area_pasture = UK.pasture.area.10km.CLC.based.WGS84.lowland.workable,
yield_pasture = UK.pasture.yield.RB209.10km,
Sand = SNDPPT_M_sl4_5km_ll,
Silt = SLTPPT_M_sl4_5km_ll,
Clay = CLYPPT_M_sl4_5km_ll,
BD = BLDFIE_M_sl4_5km_ll,
Cell_area_km2 = layer) %>%
mutate(pH_crop = pH_crop / 10,
pH_pasture = pH_pasture / 10) # for some reason it's * 10 in original raster
glimpse(Dat_main)
# select out crops with zero area
isnt_zero <- function(x){
  # TRUE if the column contains at least one non-missing, non-zero value;
  # used with select_if() to drop crop columns that are absent everywhere.
  # Equivalent to the original `F %in% (x == 0 | is.na(x)) == T` test, but
  # avoids the reassignable T/F shorthand and the convoluted set test.
  any(!(x == 0 | is.na(x)))
}
Dat_main <- Dat_main %>%
select_if(isnt_zero)
# convert pasture area from km2 to ha
Dat_main <- Dat_main %>%
mutate(phys_area_pasture = phys_area_pasture * 10^6 / 10^4)
# reorder to something sensible
Dat_main <- Dat_main %>%
select(x:Min_frac_pasture, Sand:Cell_area_km2, # physical data
phys_area_pasture, phys_area_barley:phys_area_wheat, # crop area data
yield_pasture, yield_barley:yield_wheat) # crop yield data
# gather up into crop types and metrics (area/yield)
# function to help rename crop variables
first_upper <- function(string){
  # Capitalise the first character and lower-case the remainder,
  # e.g. "OIL CROPS" -> "Oil crops". Vectorised over `string`.
  # Base-R equivalent of the original stringr implementation (inputs here
  # are ASCII crop-column names, so locale differences do not apply).
  paste0(toupper(substr(string, 1, 1)), tolower(substring(string, 2)))
}
Dat_main <- Dat_main %>%
gather(14:ncol(Dat_main), key = "key", value = "value") %>%
mutate(Crop = key %>% str_replace_all("(phys_area_)|(yield_)", "") %>%
first_upper() %>%
str_replace_all("_", " ") %>%
str_replace_all(" other", ", other"),
Metric = key %>%
str_extract("phys_area|yield")) %>%
dplyr::select(-key) %>%
spread(key = Metric, value = value) %>%
rename(Area_ha = phys_area, Yield_tha = yield) %>%
mutate(Area_ha = ifelse(Area_ha == 0, NA, Area_ha)) %>%
drop_na(Area_ha)
# consolidate land-use-specific estimates now data is gathered
Dat_main <- Dat_main %>%
mutate(OC = ifelse(Crop == "Pasture", OC_pasture, OC_crop),
pH = ifelse(Crop == "Pasture", pH_pasture, pH_crop),
Min_frac = ifelse(Crop == "Pasture", Min_frac_pasture, Min_frac_crop))
# tidy up missing data (primarily some few areas where CLC didn't have crop data, but MapSPAM does, c. 600 cells)
# only dropping a few rows at the very end
Dat_main <- Dat_main %>%
mutate(OC = ifelse(is.na(OC), OC_pasture, OC), # first touch — replace NAs due to missing crop OC with equivalent pasture values
OC = ifelse(is.na(OC), OC_crop, OC), # some very few where no pasture data is present, but crop data is
pH = ifelse(is.na(pH), pH_pasture, pH), # exactly the same for pH
pH = ifelse(is.na(pH), pH_crop, pH),
Min_frac = ifelse(is.na(Min_frac), Min_frac_pasture, Min_frac), # and for mineral fraction
Min_frac = ifelse(is.na(Min_frac), Min_frac_crop, Min_frac),
Min_frac = ifelse(is.na(Min_frac), 1, Min_frac)) %>% # we have a default of 1 here
drop_na(OC, pH) %>%
select(x, y, Sand:BD, OC:Min_frac, Cell_area_km2:Yield_tha) # drop land-use-specific variables and reorder
# process rasters so we can make plots at DA level
template <- Master_stack[[9]] # sand % makes a good template
England <- template %>% mask(subset(UK, UK@data[["NAME_1"]]=="England"))
Northern_Ireland <- template %>% mask(subset(UK, UK@data[["NAME_1"]]=="Northern Ireland"))
Scotland <- template %>% mask(subset(UK, UK@data[["NAME_1"]]=="Scotland"))
Wales <- template %>% mask(subset(UK, UK@data[["NAME_1"]]=="Wales"))
Eng_df <- England %>% as.data.frame(xy = T) %>% drop_na(SNDPPT_M_sl4_5km_ll)
NI_df <- Northern_Ireland %>% as.data.frame(xy = T) %>% drop_na(SNDPPT_M_sl4_5km_ll)
Scot_df <- Scotland %>% as.data.frame(xy = T) %>% drop_na(SNDPPT_M_sl4_5km_ll)
Wales_df <- Wales %>% as.data.frame(xy = T) %>% drop_na(SNDPPT_M_sl4_5km_ll)
DA_dat <- bind_rows(list(England = Eng_df, `Northern Ireland` = NI_df, Scotland = Scot_df, Wales = Wales_df), .id = "DA") %>%
dplyr::select(-SNDPPT_M_sl4_5km_ll)
Dat_main <- left_join(Dat_main, DA_dat, by = c("x", "y"))
Dat_main <- Dat_main %>%
select(x, y, DA, Sand:Yield_tha)
# cumulative probability distribution for pH under different crops
Dat_cdf <- Dat_main %>%
mutate(pH = pH %>% round(1)) %>%
group_by(pH, Crop) %>%
summarise(n = n(),
Area_ha = sum(Area_ha)) %>%
arrange(Crop, pH) %>%
group_by(Crop) %>%
mutate(Area_cum = cumsum(Area_ha),
Freq = Area_cum / max(Area_cum))
ggplot(Dat_cdf, aes(x = pH, y = Freq, colour = Crop)) +
geom_line() +
theme_classic()
Dat_cdf_av <- Dat_cdf %>%
#filter(Crop != "Pasture") %>%
group_by(pH) %>%
summarise(Area_ha = sum(Area_ha)) %>%
arrange(pH) %>%
mutate(Area_cum = cumsum(Area_ha),
Freq = Area_cum / max(Area_cum))
ggplot(Dat_cdf %>% filter(Crop != "Rest of crops")) + # we lose 913 ha of average land, and mean we have a neat 12 crops for our facets, not unlucky 13..!
geom_line(aes(x = pH, y = Freq), colour = "darkred") +
geom_line(data = Dat_cdf_av, aes(x = pH, y = Freq), colour = "grey", lty = 2) +
facet_wrap(~Crop, nrow = 4) +
labs(x = "pH", y = "Frequency") +
theme_classic()
ggsave(find_onedrive(dir = data_repo, path = "Output plots/pH CDFs, all crops.png"), width = 8, height = 6)
# pH relationship with yield
Dat_main %>%
group_by(Crop) %>%
mutate(Rel_yield = Yield_tha / sum(Yield_tha)) %>%
ggplot(aes(x = pH, y = Rel_yield)) +
geom_point(colour = "darkred", alpha = 0.05) +
geom_smooth(method = "lm", colour = "grey", lty = 2) +
facet_wrap(~Crop, nrow = 4) +
theme_classic()
# compare to pH distributions for UK arable/grassland from PAAG (2016)
pH_PAAG <- tibble(pH = c(4.5, 5, 5.5, 6, 6.5, 7, 7.5, 8, 8.5),
freq_arable = c(0, 0.01, 0.04, 0.12, 0.22, 0.24, 0.16, 0.14, 0.08),
freq_grass = c(0, 0.02, 0.17, 0.33, 0.27, 0.12, 0.05, 0.03, 0.01)) %>%
mutate(cumfreq_arable = cumsum(freq_arable),
cumfreq_grass = cumsum(freq_grass))
pH_dist_arable <- loess(cumfreq_arable ~ pH, data = pH_PAAG, span = 0.5)
Dat_cdf_av <- Dat_cdf_av %>%
mutate(Freq_pred = predict(pH_dist_arable, pH))
Dat_cdf_av %>%
mutate(error = Freq_pred - Freq,
error2 = error ^ 2) %>%
summarise(error2 = mean(error2)) %>%
mutate(rmse = sqrt(error2))
# use soiltexture package to figure out soil type (details here https://cran.r-project.org/web/packages/soiltexture/vignettes/soiltexture_vignette.pdf)
Dat_soil <- Dat_main %>%
dplyr::select(SAND = Sand, SILT = Silt, CLAY = Clay, OC) %>%
mutate(OC = OC / 10,
tot = SAND + SILT + CLAY) %>%
mutate_at(vars(SAND:CLAY), funs(. / tot * 100)) %>% # it's all basically at 100% but soiltexture seems to require it be exact
dplyr::select(-tot) %>%
as.data.frame()
Dat_main <- Dat_main %>%
mutate(Soil_type = TT.points.in.classes(tri.data = Dat_soil,
class.sys = "UK.SSEW.TT",
PiC.type = "t"))
# operation to add liming factor to main data
DEFRA_LF <- tibble(Soil_type = TT.points.in.classes(tri.data = Dat_soil, class.sys = "UK.SSEW.TT") %>% colnames(),
Liming_factor_arable = c(8, 8, 8, 8, 7.5, 7.5, 7, 7, 7, 6, 6),
Liming_factor_grass = c(6, 6, 6, 6, 5.5, 5.5, 5, 5, 5, 4, 4))
# write_csv(DEFRA_LF, "Defra liming factors.csv")
Liming_factor <- function(Soil_type, Land_use) {
  # Look up the DEFRA liming factor for a (possibly multi-class) soil-type
  # string, averaging the factor across all matched soil classes.
  # Grassland ("Pasture") and arable land use different factor columns.
  type_tokens <- unlist(strsplit(Soil_type, "\\W+"))
  matched <- DEFRA_LF[DEFRA_LF$Soil_type %in% type_tokens, ]
  arable_factor <- mean(matched$Liming_factor_arable)
  grass_factor <- mean(matched$Liming_factor_grass)
  return(ifelse(Land_use == "Pasture", grass_factor, arable_factor))
}
Dat_main <- Dat_main %>%
mutate(LF = map2_dbl(Soil_type, Crop, Liming_factor))
# match up yield response models to data
Dat_yieldres <- bind_rows(read_csv("Holland et al. (2019) yield curves full data.csv"),
read_csv("Woodlands pH rotation model parameters.csv")) %>%
dplyr::select(-p_value, -phos_effect)
# if r2 is less than 10%, assume no yield response (a = 1, b = 0, d = 0)
# examination of the curves reveals all in this category are very flat anyway
Dat_yieldres <- Dat_yieldres %>%
mutate(a_est = ifelse(r2 < 0.1, 1, a_est),
b_est = ifelse(r2 < 0.1, 0, b_est),
d_est = ifelse(r2 < 0.1, 0, d_est))
# summarise and match models to crop data
Dat_yieldres <- Dat_yieldres %>%
group_by(crop, site) %>%
summarise_at(vars(a_est:d_est), funs(mean(.))) %>%
ungroup() %>%
mutate(data_cropmatch = c("Barley", "Pasture", "Oil crops, other", "Oil crops, other", "Cereals, other",
"Potato", "Potato", "Potato", "Barley", "Barley",
"Pulses, other", "Pulses, other", NA, NA, "Cereals, other",
"Cereals, other", "Vegetable", "Wheat", "Rapeseed", "Rapeseed",
"Cereals, other", "Wheat", "Wheat"))
Dat_yieldres <- Dat_yieldres %>%
mutate(model_no = 1:nrow(Dat_yieldres))
# yield response for pasture from Fornara et al. (2011)
# superceded by Woodlands Field pasture model (produces similar results regardless)
# Dat_yieldres_pasture <- read_csv(find_onedrive(dir = data_repo, path = "Fornara yield response.csv"))
# soil fractions, based on Holland paper for Rothamsted and Woburn, stated soil type/typical fractions for Woodlands — refine if possible
Dat_soil <- tibble(site = c("Rothamsted", "Woburn", "Woodlands"),
sand = c(28, 71, 63),
silt = c(52, 17, 25),
clay = c(20, 12, 12))
# function to select most appropriate yield response model for crop and soil type
# selects most similar soil type for which model is available
# Select the yield-response model fitted at the trial site whose soil
# texture (sand/silt/clay fractions) is closest -- by least sum of squared
# differences -- to the supplied cell's texture, among sites that have a
# model for `crop_name`. Relies on the script-level tables Dat_yieldres
# and Dat_soil. Returns the matched model number, or NA when no model
# exists for the crop.
expt_select <- function(sand2, silt2, clay2, crop_name){
  x <- Dat_yieldres %>%
  filter(data_cropmatch == crop_name)
  y <- Dat_soil %>%
  filter(site %in% x$site) %>%
  mutate(SS = (sand - sand2)^2 + (silt - silt2)^2 + (clay - clay2)^2) %>%
  filter(SS == min(SS)) # selection using LSS method for sand/silt/clay fractions
  # NOTE(review): an exact tie in SS would leave several rows in y and make
  # z longer than 1 (map2_dbl/pmap_dbl would then error) -- confirm ties
  # cannot occur for the soil textures in use
  z <- x$model_no[match(y$site, x$site)]
  if(length(z) == 0) z <- NA
  return(z)
}
# Match a yield-response model to every grid-cell x crop row, choosing the
# trial site with the most similar soil texture (expt_select, defined above),
# then attach that model's fitted curve parameters (a, b, d).
# (pmap_dbl over every row is slow; the original author noted this too.)
Dat_main <- Dat_main %>%
mutate(model_no = pmap_dbl(list(Sand, Silt, Clay, Crop), expt_select))
Dat_main <- Dat_main %>%
left_join(Dat_yieldres %>% select(model_no, Crop = data_cropmatch, a_est, b_est, d_est), by = c("model_no", "Crop"))
# calculate relative yields at given pH and possible yields at target pH
rel_yield <- function(A, B, D, pH){
  # Relative yield response to pH from the fitted curve: A + B / (1 + D*pH).
  # The result is floored at 0.1 so that downstream yield ratios
  # (possible / current) never divide by zero or go negative.
  # pmax replaces the original ifelse clamp: same values (NA propagates
  # identically) but vectorised cleanly and without attribute stripping.
  pmax(A + (B / (1 + D * pH)), 0.1)
}
# adjust crop area to reflect fraction under mineral soils (i.e. exclude peat soils)
Dat_main <- Dat_main %>%
mutate(Area_ha = Area_ha * Min_frac)
# function to infer target pH
target_pH <- function(Crop, pH) {
  # RB209 target pH: raise soils to 6.0 for grassland ("Pasture") and to
  # 6.5 for arable crops; soils already at or above target are unchanged.
  # Vectorised over both arguments.
  rb209_target <- ifelse(Crop == "Pasture", 6.0, 6.5)
  return(pmax(pH, rb209_target))
}
# yield increases for croplands
Dat_main <- Dat_main %>%
mutate(Rel_yield = rel_yield(a_est, b_est, d_est, pH),
Target_pH = target_pH(Crop, pH),
Poss_yield = rel_yield(a_est, b_est, d_est, Target_pH),
Yield_increase = Poss_yield / Rel_yield,
pH_diff = Target_pH - pH) %>%
dplyr::select(-(a_est:d_est))
# add in pasture cases
# superceded by Woodlands Field grass model
#Pas_yield_fac <- Dat_yieldres_pasture$mean[1] - 1
#
#Dat_main <- Dat_main %>%
# mutate(Rel_yield = ifelse(Crop == "Pasture",
# 1 / (1 + Pas_yield_fac * pH_diff),
# Rel_yield),
# Poss_yield = ifelse(Crop == "Pasture",
# Rel_yield * (1 + Pas_yield_fac * pH_diff),
# Poss_yield),
# Yield_increase = ifelse(Crop == "Pasture",
# Poss_yield / Rel_yield,
# Yield_increase))
# load SOC response curve functions derived in separate script and apply to main data
# function output is fractional
# Provides Crop_SOC_RR_year() and the Grass_SOC_RR_year constant used below.
load("SOC response functions.RData")
# Annual fractional SOC response scaled to a 20-year horizon (mirrored by the
# "/ 20" when the CO2 flux is annualised further down).
Dat_main <- Dat_main %>%
  mutate(SOCchange_frac = Crop_SOC_RR_year(pH_diff = pH_diff) * 20)
# add in pasture cases - C sequestration predictions using data from Fornara et al. (2011)
Dat_main <- Dat_main %>%
  mutate(SOCchange_frac = ifelse(Crop == "Pasture", Grass_SOC_RR_year * pH_diff * 20, SOCchange_frac))
# we avoided dropping crops without matched models earlier to allow us to add in pasture using a different approach;
# now we need to drop those crops which still have no data for yield etc.
Dat_main <- Dat_main %>%
  drop_na(Yield_increase)
# EI for different crops in g CO2-eq per kg, data from Feedprint for on-farm production only
# 'cereals, other' classed as oats, 'oil crops. other' classed as linseed, 'pulses, other' classed as beans
# EI for vegetables based on root crops/onions/cabbages figure from Wallen et al. (2004) (Feedprint doesn't do vegetable EFs)
# EI for pasture based on feedprint EI for grass silage
# NOTE(review): the EI values are matched positionally to unique(Crop), so the
# lookup silently depends on the row order of Dat_main — verify the ordering.
Dat_EI <- tibble(Crop = Dat_main %>% pull(Crop) %>% unique(),
                 EI = c(235, 343, 465, 1222, 226, 766, 984, 500, 349))
Dat_main <- Dat_main %>%
  left_join(Dat_EI, by = "Crop")
# calculate equivalent crop production emissions per hectare, and EI based emissions 'savings' resulting from yield improvements
# 10^-6 converts g to tonnes and 10^3 converts yield t/ha to kg/ha, giving
# t CO2-eq per hectare; liming raises yield, so the same output is produced
# at a lower emissions intensity (EI_limed).
Dat_main <- Dat_main %>%
  mutate(GHG_tha_nolime = EI * 10^-6 * Yield_tha * 10^3,
         EI_limed = EI / Yield_increase,
         GHG_tha_lime = EI_limed * 10^-6 * Yield_tha * 10^3,
         GHGmit_yield = GHG_tha_nolime - GHG_tha_lime)
# calculate emissions mitigation from SOC accumulation
# 44/12 converts C mass to CO2 mass; the final / 20 annualises the stock
# change over the 20-year horizon used when SOCchange_frac was built.
Dat_main <- Dat_main %>%
  mutate(OC = ifelse(Crop != "Pasture", OC * 0.7, OC), # from IPCC 2019 guidelines, FLU for cropland
         OC_lime = OC + OC * SOCchange_frac,
         GHGmit_SOC = ((OC_lime - OC) * 44/12) / 20)
# calculate emissions from lime application
# LF is a lime-requirement factor computed upstream (not visible here);
# embodied (manufacture), direct (per IPCC 2006) and field-operation diesel
# emissions are costed separately.
Dat_main <- Dat_main %>%
  mutate(Limerate = (LF * (pH_diff + 0.2)) / 5, # Assuming a 5 year interval between applications (based on a variety of data sources) + 0.2 pH unit overshoot as recommended in RB209
         Limeemb_GHG = 0.074 * Limerate, # Kool et al. (2012)
         Limedir_GHG = 0.125 * 44/12 * Limerate, # IPCC (2006)
         Dies_GHG = (336 * 0.7) / 36.9 * 3.165 * 10^-3) # Williams et al (2006) for diesel usage * DEFRA/DECC for EF
# sale values for different crops from FMH 17/18, all in 2017 GBP
# for grass, estimate is mean production cost from FMH 17/18
# linseed uses OSR values, potatoes assumes dual purpose and price is weighted according to relative yields
# vegetables takes data for potatoes — very similar to most veg prices
# NOTE(review): as with Dat_EI above, values are matched positionally to
# unique(Crop) and therefore depend on Dat_main's row order.
Dat_saleval <- tibble(Crop = Dat_main %>% pull(Crop) %>% unique(),
                      Maincrop_saleval = c(22.5, 145, 155, 325, 113, 200, 325, 113, 165),
                      Bycrop_saleval = c(0, 55, 50, 0, 0, 0, 0, 0, 50), # secondary crop e.g. straw
                      Bycrop_ratio = c(0, 0.55, 0.60, 0, 0, 0, 0, 0, 0.53)) # ratio of secondary crop to main crop yield
# join sale values to main data
Dat_main <- Dat_main %>% left_join(Dat_saleval, by = "Crop")
# calculate costs and benefits
# Net cost per hectare = (lime + spreading contractor) - extra crop revenue
# from the liming yield uplift; negative values mean liming pays for itself.
Dat_main <- Dat_main %>%
  mutate(Crop_revenue = Yield_tha * Maincrop_saleval + Yield_tha * Bycrop_ratio * Bycrop_saleval,
         Crop_revenue_lime = Crop_revenue * Yield_increase,
         Crop_revenue_net = Crop_revenue_lime - Crop_revenue,
         Lime_cost = Limerate * 35, # FMH 17/18 lime cost,
         Cont_cost = Limerate * 4, # FMH 17/18 contractor cost
         Cost_net_ha = (Lime_cost + Cont_cost) - Crop_revenue_net
         )
#####################
# added 21/01/2020 to include N2O predictions using the Hillier/Cornulier model
#####################
####################
# augmenting main dataset with additional variables req'd for N2O model
library(ncdf4)
# 8 year dataset of wet days / month
# NOTE(review): the return value of nc_open() is discarded — brick() below
# re-reads the file itself, so this call only serves manual inspection.
nc_open(find_onedrive(dir = data_repo, path = "GIS data/CRU TS v4-03/cru_ts4.03.2011.2018.wet.dat.nc"))
# convert to raster brick
wet_days <- brick(find_onedrive(dir = data_repo, path = "GIS data/CRU TS v4-03/cru_ts4.03.2011.2018.wet.dat.nc"), varname = "wet")
# cropping for efficiency, but not masking until we resample
wet_days <- wet_days %>% crop(UK)
# resample to master stack values
wet_days <- wet_days %>% resample(Master_stack[[1]])
# mask to UK
wet_days <- wet_days %>% mask(UK)
# convert to df
Dat_wetdays <- wet_days %>%
  as.data.frame(xy = T) %>%
  as_tibble() %>%
  drop_na()
# gather and prep year variable
# Layer names are presumably of the form "X<year>.<...>" (judging by the two
# substitutions): strip everything after the first dot and the leading "X"
# to recover the calendar year.
Dat_wetdays <- Dat_wetdays %>%
  gather(-x, -y, key = "Year", value = "Wet_days") %>%
  mutate(Year = Year %>%
           str_replace_all("\\..+", "") %>%
           str_replace_all("X", "") %>%
           as.numeric())
# annual sums
# n records how many monthly layers contributed to each annual sum; the
# table() call below is the sanity check on it.
Dat_wetdays <- Dat_wetdays %>%
  group_by(x, y, Year) %>%
  summarise(Wet_days = sum(Wet_days),
            n = n())
Dat_wetdays$n %>% table()
# 8-year average
Dat_wetdays <- Dat_wetdays %>%
  group_by(x, y) %>%
  summarise(Wet_days_mean = mean(Wet_days),
            Wet_days_sd = sd(Wet_days),
            Wet_days_se = sd(Wet_days) / sqrt(n()),
            Wet_days_min = min(Wet_days),
            Wet_days_max = max(Wet_days)) %>%
  ungroup()
# anti_join(Dat_wetdays, Dat_main, by = c("x", "y")) # anti join checked, all required cells match fine (some non-joined cells in areas cut from Dat_main)
#ggplot(Dat_wetdays, aes(x = x, y = y, fill = Wet_days_se)) +
# geom_raster() +
# coord_quickmap() +
# theme_void()
# read in WorldClim temperature data
# Twelve monthly mean-temperature GeoTIFFs (wc2.0_5m_tavg_01..12) are
# stacked into one 12-layer raster.
temperature <- raster::stack()
for(i in 1:12){
  path <- find_onedrive(dir = data_repo, path = paste("GIS data/Average temperature (oC)/wc2.0_5m_tavg_", formatC(i, width=2, flag="0"), ".tif", sep=""))
  x <- raster(path)
  temperature <- addLayer(temperature, x)
}
rm(x, i , path)
# convert to UK DF and calculate degree days
library(lubridate)
temperature <- temperature %>%
  crop(UK) %>%
  resample(Master_stack[[1]]) %>%
  mask(UK)
# Degree days per cell = sum over months of (mean monthly temp x days in
# month); 2019 only serves to give days_in_month() a non-leap calendar.
Dat_temp <- temperature %>%
  as.data.frame(xy = T) %>%
  as_tibble() %>%
  drop_na() %>%
  gather(-x, -y, key = "Key", value = "oC") %>%
  mutate(Date = paste0("01/", str_sub(Key, start = -2L, end = -1L), "/2019") %>% dmy(), # random non-leap year
         Days = days_in_month(Date),
         Degree_days = oC * Days) %>%
  group_by(x, y) %>%
  summarise(Degree_days = sum(Degree_days)) %>%
  ungroup()
# N rate data based on analysis of BSFP reports
Dat_fert <- read_csv(find_onedrive(dir = data_repo, path = "N rate data/BSFP Fertiliser rates annual summary.csv"))
Dat_man <- read_csv(find_onedrive(dir = data_repo, path = "N rate data/BSFP manure rates summary.csv"))
# Average the manure rate over the year columns (3:10) and keep only the
# "N (kg / ha)" nutrient rows, renamed for the joins below.
Dat_man <- Dat_man %>%
  gather(3:10, key = "Year", value = "Rate") %>%
  group_by(`Crop type`, Nutrient) %>%
  summarise(Rate = mean(Rate)) %>%
  filter(Nutrient == "N (kg / ha)") %>%
  select(-Nutrient) %>%
  rename(Crop_name3 = `Crop type`, Mrate_mean = Rate)
# Total N rate per model crop = mean fertiliser rate + mean manure rate.
# NOTE(review): the two hand-typed name vectors are matched positionally to
# arrange(Crop), so they are only correct for this exact alphabetical crop set.
Dat_Nrate <- Dat_main %>%
  distinct(Crop) %>%
  arrange(Crop) %>%
  mutate(Crop_name2 = c("Winter barley", "Minor cereals", "Linseed", "Grass < 5", "Potatoes", "Other arable", "Oilseed Rape", "Rootcrops and brassicae", "Wheat"),
         Crop_name3 = c("Winter sown", "Winter sown", "Spring sown", "Grass", "Spring sown", "Winter sown", "Winter sown", "Spring sown", "Winter sown")) %>%
  left_join(Dat_fert %>% select(Crop_name2, Frate_mean = Nrate_mean), by = "Crop_name2") %>%
  left_join(Dat_man %>% select(Crop_name3, Mrate_mean), by = "Crop_name3") %>%
  mutate(Nrate = Frate_mean + Mrate_mean) %>%
  select(Crop, Nrate)
#################
# add data into main workflow and calculate new variables
# join up new data for N2O model to new model dataset
Dat_model <- Dat_main %>%
  select(x, y, Crop, Clay, OC, BD, pH, SOCchange_frac, Target_pH) %>%
  left_join(Dat_wetdays %>% select(x, y, Wet_days_mean), by = c("x", "y")) %>%
  left_join(Dat_temp, by = c("x", "y")) %>%
  left_join(Dat_Nrate, by = "Crop")
# fix one row where Wet_days_mean couldn't be joined (impute from cell next door)
# NOTE(review): lag() fills from the previous row, so this only works while
# the unmatched cell is adjacent (in row order) to a matched neighbour.
Dat_model <- Dat_model %>%
  mutate(Wet_days_mean = ifelse(is.na(Wet_days_mean),
                                lag(Wet_days_mean),
                                Wet_days_mean))
# function to calculate OC percentage based on C (t/ha) and BD (kg / m2)
# t/ha -> kg/m^2 is * 10^3 (t -> kg) and * 10^-4 (ha -> m^2); the / 0.3
# presumably normalises to the 0-30 cm sampling depth — confirm that this
# matches how the bulk-density layer was derived.
OC_perc <- function(BD_kg_m2, C_t_ha){
  stock_kg_m2 <- C_t_ha * 10^3 * 10^-4 * 1 / 0.3
  carbon_fraction <- stock_kg_m2 / BD_kg_m2
  100 * carbon_fraction
}
# calculate required variables
# The N2O model needs a grassland flag and SOC as a percentage before and
# after the liming-driven SOC change.
Dat_model <- Dat_model %>%
  mutate(Is_grass = Crop == "Pasture",
         C_perc_initial = OC_perc(BD, OC),
         C_perc_final = C_perc_initial * (1 + SOCchange_frac))
#################
# Make predictions with N2O model
# model prepared in script [Hillier-Cornulier N2O model.R]
# Loads (at least) the fitted model `jamM15c` and the helper
# df.M15c.complete(), both used below.
load(find_onedrive(dir = data_repo, path = "Jon H N2O paper/H-C N2O model lite.RData"))
# method from TC to negate study effect
# METHOD 1: predict using the studyID with random effect estimate closest to zero (approximate).
# finding the studyID with random effect estimate closest to zero
# NOTE(review): `fixed= T` relies on the reassignable T (TRUE is safer), and
# the == min() comparison can return several IDs on an exact tie.
RE.sub <- grep("s(studyID.f)", jamM15c$term.names, fixed= T) # identify which of the coefficients are random effects
RE.names <- levels(jamM15c$model$studyID.f) # studyID names of the random effects
min_id <- RE.names[abs(jamM15c$coefficients[RE.sub]) == min(abs(jamM15c$coefficients[RE.sub]))] # returns " 53.42 -7.52200310 41220052003-10-152004-11-30"
# we need base + fertilised predictions for initial pH + optimal pH
# i.e. 4x predictions
# revised 27/02/2020 following TC observation that pH-mediated mitigation of naturally-occuring N2O is still a management effect
# in other words, we're not just interested in fertiliser induced N2O and we can drop the 2 baseline predictions (Fert01 = 0)
# only 2x predictions required
# baseline N2O emissions, inital pH
#ipH_base <- data.frame(Fert01 = 0, lowNO3 = 0, highNO3 = 0, Grasslands = Dat_model$Is_grass,
# pH = Dat_model$pH, Clay = Dat_model$Clay, SOC = Dat_model$C_perc_initial,
# WetDays.exp = Dat_model$Wet_days_mean, DegDays.exp= Dat_model$Degree_days,
# N.rate = Dat_model$Nrate, studyID.f= " 32.58 119.702007 8 8120082007-08-152007-11-04") # some random studyID.f (the first one)
#ipH_base <- df.M15c.complete(ipH_base)
#Dat_model <- Dat_model %>%
# mutate(ipH_base_pred = exp(predict(jamM15c, newdata = ipH_base)))
# fertiliser-induced N2O emissions, initial pH
# One newdata row per grid cell at the *current* pH/SOC; Fert01 = 1 marks a
# fertilised prediction and studyID.f is the ~zero random-effect study.
ipH_fert <- data.frame(Fert01 = 1, lowNO3 = 0, highNO3 = 1, Grasslands = Dat_model$Is_grass,
                       pH = Dat_model$pH, Clay = Dat_model$Clay, SOC = Dat_model$C_perc_initial,
                       WetDays.exp = Dat_model$Wet_days_mean, DegDays.exp= Dat_model$Degree_days,
                       N.rate = Dat_model$Nrate, studyID.f= min_id) # min (~0) random effect studyID.f
ipH_fert <- df.M15c.complete(ipH_fert)
# Back-transform from the model's log scale; the "- 1" presumably mirrors a
# log(y + 1) transform in the fitted model — TODO confirm in the model script.
Dat_model <- Dat_model %>%
  mutate(ipH_fert_pred = exp(predict(jamM15c, newdata = ipH_fert)) - 1)
# baseline N2O emissions, final pH
#fpH_base <- data.frame(Fert01 = 0, lowNO3 = 0, highNO3 = 0, Grasslands = Dat_model$Is_grass,
# pH = Dat_model$Target_pH, Clay = Dat_model$Clay, SOC = Dat_model$C_perc_final,
# WetDays.exp = Dat_model$Wet_days_mean, DegDays.exp= Dat_model$Degree_days,
# N.rate = Dat_model$Nrate, studyID.f= " 32.58 119.702007 8 8120082007-08-152007-11-04") # some random studyID.f (the first one)
#fpH_base <- df.M15c.complete(fpH_base)
#Dat_model <- Dat_model %>%
# mutate(fpH_base_pred = exp(predict(jamM15c, newdata = fpH_base)))
# fertiliser-induced N2O emissions, final pH
# Same design as ipH_fert above, but evaluated at the post-liming target pH
# and the correspondingly adjusted SOC percentage.
fpH_fert <- data.frame(Fert01 = 1, lowNO3 = 0, highNO3 = 1, Grasslands = Dat_model$Is_grass,
                       pH = Dat_model$Target_pH, Clay = Dat_model$Clay, SOC = Dat_model$C_perc_final,
                       WetDays.exp = Dat_model$Wet_days_mean, DegDays.exp= Dat_model$Degree_days,
                       N.rate = Dat_model$Nrate, studyID.f= min_id) # min (~0) random effect studyID.f
fpH_fert <- df.M15c.complete(fpH_fert)
Dat_model <- Dat_model %>%
  mutate(fpH_fert_pred = exp(predict(jamM15c, newdata = fpH_fert)) - 1)
# calculate direct N2O emissions difference in kg CO2-eq / ha
# 44/28 converts N2O-N to N2O mass; 298 is the 100-year GWP for N2O (AR4).
Dat_model <- Dat_model %>%
  mutate(cN2O_CO2eq = (fpH_fert_pred - ipH_fert_pred) * 44/28 * 298)
# quick distribution check of the pH-driven N2O change
qplot(Dat_model$cN2O_CO2eq)
mean(Dat_model$cN2O_CO2eq, na.rm = T)
#####################
# add N2O predictions to main data and scale
# 10^-3 converts kg to tonnes CO2-eq/ha; the sign flip makes a *reduction*
# in N2O a positive mitigation term.
# NOTE(review): this relies on Dat_main and Dat_model still sharing identical
# row order (there is no join key) — verify neither frame was re-sorted.
Dat_main <- Dat_main %>%
  mutate(GHGmit_N2O = -(as.numeric(Dat_model$cN2O_CO2eq) * 10^-3))
# calculate GHG balance in tonnes / ha
Dat_main <- Dat_main %>%
  mutate(Tot_GHGmit = GHGmit_yield + GHGmit_SOC + GHGmit_N2O, # tonnes CO2-eq / ha
         Tot_GHG = Limeemb_GHG + Limedir_GHG + Dies_GHG, # tonnes CO2-eq / ha
         GHG_balance = Tot_GHG - Tot_GHGmit) # GHG balance (sources - sinks, tonnes CO2-eq / ha)
# calculate abatement and MAC
# MAC = marginal abatement cost (GBP per tonne CO2-eq abated); the *_only
# variants isolate each mitigation channel against the full lime emissions.
Dat_main <- Dat_main %>%
  mutate(Abatement = -GHG_balance * Area_ha, # abatement for full crop area in grid cell, tonnes CO2-eq
         Abatement_SOConly = -(Tot_GHG - GHGmit_SOC) * Area_ha,
         Abatement_EIonly = -(Tot_GHG - GHGmit_yield) * Area_ha,
         Abatement_N2Oonly = -(Tot_GHG - GHGmit_N2O) * Area_ha,
         Cost_net = Cost_net_ha * Area_ha,
         MAC = Cost_net / Abatement)
# save .RData for plots and decision tree model
save(Dat_main, UK, Dat_cdf, file = "Full model output df.RData")
|
f31f9a235abbf0c13e45c7ce52f769f6cd3c4cbd | b946c489e67a65008de77a6a8da93f4d398e3f33 | /R/reco.R | 6b84eec253b387b4aa2a8b75f317c4410286b88d | [] | no_license | EPauthenet/fda.oce | 6380852e8998a0b956b46acc044c71717f5e1859 | 38c6b1d2dd6a4645235f7ec8db2dda7caad56c65 | refs/heads/master | 2022-12-10T14:37:26.095737 | 2021-02-01T13:14:00 | 2021-02-01T13:14:00 | 138,511,460 | 4 | 3 | null | 2022-12-07T02:45:42 | 2018-06-24T19:58:42 | R | UTF-8 | R | false | false | 1,580 | r | reco.R | #' Reconstruction of hydrographic profiles
#'
#' This function reconstructs hydrographic profiles with a chosen number of Principal Components (PCs).
#'
#' @param pca list containing the modes produced by the function \code{fpca}
#' @param pc matrix of principal components, as computed by \code{proj} (one row per profile).
#' @param Ntrunc how many PCs to use in the reconstruction, default is set to the total number of PC, \code{Ntrunc = nbas*ndim}.
#'
#' @author Etienne Pauthenet \email{<etienne.pauthenet@gmail.com>}, David Nerini \code{<david.nerini@univ-amu.fr>}, Fabien Roquet \code{<fabien.roquet@gu.se>}
#' @references Pauthenet et al. (2017) A linear decomposition of the Southern Ocean thermohaline structure. Journal of Physical Oceanography, http://dx.doi.org/10.1175/JPO-D-16-0083.1
#' @references Ramsay, J. O., and B. W. Silverman, 2005: Functional Data Analysis. 2nd Edition Springer, 426 pp., Isbn : 038740080X.
#'
#' @seealso \code{\link{bspl}} for bsplines fit on T-S profiles, \code{\link{fpca}} for functional principal component analysis of T-S profiles, \code{\link{proj}} for computing Principal Components.
#' @export
reco <- function(pca, pc, Ntrunc) {
  # Basis size per dimension, number of profiles, and the number of
  # dimensions implied by the width of the PC matrix.
  nbas <- pca$nbas
  nobs <- dim(pc)[1]
  ndim <- dim(pc)[2] / nbas
  # Default: reconstruct with every available principal component.
  if (missing(Ntrunc)) {
    Ntrunc <- nbas * ndim
  }
  coef <- array(NaN, c(nbas, nobs, ndim))
  for (k in seq_len(ndim)) {
    # Rows of the mean/eigenvector matrices belonging to dimension k.
    d <- ((k - 1) * nbas + 1):(k * nbas)
    # Mean coefficients replicated per profile, plus the truncated
    # eigen-expansion.  drop = FALSE keeps both slices as matrices so the
    # matrix product stays well-defined even when Ntrunc == 1.
    coef[, , k] <- matrix(rep(pca$Cm[d], nobs), nbas, nobs) +
      pca$vectors[d, 1:Ntrunc, drop = FALSE] %*% t(pc[, 1:Ntrunc, drop = FALSE])
  }
  # Kept for backward compatibility: the function's documented behaviour is
  # to create `fdobj_reco` in the global environment via <<-.
  fdobj_reco <<- fda::fd(coef, pca$basis, pca$fdnames)
}
|
fd19ecca3a5a7ada13814ccd698c88d14a55b8a1 | 8702f0dd56b29655402a4d4a191d67168cfd5878 | /AEGIS/Machine_Learning/LogisticRegression.R | e3fa5087dfad0aeeab4d2101435bf4cdf3a6494b | [] | no_license | vipin752/datascience-machineLearning | 6efb4f6e5a69dd7b18a75d8f257f44d3d2648811 | 82b1f6b97a0250371efe9a1737da7ec4e13249f5 | refs/heads/master | 2020-03-09T06:02:51.749802 | 2019-09-29T19:24:34 | 2019-09-29T19:24:34 | 128,617,995 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 958 | r | LogisticRegression.R | #Source video link: https://www.youtube.com/watch?v=mteljf020EE
#1: Load and try to understand the data
#install.packages("ISLR")
library(ISLR)
# NOTE(review): attach() puts Smarket's columns on the search path; the bare
# `Direction` reference further down depends on it.  data=/$ access is safer.
attach(Smarket)
View(Smarket)
?Smarket
summary(Smarket)
# pairwise correlations and scatterplot matrix of the 8 numeric columns
cor(Smarket[,1:8])
pairs(Smarket[,1:8])
#2 Split the data into training and test
# 75/25 split; floor() guarantees an integer sample size
smp_size <- floor(0.75 * nrow(Smarket))
## set the seed to make your partition reproducible
set.seed(123)
train_ind <- sample(seq_len(nrow(Smarket)), size = smp_size)
train <- Smarket[train_ind, ]
test <- Smarket[-train_ind, ]
train_ind
# true market direction for the held-out rows (uses the attach() above)
direction_test <- Direction[-train_ind]
direction_test
#3: Fit logistic regression
stock_model = glm(Direction~ Lag1+Lag2+Lag3+Lag4+Lag5+Volume, 
                  data = train, family = binomial)
summary(stock_model)
# predicted P(Up) on the test set, classified with a 0.5 cutoff
model_pred_probs = predict(stock_model, test, type="response")
model_pred_dir = rep("Down", nrow(test))
str(model_pred_dir)
model_pred_dir[model_pred_probs > 0.5] = "Up"
# confusion matrix: predicted vs actual direction
table(model_pred_dir, direction_test)
?glm
|
868356756efef3a74e80b8f5ec81e681369191d3 | eff5959757dca0c497fa636490cdedeea36443bc | /codeLibrary/R/R/ggplot_part2.R | 1c119647c97533c7b73b4de83847889f583440b3 | [] | no_license | profSeeger/LVM-Code | 1b5ae20d18680ea91597ebcdbc3b799b0f5555d3 | f3886f0dcdd3267eda3f7d6c1406f2f0b7db624f | refs/heads/master | 2023-08-03T23:31:34.049529 | 2023-08-01T12:13:28 | 2023-08-01T12:13:28 | 177,437,983 | 0 | 0 | null | 2023-03-04T15:16:36 | 2019-03-24T16:10:06 | JavaScript | UTF-8 | R | false | false | 4,605 | r | ggplot_part2.R | # Introduction #######################################################################################
# The tidyverse is an opinionated collection of R packages designed for data science.
# All packages share an underlying design philosophy, grammar, and data structures.
# You can optionally nstall the complete tidyverse with: install.packages("tidyverse")
# or you can install individual componets such as dplyr or ggplot2 individually.
# For more info see https://www.tidyverse.org
# The dplyr package provides a set of tools for efficiently manipulating datasets in R.
# dplyr is an upgrade to plyr, focussing on only data frames. With dplyr, anything you can do
# to a local data frame you can also do to a remote database table
# install.packages("dplyr")
# library(dplyr)
# ggplot2 is a system for declaratively creating graphics, based on The Grammar of Graphics.
# You provide the data, tell ggplot2 how to map variables to aesthetics, what graphical primitives
# to use, and it takes care of the details. For more details see https://ggplot2.tidyverse.org
# install.packages("ggplot2")
# library(ggplot2)
# options(stringsAsFactors = FALSE)
# Load Packages #########################################################################################
# Because we will be often using both dplyr and ggplot2, we will just load the tidyverse from the start
install.packages("tidyverse")
library(tidyverse)
# Load CSV data #########################################################################################
# Load herds.cvs from working directory
df_herds <- read.csv('herds.csv', header = TRUE, sep = ",")
head(df_herds) # by default this prints the first 6 records. Optionally to print 3 rows head(df_herds, 3)
# Display a summary of the df_herds dataset
summary(df_herds)
# ggplot scatterplot ########################################################################################
# create a dot plot of all herds
# plot the data using ggplot
ggplot(data = df_herds, aes(x = rancher, y = herdSize)) +
geom_point() +
labs(x = "Rancher",
y = "Herd Size",
title = "Vaccine Study Ranches and Herd Size",
subtitle = "Dr. Seeger, April 2020")
# ggplot alternative format style #######################################################################
p <- ggplot(df_herds, aes(x=rancher, y= herdSize))
p + geom_point(size = 4) #change size of the dot
p + geom_point(aes(colour = factor(herdSize)), size = 4) #See example below to make your own color breaks
p + geom_point(aes(colour = factor(state)), size = 4) #Uses the state - so Wyoming is pink for both
#optionally use shapes instead of colored dots
p + geom_point(aes(shape = factor(state)), size = 4) #there appears to be only six shapes
l <-labs(x = "Rancher", y = "Herd Size",title = "Vaccine Study Ranches and Herd Size", subtitle = "Dr. Seeger, April 2020")
p + geom_point(size = 4) + l
# ggplot scatterplot colored at herd size intervals ######################################################
ggplot(data = df_herds, aes(x = rancher, y = herdSize)) +
geom_point(aes(colour = cut(herdSize, c(-Inf, 200, 400, Inf))), size = 5) +
scale_color_manual(name = "Herd Size",
values = c("(-Inf,200]" = "black",
"(200,400]" = "yellow",
"(400, Inf]" = "red"),
labels = c("<= 200", "200 to 400", "> 400")) +
labs(x = "State",
y = "Herd Size",
title = "Vaccine Study Ranches and Herd Size",
subtitle = "Dr. Seeger, April 2020")
# ggplot scatterplot grouped by state #####################################################################
# not working !!!!!
#next make a new plot that shows the total animals in each state
ggplot(data = df_herds, aes(x = state, y = herdSize)) +
stat_summary(fun.y = sum, # adds up all observations for the state
geom = "bar") + # or "line"
geom_point(aes(colour = cut(qsec, c(-Inf, 100, 200, Inf))),size = 5) +
scale_color_manual(name = "herdSize",
values = c("(-Inf,100]" = "black",
"(100,200]" = "yellow",
"(200, Inf]" = "red"),
labels = c("<= 17", "17 < qsec <= 19", "> 19")) +
labs(x = "State",
y = "Herd Size",
title = "Vaccine Study Ranches and Herd Size",
subtitle = "Dr. Seeger, April 2020")
# ggplot bar chart #########################################################################################
# A count of the states expressed in a bar chart
a <- ggplot(df_herds, aes(factor(state)))
a + geom_bar()
a + geom_bar(aes(fill = factor(state)))
|
e994271b7f8d682305cd7e38ef056c617c6c62c3 | 6236bf2c0b93422485fc3b3affe682843bf71895 | /doc/code/data.R | 49c564a865d0be1613297d69e624772853299572 | [] | no_license | zjiayao/stat7614 | 0a6895876a903979fa0f2bed68844076e09f8d47 | a5f639642397e49e5aed8cd36c460f2b3fc966b6 | refs/heads/master | 2021-03-19T13:05:35.791009 | 2018-09-16T22:54:20 | 2018-09-16T22:54:20 | 121,031,089 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,176 | r | data.R | library(dplyr)
# Clean the raw grad-admissions CSV and build 80/20 train/test splits.
# (dplyr is attached above for the %>% pipelines.)
raw = read.csv(url("https://i.cs.hku.hk/~jyzhang/misc/cs_reduced.csv"),
               header = TRUE) %>% data.frame()
# drop identifier / free-text columns that carry no signal
raw$id <- NULL
raw$university <- NULL
raw$season <- NULL
# collapse the three GRE sub-scores into one total, then drop the parts
raw$gre = raw$gre_verbal+raw$gre_quant+raw$gre_writing
raw$gre_verbal = NULL
raw$gre_quant = NULL
raw$gre_writing <- NULL
raw$post_data <- NULL
raw$post_timestamp <- NULL
# Extract the decision month ("01".."12") from the POSIX timestamp.
# BUG FIX: the pipeline previously continued onto a new line *starting* with
# %>%, which is a syntax error in R — the operator must end the line.
raw$decision_month <- as.Date.POSIXct(raw$decision_timestamp) %>%
  format(format="%m") %>%
  as.factor()
raw$decision_timestamp <- NULL
raw$decision_date <- NULL
raw$decision_method[which(raw$decision_method=="Postal Service")] <- "Other"
# NOTE(review): as.numeric() on a factor yields level *indices*; this equals
# the month number only when every month level is present — consider
# as.numeric(as.character(raw$decision_month)) to be safe.
raw$decision_month = as.numeric(raw$decision_month )
# binarise degree: anything that is not a PhD counts as a Masters
raw$degree[which(raw$degree!= "PhD")] <- "MS"
raw$major = NULL
# encode the target: Accepted -> 1, everything else -> 0, stored as a factor
raw$decision = ifelse(raw$decision== "Accepted", 1, 0)
raw$decision = as.factor(raw$decision)
# 80/20 split.  NOTE(review): no set.seed(), so the split changes every run —
# add one if reproducibility matters.  seq_len() replaces 1:nrow() (safe idiom).
train_index <- sample(seq_len(nrow(raw)), 0.8 * nrow(raw))
test_index <- setdiff(seq_len(nrow(raw)), train_index)
x_train <- raw[train_index,]
x_train$decision = NULL
y_train <- data.frame(raw[train_index,]$decision)
colnames(y_train) =c('decision')
x_test <- raw[test_index,]
x_test$decision <- NULL
y_test <- data.frame(raw[test_index,]$decision)
colnames(y_test) =c('decision')
|
8eafcce44958304f0483a1b81ca22b61d8675418 | cf606e7a3f06c0666e0ca38e32247fef9f090778 | /test/integration/example-models/bugs_examples/vol1/mice/mice.data.R | 7067744884fc70867500a18614f2ea4ba9c7c4fd | [
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown"
] | permissive | nhuurre/stanc3 | 32599a71d5f82c759fd6768b8b699fb5f2b2d072 | 5612b357c1cd5a08cf2a57db97ce0e789bb87018 | refs/heads/master | 2023-07-05T02:27:08.083259 | 2020-11-12T15:37:42 | 2020-11-12T15:37:42 | 222,684,189 | 0 | 0 | BSD-3-Clause | 2019-11-19T11:50:39 | 2019-11-19T11:50:38 | null | UTF-8 | R | false | false | 689 | r | mice.data.R | N_uncensored <-
65L
# Classic BUGS vol. 1 "mice" survival example: death times in M = 4
# treatment groups, with some animals still alive at follow-up
# (right-censored, see censor_time below).
N_censored <-
15L
# number of treatment groups
M <-
4
# group index (1..4) for each uncensored animal
group_uncensored <-
c(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4)
# group index for each censored animal
group_censored <-
c(1, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4)
# observed death times for the uncensored animals
t_uncensored <-
c(12, 17, 21, 25, 11, 26, 27, 30, 13, 12, 21, 20, 23, 25, 23,
29, 35, 31, 36, 32, 27, 23, 12, 18, 38, 29, 30, 32, 25, 30, 37,
27, 22, 26, 28, 19, 15, 12, 35, 35, 10, 22, 18, 12, 31, 24, 37,
29, 27, 18, 22, 13, 18, 29, 28, 16, 22, 26, 19, 17, 28, 26, 12,
17, 26)
# last observation time for each censored animal
censor_time <-
c(40, 40, 40, 40, 40, 40, 40, 40, 10, 24, 40, 40, 20, 29, 10)
|
03c55a97b3b0d72171b8afb9d1f78265f00370c7 | 599d3aa9869829f5b6508d8169aab8ddcdb907b7 | /build_package.R | aa7e65f3d5b2a1ebe36ca98ea1ebfc1353705dd6 | [
"MIT"
] | permissive | databrew/portfoliodash | a31290e1b99354a12ad338653212158f19b50568 | 0ceeb9b21d5069aee5f7ddeb167bb31462cc761f | refs/heads/master | 2021-05-14T04:59:35.498354 | 2018-04-11T19:28:38 | 2018-04-11T19:28:38 | 116,656,208 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 307 | r | build_package.R | #! /usr/bin/R
# Package build helper: regenerate the roxygen documentation and reinstall
# the package from the current directory.
# reconstruct_data <- FALSE
library(devtools)
library(roxygen2)
library(rmarkdown)
document('.')
install('.')
# Optional extra steps (left disabled): render the README and vignette, and
# rebuild the package datasets from data-raw/.
# render('README.Rmd')
# setwd('vignettes')
# render('vignette.Rmd')
# setwd('..')
# if(reconstruct_data){
#   setwd('data-raw')
#   source('create_data_files.R')
#   setwd('..')
# }
|
39c63b0da5936a6192806021010fee69e2672576 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/ggedit/examples/clone_facet.Rd.R | c993541ac7cdeff89057ee7a361c8fd537393f10 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 264 | r | clone_facet.Rd.R | library(ggedit)
### Name: cloneFacet
### Title: Clone ggplot facet object
### Aliases: cloneFacet
### ** Examples
# Auto-extracted Rd example: build a facet_grid spec with non-default
# options, then clone it; verbose = TRUE presumably echoes the reconstructed
# call — confirm against the ggedit::cloneFacet() help page.
obj=ggplot2::facet_grid(a+b~c+d,scales = 'free',as.table = FALSE,switch = 'x',shrink = FALSE)
cloneFacet(obj)
cloneFacet(obj,verbose=TRUE)
|
f70d5c52d591c16bac1ade730459391f70e6d3b4 | d184e1fbff172fb64f823abde53b8e8125d78964 | /gg_Tuts.R | 94643709f84267ecec60276bec4bf71c3292e9f6 | [] | no_license | mahendra-16/gg_tutorial | c3c7bd1ed39baad7d276f4ea82079b97058268f7 | 669e6eadc8bb22ad443c6b7276a56a2050711cbe | refs/heads/master | 2020-04-06T16:08:39.907173 | 2018-11-14T20:37:54 | 2018-11-14T20:37:54 | 157,607,274 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 230 | r | gg_Tuts.R |
library(ggplot2)
# Same iris scatterplot in two syntaxes.  NOTE(review): the axes differ —
# ggplot() maps x = Sepal.Length / y = Petal.Length, while the gf_point()
# formula (ggformula's y ~ x convention) puts Sepal.Length on y and
# Petal.Length on x; confirm which orientation was intended.
ggplot(iris, aes(x=Sepal.Length, y=Petal.Length)) + geom_point(aes(color=Species, shape=Species))
library(ggformula)
gf_point(Sepal.Length ~ Petal.Length,data = iris, color = ~Species, shape = ~Species)
|
8df77f76a2615a03242ebf54f7635ce9d4c45d0c | e80f2a5a0e13370e52cc97fe42f5c9edcc8eead5 | /man/cxhull_range.Rd | bf64eeac0c720a727a9c94ffbc775e7821d09579 | [] | no_license | marlonecobos/rangemap | 80cd91c6847338763f793ad7ac66f6fc3a1210eb | 1edfc01612a120de25f92cf651e9ca64a4f8535a | refs/heads/master | 2022-05-21T11:48:08.929230 | 2022-04-14T17:51:43 | 2022-04-14T17:51:43 | 133,424,345 | 17 | 11 | null | 2020-09-15T03:58:57 | 2018-05-14T21:36:23 | R | UTF-8 | R | false | true | 910 | rd | cxhull_range.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ranges_doc.R
\docType{data}
\name{cxhull_range}
\alias{cxhull_range}
\title{Example of sp_range* object based on convex hulls}
\format{
sp_range_iucn with 6 slots.
\describe{
\item{name}{character, name identifying the origin of the object.}
\item{summary}{data.frame, summary of results.}
\item{species_range}{SpatialPolygonsDataFrame of species range.}
\item{species_unique_records}{SpatialPointsDataFrame of species occurrences.}
\item{extent_of_occurrence}{SpatialPolygonsDataFrame of species extent of occurrence.}
\item{area_of_occupancy}{SpatialPolygonsDataFrame of species area of occupancy.}
}
}
\usage{
cxhull_range
}
\description{
A sp_range_iucn object containing the results of the function
\code{\link{rangemap_hull}}.
}
\examples{
data("cxhull_range", package = "rangemap")
summary(cxhull_range)
}
\keyword{datasets}
|
96b04bc818282c68c81772bfba4949ed9e206d9d | 2e0db0e29d4f9be2fdf97750aca3f6834d087fd4 | /exploring categorical and numerical data.R | 6fbf383df5abdde9d351cd7340fb30595bf0ae30 | [] | no_license | agbleze/Academic-repo | 049f0b23de5e9313988a785e03568fcc6146abf7 | f3d959c5e93e5d516081e4cdd167350b0e862031 | refs/heads/main | 2023-08-01T00:08:29.281134 | 2021-09-10T21:05:48 | 2021-09-10T21:05:48 | 372,232,015 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,873 | r | exploring categorical and numerical data.R | library(moments)
# readxl must be attached before the first read_xlsx() call below —
# previously it was only loaded *after* that call, which fails in a fresh
# session.  ggplot2 is moved up alongside it for the plots further down.
#install.packages("moments")
library(outliers)
#install.packages("outliers")
library(readxl)
library(ggplot2)
testData <- read_xlsx("~/Downloads/R FOR DATA ANALYSIS AND SCIENCE/R_Business_Analytic/Baseball Salaries 2011.xlsx")
testData
View(testData)
# Re-read the same workbook, naming the sheet explicitly this time
testDatan <- read_excel("~/Downloads/R FOR DATA ANALYSIS AND SCIENCE/R_Business_Analytic/Baseball Salaries 2011.xlsx", 
                        sheet = "Salaries 2011")
View(testDatan)
mean(testDatan$Salary, na.rm = TRUE)
### function for mode
# Statistical mode: the value that occurs most often in `v`.  Ties break in
# favour of the value encountered first, and the result keeps the input's
# type (numeric, character, ...).
get_mode <- function(v){
  distinct_vals <- unique(v)
  counts <- tabulate(match(v, distinct_vals))
  distinct_vals[which.max(counts)]
}
get_mode(testDatan$Salary)
### variance
var(testDatan$Salary)
### standard deviation
sd(testDatan$Salary)
### mean absolute deviation
mad(testDatan$Salary, center = median(testDatan$Salary))
### MEASURE SHAPE OF DISTRIBUTION
## skewness measures symmetry of distribution: negative = left skewed
## kurtosis measures peakedness of the distribution: negative = platykurtic, positive = leptokurtic
skewness(testDatan$Salary, na.rm = TRUE)
kurtosis(testDatan$Salary, na.rm = TRUE)
## outliers are most extreme values outside the observation
# get most extreme right-tail observation
outlier(testDatan$Salary)
# get most extreme left-tail observation
outlier(testDatan$Salary, opposite = TRUE)
## outliers based on z-scores
z_scores <- scores(testDatan$Salary, type = "z")
z_scores
# flag observations beyond +/-1.96 standard deviations (~5% two-sided)
which(abs(z_scores) > 1.96)
## outliers based on values less than or greater than
## whiskers on a boxplot (1.5 x IQR or more below 1st quartile or above 3rd quartile)
which(scores(testDatan$Salary, type = "iqr", lim = 1.5))
## remove outlier
testOutlierrm <- rm.outlier(testDatan$Salary)
# BUG FIX: rm.outlier() returns an atomic vector, so `testOutlierrm$Salary`
# errored ($ is invalid on atomic vectors); name the column explicitly.
View(data.frame(Salary = testOutlierrm))
# fill = TRUE replaces the outlier in place (by default with the mean —
# see ?rm.outlier) rather than dropping it
testOutlierepl <- rm.outlier(testDatan$Salary, fill = TRUE)
View(testOutlierepl)
##histogram
hist(testDatan$Salary)
## histogram with ggplot2: log10 salary axis with a dashed line + label at the mean
ggplot(testDatan, aes(Salary)) + geom_histogram(colour = "black", 
                                                fill = "white")+
  scale_x_log10(labels = scales::dollar) + geom_vline(aes(xintercept = mean(Salary)), 
                                                      color = "red", linetype = "dashed")+
  annotate("text", x = mean(testDatan$Salary) * 2, y = 255, 
           label = paste0("Avg: $", round(mean(testDatan$Salary)/1000000, 1), "M"))
### dotplot
ggplot(testDatan, aes(Salary)) + geom_dotplot() + scale_x_continuous(labels = scales::dollar)
### boxplot
boxplot(testDatan$Salary, horizontal = TRUE, log = "x")
### boxplot with ggplot: single box with jittered points; blue diamond marks the mean
ggplot(testDatan, aes(x=factor(0), y = Salary)) + 
  geom_boxplot() + xlab("") + scale_x_discrete(breaks = NULL) + 
  scale_y_log10(labels = scales::dollar)+ coord_flip() + 
  geom_jitter(shape = 16, position = position_jitter(0.4), alpha = .3) + 
  stat_summary(fun = mean, geom = "point", shape = 23, size = 4, fill = "blue") # fun.y was deprecated in ggplot2 3.3
### boxplot for comparison
ggplot(testDatan, aes(x = Position, y = Salary)) + 
  geom_boxplot()+
  scale_y_continuous(labels = scales::dollar) + 
  coord_flip()
#################################################
# Categorical data: supermarket transactions
catData <- read_xlsx("~/Downloads/R FOR DATA ANALYSIS AND SCIENCE/R_Business_Analytic/Supermarket Transactions.xlsx", sheet = "Data")
head(catData)
head(catData[,c(3:5, 8:9, 14:16)])
## frequency of various columns
# 2-way table: marital status x gender
df <- table(catData$`Marital Status`, catData$`Gender`)
View(catData)
# 3-way table adds state/province; ftable() flattens it for viewing
catData2 <- table(catData$`Marital Status`, catData$Gender, 
                  catData$`State or Province`)
NEW <- ftable(catData2)
View(NEW)
########## proportions
prop_catData2 <- prop.table(catData2)
View(prop_catData2)
prop.table(df)
### customer % across location by gender and marital status
ftable(round(prop.table(catData2), 3))
#### marginal
## frequency marginals
# row marginals: total of each marital status across gender
margin.table(catData2, 1)
# column marginals -- total of each gender across marital status
margin.table(catData2, 2)
##### Percentage marginals
# row marginals --- row percentages across gender
prop.table(catData2, margin = 1)
# column marginals --- column % across marital status
prop.table(catData2, margin = 2)
#### barchart
# rotation angle for the x-axis labels, used in the first plot below
ang = 90
# reorder levels
# Reorder the levels of x so the most frequent value comes first; handy
# for sorting bar charts by descending count. Returns x as a factor.
reorder_size <- function(x){
  level_order <- names(sort(table(x), decreasing = TRUE))
  factor(x, levels = level_order)
}
# Bar chart of customer share by state, ordered by frequency (uses
# reorder_size() defined above); ..count../sum(..count..) turns counts
# into proportions.
ggplot(catData, aes(x=reorder_size(`State or Province`))) +
geom_bar(aes(y = (..count..)/sum(..count..))) +
theme(axis.text.x = element_text(angle = ang, hjust = 1))+
xlab("State or Province") +
scale_y_continuous(labels = scales::percent, name = "Proportion")
### plot for gender and marital status
# Same chart, faceted by marital status (rows) and gender (columns);
# note ggplot2 computes the ..count.. proportions per panel.
ggplot(catData, aes(x = reorder_size(`State or Province`))) +
geom_bar(aes(y = (..count..) / sum(..count..))) +
xlab("State or Province") +
scale_y_continuous(labels = scales::percent, name = "Proportion") +
facet_grid(`Marital Status` ~Gender) +
theme(axis.text.x = element_text(angle = 45, hjust = 1))
|
eedc90d771711e94001d30a7fadd26808594f71a | d08e69198fbd60086aa35d765c7675006d06cf3f | /R/dlines.R | 92951eb48cc88eaa5bbf108b266ea7de822bc0b6 | [] | no_license | villardon/MultBiplotR | 7d2e1b3b25fb5a1971b52fa2674df714f14176ca | 9ac841d0402e0fb4ac93dbff078170188b25b291 | refs/heads/master | 2023-01-22T12:37:03.318282 | 2021-05-31T09:18:20 | 2021-05-31T09:18:20 | 97,450,677 | 3 | 2 | null | 2023-01-13T13:34:51 | 2017-07-17T08:02:54 | R | UTF-8 | R | false | false | 240 | r | dlines.R | "dlines" <-
function(SetA, SetB, lin = "dotted", color = "black", ...) {
  # Draw a segment joining row i of SetA to row i of SetB for every row;
  # columns 1:2 of each matrix are used as (x, y) on the current device.
  # `lin` is the line type, `color` a single colour or one per row; extra
  # arguments are forwarded to graphics::lines().
  np <- nrow(SetA)
  # Recycle a single colour so the per-row indexing below always works.
  if (length(color) == 1) color <- rep(color, np)
  # seq_len() is safe when SetA has zero rows (the original 1:np would
  # yield c(1, 0) and index non-existent rows).
  for (i in seq_len(np)) {
    lines(rbind(SetA[i, 1:2], SetB[i, 1:2]), lty = lin, col = color[i], ...)
  }
  return(NULL)
}
dc5488eaf2cbe559a17bb5b35e9e818e01c946d9 | c7a15db8eae83c859bfdbe1e62ab5a7075493637 | /lib/covidPlots/R/newEvents.R | 8376d53e1181bbd2f1ebdfb64d3c531610481900 | [] | no_license | dgJacks0n/covidPlots | 0cd911a9eebe0a97085a2c1a2aad0646b703e9ac | c056bbfe2f8256534d2fe2d78eb274eacef3591c | refs/heads/master | 2021-07-15T18:27:16.258077 | 2021-04-05T00:45:49 | 2021-04-05T00:45:49 | 250,050,837 | 0 | 0 | null | 2021-04-05T00:45:50 | 2020-03-25T17:51:49 | R | UTF-8 | R | false | false | 914 | r | newEvents.R | #' Calculate new deaths and cases from cumulative values
#'
#' @param myData Data frame of cumulative `cases` and `deaths` counts, at
#'   either state or county resolution.
#' @param level Grouping resolution: "state" groups on the `state` column,
#'   "county" groups on the `fips` column.
#'
#' @return The input data with two extra columns, `new_cases` and
#'   `new_deaths` (day-over-day changes; the first day per group is 0).
newEvents <- function(myData, level = c("state", "county")) {
  # County rows are identified by FIPS code rather than by state name.
  myLevel <- ifelse((match.arg(level) == "state"), "state", "fips")
  # Fail fast on malformed input.
  stopifnot(is.data.frame(myData))
  stopifnot(myLevel %in% colnames(myData))
  # Difference each cumulative series against the previous day's value,
  # independently within every group; lag() defaults to the group's first
  # value so the opening day differences to zero rather than NA.
  grouped <- dplyr::group_by_at(myData, myLevel)
  ordered <- dplyr::arrange(grouped, date, .by_group = TRUE)
  augmented <- dplyr::mutate(ordered,
                             new_cases = cases -
                               dplyr::lag(cases, default = dplyr::first(cases)),
                             new_deaths = deaths -
                               dplyr::lag(deaths, default = dplyr::first(deaths)))
  dplyr::ungroup(augmented)
}
b0d1c5e9dd670b798e2db99f70693459689bfa8d | 3c10b4570575457750e12d97e2366fb10a21490e | /cachematrix.R | 977b855a26405c874668bc476c4e71aea7e42c98 | [] | no_license | fat-rabbit/ProgrammingAssignment2 | 0e59e55e9a96681e3ddcb68d94f0eb04fb64a0b9 | b15e1110f1c832d88a03e9cc2d2ec74e2977e07d | refs/heads/master | 2021-08-19T14:15:58.331463 | 2017-11-26T16:01:54 | 2017-11-26T16:01:54 | 112,083,342 | 0 | 0 | null | 2017-11-26T13:08:18 | 2017-11-26T13:08:18 | null | UTF-8 | R | false | false | 1,336 | r | cachematrix.R | ## Creates special object 'cacheMatrix'
## and deal with it for getting inverse of the matrix
## Creates a special "matrix" object that can cache its inverse.
# Build a "cache-aware matrix": a list of closures sharing one environment
# that holds the matrix (`msource`) and, once computed, its cached inverse.
# Returns NULL when the argument is not a matrix.
makeCacheMatrix <- function(msource = matrix()) {
  if (!is.matrix(msource)) {
    return(NULL)
  }
  cachedInverse <- NULL
  set <- function(y) {
    # Replacing the matrix invalidates any previously cached inverse.
    msource <<- y
    cachedInverse <<- NULL
  }
  get <- function() msource
  setSolve <- function(solve) cachedInverse <<- solve
  getSolve <- function() cachedInverse
  list(
    set = set,
    get = get,
    setSolve = setSolve,
    getSolve = getSolve
  )
}
## Return the inverse of the special "matrix", using the cached value when available.
# First call computes the inverse with solve() and stores it via setSolve();
# later calls hand back the stored copy. Non-list input (atomic/NULL) lacks
# the closure interface and just gets a "check your input" complaint.
cacheSolve <- function(cachedMatrix, ...) {
  if (is.atomic(cachedMatrix) || is.null(cachedMatrix)) {
    print("check your input")
  } else {
    inverse <- cachedMatrix$getSolve()
    if (is.null(inverse)) {
      # Cache miss: invert the stored matrix and remember the result.
      inverse <- solve(cachedMatrix$get(), ...)
      cachedMatrix$setSolve(inverse)
    }
    inverse
  }
}
df41857556c289848692d42df8dd67f384cb58ee | 698bb4ca00e389fa6feb2fb8c7f6dc275049f698 | /eagle2/man/eagle2_re.Rd | 20a3e4915d14251b281d597f5c86c3a28155adce | [] | no_license | davidaknowles/eagle2 | 25d324fa3c387d205c90e4cd7346c7430d4810bb | 3ac7b66968ed4ad96104e27f9c17cd4de7c73d0d | refs/heads/master | 2021-01-22T19:49:45.773200 | 2018-10-13T21:04:52 | 2018-10-13T21:04:52 | 85,250,216 | 0 | 2 | null | null | null | null | UTF-8 | R | false | true | 866 | rd | eagle2_re.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/eagle2_re.R
\name{eagle2_re}
\alias{eagle2_re}
\title{Beta binomial GLMM with flips. Prior on concentration parameter is Gamma(concShape,concRate)}
\usage{
eagle2_re(ys, ns, concShape = 1.0001, concRate = 1e-04,
USE_LBFGS = T, burnin = 3000, iterations = 1000,
elbo_samples = 1000, learning_rate = 1, seed = 1, ...)
}
\arguments{
\item{ys}{numerator counts [n x T x K] where n are individuals, T are timepoints, K are SNPs}
\item{ns}{denominator counts [n x T x K]}
\item{concShape}{Shape of prior on concentration}
\item{concRate}{Rate of prior on concentration}
}
\value{
List with likelihood ratio, p-value and fits
}
\description{
Includes a per individual, per SNP random effect (shared across conditions) and uses stochastic
variational inference to integrate over these.
}
|
77e84fcb65ae166fc97cf193f7455737ac0d6dd7 | 5e2d2b544ef2661a4d65dced302730cbc524c1be | /cachematrix.R | 054e0cbafef7625bb9dbce655c713e6b4cbe85f2 | [] | no_license | jc873/ProgrammingAssignment2 | 4cb191a430af5c4d61fa7a50b89e82c63534dae0 | baf2cb69f2baa6e7ab229cc049374c771c4ced1c | refs/heads/master | 2021-01-14T13:07:34.805903 | 2015-04-22T20:41:10 | 2015-04-22T20:41:10 | 34,410,312 | 0 | 0 | null | 2015-04-22T19:10:58 | 2015-04-22T19:10:57 | null | UTF-8 | R | false | false | 3,063 | r | cachematrix.R | ## Matrix inversion is usually a costly computation and there may be some
## benefit to caching the inverse of a matrix rather than compute it repeatedly.
## The following functions are used to cache an inverted matrix.
## Write a short comment describing this function
## makeCacheMatrix creates a special "matrix", which is really a
## list containing a function to:
## - set the matrix
## - get the matrix
## - set the value of the inverse matrix
## - get the value of the inverse matrix
## Wrap a matrix together with a one-slot cache for its inverse. The
## returned list exposes set/get for the matrix itself and
## setinverse/getinverse for the cached inverse (NULL until computed).
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL  ## cache slot; NULL means "not computed yet"
  set <- function(y) {
    x <<- y
    ## storing a new matrix invalidates the old inverse
    inv <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) inv <<- inverse
  getinverse <- function() inv
  ## hand the four accessors back as a named list
  list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## makeCacheMatrix.
##
## Description:
## - check if inverse matrix has already been calculated (=cached)
## - if cached: return cached inverted matrix
## - else : calculate inverse matrix using solve() an write it to cache
## Returns a matrix that is the inverse of 'x'
##
## Assumption: every matrix can be inverted. (no error handling if not)
cacheSolve <- function(x, ...) {
## get data from inverse Matrix (if cached)
myInverseMatrix <- x$getinverse()
## if the inverse matrix ist cached, return cached data
if(!is.null(myInverseMatrix)) {
## print message, return data later
message("getting cached data")
}
else ## getinverse returned NULL -> inversed matrix is not cached
{
## we need to get the matrix
data <- x$get()
## calculate inverse matrix
myInverseMatrix <- solve(data, ...)
## write inverse matrix to cache
x$setinverse(myInverseMatrix)
}
## Return the value that is the inverse of 'x'
myInverseMatrix
}
## ############################################################################
##
## Testdata / test run
##
## Matrix - pass as argument:
## x = rbind(c(1, -2), c(-2, 1))
##
## > x
## [,1] [,2]
## [1,] 1 -2
## [2,] -2 1
## > solve(x)
## [,1] [,2]
## [1,] -0.3333333 -0.6666667
## [2,] -0.6666667 -0.3333333
##
## Call makeCacheMatrix
## m <- makeCacheMatrix (x)
##
## > m$get()
## [,1] [,2]
## [1,] 1 -2
## [2,] -2 1
##
##
## Call cacheSolve
## cacheSolve(m)
## [,1] [,2]
## [1,] -0.3333333 -0.6666667
## [2,] -0.6666667 -0.3333333
## > cacheSolve(m)
## getting cached data
## [,1] [,2]
## [1,] -0.3333333 -0.6666667
## [2,] -0.6666667 -0.3333333
## |
bacfb1b6459a16803d1dc5dad4d14bf004435ecf | 15828f1f15e5600801cf705fbc3dcbf0b6eb972c | /churn reduction.R | 089772ac3212404db39990ac8f21e11a246e7149 | [] | no_license | asif786raza/project | 41d74eb1c1d7a3c41979848f9a2b1c942e86a6f0 | 87e2914bf6a35d11d27f83b573b85aca0f03ba65 | refs/heads/master | 2020-03-25T19:42:30.231171 | 2018-08-09T04:26:06 | 2018-08-09T04:26:06 | 144,096,081 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 21,830 | r | churn reduction.R | rm(list=ls())
# Point R at the project folder and echo it.
# NOTE(review): a hard-coded, machine-specific setwd() makes the script
# non-portable.
setwd("C:/Users/user")
getwd()
#LOAD Libraries
# Package names are case-sensitive; the original vector contained "ggplt2",
# "Caret", "RandomForest", "unbalance" and "information", none of which
# exist under those spellings, so those loads silently failed (require()
# returns FALSE instead of erroring). Corrected here, and the results are
# checked so any package that still fails to load is reported.
x=c("ggplot2", "corrgram", "DMwR", "caret", "randomForest", "unbalanced", "C50", "dummies", "e1071", "Information", "MASS", "rpart", "gbm", "ROSE")
loaded = sapply(x, require, character.only=TRUE)
if (!all(loaded)) warning("Packages that failed to load: ", paste(x[!loaded], collapse = ", "))
#LOAD Data
# Expects Train_data.csv / Test_data.csv in the working directory set above.
Train_data=read.csv("Train_data.csv", header=T)
Test_data=read.csv("Test_data.csv", header = T )
names(Train_data)
#libraries for plotting the graphs
library(scales)
library(psych)
library(gplots)
library(ggplot2)
#histogram of all predictors variable
# One histogram per numeric predictor in the training data.
ggplot(Train_data, aes(x=Train_data$account.length))+geom_histogram(fill="DarkSlateBlue", colour="black")+ggtitle("account.length")
ggplot(Train_data, aes(x=Train_data$area.code))+geom_histogram(fill="DarkSlateBlue", colour="black")+ggtitle("area code")
# NOTE(review): the title below says "account.length" but the variable
# plotted is number.vmail.messages.
ggplot(Train_data, aes(x=Train_data$number.vmail.messages))+geom_histogram(fill="DarkSlateBlue", colour="black")+ggtitle("account.length")
ggplot(Train_data, aes(x=Train_data$total.day.minutes))+geom_histogram(fill="DarkSlateBlue", colour="black")+ggtitle("total day minutes")
ggplot(Train_data, aes(x=Train_data$total.day.calls))+geom_histogram(fill="DarkSlateBlue", colour="black")+ggtitle("total day calls")
ggplot(Train_data, aes(x=Train_data$total.day.charge))+geom_histogram(fill="DarkSlateBlue", colour="black")+ggtitle("total day charge")
ggplot(Train_data, aes(x=Train_data$total.eve.minutes))+geom_histogram(fill="DarkSlateBlue", colour="black")+ggtitle("total evening minutes")
ggplot(Train_data, aes(x=Train_data$total.eve.calls))+geom_histogram(fill="DarkSlateBlue", colour="black")+ggtitle("total evening calls")
ggplot(Train_data, aes(x=Train_data$total.eve.charge))+geom_histogram(fill="DarkSlateBlue", colour="black")+ggtitle("total evening charge")
ggplot(Train_data, aes(x=Train_data$total.night.minutes))+geom_histogram(fill="DarkSlateBlue", colour="black")+ggtitle("total night minutes")
ggplot(Train_data, aes(x=Train_data$total.night.calls))+geom_histogram(fill="DarkSlateBlue", colour="black")+ggtitle("total night calls")
ggplot(Train_data, aes(x=Train_data$total.night.charge))+geom_histogram(fill="DarkSlateBlue", colour="black")+ggtitle("total night charge")
ggplot(Train_data, aes(x=Train_data$total.intl.minutes))+geom_histogram(fill="DarkSlateBlue", colour="black")+ggtitle("total international minutes")
ggplot(Train_data, aes(x=Train_data$total.intl.calls))+geom_histogram(fill="DarkSlateBlue", colour="black")+ggtitle("total international calls")
ggplot(Train_data, aes(x=Train_data$total.intl.charge))+geom_histogram(fill="DarkSlateBlue", colour="black")+ggtitle("total international charge")
ggplot(Train_data, aes(x=Train_data$number.customer.service.calls))+geom_histogram(fill="DarkSlateBlue", colour="black")+ggtitle("service customer calls")
#check missing value analysis
#explore the data
str(Train_data)
#create dataframe with missing-value counts per column
# (despite the section title, this is a raw NA count -- no division by
# nrow() is done, so it is not a percentage)
missing_val= data.frame(apply(Train_data, 2, function(x)(sum(is.na(x)))))
missing_val
#check unique value
unique(Train_data$state)
length(unique(Train_data$state))
table(Train_data$state)
#check outlier analysis of numerical variable
ggplot(Train_data, aes(x=Train_data$account.length, fill= Train_data$account.length))+geom_boxplot(outlier.colour = "red", outlier.size = 3)+ggtitle("Outlier analysis- Account Length")
#check outliers analysis (boxplot of each numerical variable against the dependent variable Churn)
ggplot(Train_data, aes(x=Train_data$Churn, y=Train_data$account.length, fill= Train_data$Churn))+geom_boxplot(outlier.colour = "red", outlier.size = 3)+ggtitle("Outlier analysis- Account Length")
# NOTE(review): the y variable below is voice.mail.plan, not the count of
# voice-mail messages the title suggests.
ggplot(Train_data, aes(x=Train_data$Churn, y=Train_data$voice.mail.plan, fill= Train_data$Churn))+geom_boxplot(outlier.colour = "red", outlier.size = 3)+ggtitle("Outlier analysis- no of voice mail message")
ggplot(Train_data, aes(x=Train_data$Churn, y=Train_data$total.day.calls, fill= Train_data$Churn))+geom_boxplot(outlier.colour = "red", outlier.size = 3)+ggtitle("Outlier analysis- total day calls")
ggplot(Train_data, aes(x=Train_data$Churn, y=Train_data$total.day.minutes, fill= Train_data$Churn))+geom_boxplot(outlier.colour = "red", outlier.size = 3)+ggtitle("Outlier analysis- total day minutes")
ggplot(Train_data, aes(x=Train_data$Churn, y=Train_data$total.day.charge, fill= Train_data$Churn))+geom_boxplot(outlier.colour = "red", outlier.size = 3)+ggtitle("Outlier analysis- total day charge")
ggplot(Train_data, aes(x=Train_data$Churn, y=Train_data$total.eve.calls, fill= Train_data$Churn))+geom_boxplot(outlier.colour = "red", outlier.size = 3)+ggtitle("Outlier analysis- total evening calls")
ggplot(Train_data, aes(x=Train_data$Churn, y=Train_data$total.eve.minutes, fill= Train_data$Churn))+geom_boxplot(outlier.colour = "red", outlier.size = 3)+ggtitle("Outlier analysis- total evening minutes")
ggplot(Train_data, aes(x=Train_data$Churn, y=Train_data$total.eve.charge, fill= Train_data$Churn))+geom_boxplot(outlier.colour = "red", outlier.size = 3)+ggtitle("Outlier analysis- total evening charge")
ggplot(Train_data, aes(x=Train_data$Churn, y=Train_data$total.night.calls, fill= Train_data$Churn))+geom_boxplot(outlier.colour = "red", outlier.size = 3)+ggtitle("Outlier analysis- total night calls")
ggplot(Train_data, aes(x=Train_data$Churn, y=Train_data$total.night.minutes, fill= Train_data$Churn))+geom_boxplot(outlier.colour = "red", outlier.size = 3)+ggtitle("Outlier analysis- total night minutes")
ggplot(Train_data, aes(x=Train_data$Churn, y=Train_data$total.night.charge, fill= Train_data$Churn))+geom_boxplot(outlier.colour = "red", outlier.size = 3)+ggtitle("Outlier analysis- total night charge")
ggplot(Train_data, aes(x=Train_data$Churn, y=Train_data$total.intl.calls, fill= Train_data$Churn))+geom_boxplot(outlier.colour = "red", outlier.size = 3)+ggtitle("Outlier analysis- total international calls")
ggplot(Train_data, aes(x=Train_data$Churn, y=Train_data$total.intl.minutes, fill= Train_data$Churn))+geom_boxplot(outlier.colour = "red", outlier.size = 3)+ggtitle("Outlier analysis- total international minutes")
ggplot(Train_data, aes(x=Train_data$Churn, y=Train_data$total.intl.charge, fill= Train_data$Churn))+geom_boxplot(outlier.colour = "red", outlier.size = 3)+ggtitle("Outlier analysis- total international charge")
ggplot(Train_data, aes(x=Train_data$Churn, y=Train_data$number.customer.service.calls, fill= Train_data$Churn))+geom_boxplot(outlier.colour = "red", outlier.size = 3)+ggtitle("Outlier analysis- service customer calls")
View(Train_data)
# Inspect the distinct values of the categorical columns.
unique(Train_data$international.plan)
unique(Train_data$area.code)
unique(Train_data$number.vmail.messages)
unique(Train_data$voice.mail.plan)
unique(Train_data$Churn)
unique(Train_data$state)
#Data Manipulation convert string categorical into factor numeric
# Re-encode every factor column with numeric labels 1..k (original level
# names are discarded).
# NOTE(review): inherits(Train_data[,i], "factor") would be safer than
# class(...) == 'factor', which breaks on columns with multiple classes.
for(i in 1:ncol(Train_data)){
if (class(Train_data[,i])== 'factor'){
Train_data[,i]=factor(Train_data[,i], labels = (1:length(levels(factor(Train_data[,i])))))
}
}
View(Train_data)
# Index and extract the numeric columns; cnames drives the outlier loops
# further below.
numeric_index= sapply(Train_data, is.numeric)
numeric_index
numeric_data= Train_data[, numeric_index]
numeric_data
cnames= colnames(numeric_data)
cnames
View(numeric_data)
# another method of plotting box plots
# NOTE(review): install.packages() inside a script re-installs on every run;
# better done once interactively.
install.packages("ggplot2")
library(ggplot2)
# Build one Churn-vs-variable boxplot per numeric column, stored into the
# global environment as gn1..gnN via assign(), then arranged in grids.
for (i in 1:length(cnames)) {
assign(paste0("gn", i), ggplot(aes_string((cnames[i]),x= "Churn"), data= subset(Train_data))+
stat_boxplot(geom= "errorbar", width=0.5)+
geom_boxplot(outlier.colour="red", fill="grey", outlier.shape=18,
outlier.size=1, notch=FALSE)+
theme(legend.position="bottom")+
labs(y=cnames[i], x="Churn")+
ggtitle(paste("box plot of Churn for", cnames[i])))
}
gridExtra::grid.arrange(gn1, gn2, gn3, ncol=3)
gridExtra::grid.arrange(gn4,gn5, gn6, ncol=3)
gridExtra::grid.arrange(gn7,gn8, gn9, ncol=3)
gridExtra::grid.arrange(gn10,gn11, gn12, ncol=3)
gridExtra::grid.arrange(gn13,gn14, gn15, ncol=3)
gridExtra::grid.arrange(gn16, ncol=1)
# Keep a backup of the training data before the area.code outlier experiment.
df=Train_data
val=Train_data$area.code[Train_data$area.code %in% boxplot.stats(Train_data$area.code)$out]
# NOTE(review): the next two lines first keep ONLY the rows whose area.code
# is an outlier and then drop those same rows from that subset, leaving
# Train_data empty (or nearly so); the backup is then restored into
# `marketing_train`, not back into Train_data. This looks like leftover
# experimentation -- confirm before trusting anything downstream.
Train_data=Train_data[which(Train_data$area.code %in% val),]
Train_data=Train_data[which(!Train_data$area.code %in% val),]
marketing_train=df
#detect and delete the outliers from all numerical variables by iterating the loop
# Drops every row flagged by boxplot.stats() (the 1.5*IQR whisker rule),
# column by column; each pass shrinks Train_data before the next column is
# examined.
for (i in cnames){
print(i)
val=Train_data[,i][Train_data[,i] %in% boxplot.stats(Train_data[,i])$out]
print(length(val))
Train_data=Train_data[which(!Train_data[,i] %in% val),]
}
#checking boxplots after removing outliers
ggplot(Train_data, aes(x=Train_data$Churn, y=Train_data$account.length, fill= Train_data$Churn))+geom_boxplot(outlier.colour = "red", outlier.size = 3)+ggtitle("Without outlier- Account Length")
ggplot(Train_data, aes(x=Train_data$Churn, y=Train_data$area.code, fill= Train_data$Churn))+geom_boxplot(outlier.colour = "red", outlier.size = 3)+ggtitle("Outlier analysis- arear code")
ggplot(Train_data, aes(x=Train_data$Churn, y=Train_data$voice.mail.plan, fill= Train_data$Churn))+geom_boxplot(outlier.colour = "red", outlier.size = 3)+ggtitle("Without Outlier- no of voice mail message")
ggplot(Train_data, aes(x=Train_data$Churn, y=Train_data$total.day.calls, fill= Train_data$Churn))+geom_boxplot(outlier.colour = "red", outlier.size = 3)+ggtitle("Without Outlier- total day calls")
ggplot(Train_data, aes(x=Train_data$Churn, y=Train_data$total.day.minutes, fill= Train_data$Churn))+geom_boxplot(outlier.colour = "red", outlier.size = 3)+ggtitle("Without Outlier- total day minutes")
ggplot(Train_data, aes(x=Train_data$Churn, y=Train_data$total.day.charge, fill= Train_data$Churn))+geom_boxplot(outlier.colour = "red", outlier.size = 3)+ggtitle("Without Outlier- total day charge")
ggplot(Train_data, aes(x=Train_data$Churn, y=Train_data$total.eve.calls, fill= Train_data$Churn))+geom_boxplot(outlier.colour = "red", outlier.size = 0.1)+ggtitle("Without Outlier- total evening calls")
ggplot(Train_data, aes(x=Train_data$Churn, y=Train_data$total.eve.minutes, fill= Train_data$Churn))+geom_boxplot(outlier.colour = "black", outlier.size = 3)+ggtitle("Without Outlier- total evening minutes")
ggplot(Train_data, aes(x=Train_data$Churn, y=Train_data$total.eve.charge, fill= Train_data$Churn))+geom_boxplot(outlier.colour = "red", outlier.size = 3)+ggtitle("Without Outlier- total evening charge")
ggplot(Train_data, aes(x=Train_data$Churn, y=Train_data$total.night.calls, fill= Train_data$Churn))+geom_boxplot(outlier.colour = "red", outlier.size = 3)+ggtitle("Without Outlier- total night calls")
ggplot(Train_data, aes(x=Train_data$Churn, y=Train_data$total.night.minutes, fill= Train_data$Churn))+geom_boxplot(outlier.colour = "red", outlier.size = 3)+ggtitle("Without Outlier- total night minutes")
ggplot(Train_data, aes(x=Train_data$Churn, y=Train_data$total.night.charge, fill= Train_data$Churn))+geom_boxplot(outlier.colour = "red", outlier.size = 3)+ggtitle("Without Outlier- total night charge")
ggplot(Train_data, aes(x=Train_data$Churn, y=Train_data$total.intl.calls, fill= Train_data$Churn))+geom_boxplot(outlier.colour = "red", outlier.size = 3)+ggtitle("Without Outlier- total international calls")
ggplot(Train_data, aes(x=Train_data$Churn, y=Train_data$total.intl.minutes, fill= Train_data$Churn))+geom_boxplot(outlier.colour = "red", outlier.size = 3)+ggtitle("Without Outlier- total international minutes")
ggplot(Train_data, aes(x=Train_data$Churn, y=Train_data$total.intl.charge, fill= Train_data$Churn))+geom_boxplot(outlier.colour = "red", outlier.size = 3)+ggtitle("Without Outlier- total international charge")
ggplot(Train_data, aes(x=Train_data$Churn, y=Train_data$number.customer.service.calls, fill= Train_data$Churn))+geom_boxplot(outlier.colour = "red", outlier.size = 3)+ggtitle("Without Outlier- service customer calls")
# NOTE(review): the backup `df` is again copied into `marketing_train`;
# Train_data itself is NOT restored before the NA replacement below.
marketing_train=df
#replace the outliers with NA
for (i in cnames){
val= Train_data[,i][Train_data[,i] %in% boxplot.stats(Train_data[,i])$out]
#print(length(val))
Train_data[,i][Train_data[,i] %in% val] =NA
}
sum(is.na(Train_data))
View(Train_data)
# NOTE(review): the bare `library` below just prints the function object;
# presumably `library(DMwR)` (home of knnImputation) was intended.
library
# Impute the NAs introduced above using 3-nearest-neighbour imputation.
Train_data=knnImputation(Train_data, k=3)
#correlation plot of the numeric columns
corrgram(Train_data[,numeric_index], order=F,
upper.panel=panel.pie, text.panel=panel.txt, main="correlation plot")
#chi square of independence, selecting only the categorical variables
factor_index=sapply(Test_data, is.factor)
factor_data= Test_data[, factor_index]
factor_index
View(factor_data)
# NOTE(review): subset(..., select=) is for data frames; factor_index is a
# logical vector here, so this line likely errors -- the intent was
# presumably to drop phone.number from factor_data. Confirm.
factor_index=subset(factor_index, select=-phone.number)
#chi-square test: Churn vs each categorical column of the TEST data
for (i in 1:4){
print(names(factor_data)[i])
print(chisq.test(table(factor_data$Churn, factor_data[,i])))
}
#chi square for train data
factor_index=sapply(Train_data, is.factor)
factor_data= Train_data[, factor_index]
factor_index
View(factor_data)
factor_index=subset(factor_index, select=-phone.number)
#chi-square test
# NOTE(review): 1:1 visits only the first categorical column, unlike the
# 1:4 loop above -- probably unintended.
for (i in 1:1){
print(names(factor_data)[i])
print(chisq.test(table(factor_data$Churn, factor_data[,i])))
}
#dimension reduction operation
# Drop the minutes columns and the phone.number identifier (presumably
# judged redundant/uninformative from the correlation plot and chi-square
# results above -- confirm).
Train_data_new= subset(Train_data,
select = -c(total.day.minutes, total.eve.minutes, total.night.minutes, total.intl.minutes, phone.number))
View(Train_data_new)
#feature scaling
#Check the data normality
qqnorm(Train_data_new$total.day.calls)
hist(Train_data_new$total.day.calls)
#normalization method
# Min-max scale each listed column to [0, 1]. Note the denominator
# max(x - min(x)) equals max(x) - min(x), so despite the odd
# parenthesisation this is the standard (x - min) / (max - min).
cnames1=c("account.length", "area.code", "number.vmail.messages", "total.day.calls",
"total.day.charge", "total.eve.calls", "total.eve.charge", "total.night.calls",
"total.night.charge", "total.intl.calls", "total.intl.charge", "number.customer.service.calls")
for (i in cnames1){
print(i)
Train_data_new[,i]= (Train_data_new[,i]-min(Train_data_new[,i]))/(max(Train_data_new[,i]-min(Train_data_new[,i])))
}
range(Train_data_new$total.intl.charge)
#test data pre-processing before feeding into the model
#check outlier analysis
ggplot(Test_data, aes(x=Test_data$Churn, y=Test_data$account.length, fill= Test_data$Churn))+geom_boxplot(outlier.colour = "red", outlier.size = 3)+ggtitle("Outlier analysis- Account Length")
#Data Manipulation convert string categorical into factor numeric
# Same numeric re-labelling of factor columns as done for the train set.
for(i in 1:ncol(Test_data)){
if (class(Test_data[,i])== 'factor'){
Test_data[,i]=factor(Test_data[,i], labels = (1:length(levels(factor(Test_data[,i])))))
}
}
# NOTE(review): views the TRAIN data although this section processes Test_data.
View(Train_data)
numeric_index= sapply(Test_data, is.numeric)
numeric_index
numeric_data= Test_data[, numeric_index]
cnames= colnames(numeric_data)
#replace the outlier with NA
# NOTE(review): despite being in the test-data section, this loop (and the
# knnImputation call below) operates on Train_data, which was already
# imputed above -- Test_data never gets outlier treatment or imputation.
# This looks like a copy-paste slip; Test_data was presumably intended.
for (i in cnames){
val= Train_data[,i][Train_data[,i] %in% boxplot.stats(Train_data[,i])$out]
#print(length(val))
Train_data[,i][Train_data[,i] %in% val] =NA
}
sum(is.na(Train_data))
View(Train_data)
# NOTE(review): bare `library` just prints the function object (see above).
library
Train_data=knnImputation(Train_data, k=3)
#correlation plot for the TEST data's numeric columns
corrgram(Test_data[,numeric_index], order=F,
upper.panel=panel.pie, text.panel=panel.txt, main="correlation plot")
#chi square of independence, selecting only the categorical variables
factor_index=sapply(Test_data, is.factor)
factor_data= Test_data[, factor_index]
factor_index
View(factor_data)
# NOTE(review): same subset()-on-a-logical-vector issue as in the train
# section above.
factor_index=subset(factor_index, select=-phone.number)
#chi-square test (test data)
for (i in 1:4){
print(names(factor_data)[i])
print(chisq.test(table(factor_data$Churn, factor_data[,i])))
}
#chi square for train data
factor_index=sapply(Train_data, is.factor)
factor_data= Train_data[, factor_index]
factor_index
View(factor_data)
factor_index=subset(factor_index, select=-phone.number)
#chi-square test
for (i in 1:4){
print(names(factor_data)[i])
print(chisq.test(table(factor_data$Churn, factor_data[,i])))
}
#dimension reduction operation
# Drop the same columns dropped from the train set, keeping the two data
# sets' schemas aligned for modelling.
Test_data_new= subset(Test_data,
select = -c(total.day.minutes, total.eve.minutes, total.night.minutes, total.intl.minutes, phone.number))
# NOTE(review): views/plots Train_data_new although Test_data_new was just built.
View(Train_data_new)
#feature scaling
#Check the data normality
qqnorm(Train_data_new$total.day.calls)
hist(Train_data_new$total.day.calls)
#normalization method (same min-max scaling as the train set)
cnames1=c("account.length", "area.code", "number.vmail.messages", "total.day.calls",
"total.day.charge", "total.eve.calls", "total.eve.charge", "total.night.calls",
"total.night.charge", "total.intl.calls", "total.intl.charge", "number.customer.service.calls")
for (i in cnames1){
print(i)
Test_data_new[,i]= (Test_data_new[,i]-min(Test_data_new[,i]))/(max(Test_data_new[,i]-min(Test_data_new[,i])))
}
range(Test_data_new$total.intl.charge)
#building Decision tree on train data
#decision tree classification
#develop model on the training data
library(C50)
# NOTE(review): the boosting argument is spelled `trials`, not `trails`;
# as written the misspelt argument is not doing what was intended -- confirm.
C50_model= C5.0(Churn ~., Train_data_new, trails=100, rules= TRUE)
summary(C50_model)
# Dump the learned rules to a text file for inspection.
write(capture.output(summary(C50_model)), "C50rules2.txt")
# Predict classes on the test set (column 16 is the Churn target).
C50_model_prediction=predict(C50_model, Test_data_new[-16], type="class")
C50_model_prediction
#error matrix
#evaluate the performance of model
confmatrix_C50=table(Test_data_new$Churn, C50_model_prediction)
confmatrix_C50
#accuracy (hard-coded counts from a previous run of the confusion matrix)
(1341+147)/(1341+102+77+147)
#False negative rate
77/(77+147)
#performance of model
library(ROCR)
pred=predict(C50_model, Test_data_new, type='prob')
# NOTE(review): ROCR::prediction() expects numeric scores, but
# C50_model_prediction is a factor of class labels here; and performance()
# below is handed the raw probability matrix rather than a prediction
# object -- both calls look broken. Confirm intent.
C50_model_prediction= prediction(C50_model_prediction, Test_data_new$Churn)
eval=performance(pred, "acc")
plot(eval)
#Random forest algorithm
library(randomForest)
# 100-tree forest with variable importance tracked.
RF_model= randomForest(Churn ~., Train_data_new, importance=TRUE, ntree=100)
#extract rules from random forest
library(inTrees)
treelist= RF2List(RF_model)
# Column 16 is the Churn target, so [,-16] passes only the predictors.
exec=extractRules(treelist, Train_data_new[,-16])
exec[1:2,]
#make rules more readable
readablerules= presentRules(exec, colnames(Train_data_new))
readablerules[1:2,]
# Attach frequency/error metrics to each extracted rule.
rulemetrix=getRuleMetric(exec, Train_data_new[,-16], Train_data_new$Churn)
rulemetrix[1:2,]
RF_prediction= predict(RF_model, Test_data_new[, -16])
RF_prediction
confmatrix_RF= table(Test_data_new$Churn, RF_prediction)
confmatrix_RF
#accuracy (hard-coded counts from a previous run)
(1312+163)/(1312+131+61+163)
#False negative rate
61/(61+163)
#logistic regression
logit_model= glm(Churn ~., Train_data_new, family = 'binomial')
summary(logit_model)
#predict logistic regression
logit_prediction= predict(logit_model, Test_data_new, type="response")
logit_prediction
# Threshold the predicted probabilities at 0.5.
logit_prediction=ifelse(logit_prediction > 0.5, 1, 0)
logit_prediction
#confusion matrix
confmatrix_LR= table(Test_data_new$Churn, logit_prediction)
confmatrix_LR
confmatrix_LR
#ROC Curve
# NOTE(review): the ROCR prediction object is built from the 0/1
# thresholded labels, not the raw probabilities, so the "ROC curve" below
# degenerates to a single operating point -- probably unintended.
logit_prediction=prediction(logit_prediction, Test_data_new$Churn)
eval= performance(logit_prediction, "acc")
plot(eval)
abline(h=0.85, v=1)
eval
#identify best cutoff value
# NOTE(review): `max` and `cut` shadow base R functions of the same name.
max= which.max(slot(eval, "y.values")[[1]])
acc= slot(eval, "y.values")[[1]][max]
acc
cut=slot(eval, "x.values")[[1]][max]
cut
print(c(Accuracy=acc, cutoff=cut))
#receiver operating characteristics (ROC) CURVE
roc=performance(logit_prediction, "tpr", "fpr")
plot(roc,
colorize=T,
main="ROC CURVE",
xlab="sensitivity",
ylab="1-Specificity")
abline(a=0, b=1)
#area under the ROC curve (AUC)
auc=performance(logit_prediction, "auc")
auc=unlist(slot(auc, "y.values"))
auc
auc=round(auc, 4)
auc
legend(0.6, 0.2, auc, title="AUC", cex = 0.8)
#accuracy (hard-coded counts from a previous run)
(1374+66)/(1374+69+158+66)
#False negative rate
158/(158+66)
#KNN (k nearest neighbour) classification
library(class)
# Columns 1:15 are the predictors; k = 7 neighbours.
knn_predictions= knn(Train_data_new[,1:15], Test_data_new[,1:15], Train_data_new$Churn, k=7)
knn_predictions
# NOTE(review): predictions are in the ROWS here, the opposite orientation
# to the other confusion matrices in this script.
confmatrix_knn=table(knn_predictions, Test_data_new$Churn)
confmatrix_knn
#accuracy
sum(diag(confmatrix_knn))/nrow(Test_data_new)
#false negative rate (hard-coded counts from a previous run)
51/(51+43)
#naive bayes
library(e1071)
NB_model=naiveBayes(Churn ~., Train_data_new)
NB_prediction= predict(NB_model, Test_data_new[,1:15], type='class')
# Column 16 of Test_data_new is the Churn target.
confmatrix_NB= table(Test_data_new[,16], NB_prediction)
confmatrix_NB
#accuracy
sum(diag(confmatrix_NB))/nrow(Test_data_new)
#False negative rate (hard-coded counts from a previous run)
120/(120+104)
# scatter plot
ggplot(Train_data, aes(x= Train_data$total.day.minutes, y= Train_data$total.day.calls))+geom_point(aes(colour=Train_data$Churn), size=2)+
theme_bw()+ggtitle("main")+ theme(text=element_text(size = 10))+scale_shape_discrete(name="international")+
scale_color_discrete(name="Churn")
ggplot(Train_data, aes(x= Train_data$account.length, y= Train_data$number.vmail.messages))+geom_point(aes(colour=Train_data$Churn), size=2)+
theme_bw()+ggtitle("main")+ theme(text=element_text(size = 10))+scale_shape_discrete(name="international.plan")+
scale_color_discrete(name="Churn")
ggplot(Train_data, aes(x= Train_data$total.intl.minutes, y= Train_data$total.intl.calls))+geom_point(aes(colour=Train_data$Churn), size=2)+
theme_bw()+ggtitle("main")+ theme(text=element_text(size = 10))+scale_shape_discrete(name="international.plan")+
scale_color_discrete(name="Churn")
ggplot(Train_data, aes(x= Train_data$area.code, y= Train_data$number.vmail.messages))+geom_point(aes(colour=Train_data$Churn), size=2)+
theme_bw()+ggtitle("main")+ theme(text=element_text(size = 10))+scale_shape_discrete(name="international.plan")+
scale_color_discrete(name="Churn")
ggplot(Train_data, aes(x= Train_data$total.eve.minutes, y= Train_data$total.eve.calls))+geom_point(aes(colour=Train_data$Churn), size=2)+
theme_bw()+ggtitle("main")+ theme(text=element_text(size = 10))+scale_shape_discrete(name="international.plan")+
scale_color_discrete(name="Churn")
ggplot(Train_data, aes(x= Train_data$total.night.calls, y= Train_data$total.night.minutes))+geom_point(aes(colour=Train_data$Churn), size=2)+
theme_bw()+ggtitle("main")+ theme(text=element_text(size = 10))+scale_shape_discrete(name="international.plan")+
scale_color_discrete(name="Churn")
|
db6dad9d9834fc0f17e6aa5fd82c7d395c56f328 | d1c351ea97dc9442bea3b6230e31fa060e23df8b | /man/queries.redshift.Rd | aeb27b1cc30c6907c4359acfcab4db4699ecd128 | [] | no_license | zhaojkun/db.r | 59967213cbd4d9255cc2c4d6f3248e3b65ab8f8a | 1899501d609ca76faba47a04ab1a4d629d746064 | refs/heads/master | 2020-12-07T05:14:35.627805 | 2014-12-17T14:30:20 | 2014-12-17T14:30:20 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,682 | rd | queries.redshift.Rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\docType{data}
\name{queries.redshift}
\alias{queries.redshift}
\title{Queries for Redshift}
\format{\preformatted{List of 3
$ column:List of 4
..$ head : chr "select \%s from \%s limit \%d;"
..$ all : chr "select \%s from \%s;"
..$ unique: chr "select distinct \%s from \%s;"
..$ sample: chr "select \%s from \%s order by random() limit \%d;"
$ table :List of 5
..$ select: chr "select \%s from \%s;"
..$ head : chr "select * from \%s limit \%d;"
..$ all : chr "select * from \%s;"
..$ unique: chr "select distinct \%s from \%s;"
..$ sample: chr "select * from \%s order by random() limit \%d;"
$ system:List of 5
..$ schema_no_system : chr " select table_name , column_name , udt_name "| __truncated__
..$ schema_with_system : chr " select table_name , column_name , udt_name "| __truncated__
..$ foreign_keys_for_table : chr " SELECT kcu.column_name , ccu.table_name AS foreign_table_name , ccu.co"| __truncated__
..$ foreign_keys_for_column: chr " SELECT kcu.column_name , ccu.table_name AS foreign_table_name , ccu.co"| __truncated__
..$ ref_keys_for_table : chr " SELECT ccu.column_name , kcu.table_name AS foreign_table_name , kcu.co"| __truncated__
}}
\usage{
queries.redshift
}
\description{
Queries for Redshift
}
\keyword{datasets}
|
da7fb0c9f0206e4fd234ac209269533e19d7729b | 8f0431de29762061acb57e06f492d22d5ce2604f | /tests/testthat/test-gt_win_loss.R | bb2097b250e7bf709e460bf3b877c5a5a6354ac0 | [
"MIT"
] | permissive | adamkemberling/gtExtras | 2c3e1a81d5dd97666dedab710d49377a2a7572dd | 40d1e5a006fa67833a702733055c94606f8cffb7 | refs/heads/master | 2023-08-17T11:12:00.431133 | 2021-10-13T16:28:10 | 2021-10-13T16:28:10 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,708 | r | test-gt_win_loss.R | # Function to skip tests if Suggested packages not available on system
# Skip the current testthat test unless every package needed for parsing the
# rendered HTML is installed. Checked in the same order as before.
check_suggests <- function() {
  for (pkg in c("rvest", "xml2")) {
    skip_if_not_installed(pkg)
  }
}
# Renders a win/loss table twice (default "pill" markers and "square"
# markers), parses the raw HTML, and checks both the number of generated
# SVGs (one per group) and the exact geometry attributes of the markers in
# the second row. Coordinates are pinned against a fixed RNG seed.
test_that("SVG exists and has expected values", {
  check_suggests()
  # Fixed seed so the sampled win/loss sequence (and thus the SVG geometry)
  # is reproducible.
  set.seed(37)
  # Three groups of ten outcomes each; 0 = loss, 1 = win, 0.5 = tie,
  # collapsed to one list-column row per group as gt_plt_winloss() expects.
  data_in <- dplyr::tibble(
    grp = rep(c("A", "B", "C"), each = 10),
    wins = sample(c(0,1,.5), size = 30, prob = c(0.45, 0.45, 0.1), replace = TRUE)
  ) %>%
    dplyr::group_by(grp) %>%
    dplyr::summarize(wins=list(wins), .groups = "drop")
  # Render with the default marker style and re-parse the emitted HTML.
  pill_table <- data_in %>%
    gt::gt() %>%
    gt_plt_winloss(wins) %>%
    gt::as_raw_html() %>%
    rvest::read_html()
  # Same table with square markers.
  box_table <- data_in %>%
    gt::gt() %>%
    gt_plt_winloss(wins, type = "square") %>%
    gt::as_raw_html() %>%
    rvest::read_html()
  # SVG Exists and is of length 3 ----
  # One inline SVG per group row.
  pill_len <- pill_table %>%
    rvest::html_nodes("svg") %>%
    length()
  square_len <- box_table %>%
    rvest::html_nodes("svg") %>%
    length()
  expect_equal(pill_len, 3)
  expect_equal(square_len, 3)
  # SVG has specific points ----
  # Pill markers are <line> elements; their y1 coordinate encodes
  # win/tie/loss. Only the second table row is inspected.
  pill_vals <- pill_table %>%
    rvest::html_nodes("tr:nth-child(2) > td") %>%
    rvest::html_nodes("svg > g > line") %>%
    rvest::html_attrs() %>%
    lapply(function(xy) xy[['y1']]) %>%
    unlist()
  # Square markers are <polygon> elements; the first four characters of the
  # points attribute identify each marker's x position.
  square_vals <- box_table %>%
    rvest::html_nodes("tr:nth-child(2) > td") %>%
    rvest::html_nodes("svg > g > polygon") %>%
    rvest::html_attr("points") %>%
    substr(1, 4)
  expect_equal(pill_vals, c("1.89","8.91","1.89","1.89","6.10",
                            "8.91","8.91","1.89","8.91","8.91"))
  expect_equal(square_vals, c("3.26","6.72","10.1","13.6","17.1",
                              "20.5","24.0","27.5","30.9","34.4"))
})
|
112f429c7eab69168a243e28cd079897e4908654 | 8f94ccd8d3aed33b418cb9639dc64a159931ae4e | /R/transform-helper-functions.R | d57bc37ffa97443d77d9c2a166019ca473c6b686 | [] | no_license | cran/scan | 8c9d9b2dc44bbb8c339be3795a62bb5c49be87b0 | 860599c21c4c5e37746fa8e6234c6f6cc8028070 | refs/heads/master | 2023-08-22T17:47:22.450439 | 2023-08-08T13:00:02 | 2023-08-08T14:31:36 | 70,917,562 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,054 | r | transform-helper-functions.R | #' @rdname transform.scdf
#' @param lag Number of values surrounding a value to calculate the average
#' @export
moving_median <- function(x, lag = 1) {
  # Rolling median: delegate to the shared windowing engine with median()
  # as the summary statistic.
  .moving_average(x, lag, median)
}
#' @rdname transform.scdf
#' @param lag Number of values surrounding a value to calculate the average
#' @export
moving_mean <- function(x, lag = 1) {
  # Rolling mean: same windowing engine, with mean() as the statistic.
  .moving_average(x, lag, mean)
}
#' @rdname transform.scdf
#' @param f the proportion of surrounding data influencing each data point.
#' @param mt A vector with measurement times.
#' @export
local_regression <- function(x, mt = 1:length(x), f = 0.2) {
  # Lowess smoother of x against measurement times; only the fitted values
  # are returned. NOTE(review): lowess() orders its output by increasing mt,
  # so the result lines up with the input only when mt is sorted -- confirm
  # callers always pass sorted measurement times.
  smooth_fit <- lowess(x ~ mt, f = f)
  smooth_fit$y
}
#' @export
#' @rdname transform.scdf
#' @param x A logical vector.
#' @param positions A numeric vector with relative positions to the first
#' appearance of a TRUE value in x.
first_of <- function(x, positions = 0) {
  # Index of the first TRUE in x, shifted by each offset in positions.
  # When x contains no TRUE, the index (and hence the result) is NA.
  first_true <- which(x)[1L]
  first_true + positions
}
#' @export
#' @rdname transform.scdf
across_cases <- function(...) {
  # Intentionally empty body (returns NULL). Presumably this function only
  # acts as a marker recognised by name inside other calls (the roxygen
  # @rdname points at transform.scdf) -- TODO confirm against the callers.
}
#' @export
#' @rdname transform.scdf
all_cases <- function(...) {
  # Intentionally empty body (returns NULL); a marker function like
  # across_cases(), presumably detected by name elsewhere -- TODO confirm.
}
|
edf0cc96c8d5865c9de31b343c830b1f12a3d71a | a1ddcadac53260794a3198e6e614b3427bcae19a | /demo/MA(2).R | 9cfa8d0ca61c9392edc749aea3816b1c03ff3dfd | [] | no_license | xiaoyulu2014/ABCpack | 2b2b3ce54d3713a8e4b2ae9bc4f90d9cfbb28444 | a536410320cee3e94b6de23649c9e404af5c3d14 | refs/heads/master | 2020-05-18T18:03:11.675068 | 2015-02-06T12:47:38 | 2015-02-06T12:47:38 | 30,258,157 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,727 | r | MA(2).R |
######################################################
##data generation MA(2)
n = 100
q = 2
theta = c(0.6,0.2)
u = rnorm(n+q, 0, 1) #shifts the indices by q
y = c()
for (k in 1:n) {
y[k] = u[k+q] + t(theta) %*% u[(k-1+q):k]
}
##define prior and likelihood sampler
prior = function() {
a = runif(2)
x = c(-2*sqrt(a[1]) + 4*a[2]*sqrt(a[1]),2*sqrt(a[1])-1)
return(x)
}
f = function(x) {
z = c()
u = rnorm(n+q, 0, 1)
for (k in 1:n) {
z[k] = u[k+q] + t(x) %*% u[(k-1+q):k]
}
return(z)
}
###evaluate in parallel
require(doParallel)
require(plyr)
require(doSNOW)
#registerDoSNOW(makeCluster(2, type="SOCK"))
registerDoParallel()
getDoParWorkers()
getDoParName()
getDoParVersion()
time<-system.time({
theta_raw <- foreach(icount(100),.combine=rbind) %dopar%
ABC(1000,0.01,function(y,z) sqrt(sum((y-z)^2)) ,function(x) x,prior,f)
theta_auto <- foreach(icount(100),.combine=rbind) %dopar%
ABC(1000,0.01,function(y,z) sqrt(sum((y-z)^2)) ,function(x) c(x[q:n]%*%x[1:(n-q+1)],x[(q+1):n]%*%x[1:(n-q)]),prior,f)
})
par(mfrow=c(1,2))
hist(theta_raw[,1])
hist(theta_raw[,2])
par(mfrow=c(1,2))
hist(theta_auto[,1])
hist(theta_auto[,2])
par(mfrow=c(1,2))
plot.new()
plot.window(xlim=c(-2,2),ylim=c(-1,1))
polygon(c(-2,0,2),c(1,-1,1),col="yellow")
axis(1)
axis(2)
points(theta_raw[,1],theta_auto[,2],col=2)
points(0.6,0.2,lwd=2)
title("sampled particles based on raw distance")
plot.new()
plot.window(xlim=c(-2,2),ylim=c(-1,1))
polygon(c(-2,0,2),c(1,-1,1),col="yellow")
axis(1)
axis(2)
points(theta_auto[,1],theta_auto[,2],col=2)
points(0.6,0.2,lwd=2)
title("sampled particles based on auto distance")
time
|
709d6fe5a6edbb5ee1b434aed121749a68bf0e8e | 4c311a95facb2b74a65329b9fffac402888d914f | /data_science_capstone/model_builder.R | 23c296539171029a45b734c4d6eff1df56fd49ad | [] | no_license | ttirm/Coursera_DScience | 5a4115e0e8138501590f87bca931c978717e397a | 05d72ee36f9a15c2084da6d67202a8cd551fbe55 | refs/heads/master | 2021-01-16T23:09:59.660515 | 2016-10-29T16:26:54 | 2016-10-29T16:26:54 | 72,292,251 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,437 | r | model_builder.R |
# Load the necessary libraries
library(plyr)
library(tm)
library(RWeka)
library(NLP)
library(openNLP)
library(magrittr)
library(openNLPmodels.en);
library(SnowballC)
library(slam)
start <- "sstrtss"
# Bad words list
#################################
url <- 'https://gist.githubusercontent.com/ryanlewis/a37739d710ccdb4b406d/raw/0fbd315eb2900bb736609ea894b9bde8217b991a/google_twunter_lol'
badwords <-read.csv(url, header = FALSE)
# Clean corpus
# This function removes pontuation, numbers, badwords,
# it also introduces a start mark on each sentence
#############################################################
# Clean a character vector of sentences for n-gram modelling: expand common
# English contractions, then build a tm Corpus with punctuation and numbers
# removed, text lower-cased, profanity filtered out, the sentence-start token
# (global `start`, "sstrtss") rewritten as "<s>", and whitespace collapsed.
# Returns the cleaned tm Corpus.
# NOTE(review): the `name` argument is never used, and the numbered print()
# calls are progress markers left in from debugging. Stopword removal and
# stemming are intentionally commented out below.
clean_sent <- function(j, name){
print("00")
# Contraction expansion; order matters ("[Ii]t's" must run before the
# generic "'s " stripper).
j <- gsub("[Ii]t's ", "it is ",j)
j <- gsub("'s ", " ",j)
j <- gsub("'ve ", " have ",j)
j <- gsub("'u ", " you ",j)
j <- gsub("'r ", " are ",j)
j <- gsub("n't", " not",j)
print("01")
j <- gsub("'d ", " would ",j)
j <- gsub("'ll ", " will ",j)
j <- gsub("[Nn]'t", " not",j)
j <- gsub("'m ", " am ",j)
j <- gsub(" 'n ", " ",j)
# Capitalise the pronoun "i". NOTE(review): the "^i " alternative replaces
# with " I ", which inserts a leading space at sentence starts.
j <- gsub("^i | i ", " I ",j)
j <- gsub(" r ", " are ",j)
print("1")
corp <- Corpus(VectorSource(j))
print("2")
corp <- tm_map(corp, PlainTextDocument)
print("3")
corp <- tm_map(corp, removePunctuation)
print("4")
corp <- tm_map(corp, removeNumbers)
print("5")
corp <- tm_map(corp, content_transformer(tolower))
print("6")
# Profanity filter; `badwords` is downloaded at the top of this script.
corp <- tm_map(corp, removeWords, badwords$V1)
print("7")
#corp <- tm_map(corp, removeWords, stopwords("english"))
print("8")
# Rewrite the sentence-start marker (punctuation removal has already turned
# "<s>"-style tags into the bare `start` token) as the literal "<s>".
corp <- tm_map(corp, content_transformer( function(x) gsub(paste0("^", start), "<s>", x)))
print("9")
corp <- tm_map(corp, stripWhitespace)
print("10")
#corp_copy <- corp
#corp <- tm_map(corp, stemDocument, language = "english")
corp
}
# Load the raw training sentences and take the first million.
load("./files/train1.Rda")
w <- train[1:1000000]
# Remove sentences with less than three words
# (word count is approximated as number-of-non-word-separators + 1).
w <- w[sapply(gregexpr("\\W+", w), length) + 1 > 2]
# NOTE(review): clean_sent() declares a second argument `name` with no
# default; this call only works because `name` is never evaluated.
res <- clean_sent(w)
save(res, file = "./files/res_new_1.Rda")
load("./files/res_new_1.Rda")
# Create 3-Grams
TrigramTokenizer <- function(x) NGramTokenizer(x, Weka_control(min = 3, max = 3))
dtm <- DocumentTermMatrix(res, control = list(tokenize = TrigramTokenizer,
wordLengths=c(6, Inf),
bounds=list(global=c(2, Inf))))
print("11")
#dtm1 <- removeSparseTerms(dtm, 0.999)
print("12")
freq1 <- colapply_simple_triplet_matrix(dtm,sum)
print("13")
freq <- sort(freq1, decreasing = TRUE)
print("14")
words <- as.character(names(freq))
tri_freq <- data.frame(name = words, count = freq)
tri_tot <- tri_freq
# Save de the frequency table
write.table(tri_tot,file=paste0("./freq/tri_freq_","train",".txt"))
rm(tri_tot)
rm(tri_freq)
gc()
# Create 2-Grams
BigramTokenizer <- function(x) NGramTokenizer(x, Weka_control(min = 2, max = 2))
dtm <- DocumentTermMatrix(res, control = list(tokenize = BigramTokenizer,
wordLengths=c(3, Inf),
bounds=list(global=c(2, Inf))))
print("11")
#dtm1 <- removeSparseTerms(dtm, 0.999)
print("12")
freq1 <- colapply_simple_triplet_matrix(dtm,sum)
print("13")
freq <- sort(freq1, decreasing = TRUE)
print("14")
words <- as.character(names(freq))
bi_freq <- data.frame(name = words, count = freq)
bi_tot <- bi_freq
# Save the frequency table
write.table(bi_tot,file=paste0("./freq/bi_freq_","train",".txt"))
rm(bi_tot)
rm(bi_freq)
gc()
# Create 1-Grams
UnigramTokenizer <- function(x) NGramTokenizer(x, Weka_control(min = 1, max = 1))
dtm <- DocumentTermMatrix(res, control = list(tokenize = UnigramTokenizer, wordLengths=c(1,Inf),
bounds=list(global=c(2, Inf))))
print("11")
#dtm1 <- removeSparseTerms(dtm, 0.999)
print("12")
freq1 <- colapply_simple_triplet_matrix(dtm,sum)
print("13")
freq <- sort(freq1, decreasing = TRUE)
print("14")
words <- as.character(names(freq))
uni_freq <- data.frame(name = words, count = freq)
uni_tot <- uni_freq
# Save the frequency table
write.table(uni_tot,file=paste0("./freq/uni_freq_","train",".txt"))
rm(uni_tot)
rm(uni_freq)
gc()
# Create 4-Grams
QuadgramTokenizer <- function(x) NGramTokenizer(x, Weka_control(min = 4, max = 4))
dtm <- DocumentTermMatrix(res, control = list(tokenize = QuadgramTokenizer,
wordLengths=c(8, Inf),
bounds=list(global=c(2, Inf))))
print("11")
#dtm1 <- removeSparseTerms(dtm, 0.999)
print("12")
freq1 <- colapply_simple_triplet_matrix(dtm,sum)
print("13")
freq <- sort(freq1, decreasing = TRUE)
print("14")
words <- as.character(names(freq))
quad_freq <- data.frame(name = words, count = freq)
quad_tot <- quad_freq
# Save de the frequency table
write.table(quad_tot,file=paste0("./freq/quad_freq_","train",".txt"))
rm(quad_tot)
rm(quad_freq)
gc()
#load("./files/tri_all.Rda")
quad_all <- read.table(file="./freq/quad_freq_train.txt")
nrow(quad_all)
colnames(quad_all) <- c("word", "freq")
#quad_all$freq <- sapply(quad_all$freq, function(x){y <- x - 1})
#quad_all <- quad_all[quad_all$freq > 0,]
quad_all$first <- sapply(strsplit(as.character(quad_all$word), " "), "[", 1)
quad_all$sec <- sapply(strsplit(as.character(quad_all$word), " "), "[", 2)
quad_all$tri <- sapply(strsplit(as.character(quad_all$word), " "), "[", 3)
quad_all$quad <- sapply(strsplit(as.character(quad_all$word), " "), "[", 4)
quad_all$discount <- 1
quad_ind <- vector(length = 5)
k <- 5
for(i in 1:5){
quad_ind[i] <- ((i+1)*length(quad_all$freq[quad_all$freq == (i+1)])/(length(quad_all$freq[quad_all$freq == i]))
-(i*(k+1)*length(quad_all$freq[quad_all$freq == (k+1)])/(length(quad_all$freq[quad_all$freq == 1]))))/
(1-((k+1)*length(quad_all$freq[quad_all$freq == (k+1)])/(length(quad_all$freq[quad_all$freq == 1]))))/i
}
for(i in 1:5){
quad_all[quad_all$freq == i , "discount"] <- quad_ind[i]
}
quad_all <- quad_all[order(-quad_all$freq),]
tri_all <- read.table(file="./freq/tri_freq_train.txt")
colnames(tri_all) <- c("word", "freq")
#tri_all$freq <- sapply(tri_all$freq, function(x){y <- x - 1})
#tri_all <- tri_all[tri_all$freq > 0,]
tri_all$first <- sapply(strsplit(as.character(tri_all$word), " "), "[", 1)
tri_all$sec <- sapply(strsplit(as.character(tri_all$word), " "), "[", 2)
tri_all$tri <- sapply(strsplit(as.character(tri_all$word), " "), "[", 3)
tri_all$discount <- 1
bi_all <- read.table(file="./freq/bi_freq_train.txt")
colnames(bi_all) <- c("word", "freq")
#bi_all$freq <- sapply(bi_all$freq, function(x){y <- x - 1})
#bi_all <- bi_all[bi_all$freq > 0,]
nrow(bi_all)
length(bi_all[bi_all$freq == 1, "freq"])
bi_all$first <- sapply(strsplit(as.character(bi_all$word), " "), "[", 1)
bi_all$sec <- sapply(strsplit(as.character(bi_all$word), " "), "[", 2)
bi_all$discount <- 1
uni_all <- read.table(file="./freq/uni_freq_train.txt")
colnames(uni_all) <- c("word", "freq")
bi_ind <- vector(length = 5)
tri_ind <- vector(length = 5)
k <- 5
for(i in 1:5){
# bi_ind[i] <- (i+1)*length(bi_freq[bi_freq == (i+1)])/(length(bi_freq[bi_freq == i]))
bi_ind[i] <- ((i+1)*length(bi_all$freq[bi_all$freq == (i+1)])/(length(bi_all$freq[bi_all$freq == i]))
-(i*(k+1)*length(bi_all$freq[bi_all$freq == (k+1)])/(length(bi_all$freq[bi_all$freq == 1]))))/
(1-((k+1)*length(bi_all$freq[bi_all$freq == (k+1)])/(length(bi_all$freq[bi_all$freq == 1]))))/i
#tri_ind[i] <- (i+1)*length(tri_freq[tri_freq == (i+1)])/(length(tri_freq[tri_freq == i]))
tri_ind[i] <- ((i+1)*length(tri_all$freq[tri_all$freq == (i+1)])/(length(tri_all$freq[tri_all$freq == i]))
-(i*(k+1)*length(tri_all$freq[tri_all$freq == (k+1)])/(length(tri_all$freq[tri_all$freq == 1]))))/
(1-((k+1)*length(tri_all$freq[tri_all$freq == (k+1)])/(length(tri_all$freq[tri_all$freq == 1]))))/i
}
for(i in 1:5){
bi_all[bi_all$freq == i , "discount"] <- bi_ind[i]
tri_all[tri_all$freq == i , "discount"] <- tri_ind[i]
}
tri_all <- tri_all[order(-tri_all$freq),]
bi_all <- bi_all[order(-bi_all$freq),]
uni_all <- uni_all[order(-uni_all$freq),]
save(tri_all, file = "./files/tri_all_newA1.Rda")
save(bi_all, file = "./files/bi_all_newA1.Rda")
save(uni_all, file = "./files/uni_all_newA1.Rda")
save(quad_all, file = "./files/quad_all_newA1.Rda")
|
45c2860c91e7e571c1e8e2ffbd2b8d3e69ec17d2 | 62e1665efcbd67bc0de0d9be749d5d2b222c80ce | /R/dipfix.R | 1bd397ca05d54ccf03c11678abe5e8f7cc899a68 | [] | no_license | sewouter/StratigrapheR | 25e669143eeb73051e79e0b4cb490e6060ed0d4b | 2d19b6cc5dbbb4bade454ad83b61842d2f8871e1 | refs/heads/main | 2021-09-28T00:26:51.110494 | 2021-09-24T12:23:45 | 2021-09-24T12:23:45 | 341,558,856 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,121 | r | dipfix.R | #' Fix Dip
#'
#' @description Fix dip and strike of planes so that they fall in the correct
#' quadrant. The provided quadrant is the determining factor. If unavailable or
#' not helpful, the sign of the dip is used as determining factor.
#'
#' @param strike strike of the data; it is the angle from the north of
#' the horizontal line of the plane. Corrected, its range goes from 0° to 360°.
#' @param dip dip of the data; it is the angle from the horizontal taken
#' on the line of the plane perpendicular to the one of the strike. In other
#' words it is the plane's maximum angular deviation from the horizontal.
#' It is positive downward, and ranges from +90° for straight down to -90° for
#' straight up. Dip values in [-180,-90] or/and ]90,180] indicate inversion of
#' the plane.
#' @param quadrant the quadrant where the plane dips downward. Accepted
#' values are NA, 'N', 'S', 'W' or 'E' (lower- or uppercase alike). Is
#' independant of inversion
#' @param inverted whether the plane is upside down.
#' @details the strike will be corrected as the orientation of the dip (i.e.
#' downward) minus 90°; it ranges from 0 to 360°. It is determined firstly from
#' the quadrant. If the quadrant is missing or not helpful (e.g. 'N' or 'S' for
#' a strike of 0° or 180°, 'E' or 'W' for a strike of 90° or 270°), it is
#' determined using the sign of the dip. Inversion will be indicated if the dip
#' values are in [-180,-90] or/and ]90,180], or simply if inverted = T. The
#' inversion does not influence the calculation of the strike, dip and quadrant:
#' whether the plane is upside down does not change these parameters output.
#' @return a list of the corrected strike, dip and quadrant
#' @seealso \code{\link{fmod}}, \code{\link{incfix}} and
#' \code{\link{transphere}}
#' @examples
#' strike <- c(-60, 180,20,0,20)
#' dip <- c(-60,20,-45,110,-90)
#' quadrant <- c("N",NA,NA,NA,"E")
#' inverted <- c(FALSE,TRUE,FALSE,TRUE,FALSE)
#'
#' dipfix(strike,dip,quadrant,inverted)
#'
#' dipfix(strike,dip,quadrant)
#'
#' @export
# Normalise strike/dip/quadrant/inversion data; see the roxygen block above
# for the full contract. Returns list(strike, dip, quadrant, inverted).
# Fix: the two `class(inverted) == "logical"` tests are replaced with
# is.logical(inverted). class() may return a vector of length > 1 for
# classed objects, which makes the `if` condition non-scalar (an error since
# R 4.2); is.logical() is always a scalar and is the idiomatic type test.
dipfix <- function(strike, dip, quadrant = NA, inverted = NA)
{
l <- length(strike)
if(l != length(dip)) stop("strike and dip should be of length n")
# A single NA quadrant means "unknown for all samples".
if(is.na(quadrant[[1]]) & length(quadrant) == 1){
q <- rep(NA,l)
} else {
q <- quadrant
}
if(l != length(q)) stop("quadrant should be of length n or should just be NA")
li <- length(inverted)
if(!((li == 1 & is.na(inverted[[1]])) |
(li == l & is.logical(inverted)))){
stop(paste("The 'inverted' parameter should be NA or a logical of ",
"same length than the other parameters", sep = ""))
}
# Accept lowercase quadrant letters.
if(isTRUE(any(q == "n"))) q[q == "n"] <- "N"
if(isTRUE(any(q == "s"))) q[q == "s"] <- "S"
if(isTRUE(any(q == "w"))) q[q == "w"] <- "W"
if(isTRUE(any(q == "e"))) q[q == "e"] <- "E"
if(any(!(q == "N" | q == "S" | q == "W" | q == "E" | is.na(q)))){
stop(paste("Invalid quadrant values (should be NA, ",
"'N', 'S', 'W' or 'E' (lower- or uppercase alike)",
sep = ""))
}
# Fold the strike into [0,180) (line orientation) and [0,360) (direction);
# fold the dip into ]-90,90].
s <- fmod(strike, 180)
s2 <- fmod(strike, 360)
d <- fmod(dip,90,-90,bounds = "]]")
# Classify the strike line so the quadrant can disambiguate its direction.
lin <- rep(NA, length(s))
lin[s == 0 | s == 180] <- "N/S"
lin[s == 90] <- "W/E"
lin[s > 0 & s < 90] <- "NE/SW"
lin[s > 90 & s < 180] <- "SE/NW"
# A quadrant parallel to the strike line cannot disambiguate it; drop it.
non.help <- (lin == "N/S" & !is.na(q) & (q == "N" | q == "S")) |
(lin == "W/E" & !is.na(q) & (q == "W" | q == "E"))
if(any(non.help)){
warning(paste("Non helping quadrant values (N or S for a strike of 0 or ",
"180, E or W for a strike of 90 or 270)", sep = ""))
q[non.help] <- NA
}
dat <- data.frame(s,d,q,lin)
# Correction (0 or 180) to apply to the folded strike, chosen from the
# line class and the dip quadrant.
cor <- rep(NA, l)
cor[lin == "NE/SW" & (q == "S" | q == "E")] <- 0
cor[lin == "NE/SW" & (q == "N" | q == "W")] <- 180
cor[lin == "SE/NW" & (q == "N" | q == "E")] <- 180
cor[lin == "SE/NW" & (q == "S" | q == "W")] <- 0
cor[lin == "N/S" & q == "E"] <- 0
cor[lin == "N/S" & q == "W"] <- 180
cor[lin == "W/E" & q == "N"] <- 0
cor[lin == "W/E" & q == "S"] <- 180
c1 <- s + cor
qexist <- !is.na(q)
# Without a quadrant, a negative dip flips the strike direction by 180.
qabsent <- is.na(q) & (d < 0 | dip == -90)
s2[qexist] <- c1[qexist]
s2[qabsent] <- fmod(s2[qabsent] + 180, 360)
res <- list()
res$strike <- s2
res$dip <- abs(d)
# Recompute the quadrant from the corrected strike (dip direction = strike
# + 90, so e.g. strikes around 90 dip towards the south).
nq <- rep(NA, length(s))
nq[res$strike >= 45 & res$strike <= 135] <- "S"
nq[res$strike > 135 & res$strike < 225] <- "W"
nq[res$strike >= 225 & res$strike <= 315] <- "N"
nq[res$strike > 315 | res$strike < 45] <- "E"
res$quadrant <- nq
# Dips folded into ]90,180] or [-180,-90] indicate an upside-down plane.
tdip <- fmod(dip,180,-180)
out <- tdip > 90 | tdip <= -90
if (li == l & is.logical(inverted)) {
inv <- inverted
if(any(!inv[out])){
# NOTE(review): `sep = ""` is not an argument of warning(); it is
# harmlessly appended to the message parts.
warning("Samples having dip values in [-180,-90] or/and ]90,180]",
" or equivalent are filled out as not inverted (inverted = F)",
", which is contradictory. By default they will be considered ",
"as inverted (inverted = T).", sep = "")
}
inv[out] <- T
} else {
inv <- rep(F, length(s))
inv[out] <- T
}
res$inverted <- inv
return(res)
}
|
6898655052159234b48d602aaee26a74552b2ed5 | fffd71a82402b0d147ef1aeadce101f047ebe7fc | /shiny/thermoclineAdjustment/functions.R | b935c3393aba8b1512416798a35c1261c9b2093d | [] | no_license | ferag/DataManagement | b8cb57f18eaabe241a68b1a7435a7009755e3a45 | 49a1fbd36d7b9c51ef951a92d99d4ee880d65d99 | refs/heads/master | 2020-12-02T08:15:01.784829 | 2017-07-11T09:15:09 | 2017-07-11T09:15:09 | 96,793,178 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,098 | r | functions.R | library("RMySQL")
# Fetch the start dates of all profiles from 2014 in the `processed` MySQL
# database and return them as a vector.
# NOTE(review): credentials and host are hard coded here; move them to a
# configuration file or environment variables.
getProfile<- function()
{
db='processed'
query='SELECT startDate, endDate from profile WHERE YEAR(startDate)=2014'
con <- dbConnect(MySQL(),user="webuser",password="webuserpass",dbname=db,host="doriiie02.ifca.es")
profileList <- dbGetQuery(con, query)
dbDisconnect(con)
# Only the first column (startDate) is returned, although endDate is also
# selected by the query.
return(profileList[,1])
}
# Epilimnion (surface-layer) temperature estimate: the mean of data$x over
# the rows where data$y > -1, i.e. depths shallower than 1 m. If no row
# qualifies, fall back to the first temperature value, matching the
# original seed behaviour.
#
# Bug fixed: the original running-sum loop initialised the accumulator with
# data$x[1] and the count with 1 before looping, so the first value was
# always included and double-counted whenever data$y[1] > -1, biasing the
# average. NA values of data$y are now skipped instead of erroring inside
# the scalar `if`.
getTe <- function(data) {
  keep <- !is.na(data$y) & data$y > -1
  if (!any(keep)) {
    return(data$x[1])
  }
  mean(data$x[keep])
}
# Fit objective (translated from the original Spanish comment): the residual
# sum of squares between observed temperatures and a van Genuchten-style
# thermocline curve
#   T(y) = Th + (Te - Th) / (1 + (alpha * (-y))^n)^(1 - 1/n)
# where Te is the epilimnion temperature (getTe), Th the minimum observed
# temperature, and par = c(alpha, n) the parameters being optimised.
min.RSS <- function(data, par) {
Te <- getTe(data)
Th <- min(data$x)
r <- with(data, sum((x-(Th+((Te-Th)/((1+((par[1]*(-y))^par[2]))^(1-1/par[2])))))^2))
return (r)
}
# Evaluate the fitted curve (translated from the original Spanish comment:
# "function to obtain the temperature on the adjusted curve, from alpha and
# n"): given par = c(alpha, n), e.g. from optimising min.RSS, return the
# modelled temperature at every depth in data$y.
mod.RSS <- function(data, par) {
Te <- getTe(data)
Th <- min(data$x)
with(data, (Th+((Te-Th)/((1+((par[1]*(-y))^par[2]))^(1-1/par[2])))))
}
|
6781b84530f3aa78bf33487c87b0cbb412b53fa5 | 28e4fe8e74911e8adb9c852a3640a4cc3d988ab1 | /R_scripts/test_fanos.R | c7446c617273344e77de720dbc8b852668e92dc8 | [] | no_license | MikeDMorgan/flow_pipeline | 09a7481d9e85cb5248891f59f2c507750d1b1d2a | d551cb53748d87013fe34b9a55dacd813e00589d | refs/heads/master | 2021-01-15T10:07:20.759454 | 2016-10-31T14:49:51 | 2016-10-31T14:49:51 | 41,031,501 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,213 | r | test_fanos.R | library(ggplot2)
library(reshape2)
library(ICC)
source('/ifs/devel/projects/proj052/flow_pipeline/R_scripts/multiplot.R')
# one file - test
df1 <- read.table("/ifs/projects/proj052/pipeline_proj052/twin_files.dir/CD4_Tcell_fano-Aq.CD45RA.CD3-P2",
h=T, stringsAsFactors=F, sep="\t", row.names=1)
df2 <- read.table("/ifs/projects/proj052/pipeline_proj052/twin_files.dir/CD4_Tcell_mean-Aq.CD45RA.CD3-P2",
h=T, stringsAsFactors=F, sep="\t", row.names=1)
# remove Aq data
df1 <- subset(df1, select=-which(colnames(df1) %in% c("Aq")))
df2 <- subset(df2, select=-which(colnames(df2) %in% c("Aq")))
melted1 <- melt(df1, id.vars=c("twin.id", "batch", "panel", "gate", "twin_num", "flowjo_id",
"family_id", "zygosity", "age", "replicate", "visit"))
melted2 <- melt(df2, id.vars=c("twin.id", "batch", "panel", "gate", "twin_num", "flowjo_id",
"family_id", "zygosity", "age", "replicate", "visit"))
# standardise as Z-scores for plotting?
z_scores <- list()
for(x in 1:length(unique(melted1$variable))){
mark <- unique(melted1$variable)[x]
mean.mark <- mean(melted1$value[melted1$variable == mark])
sd.mark <- sd(melted1$value[melted1$variable == mark])
z.mark <- (mean.mark - melted1$value[melted1$variable == mark])/sd.mark
z_scores[[mark]] <- z.mark
}
melted1$z.score <- unlist(z_scores)
mz1 <- subset(melted1, subset=melted1$zygosity == "MZ")
dz1 <- subset(melted1, subset=melted1$zygosity == "DZ")
mz1$family_id <- as.factor(mz1$family_id)
mz1$twin <- rep(c("t1", "t2"), dim(mz1)[1]/2)
mz1_split <- data.frame(split(mz1, f=mz1$twin))
dz1$family_id <- as.factor(dz1$family_id)
dz1$twin <- rep(c("t1", "t2"), dim(dz1)[1]/2)
dz1_split <- data.frame(split(dz1, f=dz1$twin))
mz2 <- subset(melted2, subset=melted2$zygosity == "MZ")
dz2 <- subset(melted2, subset=melted2$zygosity == "DZ")
mz2$family_id <- as.factor(mz2$family_id)
mz2$twin <- rep(c("t1", "t2"), dim(mz2)[1]/2)
mz2_split <- data.frame(split(mz2, f=mz2$twin))
dz2$family_id <- as.factor(dz2$family_id)
dz2$twin <- rep(c("t1", "t2"), dim(dz2)[1]/2)
dz2_split <- data.frame(split(dz2, f=dz2$twin))
p_mz <- ggplot(data.frame(mz1_split), aes(x=t1.z.score, y=t2.z.score, colour=t1.variable)) +
geom_point() + labs(title="Monozygotic twin correlation") + facet_wrap(~t1.variable)
p_dz <- ggplot(data.frame(dz1_split), aes(x=t1.z.score, y=t2.z.score, colour=t2.variable)) +
geom_point() + labs(title="Dizygotic twin correlation") + facet_wrap(~t1.variable)
multiplot(p_mz, p_dz)
# iteratively calculate heritabilities
markers <- unique(as.character(melted1$variable))
h2_list1 = list()
for(i in 1:length(markers)){
m_mz <- mz1[mz1$variable == markers[i],]
m_dz <- dz1[dz1$variable == markers[i],]
mz_icc <- ICCest(family_id, value, m_mz)
dz_icc <- ICCest(family_id, value, m_dz)
h2 <- 2 * (mz_icc$ICC - dz_icc$ICC)
h2_list1[[markers[i]]] <- h2
h2.df1 <- data.frame(t(data.frame(h2_list1)))
h2.df1$marker <- rownames(h2.df1)
colnames(h2.df1) <- c("H2", "marker")
}
h2_list2 = list()
for(i in 1:length(markers)){
m_mz <- mz2[mz2$variable == markers[i],]
m_dz <- dz2[dz2$variable == markers[i],]
mz_icc <- ICCest(family_id, value, m_mz)
dz_icc <- ICCest(family_id, value, m_dz)
h2 <- 2 * (mz_icc$ICC - dz_icc$ICC)
h2_list2[[markers[i]]] <- h2
h2.df2 <- data.frame(t(data.frame(h2_list2)))
h2.df2$marker <- rownames(h2.df2)
colnames(h2.df2) <- c("H2", "marker")
}
p1_h2 <- ggplot(h2.df1, aes(x=marker, y=H2)) + geom_bar(stat="identity") + theme_bw() +
labs(title="H2 of gene expression noise")
p2_h2 <- ggplot(h2.df2, aes(x=marker, y=H2)) + geom_bar(stat="identity") + theme_bw() +
labs(title="H2 of mean gene expression")
multiplot(p1_h2, p2_h2)
h2.merge <- merge(h2.df1, h2.df2, "marker")
colnames(h2.merge) <- c("marker", "H2.fano", "H2.mean")
p_h2merge <- ggplot(h2.merge, aes(x=H2.fano, y=H2.mean, colour=marker)) + geom_point(size=4) +
theme_bw() + xlim(-1.0, 1.0) + ylim(-1.0, 1.0) +
labs(title="Heritability of gene expression noise vs. mean gene expression CD4 Tcell Aq.CD45RA.CD3 ")
print(p_h2merge)
ggsave(file="h2-fano_vs_mean-CD8_Tcell_AqCD45RACD3.png", p_h2merge) |
50942d0320b0d9cea91fd1ffc2db30280e852bad | 84952d1e47391c3f40c652d31cd3d0299b6a168a | /man/Sequence_Duplication_Levels.Rd | c16271d84b5c0c3a9aee8840d601242101a05301 | [] | no_license | AngelikaKritz/ngsReports | 476e3254391d3f94e6335f95b6ba1c5f1ae3113c | 14c8052ff2518dc3f83ed62208837476595d07ab | refs/heads/master | 2020-04-28T22:59:16.813890 | 2019-02-05T16:56:24 | 2019-02-05T16:56:24 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,597 | rd | Sequence_Duplication_Levels.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Sequence_Duplication_Levels.R
\docType{methods}
\name{Sequence_Duplication_Levels,FastqcData-method}
\alias{Sequence_Duplication_Levels,FastqcData-method}
\alias{Sequence_Duplication_Levels}
\alias{Sequence_Duplication_Levels,FastqcDataList-method}
\alias{Sequence_Duplication_Levels,FastqcFile-method}
\alias{Sequence_Duplication_Levels,FastqcFileList-method}
\alias{Sequence_Duplication_Levels,character-method}
\title{Get the Sequence Duplication Levels information}
\usage{
\S4method{Sequence_Duplication_Levels}{FastqcData}(object)
\S4method{Sequence_Duplication_Levels}{FastqcDataList}(object)
\S4method{Sequence_Duplication_Levels}{FastqcFile}(object)
\S4method{Sequence_Duplication_Levels}{FastqcFileList}(object)
\S4method{Sequence_Duplication_Levels}{character}(object)
}
\arguments{
\item{object}{Can be a \code{FastqcFile}, \code{FastqcFileList},
\code{FastqcData}, \code{fastqcDataList}, or simply a \code{character}
vector of paths to fastqc files}
}
\value{
A single \code{tibble} containing all information combined from all
supplied FastQC reports
}
\description{
Retrieve the Sequence Duplication Levels module from one or
more FastQC reports
}
\examples{
# Get the files included with the package
packageDir <- system.file("extdata", package = "ngsReports")
fileList <- list.files(packageDir, pattern = "fastqc.zip", full.names = TRUE)
# Load the FASTQC data as a FastqcDataList object
fdl <- getFastqcData(fileList)
# Print the Sequence Duplication Levels
Sequence_Duplication_Levels(fdl)
}
|
73c967d1de61af1b6f033c39c68b3b45f7051f9d | 4a58c1124aab594575ca5c87a58e451056f96008 | /gnmDayCourse.R | 275bc9fb22c58c1769766faf612f307ed392726f | [] | no_license | hkejigu/gnm-day-course | c2bd4efa8efc28f7b0265c439915b027f7875fb4 | eb77c995c0ea1d1ef5dcc0b5abbdb81ca7f72762 | refs/heads/master | 2020-04-13T19:58:37.109656 | 2018-06-20T15:34:14 | 2018-06-20T15:34:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,576 | r | gnmDayCourse.R | ## ----setup, include = FALSE----------------------------------------------
library(knitr)
opts_chunk$set(fig.path = 'figure/beamer-', fig.align = 'center',
fig.show = 'hold', size = 'footnotesize')
## markup inline code http://stackoverflow.com/a/16406120/173755
knit_hooks$set(inline = function(x) {
if (is.numeric(x)) return(knitr:::format_sci(x, 'latex'))
highr:::hi_latex(x)
})
# make the printing fit on the page
options(width = 70, digits = 3, show.signif.stars=FALSE)
par(mar = c(4, 4, .1, .1)) # reduce space above/below plots
set.seed(1121) # make the results repeatable
library(gnm)
library(logmult)
# Slide-only (eval = FALSE) chunks illustrating log-linear model formulae
# for two-way tables: independence, quasi-independence, quasi-symmetry
# and symmetry.
## ----glm, eval = FALSE---------------------------------------------------
## glm(y ~ row + col, family = poisson)
## ----quasiIndep, eval = FALSE--------------------------------------------
## y ~ row + col + Diag(row, col)
## ----quasiSymm, eval = FALSE---------------------------------------------
## y ~ row + col + Symm(row, col)
## ----Symm, eval = FALSE--------------------------------------------------
## y ~ Symm(row, col)
# Homogeneous row-column association (RC) model for the occupationalStatus
# table shipped with gnm; getContrasts() gives identified score contrasts.
## ----RChomog, tidy = FALSE-----------------------------------------------
RCh <- gnm(Freq ~ origin + destination + Diag(origin, destination) +
               MultHomog(origin, destination), family = poisson,
           data = occupationalStatus, verbose = FALSE)
getContrasts(RCh, pickCoef(RCh, "MultHomog"))
## ----getContrasts--------------------------------------------------------
getContrasts(RCh, pickCoef(RCh, "MultHomog"), ref = "last")
# RC(1) association model for the mentalHealth data, with row/column
# scores normalised using the marginal probabilities as weights.
## ----mentalHealth--------------------------------------------------------
xtabs(count ~ SES + MHS, mentalHealth)
## ----trtContr------------------------------------------------------------
mentalHealth$MHS <- C(mentalHealth$MHS, treatment)
mentalHealth$SES <- C(mentalHealth$SES, treatment)
## ----RC------------------------------------------------------------------
RC <- gnm(count ~ SES + MHS + Mult(SES, MHS), family = poisson,
          data = mentalHealth, verbose = FALSE, ofInterest = "Mult")
coef(RC)
## ----colScores-----------------------------------------------------------
colProbs <- with(mentalHealth, tapply(count, MHS, sum) / sum(count))
colScores <- getContrasts(RC, pickCoef(RC, "[.]MHS"), ref = colProbs,
                          scaleRef = colProbs, scaleWeights = colProbs)
colScores
## ----rowScores-----------------------------------------------------------
rowProbs <- with(mentalHealth, tapply(count, SES, sum) / sum(count))
rowScores <- getContrasts(RC, pickCoef(RC, "[.]SES"), ref = rowProbs,
                          scaleRef = rowProbs, scaleWeights = rowProbs)
# Intrinsic association coefficient computed from the weighted spread of
# the estimated row (phi) and column (psi) scores.
## ----assoc---------------------------------------------------------------
phi <- pickCoef(RC, "[.]SES", value = TRUE)
psi <- pickCoef(RC, "[.]MHS", value = TRUE)
sqrt(sum(rowProbs*(phi - sum(rowProbs*phi))^2)) *
    sqrt(sum(colProbs*(psi - sum(colProbs*psi))^2))
## ----RC2-----------------------------------------------------------------
RC2 <- update(RC, count ~ SES + MHS + instances(Mult(SES, MHS), 2))
# Same models refitted with logmult::rc(), which also provides jackknife
# standard errors and the anoas() sequence of association models.
## ----MHtab---------------------------------------------------------------
MHtab <- xtabs(count ~ SES + MHS, data = mentalHealth)
rc(MHtab, verbose = FALSE)
## ----RC_update, results = "hide", fig.show = "hide"----------------------
RC <- rc(MHtab, se = "jackknife", verbose = FALSE, ncpus = 1)
plot(RC, conf.int = 0.95)
## ----RC2_update, results = "hide", fig.show = "hide"---------------------
RC2 <- rc(MHtab, nd = 2, se = "jackknife", verbose = FALSE, ncpus = 1)
plot(RC2, conf.int = 0.95)
## ----anoas---------------------------------------------------------------
anoas(MHtab, nd=2, verbose = FALSE)
# Slide-only chunks: UNIDIFF (log-multiplicative layer) model skeleton and
# helper snippets for the yaish example plots.
## ----unidiffGNM, eval = FALSE--------------------------------------------
## unidiff <- gnm(y ~ row:table + col:table + Mult(Exp(table), row:col),
##                family = poisson)
## ----yaish, eval = FALSE-------------------------------------------------
## yaish <- as.table(yaish[,,-7])
## yaish <- aperm(yaish, c("orig", "dest", "educ"))
## ----plotLayer, eval = FALSE---------------------------------------------
## plot(model, se.type = "se")
## ----segments, eval = FALSE----------------------------------------------
## segments(1:5+0.1, exp(conf[,1]), 1:5+0.1, exp(conf[,2]), col = "red")
# Stereotype model for the ordinal backPain data: the counts are expanded
# to one row per (case, response category) so the multinomial model can be
# fitted as a Poisson log-linear model with a per-case nuisance factor
# (eliminate = id).
## ----backPain------------------------------------------------------------
backPain[1:5,]
## ----backPainLong--------------------------------------------------------
backPainLong <- expandCategorical(backPain, "pain", group = TRUE)
head(backPainLong)
## ----stereotype----------------------------------------------------------
stereotype <- gnm(count ~ pain + Mult(pain, x1 + x2 + x3),
                  eliminate = id, family = poisson,
                  data = backPainLong, verbose = FALSE)
# The unconstrained multinomial logistic model, for comparison by anova.
## ----multLogistic--------------------------------------------------------
logistic <- gnm(count ~ pain + pain:(x1 + x2 + x3),
                eliminate = id, family = poisson, data = backPainLong)
anova(stereotype, logistic)
# Identify the stereotype model by fixing one category score to 1 and
# offsetting x1.
## ----constrainStereotype, results = "hide"-------------------------------
stereotype <- update(stereotype,
                     . ~ pain + Mult(pain, offset(x1) + x2 + x3),
                     constrain = "[.]paincomplete.relief",
                     constrainTo = 1)
## ----ofInterestsAssign, results = "hide"---------------------------------
ofInterest(stereotype) <- pickCoef(stereotype, "Mult")
## ----parameters----------------------------------------------------------
parameters(stereotype)
# Collapse adjacent response categories step by step (5, 4, 3, then 2
# distinct scores) and compare the resulting nested models.
## ----stereotype5---------------------------------------------------------
.pain <- backPainLong$pain
levels(.pain)[2:3] <- paste(levels(.pain)[2:3], collapse = " | ")
stereotype5 <- update(stereotype,
                      ~ pain + Mult(.pain, x1 + x2 + x3))
anova(stereotype, stereotype5)
## ----stereotypeOther, echo = FALSE---------------------------------------
levels(.pain)[4:5] <- paste(levels(.pain)[4:5], collapse = " | ")
stereotype4 <- update(stereotype5)
levels(.pain)[2:3] <- paste(levels(.pain)[2:3], collapse = " | ")
stereotype3 <- update(stereotype4)
levels(.pain)[2:3] <- paste(levels(.pain)[2:3], collapse = " | ")
stereotype2 <- update(stereotype3)
anova(stereotype, stereotype5, stereotype4, stereotype3, stereotype2)
# Rasch-type scaling of the House2001 roll-call votes: keep only
# "informative" members (some variation, at least 10 valid votes), fit a
# binomial Mult(rollCall, member) model, and plot the estimated member
# positions coloured by party.
## ----House2001prep, echo = FALSE-----------------------------------------
## Put the votes in a matrix, and discard members with too many NAs etc:
House2001m <- as.matrix(House2001[-1])
informative <- apply(House2001m, 1,
                     function(row){
                         valid <- !is.na(row)
                         validSum <- if (any(valid)) sum(row[valid]) else 0
                         nValid <- sum(valid)
                         uninformative <- (validSum == nValid) || (validSum == 0) || (nValid < 10)
                         !uninformative})
House2001m <- House2001m[informative, ]
## Make a vector of colours, blue for Republican and red for Democrat:
parties <- House2001$party[informative]
## Expand the data for statistical modelling:
House2001v <- as.vector(House2001m)
House2001f <- data.frame(member = rownames(House2001m),
                         party = parties,
                         rollCall = factor(rep((1:20),
                                               rep(nrow(House2001m), 20))),
                         vote = House2001v)
# Shrink the 0/1 votes towards 0.5 so fitted probabilities stay away from
# the boundary.
voteAdj <- 0.5 + 0.94*(House2001f$vote - 0.5)
# Starting values for the multiplicative term from an SVD of the
# residuals of the main-effects model.
## ----residSVD------------------------------------------------------------
baseModel <- glm(vote ~ -1 + rollCall,
                 family = binomial, data = House2001f)
Start <- residSVD(baseModel, rollCall, member)
## ----rasch1--------------------------------------------------------------
rasch1 <- gnm(voteAdj ~ Mult(rollCall, member),
              eliminate = rollCall,
              family = binomial, data = House2001f,
              na.action = na.exclude, tolerance = 1e-03,
              start = -Start, verbose = FALSE)
## ----raschPlots, fig.show = "hold", out.width = "0.49\\linewidth"--------
plot(pickCoef(rasch1, "[.]member", value = TRUE),
     col = c("red", "black", "black", "blue")[parties],
     xlab = "Alphabetical index", ylab = "Member's relative position")
dotchart(pickCoef(rasch1, "[.]rollCall", value = TRUE),
         paste0("Roll call ", 1:20))
# Slide-only chunks sketching the Lee-Carter mortality model and how to
# reuse eliminated coefficients as starting values.
## ----LeeCarter, eval = FALSE---------------------------------------------
## LCmodel <- gnm(Deaths ~ Mult(Exp(Age), Year),
##                eliminate = Age, offset = log(Exposure),
##                family = "quasipoisson")
## ----eliminated, eval = FALSE--------------------------------------------
## AgeCoef <- attr(coef(model1), "eliminated")
## ----start, eval = FALSE-------------------------------------------------
## start = c(AgeCoef, rep(0, length(AgeCoef)), 0, coef(model1))
# NOTE(review): absolute Windows path below is not portable -- point this
# at a local copy of conformity.txt before re-running the script.
## ----conformity, echo = FALSE--------------------------------------------
conformity <- read.table("E:/Repos/gnm-svn/DataSets/Van_der_Slik/conformity.txt",
                         colClasses = c("character", "numeric", "numeric",
                                        "factor", "factor", rep("numeric", 6)))
# Diagonal-reference model for the conformity data; DrefWeights() extracts
# the estimated mixing weights with standard errors.
## ----A-------------------------------------------------------------------
A <- gnm(MCFM ~ -1 +
             AGEM + MRMM + FRMF + MWORK + MFCM + Dref(MOPLM, FOPLF),
         family = gaussian, data = conformity, verbose = FALSE)
## ----w, message = FALSE--------------------------------------------------
w <- DrefWeights(A)
w
# Wald-style 95% confidence interval for the MOPLM weight.
## ----wCI-----------------------------------------------------------------
w$MOPLM["weight"] + qnorm(c(0.025, 0.975)) * w$MOPLM["se"]
## ----A2, echo = FALSE----------------------------------------------------
A2 <- update(A, . ~ -1 + AGEM + MRMM + FRMF + MWORK + MFCM + FOPLF)
anova(A2, A, test = "Chisq")
## ----F-------------------------------------------------------------------
# Diagonal-reference model whose mixing weights depend on MFCM via the
# delta formula.  The original assigned this model to `F`, which masks the
# base-R shorthand for FALSE and can silently break any later code that
# relies on the `F` shorthand; renamed to `modF`, with `F` kept as an
# alias for backward compatibility with downstream chunks.
modF <- gnm(MCFM ~ -1 + AGEM + MRMM + FRMF + MWORK + MFCM +
                Dref(MOPLM, FOPLF, delta = ~ 1 + MFCM),
            family = gaussian, data = conformity, verbose = FALSE)
F <- modF  # backward-compatible alias; avoid using `F` in new code
## ----wF, message = FALSE-------------------------------------------------
DrefWeights(modF)
# Building a custom "nonlin" term for gnm, step by step.  A nonlin
# function returns a list describing the predictors, variables and (later)
# the term expression; gnm dispatches on class "nonlin".
## ----TypeII--------------------------------------------------------------
TypeII <- function(x){
    list(predictors = list(a = 1, h = 1),
         variables = list(substitute(x)))
}
class(TypeII) <- "nonlin"
# Two ways to build the Holling Type II term expression a*x/(1 + a*h*x)
# as a string: paste0() vs sprintf().
## ----paste0--------------------------------------------------------------
term = function(predLabels, varLabels){
    paste0(predLabels[1], "*", varLabels[1], "/(1 + ",
           predLabels[1], "*", predLabels[2], "*", varLabels[1], ")")
}
term(c("a", "h"), "x")
## ----sprintf-------------------------------------------------------------
term = function(predLabels, varLabels){
    sprintf("%s * %s / (1 + %s * %s * %s)",
            predLabels[1], varLabels[1],
            predLabels[1], predLabels[2], varLabels[1])
}
# Full nonlin definition combining predictors, variables and the term
# constructor; this version has scalar parameters a and h.
## ----nonlin--------------------------------------------------------------
TypeII <- function(x){
    list(predictors = list(a = 1, h = 1),
         variables = list(substitute(x)),
         term = function(predLabels, varLabels){
             sprintf("%s * %s / (1 + %s * %s * %s)",
                     predLabels[1], varLabels[1],
                     predLabels[1], predLabels[2], varLabels[1])
         })
}
class(TypeII) <- "nonlin"
# Toy functional-response data: number of prey eaten at six prey
# densities, four replicates each.
## ----prey----------------------------------------------------------------
Density <- rep(c(2,5,10,15,20,30), each = 4)
Eaten <- c(1,1,0,0,2,2,1,1,1,2,3,2,2,2,3,3,3,3,4,3,3,3,4,3)
# Fit the Type II curve with an identity-link quasi-Poisson model;
# starting values are required for nonlinear terms.
## ----mod1----------------------------------------------------------------
mod1 <- gnm(Eaten ~ -1 + TypeII(Density), start = c(a = 0.1, h = 0.1),
            family = quasipoisson(link = "identity"))
## ----mod1Summary, echo = FALSE-------------------------------------------
summary(mod1)
# Extend the nonlin term so a and h vary with a factor C (one pair of
# parameters per level).
## ----factor--------------------------------------------------------------
TypeII <- function(C, x){
    list(predictors = list(a = substitute(C), h = substitute(C)),
         variables = list(substitute(x)),
         term = function(predLabels, varLabels){
             sprintf("%s * %s / (1 + %s * %s * %s)",
                     predLabels[1], varLabels[1],
                     predLabels[1], predLabels[2], varLabels[1])
         })
}
class(TypeII) <- "nonlin"
## ----factorResult--------------------------------------------------------
Catchment <- factor(rep(1:2, 6, each = 2))
mod2 <- gnm(Eaten ~ -1 + TypeII(Catchment, Density),
            start = rep(0.2, 4),
            family = quasipoisson(link = "identity"))
coef(mod2)
# Most general version: a and h given by an arbitrary formula f; the
# extra parentheses in the term keep operator precedence correct when the
# predictors expand to sums.
## ----formula-------------------------------------------------------------
TypeII <- function(f, x){
    list(predictors = list(a = f, h = f),
         variables = list(substitute(x)),
         term = function(predLabels, varLabels){
             sprintf("(%s) * (%s)/ (1 + (%s) * (%s) * %s)",
                     predLabels[1], varLabels[1],
                     predLabels[1], predLabels[2], varLabels[1])
         })
}
class(TypeII) <- "nonlin"
## ----formulaResult-------------------------------------------------------
mod2 <- gnm(Eaten ~ -1 + TypeII(~ 1 + Catchment, Density),
            start = c(0.2, -0.1, 0.2, -0.1),
            family = quasipoisson(link = "identity"))
coef(mod2)
# Slide-only chunks for the voting exercise: building a binomial response
# matrix and indicator variables for mobility direction.
## ----binomial, eval = FALSE----------------------------------------------
## count <- with(voting, percentage/100 * total)
## yvar <- cbind(count, voting$total - count)
## ----upward, eval = FALSE------------------------------------------------
## origin <- as.numeric(as.character(voting$origin))
## destination <- as.numeric(as.character(voting$destination))
## upward <- origin > destination
## ----inOut, eval = FALSE-------------------------------------------------
## in1 <- origin != 1 & destination == 1
## out1 <- origin == 1 & destination != 1
|
67e30162b0ab6f1dd42badf2a78585a2fce3f693 | 22793bd62a12a4663e7e3a720a683b021bb5ab73 | /src/examples/console/readLines.R | ab691fb98dc67e578d218e22e7a83d41ca2d9f05 | [] | no_license | veltzer/demos-r | 8f9a4947287ad1c8a5021c449e54153931778c1b | ad19c05ed697a4b0366f04a9401638ab61020547 | refs/heads/master | 2023-07-07T10:28:54.702975 | 2023-07-01T14:37:40 | 2023-07-01T14:37:40 | 28,448,519 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 184 | r | readLines.R | #!/usr/bin/Rscript
# Pause until the user presses Enter.  Reading a single line from the
# stdin stream works under Rscript, where readline() would not block.
prompt_msg <- "Press [enter] to continue..."
cat(prompt_msg)
discarded <- readLines("stdin", n = 1, warn = FALSE)
invisible(discarded)
|
8aba88ea0a5676f7c4aa62614a763bd420d0c16b | 9f081507cfad007ff1321f3e8553bdadef734d54 | /man/crop_map_spdf_maker.Rd | ce08a90b085bb1ee0ce39d8983ef05ca15cb7628 | [] | no_license | harithmorgadinho/ggg | 42f420602324bdef7cb5f84845d346fa1efb476c | 9c0943cb817bb84485effca80464cc0b7a3347da | refs/heads/master | 2020-05-18T00:57:55.489851 | 2019-05-13T16:57:33 | 2019-05-13T16:57:33 | 184,076,468 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 515 | rd | crop_map_spdf_maker.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/crop_map_spdf_maker.R
\name{crop_map_spdf_maker}
\alias{crop_map_spdf_maker}
\title{crop_map_spdf_maker}
\usage{
crop_map_spdf_maker(input, crs = "auto", zoom_out = 1)
}
\arguments{
\item{input}{df or raster}
\item{crs}{projection of data provided ('longlat'/'cea'/'auto')}
\item{zoom_out}{zoom level for the map}
}
\description{
A function to plot biodiversity data.
}
\examples{
crop_map_spdf_maker(df)
crop_map_spdf_maker(df, crs = 'longlat')
}
|
789dfdf3b53c8161529a486627e31febfdaa408a | dd8132404e8c7b028cb13cba904c50aace01c6a7 | /swt/src/spc/c1.u/c1_alloc.r | 532adb90c332038e404a943d05e3d5f522ad3c20 | [] | no_license | arnoldrobbins/gt-swt | d0784d058fab9b8b587f850aeccede0305d5b2f8 | 2922b9d14b396ccd8947d0a9a535a368bec1d6ae | refs/heads/master | 2020-07-29T09:41:19.362530 | 2019-10-04T11:36:01 | 2019-10-04T11:36:01 | 209,741,739 | 15 | 0 | null | null | null | null | UTF-8 | R | false | false | 17,724 | r | c1_alloc.r | # enter_id_decl --- given a pointer to an identifer name, a
#                   a storage class, mode, and parameter list,
#                   enter an identifier in the symbol table
#
#   id     - in: pointer to the saved identifier text;
#            out: pointer to the symbol-table (SYM) entry created/found
#   mode   - pointer to the declared mode (type) of the identifier
#   xsc    - declared storage class (DEFAULT_SC, AUTO_SC, STATIC_SC, ...)
#   params - for functions: parameter list from the declarator; any part
#            not attached to the symbol is freed before returning
#   arg    - YES when the declaration is of a formal parameter
#   body   - YES when declaring a function that has a body (a definition)
subroutine enter_id_decl (id, mode, xsc, params, arg, body)
pointer id                  # text pointer in, pointer to SYM entry out
pointer mode                # pointer to the mode of the identifier
integer xsc                 # storage class of the identifier
pointer params              # for functions: parameter list in declaration;
                            # LAMBDA for function decls with body
integer arg                 # YES when declaration a formal parameter
integer body                # YES when declaring a function with a body
include "c1_com.r.i"
integer sc, obj
integer findsym, compare_mode, new_obj, new_sym, old_sym
pointer q, np
pointer makesym
untyped info (IDSIZE)
if (id == LAMBDA)           # in case of someone else's error
    return
# named parameters are legal only in a function definition's parameter list
for (q = params; q ~= LAMBDA; q = PARAMNEXT (q))
    if (PARAMTEXT (q) ~= LAMBDA) {
        ERROR_SYMBOL (Mem (PARAMTEXT (q)))
        SYNERR ("Named parameters not allowed in this declaration"p)
        }
# work on local copies; np keeps the text pointer so it can be freed later
sc = xsc
np = id
if (arg == YES) {
    if (sc ~= DEFAULT_SC && sc ~= REGISTER_SC) {
        ERROR_SYMBOL (Mem (id))
        SYNERR ("Illegal storage class for a parameter"p)
        sc = DEFAULT_SC
        }
    if (MODETYPE (mode) == FUNCTION_MODE) {
        ERROR_SYMBOL (Mem (id))
        SYNERR ("Functions cannot be passed as parameters"p)
        call create_mode (mode, POINTER_MODE, 0)
        }
    call modify_param_mode (mode)
    }
if (MODETYPE (mode) == FUNCTION_MODE) {
    mode = MODEPARENT (mode)
    call modify_param_mode (mode)
    call create_mode (mode, FUNCTION_MODE, 0)
    }
# typedef names always get a fresh symbol; redefinition at this lexical
# level is an error
if (sc == TYPEDEF_SC) {
    if (findsym (Mem (np), q, IDCLASS) == YES && SYMLL (q) == Ll)
        SYNERR ("Identifier defined twice"p)
    id = new_sym (Mem (np), mode, sc, arg, Ll, 0, YES)
    }
else if (Ll == 1) { # It's an external definition
    if (MODETYPE (mode) == FUNCTION_MODE && body == YES) {
        if (sc == REGISTER_SC || sc == AUTO_SC || sc == EXTERN_SC) {
            ERROR_SYMBOL (Mem (np))
            SYNERR ("Invalid storage class on a function definition"p)
            sc = DEFAULT_SC
            }
        if (findsym (Mem (np), q, IDCLASS) == NO)
            id = new_sym (Mem (np), mode, sc, arg, Ll, new_obj (0), YES)
        else if (SYMISDEF (q) == YES) {
            ERROR_SYMBOL (Mem (np))
            SYNERR ("Function defined twice in this file"p)
            id = old_sym (q, sc, mode, YES)
            }
        else {      # Function is previously declared
            if (compare_mode (mode, SYMMODE (q)) == NO)
                WARNING ("Declaration conflicts with previous declaration"p)
            if (sc == DEFAULT_SC)   # use the previous declaration
                sc = SYMSC (q)
            else if (SYMSC (q) == EXTERN_SC)    # sc must be STATIC
                WARNING ("EXTERN function may not be redeclared STATIC"p)
            if (SYMOBJ (q) == 0)
                SYMOBJ (q) = new_obj (0)
            id = old_sym (q, sc, mode, YES)
            }
        }
    else if (MODETYPE (mode) == FUNCTION_MODE) {
        if (sc == REGISTER_SC || sc == AUTO_SC || sc == STATIC_SC) {
            ERROR_SYMBOL (Mem (np))
            SYNERR ("Invalid storage class on a function declaration"p)
            sc = DEFAULT_SC
            }
        if (findsym (Mem (np), q, IDCLASS) == NO) {
            id = new_sym (Mem (np), mode, sc, arg, Ll, 0, NO)
            SYMPLIST (id) = params
            params = LAMBDA
            }
        else if (SYMISDEF (q) == YES) {
            ERROR_SYMBOL (Mem (np))
            SYNERR ("Function defined twice in this file"p)
            id = old_sym (q, sc, mode, YES)
            }
        else {      # Function is previously declared
            if (compare_mode (mode, SYMMODE (q)) == NO)
                WARNING ("Declaration conflicts with previous declaration"p)
            if (sc == DEFAULT_SC)
                sc = SYMSC (q)
            id = old_sym (q, sc, mode, SYMISDEF (q))
            if (SYMPLIST (id) == LAMBDA) {
                SYMPLIST (id) = params
                params = LAMBDA
                }
            }
        }
    else {  # It's not a function
        if (sc == REGISTER_SC || sc == AUTO_SC) {
            ERROR_SYMBOL (Mem (np))
            WARNING ("Invalid storage class on an external declaration"p)
            sc = DEFAULT_SC
            }
        if (findsym (Mem (np), q, IDCLASS) == NO)
            if (sc == EXTERN_SC)
                id = new_sym (Mem (np), mode, sc, arg, Ll, 0, NO)
            else if (sc == STATIC_SC)
                # kludge so we can remember if obj was static & not emit
                # ENT for it - only when static is redecl extern
                id = new_sym (Mem (np), mode, sc, arg, Ll, new_obj (0), STATIC_SC)
            else
                id = new_sym (Mem (np), mode, sc, arg, Ll, new_obj (0), YES)
        else {  # Identifier previously declared at Ll 1
            if (compare_mode (mode, SYMMODE (q)) == NO) {
                ERROR_SYMBOL (Mem (np))
                WARNING ("Declaration conflicts with previous declaration"p)
                }
            if (sc == EXTERN_SC) {
                # ignore redefinition as EXTERN - won't define
                # more storage if 'is_stored' thinks sc = EXTERN
                # if the old object was static, then SYMISDEF =
                # STATIC_SC, otherwise it's YES - & we keep track
                # of redefinitions
                id = old_sym (q, sc, mode, SYMISDEF (q))
                }
            else {  # new symbol not EXTERN
                # if old symbol is EXTERN, new sc supercedes it;
                # otherwise, squawk & redefine old symbol
                if ((SYMSC (q) ~= EXTERN_SC) ||
                    # if sc = EXTERN && the symbol is already defined,
                    # then we can't allocate more space for it - for
                    # really bizarre things like
                    #
                    #   int gorf;
                    #   extern gorf;
                    #   static gorf;
                    (SYMSC (q) == EXTERN_SC && SYMISDEF (q) ~= NO)) {
                    ERROR_SYMBOL (Mem (np))
                    SYNERR ("Identifier defined twice"p)
                    }
                if (SYMOBJ (q) == 0)    # take care of undef EXTERN
                    SYMOBJ (q) = new_obj (0)
                if (sc == STATIC_SC)
                    id = old_sym (q, sc, mode, STATIC_SC)   # you never know
                else
                    id = old_sym (q, sc, mode, YES)
                    # use most recent definition
                }
            }
        }
    }
else {  # Ll > 1  It's an internal definition
    if (MODETYPE (mode) == FUNCTION_MODE) {
        if (sc == REGISTER_SC || sc == AUTO_SC || sc == STATIC_SC) {
            ERROR_SYMBOL (Mem (np))
            SYNERR ("Invalid storage class on a function declaration"p)
            sc = DEFAULT_SC
            }
        if (findsym (Mem (np), q, IDCLASS) == NO) {
            id = new_sym (Mem (np), mode, sc, arg, 1, 0, NO)
            id = new_sym (Mem (np), mode, sc, arg, Ll, 0, NO)
            SYMPLIST (id) = params
            params = LAMBDA
            }
        else if (SYMLL (q) == Ll) {
            ERROR_SYMBOL (Mem (np))
            SYNERR ("Function defined twice"p)
            }
        else if (findsym (Mem (np), q, IDCLASS, 1) == YES) {
            if (compare_mode (mode, SYMMODE (q)) == NO)
                WARNING ("Declaration conflicts with previous declaration"p)
            if (sc == DEFAULT_SC)
                sc = SYMSC (q)
            else if (SYMSC (q) == STATIC_SC)
                WARNING ("STATIC function may not be redeclared EXTERN"p)
            id = old_sym (q, sc, mode, SYMISDEF (q))
            if (SYMPLIST (id) == LAMBDA) {
                SYMPLIST (id) = params
                params = LAMBDA
                }
            }
        else    # Function is declared at another LL, but not 1
            FATAL ("Function declared at level other than 1"p)
        }
    else {  # It's not a function
        if (sc == DEFAULT_SC)   # put it on the stack
            sc = AUTO_SC
        if (findsym (Mem (np), q, IDCLASS) == NO) {
            if (sc == EXTERN_SC) {
                id = new_sym (Mem (np), mode, sc, arg, 1, 0, NO)
                id = new_sym (Mem (np), mode, sc, arg, Ll, 0, NO)
                }
            else
                id = new_sym (Mem (np), mode, sc, arg, Ll, new_obj (0), NO)
            }
        else if (SYMLL (q) == Ll) {
            ERROR_SYMBOL (Mem (np))
            SYNERR ("Identifier defined twice at current level"p)
            }
        else if (sc == EXTERN_SC && findsym (Mem (np), q, IDCLASS, 1) == YES) {
            # defined at lexical level 1 - don't need new def
            if (compare_mode (mode, SYMMODE (q)) == NO) {
                ERROR_SYMBOL (Mem (np))
                WARNING ("Declaration conflicts with previous declaration"p)
                }
            # I don't know why this works!
            # id = old_sym (q, sc, mode, SYMISDEF (q))
            }
        else {  # Identifier is declared at another LL
            if (sc == EXTERN_SC) {
                # it's either a forward definition or a *real*
                # external (defined outside of this file)
                id = new_sym (Mem (np), mode, sc, arg, 1, 0, NO)
                id = new_sym (Mem (np), mode, sc, arg, Ll, 0, NO)
                }
            else
                id = new_sym (Mem (np), mode, sc, arg, Ll, new_obj (0), NO)
            }
        }
    }
call dsfree (np)    # free the saved text space
while (params ~= LAMBDA) {  # free the parameter list if it wasn't used
    if (PARAMTEXT (params) ~= LAMBDA)
        call dsfree (PARAMTEXT (params))
    q = PARAMNEXT (params)
    call dsfree (params)
    params = q
    }
return
end
# enter_sm_decl --- given a pointer to an identifer name, a mode,
#                   and parameter list, enter a stucture member
#                   into the symbol table
#
#   id     - in: saved identifier text; out: SYM entry for the member
#   mode   - declared mode of the member (functions are rejected)
#   params - any parameter list is illegal here and is freed
#   loc    - bit offset of the member within the structure
subroutine enter_sm_decl (id, mode, params, loc)
pointer id, mode, params
longint loc
include "c1_com.r.i"
integer findsym, compare_mode
pointer q
pointer makesym
longint get_long
untyped info (IDSIZE)
if (id == LAMBDA)           # in case of someone else's error
    return
if (MODETYPE (mode) == FUNCTION_MODE) {
    SYNERR ("Functions cannot be part of a 'struct'"p)
    call create_mode (mode, POINTER_MODE, 0)
    }
if (params ~= LAMBDA) {
    SYNERR ("Parameters not allowed in struct member"p)
    while (params ~= LAMBDA) {
        q = PARAMNEXT (params)
        call dsfree (params)
        params = q
        }
    }
# already in the member-name class at this level: either a clash with a
# struct tag, or a (possibly conflicting) redeclaration of the member
if (findsym (Mem (id), q, SMCLASS) == YES && SYMLL (q) == Ll) {
    select (SYMTYPE (q))
    when (STSYMTYPE) {
        ERROR_SYMBOL (Mem (id))
        SYNERR ("Identifier already defined as struct tag"p)
        }
    when (SMSYMTYPE)
        if (get_long (loc) ~= get_long (SYMOFFS (q))
            || compare_mode (mode, SYMMODE (q)) == NO) {
            ERROR_SYMBOL (Mem (id))
            SYNERR ("Struct member conflicts with previous definition"p)
            }
    call dsfree (id)
    id = q
    return
    }
# first declaration: build a fresh member symbol and record its offset
q = id
id = makesym (Mem (id), SMSYMTYPE, Ll)
call dsfree (q)             # free the saved text space
SYMMODE (id) = mode
SYMPARAM (id) = -1
SYMSC (id) = DEFAULT_SC
SYMOBJ (id) = 0
call put_long (SYMOFFS (id), loc)
return
end
# declare_label --- declare the current symbol as a label
#
#   flag - YES when this is the defining occurrence of the label;
#          SYMPARAM records whether a definition has been seen, so
#          clean_up_ll can diagnose undefined or duplicate labels
subroutine declare_label (flag)
integer flag
include "c1_com.r.i"
integer findsym, makesym, new_obj
# labels live at lexical level 2, are static, and get an object number
# immediately
if (findsym (Symtext, Symptr, IDCLASS) == NO
    || SYMLL (Symptr) <= 1) {
    Symptr = makesym (Symtext, IDSYMTYPE, 2)
    SYMSC (Symptr) = STATIC_SC
    SYMMODE (Symptr) = Label_mode_ptr
    SYMPARAM (Symptr) = flag
    SYMOBJ (Symptr) = new_obj (0)
    return
    }
if (SYMMODE (Symptr) ~= Label_mode_ptr) {
    SYNERR ("Label already declared but not as label"p)
    return
    }
if (SYMPARAM (Symptr) == YES && flag == YES)
    SYNERR ("Label already declared"p)
if (flag == YES)
    SYMPARAM (Symptr) = YES
return
end
# check_declaration --- check if current symbol has been declared;
#                       if not, create a dummy symbol
#
#   class - symbol class to search (IDCLASS or SMCLASS); emits either a
#           zero/constant integer operand or the symbol operand into the
#           intermediate code
subroutine check_declaration (class)
integer class
include "c1_com.r.i"
integer findsym, new_obj
pointer q
if (findsym (Symtext, Symptr, class) == NO) {
    SYNERR ("Identifier not declared"p)
    call gen_int (0)            # dummy operand keeps codegen consistent
    }
else if (SYMTYPE (Symptr) == COSYMTYPE)
    call gen_int (SYMOBJ (Symptr))  # enum/constant: emit its value
else if (SYMTYPE (Symptr) ~= IDSYMTYPE && SYMTYPE (Symptr) ~= SMSYMTYPE) {
    SYNERR ("Identifier is not a variable"p)
    call gen_int (0)
    }
else {
    if (SYMTYPE (Symptr) == IDSYMTYPE
        && SYMOBJ (Symptr) == 0) {  # Add an object number on reference
        SYMOBJ (Symptr) = new_obj (0)
        # propagate the object number to the level-1 declaration too
        if (SYMLL (Symptr) > 1 && findsym (Symtext, q, class, 1) == YES
            && SYMOBJ (q) == 0)
            SYMOBJ (q) = SYMOBJ (Symptr)
        DBG (42, call print (ERROUT, "check_declaration: '*s' first ref*n"s,
        DB    Symtext))
        }
    call gen_opnd (Symptr)
    }
return
end
# clean_up_ll --- check for undefined symbols and labels
#
# Walks every identifier-class and member-class symbol at the current
# lexical level (Ll) and diagnoses: undefined labels, parameters declared
# but absent from the parameter list, and enums/structs that were
# referenced but never given a body.
subroutine clean_up_ll
include "c1_com.r.i"
pointer p, q
pointer accesssym
character str (MAXTOK)
p = LAMBDA
for (q = accesssym (Ll, str, p, IDCLASS); q ~= LAMBDA;
    q = accesssym (Ll, str, p, IDCLASS)) {
    DBG (42, if (SYMOBJ (q) == 0) {
    DB    call print (ERROUT, "Unref: '*s' "s, str)
    DB    call dump_sym_entry (q); call print (ERROUT, "*n"s)})
    if (SYMMODE (q) == Label_mode_ptr && SYMPARAM (q) == NO) {
        ERROR_SYMBOL (str)
        SYNERR ("Undefined label"p)
        }
    if (SYMPARAM (q) == 0) {
        ERROR_SYMBOL (str)
        SYNERR ("Declared as parameter but not in parameter list"p)
        }
    if (SYMTYPE (q) == ENSYMTYPE && MODESMLIST (SYMMODE (q)) == LAMBDA) {
        ERROR_SYMBOL (str)
        SYNERR ("'Enum' referenced but not defined"p)
        }
    }
# second pass over the struct-member class for undefined struct tags
p = LAMBDA
for (q = accesssym (Ll, str, p, SMCLASS); q ~= LAMBDA;
    q = accesssym (Ll, str, p, SMCLASS)) {
    if (SYMTYPE (q) == STSYMTYPE && MODESMLIST (SYMMODE (q)) == LAMBDA) {
        ERROR_SYMBOL (str)
        SYNERR ("'Struct' referenced but not defined"p)
        }
    }
return
end
# check_function_declaration --- handle default function declarations)
#
# Called when an identifier is used in a call position: if it has no
# declaration (or conflicts with one at another level), it is implicitly
# declared as an EXTERN function returning int, as in classic C.
subroutine check_function_declaration
include "c1_com.r.i"
integer findsym, new_obj
pointer q
pointer mode
pointer sdupl
# default implicit type: extern int f()
mode = Int_mode_ptr
call create_mode (mode, FUNCTION_MODE, 0)
if (findsym (Symtext, Symptr, IDCLASS) == NO) {
    Symptr = sdupl (Symtext)
    call enter_id_decl (Symptr, mode, EXTERN_SC, LAMBDA, NO, NO)
    }
else if (MODETYPE (SYMMODE (Symptr)) == FUNCTION_MODE)
    ;                       # already declared as a function: nothing to do
else if (SYMLL (Symptr) == Ll)
    SYNERR ("Usage conflicts with previous declaration"p)
else {
    Symptr = sdupl (Symtext)
    call enter_id_decl (Symptr, mode, EXTERN_SC, LAMBDA, NO, NO)
    }
if (SYMOBJ (Symptr) == 0) { # Add an object number on reference
    SYMOBJ (Symptr) = new_obj (0)
    if (SYMLL (Symptr) > 1 && findsym (Symtext, q, IDCLASS, 1) == YES
        && SYMOBJ (q) == 0)
        SYMOBJ (q) = SYMOBJ (Symptr)
    DBG (42, call print (ERROUT, "check_function_declaration: '*s' first ref*n"s,
    DB    Symtext))
    }
call gen_opnd (Symptr)
return
end
# allocate_storage --- perform storage allocation
#
#   id - SYM entry of a declared data object; validates that its size is
#        non-zero and fits in 65535 words before storage is assigned
subroutine allocate_storage (id)
pointer id
include "c1_com.r.i"
integer is_stored
longint sz
longint sizeof_mode
if (is_stored (id) == NO)   # e.g. EXTERN objects get no storage here
    return
sz = sizeof_mode (SYMMODE (id))     # size in bits
if (sz == 0)
    SYNERR ("Data object may not have zero size"p)
else if (sz >= (intl (MAXUNSIGNED) + 1) * 8)
    SYNERR ("Data object may not be larger than 65535 words"p)
DBG (29, call print (ERROUT, "in allocate_storage: id=*i sz=*l l=*i*n"s,
DB    id, sz, SYMOBJ (id)))
return
end
# alloc_struct --- increment the size of a structure or union by the
#                  specified size
#
#   mp  - mode pointer of the struct/union being laid out
#   len - size of the new member; structs accumulate member sizes, while
#         unions only grow to the size of their largest member
subroutine alloc_struct (mp, len)
pointer mp
longint len
include "c1_com.r.i"
longint get_long
if (MODETYPE (mp) == STRUCT_MODE)
    call put_long (MODELEN (mp), get_long (MODELEN (mp)) + len)
else if (get_long (MODELEN (mp)) < len)
    call put_long (MODELEN (mp), len)
return
end
# wsize --- compute the size in words given the size in bits
#
#   b - size in bits; rounds up to whole 16-bit words
integer function wsize (b)
longint b
include "c1_com.r.i"
wsize = (b + 15) / 16
DBG (38, call print (ERROUT, "in wsize: b=*l, w=*i*n"s, b, wsize))
return
end
# alloc_temp --- allocate a temporary variable
#
#   mp - mode of the temporary; a fresh AUTO symbol named "#tempN" (N is
#        a new object number) is created at the current lexical level and
#        its declaration is emitted to the intermediate-code output.
#        Returns the SYM entry of the temporary.
pointer function alloc_temp (mp)
pointer mp
include "c1_com.r.i"
integer obj
integer new_obj
character str (20)
pointer p
pointer new_sym
obj = new_obj (0)
call encode (str, 20, "#temp*i"s, obj)  # synthesize a unique name
p = new_sym (str, mp, AUTO_SC, NO, Ll, obj, NO)
call out_var (p)
call out_oper (NULL_OP)
call out_size (SYMMODE (p))
return (p)
end
|
9b7e74e11c892e6d1b3c4107f616d03c7e19eeb9 | eaf654a68caab37b7dc9231421713c990f3b9b9e | /run_analysis.R | a3b45cb6b0b4e9dcb5e0dbc57e25be4849d64aef | [] | no_license | fritss/GettingAndCleaningData | a1f4e49e6a662f64f7ef62c388d805ce20c92df5 | fce793998b99fc3825c5f7f026a863f2cf008dcb | refs/heads/master | 2021-01-11T15:45:33.676762 | 2017-01-24T15:49:27 | 2017-01-24T15:49:27 | 79,923,052 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,574 | r | run_analysis.R | library(data.table)
# Project layout: all input files live under "UCI HAR Dataset" in the
# working directory.  These globals are read by ReadDataFrame() below.
# the directory where all data is located
dataDir <- file.path(getwd(), "UCI HAR Dataset")
# the file with activity labels
actfile <- file.path(dataDir, "activity_labels.txt")
# the file with features
featfile <- file.path(dataDir, "features.txt")
# read the feature names
features <- read.table(featfile)
featureNames <- features$V2
# we are only interested in the means and the standard deviations
# these are feature names with mean(), meanFreq() and std() in it.
# (parentheses are escaped so grepl matches them literally)
extractedFeatures <- grepl("mean\\(\\)|meanFreq\\(\\)|std\\(\\)", featureNames)
# read the activity labels
activity_labels <- read.table(actfile)
# the train data and test data must be handled in the same way
# to be sure that this is satisfied it is captured in a function
# that can read them both.
# Read one partition ("train" or "test") of the UCI HAR dataset and return
# a single data frame with columns: Subject, Activity_Label, the selected
# mean()/meanFreq()/std() features, and a train_test_status marker.
# Relies on the globals defined above: dataDir, featureNames,
# extractedFeatures and activity_labels.
ReadDataFrame <- function(mode="train") {
  # the directory where the train/test data is located
  dir <- file.path(dataDir, mode)
  # the names of the files.  NOTE: the dataset ships "y_train.txt" /
  # "y_test.txt" with a lower-case "y"; the original "Y_" prefix only
  # worked on case-insensitive file systems and fails on Linux.
  subfile <- file.path(dir, paste0("subject_", mode, ".txt"))
  xfile <- file.path(dir, paste0("X_", mode, ".txt"))
  yfile <- file.path(dir, paste0("y_", mode, ".txt"))
  # read the three files
  x <- read.table(xfile)
  y <- read.table(yfile)
  subjects <- read.table(subfile)
  # give the features of the x-file descriptive names
  names(x) <- featureNames
  # and select only those columns we are interested in.
  x <- x[, extractedFeatures]
  # add a column indicating whether it is train or test data
  x$train_test_status <- mode
  # give the activities descriptive labels instead of numbers
  y[, 1] <- activity_labels[y[, 1], 2]
  # give the columns descriptive names
  names(y) <- "Activity_Label"
  names(subjects) <- "Subject"
  # combine the subjects, y and x in a single data frame
  cbind(subjects, y, x)
}
# read the training data
traindf <- ReadDataFrame("train")
# read the test data
testdf <- ReadDataFrame("test")
# combine them in a single frame
df <- rbind(traindf, testdf)
# calculate the average for every column grouped for every person and activity
# of course, we use dplyr and tidyr
library(dplyr)
library(tidyr)
# to avoid taking the average of the "train_test_status", this is part of the split
# NOTE: summarise_each()/funs() are deprecated in current dplyr;
# summarise(across(everything(), mean)) is the modern equivalent.  Kept
# here for compatibility with the dplyr version this script was written for.
tidydf <-
  df %>%
  group_by(Subject, Activity_Label, train_test_status) %>%
  summarise_each(funs(mean))
# write out the results
datafile <- file.path(dataDir, "data.txt")
tidydatafile <- file.path(dataDir, "tidy_data.txt")
# the whole table is not required, only the tidy table
# write.table(df, file = datafile)
# spell the argument out in full; the original "row.name" relied on R's
# partial argument matching
write.table(tidydf, file = tidydatafile, row.names = FALSE)
|
3c36c01fb894dd2c23c43d41ba14ce4ff0d38da2 | 610912b74d16e1fafb73357b3ae45bb1a5522ac3 | /R/Kanyama-simplified.R | c3f38db8bc29942295daf6256a6fc8322ae320bf | [] | no_license | loumishima/Kanyama-Data-Exploration | ff24de553b9e080e7dec965f01db306b5c2636a6 | e2b36f458386fcf7fcd7ef60158a476ecc9c6da2 | refs/heads/master | 2020-05-02T15:14:54.889094 | 2019-10-01T15:59:13 | 2019-10-01T15:59:13 | 178,035,003 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,259 | r | Kanyama-simplified.R | #Loading the libraries and dataset ----
library(dplyr)
library(readxl)
library(leaflet)
library(ggplot2)
# NOTE(review): absolute machine-specific paths + setwd() make this script
# non-portable; consider relative paths or here::here() instead.
setwd("/Users/gather3/Documents/Kanyama - Data Exploration/Kanyama Data Exploration/R")
source("functions.R")   # provides Columns.Remover() and simplify() used below
setwd("/Users/gather3/Documents/Kanyama - Data Exploration/Kanyama Data Exploration/data")
# Survey responses: sheet 2, skipping the first header row; drop the
# metadata columns and keep only consenting respondents.
Kanyama.raw <- read_xlsx("KANYAMA.xlsx", sheet = 2, skip = 1)
Kanyama <- Kanyama.raw %>% select(-c(1:23, 27)) %>% filter(.$`Are you willing to participate?` == "Yes")
#Renaming some columns for better understanding----
# Map the raw questionnaire item codes (e.g. `1.3`, `1.6 - 1 - 1.5.1`) to
# human-readable column names. New name on the left, old code on the right.
Kanyama <- Kanyama %>% rename("Record_plot_number" = `1.3`,
"Families_on_the_plot" = `1.4`,
"People_on_the_plot" = `1.5`,
"VIP toilets" = `1.6 - 1 - 1.5.1`,
"ECOSAN toilets" = `1.6 - 2 - 1.5.1`,
"Inside waterflush toilets" = `1.6 - 3 - 1.5.1`,
"Outside waterflush toilets" = `1.6 - 4 - 1.5.1`,
"Poor flush Inside" = `1.6 - 5 - 1.5.1`,
"Poor flush Outside" = `1.6 - 6 - 1.5.1`,
"Lined Pit latrine" = `1.6 - 7 - 1.5.1`,
"Unlined Pit latrine" = `1.6 - 8 - 1.5.1`,
"Disused/Buried" = `1.6 - 9 - 1.5.1`,
"Water source (fetch)" = `1.6.2`,
"Emptied the toilet before?" = `3.7`,
"Last time emptied" = `3.7.1`,
"Who emptied?" = `3.7.2`,
"Interface Layout" = `4.3 INTERFACE`,
"Width" = `4.4`,
"Diameter" = `4.416`,
"Length" = `4.5`,
"Height" = `4.6`,
"Perception of the fill level" = `4.7`,
"Is emptying feasible?" = `4.8`,
"Is washing hand basin present?" = `4.9`,
"Region" = `1.2`,
"Landlord live in the plot?" = `1.8`,
"Upgraded toilet recently?" = `3.4`,
"Cleanliness rate" = `4.117`
)
#!na/total ratio filtering ----
# Columns.Remover() is presumably defined in functions.R (sourced above);
# it appears to return a logical column selector per completeness threshold
# -- TODO confirm against functions.R.
Boolean.50 <- Columns.Remover(Kanyama, 0.5)
Kanyama.50perc <- Kanyama[Boolean.50]
Boolean.75 <- Columns.Remover(Kanyama, 0.75)
Kanyama.75perc <- Kanyama[Boolean.75]
Boolean.80 <- Columns.Remover(Kanyama, 0.80)
Kanyama.80perc <- Kanyama[Boolean.80]
Boolean.90<- Columns.Remover(Kanyama, 0.90)
Kanyama.90perc <- Kanyama[Boolean.90]
# Export the photo columns plus labels for the deep-learning pipeline,
# keeping only rows where both photos are present.
Deep.Learning <- Kanyama.90perc %>% select(contains("TAKE "),
`Perception of the fill level`,
contains("Condition")
)
Deep.Learning <- Deep.Learning %>% filter(!is.na(`TAKE PHOTO OF INSIDE THE TOILET`) &
!is.na(`TAKE PHOTO OF OUTSIDE THE TOILET/CONTAINMENT `))
write.csv(Deep.Learning, file = "Deep_Learning.csv" , row.names = F )
#Removing the answers about the respondent and another columns with no use ----
# simplify() comes from functions.R (sourced above) -- TODO confirm its contract.
Kanyama.reduced <- simplify(Kanyama)
#Grouping the last questions----
# Collapse each paired "... - Yes" / "... - No" indicator column into a
# single accessibility column via grouping.columns() (from functions.R).
columns_yes <- list(Kanyama.reduced$`Is the toilet easily accessible to the following people?: Children - Yes`,
Kanyama.reduced$`Is the toilet easily accessible to the following people?: Persons with dissability - Yes`,
Kanyama.reduced$`Is the toilet easily accessible to the following people?: Women at night - Yes`,
Kanyama.reduced$`Is the toilet easily accessible to the following?: Vacuum Tanker - Yes`,
Kanyama.reduced$`Is the toilet easily accessible to the following?: Light Truck - Yes`,
Kanyama.reduced$`Is the toilet easily accessible to the following?: Push Cart - Yes`)
columns_no <- list(Kanyama.reduced$`Is the toilet easily accessible to the following people?: Children - No`,
Kanyama.reduced$`Is the toilet easily accessible to the following people?: Persons with dissability - No`,
Kanyama.reduced$`Is the toilet easily accessible to the following people?: Women at night - No`,
Kanyama.reduced$`Is the toilet easily accessible to the following?: Vacuum Tanker - No`,
Kanyama.reduced$`Is the toilet easily accessible to the following?: Light Truck - No`,
Kanyama.reduced$`Is the toilet easily accessible to the following?: Push Cart - No`)
# NOTE(review): `names` shadows base::names for the rest of the script.
names <- c("Is the toilet easily accessible to the following people?: Children",
"Is the toilet easily accessible to the following people?: Persons with dissability",
"Is the toilet easily accessible to the following people?: Women at night",
"Is the toilet easily accessible to the following?: Vacuum Tanker",
"Is the toilet easily accessible to the following?: Light Truck",
"Is the toilet easily accessible to the following?: Push Cart")
Kanyama.reduced <- grouping.columns(Kanyama.reduced, columns_yes, columns_no, names)
Kanyama.reduced <- select(Kanyama.reduced, -`Is there another toilet to observe`)
#Solving the Multiple toilets problem ----
# Plots can report up to three toilets; repeat the reduction for the
# second-toilet column range and stack the result later.
more.than.1.toilet <- Kanyama %>% filter(`Is there another toilet to observe` == "Yes")
more.than.2.toilet <- Kanyama %>% filter(`Is there a third toilet to observe` == "Yes")
#removing unused parameters
more.than.1.toilet <- simplify(more.than.1.toilet)
# NOTE(review): index_beg/index_end/index_sub are computed but never used
# below -- the column ranges 1:55 and 87:117 are hard-coded instead.
index_beg <- grep("Is there another", colnames(more.than.1.toilet))
index_end <- grep("Is there a third", colnames(more.than.1.toilet))
index_sub <- grep("Inter", colnames(more.than.1.toilet))
test1 <- more.than.1.toilet[, 1:55]
test2 <- more.than.1.toilet[,87:117]
test3 <- cbind(test1,test2)
columns_yes <- list(test3$`Is the toilet easily accessible to the following people?: Children - Yes`,
test3$`Is the toilet easily accessible to the following people?: Persons with dissability - Yes`,
test3$`Is the toilet easily accessible to the following people?: Women at night - Yes`,
test3$`Is the toilet easily accessible to the following?: Vacuum Tanker - Yes`,
test3$`Is the toilet easily accessible to the following?: Light Truck - Yes`,
test3$`Is the toilet easily accessible to the following?: Push Cart - Yes`)
columns_no <- list(test3$`Is the toilet easily accessible to the following people?: Children - No`,
test3$`Is the toilet easily accessible to the following people?: Persons with dissability - No`,
test3$`Is the toilet easily accessible to the following people?: Women at night - No`,
test3$`Is the toilet easily accessible to the following?: Vacuum Tanker - No`,
test3$`Is the toilet easily accessible to the following?: Light Truck - No`,
test3$`Is the toilet easily accessible to the following?: Push Cart - No`)
test3 <- grouping.columns(test3, columns_yes, columns_no, names)
rm(test1,test2)
test3 <- select(test3, -`Is there a third toilet to observe`)
# Same reduction for plots with a third toilet (columns 118 onward).
more.than.2.toilet <- simplify(more.than.2.toilet)
test4 <- more.than.2.toilet[,118:ncol(more.than.2.toilet)]
test5 <- more.than.2.toilet[,1:55]
test6 <- cbind(test5,test4)
columns_yes <- list(test6$`Is the toilet easily accessible to the following people?: Children - Yes`,
test6$`Is the toilet easily accessible to the following people?: Persons with dissability - Yes`,
test6$`Is the toilet easily accessible to the following people?: Women at night - Yes`,
test6$`Is the toilet easily accessible to the following?: Vacuum Tanker - Yes`,
test6$`Is the toilet easily accessible to the following?: Light Truck - Yes`,
test6$`Is the toilet easily accessible to the following?: Push Cart - Yes`)
columns_no <- list(test6$`Is the toilet easily accessible to the following people?: Children - No`,
test6$`Is the toilet easily accessible to the following people?: Persons with dissability - No`,
test6$`Is the toilet easily accessible to the following people?: Women at night - No`,
test6$`Is the toilet easily accessible to the following?: Vacuum Tanker - No`,
test6$`Is the toilet easily accessible to the following?: Light Truck - No`,
test6$`Is the toilet easily accessible to the following?: Push Cart - No`)
test6 <- grouping.columns(test6, columns_yes, columns_no, names)
rm(test4,test5)
# Align the column names of the per-toilet frames with the reduced base
# frame so they can be stacked row-wise into the final export.
names(test3) <- names(Kanyama.reduced)
names(test6) <- names(Kanyama.reduced)
Kanyama.final <- rbind(Kanyama.reduced, test3, test6)
write.csv(Kanyama.final, file = "Kanyama_reduced.csv", row.names = F)
#Still have to organize the code (check columns tomorrow)
a597555ecc350be6c9718c7e47052994ce06461f | b80df8ae387e2990b04007768ab8633d2c622db1 | /R/ecflow_suite.R | 6ade63c8707ceeb5fc60f563e562238f011e5a95 | [] | no_license | waternumbers/ecFlowR | 215517bf86891580bb0d3a449d24353a5e2fef35 | 923a5db0e06ffa07803967c14d9370dc74a72c2a | refs/heads/master | 2020-07-02T13:47:15.532954 | 2019-08-16T16:52:59 | 2019-08-16T16:52:59 | 201,542,980 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,501 | r | ecflow_suite.R | #' ecflow_suite definition
#'
#' @description Functions for building ecFlow suites
#'
#' @param name name of the suite, task, family or label
#' @param val desired value for \code{name} to take
#' @param par parameter to set
#' @param start start of cron period
#' @param end end of cron period
#' @param step step of cron
#' @param cond the condition for the trigger to occur
#' @param ... nested suite objects
#'
#' @details cron times should be provided as strings. Parameter values are escaped if they are character strings.
#'
#' @examples
#' ## defining a simple suite
#' tmp <- suite("ecFlowR_test",
#' edit("ECF_MICRO","%"),
#' edit("ECF_JOB_CMD","Rscript %ECF_JOB% > %ECF_JOBOUT% 2>&1"),
#' edit("ECF_KILL_CMD","kill -15 %ECF_RID%"),
#' edit("ECF_CHECK_CMD","ps --pid %ECF_RID% -f"),
#' edit("ECF_NODE","%ECF_HOST%"),
#' edit("ECF_STATUS_CMD","ps --pid %ECF_RID% -f > %ECF_JOB%.stat 2>&1"),
#' edit("etste",2),
#' task("shouldRun",
#' cron("00:00","23:59","00:01"),
#' label("lastRun"),
#' label("lastSucess")),
#' task("shouldFail",
#' cron("00:00","23:59","00:01"),
#' label("lastRun"),
#' label("lastSucess")),
#' task("catchFail",
#' cron("00:00","23:59","00:01"),
#' label("lastRun"),
#' label("lastSucess"))
#' )
#' writeLines(tmp)
#'
#' @name ecflow_suite
NULL
#' Internal helper: render a node header followed by its indented children.
#'
#' `type` is the ecFlow keyword (e.g. "suite", "task"), `name` the node name,
#' and `...` any number of character vectors of already-rendered child lines.
#' Each child line is prefixed with a tab; paste() inserts the separating
#' space, matching the original output format. Returns a character vector.
#'
#' Uses lapply() instead of sapply(): sapply's conditional simplification to
#' a matrix is fragile, while lapply() + unlist() always flattens in order.
join <- function(type, name, ...) {
  children <- lapply(list(...), function(lines) paste("\t", lines))
  c(paste(type, name), unlist(children))
}
#' Internal helper: single-quote character values for suite output.
#'
#' Wraps character input in single quotes; returns any other input
#' unchanged. The original used ifelse() on the scalar is.character()
#' test, which silently truncated a character vector to its first
#' element; a plain if/else keeps whole vectors intact while behaving
#' identically for the scalar inputs edit() passes.
escapeStr <- function(x) {
  if (is.character(x)) {
    paste0("'", x, "'")
  } else {
    x
  }
}
#' @name ecflow_suite
#' @export
suite <- function(name, ...) {
  # A suite node: header line, indented children, closing "endsuite".
  node_lines <- join("suite", name, ...)
  c(node_lines, "endsuite")
}
#' @name ecflow_suite
#' @export
family <- function(name, ...) {
  # A family block is terminated by the "endfamily" keyword.
  rendered <- join("family", name, ...)
  append(rendered, "endfamily")
}
#' @name ecflow_suite
#' @export
task <- function(name, ...) {
  # Tasks are leaf nodes: no terminating keyword is emitted.
  join("task", name, ...)
}
#' @name ecflow_suite
#' @export
edit <- function(par, val) {
  # Character values are single-quoted before being emitted.
  paste("edit", par, escapeStr(val))
}
#' @name ecflow_suite
#' @export
cron <- function(start, end, step) {
  # Emit "cron <start> <end> <step>"; times are plain strings (see @details).
  sprintf("cron %s %s %s", start, end, step)
}
#' @name ecflow_suite
#' @export
label <- function(name, val = "''") {
  # The default value is an empty quoted string, matching ecFlow syntax.
  sprintf("label %s %s", name, val)
}
#' @name ecflow_suite
#' @export
trigger <- function(cond = "''") {
  # With no condition given, an empty quoted string is emitted.
  sprintf("trigger %s", cond)
}
|
3be8b3a1f6f03eb5f7352e00689775a216b5dfb2 | f67fedd3f28b44ffb4f4bb350308301d15d74d1f | /R/functions.R | 09f2d1164de9e52c4033c64eda5b07cb8d8c3dfd | [] | no_license | nandhinidev/IsoPatGen | e547032a84e2d712df26a3fa2c04dc9b882c7d45 | e3ffba94cb25537381db932262d1c23f395d857c | refs/heads/master | 2020-05-30T10:00:00.096463 | 2019-05-31T21:59:14 | 2019-05-31T21:59:14 | 189,662,435 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,961 | r | functions.R | # Functions to calculate the isotope distribution for each element
# Probability that `ci` of the `carbon` carbon atoms are heavy (13C),
# using natural abundances 12C = 0.9893 / 13C = 0.0107 (multinomial model).
# Returns 0 when more isotopes are requested than atoms exist.
cdist <- function(carbon, ci) {
  if (carbon < ci) {
    return(0)
  }
  dmultinom(c(carbon - ci, ci), carbon, c(0.9893, 0.0107))
}
# Probability of exactly `hi` deuterium (2H) atoms among `hydrogen`
# hydrogen atoms; natural abundances 1H = 0.999885 / 2H = 0.000115.
# Returns 0 when more isotopes are requested than atoms exist.
hdist <- function(hydrogen, hi) {
  if (hydrogen < hi) {
    return(0)
  }
  dmultinom(c(hydrogen - hi, hi), hydrogen, c(0.999885, 0.000115))
}
# Probability of exactly `ni` 15N atoms among `nitrogen` nitrogen atoms;
# natural abundances 14N = 0.99632 / 15N = 0.00368.
# Returns 0 when more isotopes are requested than atoms exist.
ndist <- function(nitrogen, ni) {
  if (nitrogen < ni) {
    return(0)
  }
  dmultinom(c(nitrogen - ni, ni), nitrogen, c(0.99632, 0.00368))
}
# Joint probability of `o17i` 17O atoms and `o18i` 18O atoms among
# `oxygen` oxygen atoms; natural abundances 16O/17O/18O =
# 0.99757/0.00038/0.00205. Returns 0 when the heavy counts exceed the
# number of atoms.
odist <- function(oxygen, o17i, o18i) {
  if (oxygen < o17i + o18i) {
    return(0)
  }
  dmultinom(c(oxygen - o17i - o18i, o17i, o18i), oxygen, c(0.99757, 0.00038, 0.00205))
}
# Joint probability of `s33i` 33S atoms and `s34i` 34S atoms among
# `sulphur` sulphur atoms; abundances 32S/33S/34S = 0.9493/0.0076/0.0429
# (dmultinom renormalises them internally since they sum to 0.9998).
# Returns 0 when the heavy counts exceed the number of atoms.
sdist <- function(sulphur, s33i, s34i) {
  if (sulphur < s33i + s34i) {
    return(0)
  }
  dmultinom(c(sulphur - s33i - s34i, s33i, s34i), sulphur, c(0.9493, 0.0076, 0.0429))
}
# Function to build the isotopic envelope
# Enumerates every combination of heavy-isotope counts (13C, 2H, 15N, 17O,
# 18O, 33S, 34S) whose total extra nominal mass is at most `niso`.
# The 18O/34S columns advance in steps of 2 because those isotopes are two
# nominal mass units heavier (they are divided by 2 again in pred.int()).
# Requires data.table; note `CJ` is called unqualified below, so the package
# must be attached -- only the first call uses data.table::.
# Returns a data.table with columns ciso, hiso, ntiso, o17iso, o18iso,
# s33iso, s34iso and sum (total extra nominal mass of the row).
envelope <- function(niso) {
# Calculate all theoretical possibilities
chcomb <- data.table::CJ(seq(0,niso),seq(0,niso))
#Build isotopic envelope one element at a time
# `sum` is added by reference (:=); rows whose total exceeds niso are
# pruned after every cross-join to keep the combinatorial growth in check.
chcomb[,sum:=Reduce(`+`,.SD)]
chcombf <- chcomb[sum<(niso+1),1:2]
chncomb <- chcombf[,seq(0,niso),by=chcombf]
chncomb[,sum:=Reduce(`+`,.SD)]
chncombf <- chncomb[sum<(niso+1),1:3]
# Oxygen/sulphur grid: second column (18O or 34S) steps by 2 mass units.
oscomb <- CJ(seq(0,niso),seq(0,niso,2))
oscomb[,sum:=Reduce(`+`,.SD)]
oscombf <- oscomb[sum<(niso+1),1:2]
chnocomb <- chncombf[,as.list(oscombf),by=chncombf]
chnocomb[,sum:=Reduce(`+`,.SD)]
chnocombf <- chnocomb[sum<(niso+1),1:5]
chnoscomb <- chnocombf[,as.list(oscombf),by=chnocombf]
chnoscomb[,sum:=Reduce(`+`,.SD)]
chnoscombf <- chnoscomb[sum<(niso+1)]
colnames(chnoscombf) <- c("ciso","hiso","ntiso","o17iso","o18iso","s33iso","s34iso","sum")
return(chnoscombf)
}
# Predict abundances
#
# grid.data: data.table of isotope combinations (columns ciso, hiso, ntiso,
#            o17iso, o18iso, s33iso, s34iso, sum) plus elemental-composition
#            columns (carbon, hydrogen, nitrogen, oxygen, sulphur) -- the
#            latter are assumed added by the caller; TODO confirm.
# niso:      highest isotope peak (in nominal mass units) to compute.
#
# Returns a data.frame with columns `mass` (exact mass of each combination)
# and `prob` (its multinomial probability), one row per combination.
#
# NOTE(review): the original grew the result with rbind() inside the loop
# and seeded it with a zero-row data.frame whose column names (mz,
# Intensity) did not match the per-peak columns (mass, prob), which
# rbind.data.frame rejects. Results are now collected in a pre-allocated
# list and bound once at the end.
pred.int <- function(grid.data, niso) {
  peaks <- vector("list", niso + 1)
  for (i in seq_len(niso + 1)) {
    if ((i - 1) == 0) {
      cat("Calculating Monoisotopic Peak...\n")
    } else {
      cat(paste((i - 1), "isotope...\n", sep = " "))
    }
    # All combinations whose total extra nominal mass equals this peak.
    mat <- grid.data[sum == (i - 1)]
    # Per-element probabilities and exact masses (isotope masses in Da);
    # the 18O/34S counts are halved because envelope() stores them in
    # 2-mass-unit steps.
    mat[, cprob := mapply(cdist, ci = ciso, carbon = carbon)]
    mat[, cmass := 13.003355 * ciso + 12 * (carbon - ciso)]
    mat[, hprob := mapply(hdist, hi = hiso, hydrogen = hydrogen)]
    mat[, hmass := 2.014102 * hiso + 1.007825 * (hydrogen - hiso)]
    mat[, nprob := mapply(ndist, ni = ntiso, nitrogen = nitrogen)]
    mat[, nmass := 15.000109 * ntiso + 14.003074 * (nitrogen - ntiso)]
    mat[, oprob := mapply(odist, o17i = o17iso, o18i = o18iso/2, oxygen = oxygen)]
    mat[, omass := 16.999132 * o17iso + 17.999160 * (o18iso/2) + 15.994915 * (oxygen - o17iso - (o18iso/2))]
    mat[, sprob := mapply(sdist, s33i = s33iso, s34i = s34iso/2, sulphur = sulphur)]
    mat[, smass := 32.971458 * s33iso + 33.967867 * (s34iso/2) + 31.972071 * (sulphur - s33iso - (s34iso/2))]
    # Elements are independent: joint probability is the product of the
    # per-element probabilities, the mass is their sum.
    prob <- mat[, (cprob * hprob * nprob * oprob * sprob)]
    mass <- mat[, (cmass + hmass + nmass + omass + smass)]
    peaks[[i]] <- data.frame(cbind(mass, prob))
  }
  do.call(rbind, peaks)
}
6b2038e82de0fdc7743d12b9067550328bd5757d | eec85f9050812d9ee1a3954b59acf5255fa22a01 | /Plot/Script/Fig1.R | cdb54ba5a883088478ec1f4d822dc3536d2567fc | [] | no_license | kjshan/SARS-CoV-2-Mutation-Spectrum | 58ec588654bc720b60ac9327d27d1f4c6b58041d | 4c18e9e71edfd36001e86271d0f36d31c4c3b0cb | refs/heads/master | 2023-04-07T03:02:09.466515 | 2022-12-07T05:44:15 | 2022-12-07T05:44:15 | 295,629,435 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 19,936 | r | Fig1.R | ###########################################################
#SARS-CoV-2 mutation spectrum
###########################################################
#color and themes
# Palettes for the 12 substitution classes (Self1 = full Paired palette,
# Self = grey except C>A/G>T, Self3 = 6-class subset) and a shared theme.
library(RColorBrewer)
# NOTE(review): the brewer.pal() result is not assigned; the hex codes
# below were presumably copied from it.
brewer.pal(12, name="Paired")
Self1<-c("#1F78B4","#A6CEE3","#33A02C","#B2DF8A","#E31A1C","#FB9A99","#FF7F00","#FDBF6F","#6A3D9A","#CAB2D6","#B15928","#FFFF99" )
Self<-c("#AAAAAA","#AAAAAA","#AAAAAA","#AAAAAA","#E31A1C","#FB9A99","#AAAAAA","#AAAAAA","#AAAAAA","#AAAAAA","#AAAAAA","#AAAAAA" )
Self3<-c("#1F78B4","#33A02C","#E31A1C","#FF7F00","#6A3D9A","#B15928" )
theme_set(theme_bw()+theme(panel.grid=element_blank(),panel.border=element_rect(size=1,color="black")))
# NOTE(review): legend.position = 1 is unusual; ggplot2 expects "none",
# a side keyword, or a length-2 coordinate -- verify the intended effect.
my_theme<-theme(axis.line.x=element_line(size=0,color="black"),axis.line.y=element_line(size=0,color="black"),
axis.ticks=element_line(size=0.5,color="black"),axis.ticks.length=unit(0.05,"inches"),
axis.title.x = element_text(size=10),axis.title.y = element_text(size=10),
axis.text.x = element_text(angle = 45,hjust = 1,size=8,color="black"),
axis.text.y = element_text(size=10,color="black"),
strip.text.x = element_text(size=10,face = "bold"),
strip.background = element_rect(color = "black",size=1),
legend.position = 1,
legend.text = element_text(size=10),legend.title = element_text(size=10))
###########################################################
setwd("/Dell/Dell13/shankj/projects/Cov/Plot/20210414/Fig1/")
###########################################################
#SARS-CoV-2 mutation the in negative strand
###########################################################
#1.parameter's cut-off
# Merge the per-position mismatch counts into the barcode-level SNP table
# so a per-site mismatch fraction is available for filtering below.
Ctrl<-as.data.frame(fread("/Dell/Dell13/shankj/projects/Cov/Plot/20210414/Fig1/Vero_Total_Barcode_SNP.csv",stringsAsFactors = F))
Gaussi<-read.table("/Dell/Dell13/shankj/projects/Cov/Plot/20210414/Fig1/Guassi.txt",sep = "\t",header = F)
head(Ctrl)
head(Gaussi)
colnames(Gaussi)<-c("Pos","Mismatch_Number","Total_Number")
Gaussi$Frac<-Gaussi$Mismatch_Number/Gaussi$Total_Number
Ctrl<-merge(Ctrl,Gaussi,by=c("Pos"))
write.csv(Ctrl,"/Dell/Dell13/shankj/projects/Cov/Plot/20210414/Fig1/Vero_Total_Barcode_SNP_Mismatch.csv",row.names = F,quote = F)
#Ctrl<-as.data.frame(fread("/Dell/Dell13/shankj/projects/Cov/Plot/20210414/Fig1/Vero_Total_Barcode_SNP_Mismatch.csv",stringsAsFactors = F))
#FigS2A
# Baseline spectrum: only the known polymorphic sites and the known indel
# UMI are removed, no quality cut-offs yet.
Filter<-filter(Ctrl,
!Pos %in% c(4402,5062,8782,28144),#The four known polymorphism sites, which were different between our SARS-CoV-2 reference genome and BetaCoV/Korea/KCDC03/2020
!UMI %in% c("26257-26283")) #Known indel in BetaCoV/Korea/KCDC03/2020
head(Filter)
# NOTE(review): `plot` shadows graphics::plot for the rest of the script.
plot<-as.data.frame(Filter) %>% group_by(SNP) %>% dplyr::summarise(count=n())
max(plot$count)
pdf("Control_noCutoff_1.pdf",width = 3,height = 3)
plot$SNP<-factor(plot$SNP,levels = c("C > T","G > A","A > G","T > C","G > T","C > A","G > C","C > G","A > C","T > G","A > T","T > A"))
ggplot(data=plot, aes(x=factor(SNP),y=count,fill=SNP)) +
geom_bar(position="dodge", stat="identity",color="black",width = 0.8)+
scale_fill_manual(values = Self1)+
labs(x='',y='Mutation count')+
scale_y_continuous(breaks = seq(0,24000,6000),limits = c(0,24000))+
my_theme
dev.off()
#FigS2B
# Spectrum after cut-offs C1 and C2: at least two non-PCR-duplicate reads
# supporting the alternative allele and no reference reads in the UMI.
Filter<-filter(Ctrl,
!Pos %in% c(4402,5062,8782,28144),
UMI_Alt_no_PCR_reads>=2,
UMI_ref_reads==0,
!UMI %in% c("26257-26283")) #,
#UMI_Alt_no_PCR_reads==1
head(Filter)
plot<-as.data.frame(Filter) %>% group_by(SNP) %>% dplyr::summarise(count=n())
max(plot$count)
pdf("Control_Cutoff_C1and2.pdf",width = 3,height = 3)
plot$SNP<-factor(plot$SNP,levels = c("C > T","G > A","A > G","T > C","G > T","C > A","G > C","C > G","A > C","T > G","A > T","T > A"))
ggplot(data=plot, aes(x=factor(SNP),y=count,fill=SNP)) +
geom_bar(position="dodge", stat="identity",color="black",width = 0.8)+
scale_fill_manual(values = Self1)+
labs(x='',y='Mutation count')+ my_theme
#my_theme
dev.off()
#FigS2C
# Spectrum after cut-offs C1-C3: additionally require a distance of at
# least 15 bases from the junction site (Dis >= 15).
Filter<-filter(Ctrl,
!Pos %in% c(4402,5062,8782,28144),
Dis >= 15,
UMI_Alt_no_PCR_reads>=2,
UMI_ref_reads==0,
!UMI %in% c("26257-26283")) #,
#UMI_Alt_no_PCR_reads==1
head(Filter)
plot<-as.data.frame(Filter) %>% group_by(SNP) %>% dplyr::summarise(count=n())
max(plot$count)
pdf("Control_Cutoff_C1and2and3.pdf",width = 3,height = 3)
plot$SNP<-factor(plot$SNP,levels = c("C > T","G > A","A > G","T > C","G > T","C > A","G > C","C > G","A > C","T > G","A > T","T > A"))
ggplot(data=plot, aes(x=factor(SNP),y=count,fill=SNP)) +
geom_bar(position="dodge", stat="identity",color="black",width = 0.8)+
scale_fill_manual(values = Self1)+scale_y_continuous(limits = c(0,250))+
labs(x='',y='Mutation count')+ my_theme
#my_theme
dev.off()
###########################################################
#To discard potential polymorphisms
#cut-off 0.2%
###########################################################
#FigS2D
#cut-off 0.2%
# Fit a two-component Gaussian mixture to log10(mismatch fraction) to
# justify the 0.2% polymorphism cut-off used elsewhere in this script.
Gaussi<-read.table("/Dell/Dell13/shankj/projects/Cov/Plot/20210414/Fig1/Guassi.txt",sep = "\t",header = F)
head(Gaussi)
colnames(Gaussi)<-c("Pos","Mismatch_Number","Total_Number")
Gaussi$Frac<-Gaussi$Mismatch_Number/Gaussi$Total_Number
ggplot(Gaussi) +
#geom_vline(xintercept = log2(20),color="red",linetype="dotted")+
labs(x='log10(mismatch read number/total read number)')+
#scale_x_continuous(limits = c(0,0.01)) +
geom_density(aes(x =log10(Frac), y =..count..))
Gaussi<-filter(Gaussi, Frac<0.9,Frac>0)
library(mixtools)
# NOTE(review): normalmixEM uses random starts; results are only
# reproducible with set.seed(). The numeric constants below were copied
# from one particular fit (mid$mu / mid$sigma / mid$lambda).
mid<-mixtools::normalmixEM(log10(Gaussi$Frac), arbvar = T, epsilon = 1e-03)
mid$lambda
mid$mu
mid$sigma
pnorm(mean = -2.423157,sd=0.5649736,lower.tail = T,q=log10(0.002))*0.06368319
pnorm(mean = -3.044717 ,sd=0.2420202,lower.tail = T,q=log10(0.002))*0.93631681
0.01991428/(0.01991428+0.8646311)
ggplot(Gaussi) +
labs(x='log10(mismatch read number/total read number)')+
geom_density(aes(x =log10(Frac)))+my_theme
# NOTE(review): plot_mix_comps() is not defined in this file -- presumably
# a helper sourced elsewhere; confirm before rerunning.
pdf("/Dell/Dell13/shankj/projects/Cov/Plot/20210609/FigS3_E-2.pdf",width = 3,height = 3)
data.frame(x = mid$x) %>%
ggplot() +
geom_histogram(aes(x, y =..density..), binwidth = .2, colour = "grey30", fill = "grey", lwd = .5) +
stat_function(geom = "line", fun = plot_mix_comps, # here is the function
args = list(mid$mu[1], mid$sigma[1], lam = mid$lambda[1]),
colour = "red", lwd = 1) +
stat_function(geom = "line", fun = plot_mix_comps, # and here again because k = 2
args = list(mid$mu[2], mid$sigma[2], lam = mid$lambda[2]),
colour = "blue", lwd = 1) +
labs(x='log10(mismatch read number/total read number)',y="Density")+ my_theme+#2+guides(fill=F)+
geom_vline(xintercept=log10(0.002),col="black",linetype="dashed",lwd=1)
dev.off()
head(Ctrl)
###########################################################
#De novo mutations in Vero
#Fig1F
###########################################################
# Final mutation set: all cut-offs (C1-C3) plus the 0.2% mismatch-fraction
# polymorphism filter justified by the mixture model above.
Filter<-filter(Ctrl,
!Pos %in% c(4402,5062,8782,28144),
Dis >= 15,
UMI_Alt_no_PCR_reads>=2,
UMI_ref_reads==0,
All_Alt_reads/Total_Number <= 0.002,
!UMI %in% c("26257-26283")) #,
#UMI_Alt_no_PCR_reads==1
head(Filter)
CT_Pos<-filter(Filter,SNP=="C > T")$Pos
#mutation spectrum
plot<-as.data.frame(Filter) %>% group_by(SNP) %>% dplyr::summarise(count=n())
sum(plot$count)
pdf("Fig1F.pdf",width = 3,height = 3)
plot$SNP<-factor(plot$SNP,levels = c("C > T","G > A","A > G","T > C","G > T","C > A","G > C","C > G","A > C","T > G","A > T","T > A"))
ggplot(data=plot, aes(x=factor(SNP),y=count,fill=SNP)) +
geom_bar(position="dodge", stat="identity",color="black",width = 0.8)+
scale_fill_manual(values = Self1)+
labs(x='',y='Mutation count')+ my_theme
#my_theme
dev.off()
#Count
# Transition vs transversion tests on the hard-coded per-class counts.
ts<-sum(51,15,35,21)
tv<-sum(8,4,6,6,9,24,11,7)
ts+tv
binom.test(ts,(ts+tv),4/12)
binom.test(24,tv,1/8)
#Freq
#Coverage_consensus_reads.txt
#Base count in Consensus reads
#A 3631127
#T 3212571
#C 2354397
#G 2175628
ts<-c(9.65E-06,2.17E-05,6.90E-06,6.55E-06)
tv<-c(2.21E-06,1.10E-06,2.55E-06,2.55E-06,4.14E-06,1.10E-05,3.43E-06,2.18E-06)
t.test(log10(ts),log10(tv))
wilcox.test(c(9.65E-06,2.17E-05,6.90E-06,6.55E-06),
c(2.21E-06,1.10E-06,2.55E-06,2.55E-06,4.14E-06,1.10E-05,3.43E-06,2.18E-06))
t.test(c(2.21E-06,1.10E-06,2.55E-06,2.55E-06,4.14E-06,1.10E-05,3.43E-06,2.18E-06), mu = 1.10E-05)
#C>U VS G>A
# NOTE(review): the G base count used below (2185628) differs from the
# commented count above (2175628) -- verify which is correct.
fisher.test(matrix(c(51,15,2354397,2185628),nrow = 2))
# G>U VS C>A
fisher.test(matrix(c(24,6,2185628,2354397),nrow = 2))
#Fisher exact test, only use the sites covered by junction read pairs
fisher.test(matrix(c(22,6,(5863-22),(5492-6)),nrow=2))
#Potential_mutation_site.txt
# Var1 Freq
# A 8929
# C 5492
# G 5863
# T 9594
###########################################################
#Consenus VS Unconsensus
#FigS3A
###########################################################
#mismatch frequency
# Compare mismatch frequency in consensus reads (x = Aver) against
# non-consensus reads (x = BQ, plotted in red).
#Consensuse
Consensus<-as.data.frame(fread("/Dell/Dell13/shankj/projects/Cov/SARS_CoV2/ConsensusVSunconsensus/Consensus.result2",stringsAsFactors = F,header = F))
colnames(Consensus)<-c("Aver","SNP","Cover","SNP_Freq")
head(Consensus)
##Inconsensuse
Inconsensuse<-as.data.frame(fread("/Dell/Dell13/shankj/projects/Cov/SARS_CoV2/ConsensusVSunconsensus/Unconsensus.result2",stringsAsFactors = F,header = F))
colnames(Inconsensuse)<-c("BQ","SNP","Cover","SNP_Freq")
head(Inconsensuse)
pdf("ConsensusVSInconsensue_Mismatch_Freq.pdf",height = 3,width = 3,useDingbats = F)
ggplot()+
geom_point(data=Consensus,aes(x=Aver,y=log10(SNP_Freq)),col="black")+
geom_point(data=Inconsensuse,aes(x=BQ,y=log10(SNP_Freq)),col="red")+
scale_y_continuous(breaks = seq(-5,0,1),limits = c(-5,0))+
my_theme
dev.off()
###################################################################
#Small indel
#FigS3D-E
###################################################################
test<-read.table("/Dell/Dell13/shankj/projects/Cov/SARS_CoV2/Vero_JCR_default/JCR_indel/Polymorphism_Consensuse.Indel.txt",header = F,sep="\t")
colnames(test)<-c("Fraction","Barcode","Barcode_All_Read_pair_number","Covered_reads_number","Covered_readpair_number","Pos","Indel","Alt","Indel_len","Dis","Reads_Pos","Reads","Reads_PCR","non_PCR_number","SNP_read_number")
Filter<-filter(test,Fraction<=0.002,abs(Dis)>=15)
# NOTE(review): this write.csv() runs before `Count` is defined on the next
# line -- it only works when replaying the script after Count exists; the
# two statements should be swapped.
write.csv(Count,"/Dell/Dell13/shankj/projects/Cov/SARS_CoV2/Vero_JCR_default/JCR_indel/Indel.csv",row.names = F,quote = F)
Count<-Filter %>% group_by(Pos,Indel,Alt,Indel_len) %>% dplyr::summarise(count=n())
nrow(Count)
# NOTE(review): my_theme1 and my_theme2 are not defined in this view --
# presumably defined elsewhere; confirm before rerunning.
ggplot()+
geom_histogram(data=filter(Count,count==1),aes(x=Indel_len),binwidth=1)+
labs(x='Indel length(Insertion:+ \t Deletion:-)')+
my_theme1
#Distance
ggplot()+
geom_histogram(data=Filter,aes(x=Dis),binwidth=1)+
scale_x_continuous(limits = c(-260,170)) +
labs(x='Distance to junction site')+#my_theme1
my_theme2+guides(fill=F)
#read distance
as.vector(test$Reads_Pos)
# Split the comma-separated per-read positions into one numeric vector.
mid<-as.data.frame(na.omit(as.numeric(unlist(map(test$Reads_Pos,~str_split(.,','))))))
colnames(mid)<-"ReadPosition"
ggplot()+
geom_histogram(data=mid,aes(x=ReadPosition),binwidth=1)+
#scale_x_continuous(limits = c(0,220)) +
labs(x='Read Position')+my_theme
###################################################################
#A549
###################################################################
# Merge per-sample mutation tables with the matching mismatch tables for
# the three A549 replicates found in Read2/.
tmp <- list.files(path = "/Dell/Dell13/shankj/projects/Cov/SARS_CoV2/Read2/",pattern = "*.UMI.mutation.countPvalue.csv")
tmp2 <- list.files(path = "/Dell/Dell13/shankj/projects/Cov/SARS_CoV2/Read2/",pattern = "*.mismatch")
Filter<-data.frame()
# NOTE(review): growing Filter with rbind() in a loop is quadratic in
# general; harmless here for 3 files. The loop also assumes tmp and tmp2
# are sorted so that tmp[i] and tmp2[i] belong to the same sample.
for (i in 1:3) {
Mismatch<-read.table(paste0("/Dell/Dell13/shankj/projects/Cov/SARS_CoV2/Read2/",tmp2[i]),header = T,stringsAsFactors = F)
colnames(Mismatch)<-c("Pos","Mismatch_Number","Total_Number")
Mutation<-read.csv(paste0("/Dell/Dell13/shankj/projects/Cov/SARS_CoV2/Read2/",tmp[i]),header = T,stringsAsFactors = F)
name=sub(".UMI.mutation.countPvalue.csv","",tmp[i])
#Mutation$Qvalue<-qvalue(Mutation$Pvalue)$qvalue
Mid<-merge(Mutation,Mismatch,by=c("Pos"))
Mid$sample<-rep(name,nrow(Mid))
Filter<-rbind(Filter,Mid)
rm(Mid)
rm(Mutation)
}
#Filter$Qvalue<-qvalue(Filter$Pvalue)$qvalue
head(Filter)
nrow(Filter)
write.csv(Filter,"A549.csv",row.names = F,quote = F)
# NOTE(review): Mid was rm()'d inside the loop, so this head(Mid) errors;
# it only shows the Filter result defined below on a second pass.
head(Mid)
#C8782T,T28144C,C18060T
Mid<-filter(Filter,Dis >= 15,
UMI_Alt_no_PCR_reads>=2,
UMI_ref_reads==0,
!Pos %in% c(18060,8782,28144)) #Dis >= 15, Count <=5,UMI_Alt_Fraction==1
write.csv(Mid,"A549_Filter",row.names = F,quote = F)
plot<-as.data.frame(Mid %>% group_by(SNP) %>% dplyr::summarise(count=n()))
sum(plot$count)
binom.test(29,sum(5+12+11+1+4+29+11+10),1/8)
binom.test(65,sum(65+14+5+8),1/4)
pdf("A549_1.pdf",width = 3,height = 3)
plot$SNP<-factor(plot$SNP,levels = c("C > T","G > A","A > G","T > C","G > T","C > A","G > C","C > G","A > C","T > G","A > T","T > A"))
ggplot(data=plot, aes(x=factor(SNP),y=count,fill=SNP)) +
geom_bar(position="dodge", stat="identity",color="black",width = 0.8)+
scale_fill_manual(values = Self1)+scale_y_continuous(limits = c(0,80)) +
labs(x='',y='Mutation count')+ my_theme2+guides(fill=F)
dev.off()
# A 8637
# C 5338
# G 5677
# T 9253
fisher.test(matrix(c(29,11,(5677-29),(5338-11)),nrow=2))
#mutation rate
#C 260205+283168+273443
#A 378420+410749+394759
#G 239705+262490+254775
#T 293026+316413+303995
#
fisher.test(matrix(c(29,11,(239705+262490+254775),(260205+283168+273443)),nrow=2))
fisher.test(matrix(c(65,5,(260205+283168+273443),(239705+262490+254775)),nrow=2))
# Scatter/correlation panels comparing Vero counts and rates against the
# A549 replicates, public polymorphisms, and the Sci. Adv. dataset.
Mutation<-read.table("/Dell/Dell13/shankj/projects/Cov/Plot/20210414/Fig1/A549.txt",header = T,sep="\t")
head(Mutation)
Mutation$SNP<-factor(Mutation$SNP,levels = c("C > T","G > A","A > G","T > C","G > T","C > A","G > C","C > G","A > C","T > G","A > T","T > A"))
pdf("/Dell/Dell13/shankj/projects/Cov/Plot/20210519/SciAdv_VS_Vero_rate.pdf",height = 3,width = 3,useDingbats = F)
ggplot(data=Mutation ,aes(x=SCV2_count,y=SciAdv,col=SNP,size=3) )+ #y=log10(as.numeric(Error_Fraction)+10^(-11))
geom_point()+scale_y_continuous(limits = c(0,300)) +scale_x_continuous(limits = c(0,52)) +
scale_color_manual(values = Self1)+
labs(x='SARS-CoV-2 mutation rate in Vero',y='SARS-CoV-2 mutation from Sci. Adv.') +
my_theme2+guides(col=F,size=F)
dev.off()
cor.test(Mutation$SCV2_count,Mutation$SciAdv,method = "s")
ggplot(data=Mutation,aes(x=A549,y=SARS_CoV2_Polymorphism,col=SNP,size=3) )+ #y=log10(as.numeric(Error_Fraction)+10^(-11))
geom_point()+#scale_y_continuous(limits = c(-6,-4)) +scale_x_continuous(limits = c(-6,-4.5)) +
scale_color_manual(values = Self1)+
labs(x='# of SARS-CoV-2 mutation rate from A549',y='# of SARS-CoV-2 polymorphisms') +
my_theme2+guides(col=F,size=F)
cor.test(log10(Mutation$SCV2_count),log10(Mutation$A549))
ggplot(data=Mutation ,aes(x=SCV2_Rate,y=A549_Rate,col=SNP,size=3) )+ #y=log10(as.numeric(Error_Fraction)+10^(-11))
geom_point()+
scale_color_manual(values = Self1)+
scale_x_continuous(limits = c(0,2.5*10^-5)) +
labs(x='log10(SARS-CoV-2 mutation rate from Vero)',y='log10(SARS-CoV-2 mutation rate from A549)') +
my_theme2+guides(col=F,size=F)
cor.test(log10(Mutation$SCV2_Rate),log10(Mutation$A549_Rate))
pdf("/Dell/Dell13/shankj/projects/Cov/Plot/20210519/A549_VS_Vero_count.pdf",height = 3,width = 3,useDingbats = F)
ggplot(data=Mutation ,aes(x=SCV2_count,y=A549,col=SNP,size=3) )+ #y=log10(as.numeric(Error_Fraction)+10^(-11))
geom_point()+scale_y_continuous(limits = c(0,80)) +scale_x_continuous(limits = c(0,52)) +
scale_color_manual(values = Self1)+
labs(x='SARS-CoV-2 mutation count from Vero',y='SARS-CoV-2 mutation count from A549') +
my_theme2+guides(col=F,size=F)
dev.off()
cor.test(log10(Mutation$SCV2_count),log10(Mutation$A549))
pdf("/Dell/Dell13/shankj/projects/Cov/Plot/20210519/A549_rate.pdf",height = 3,width = 3,useDingbats = F)
ggplot(data=Mutation, aes(x=factor(SNP),y=A549_Rate*10^5,fill=SNP)) +
geom_bar(position="dodge", stat="identity",color="black",width = 0.8)+
scale_fill_manual(values = Self1)+#scale_y_continuous(limits = c(0,25)) +
labs(x='',y='Mutation rate (x10^-5)')+
my_theme2+guides(fill=F)
dev.off()
t.test(c(4.22323e-06,1.01358e-05,1.34669e-05,1.22427e-06,1.20425e-05,1.09477e-05), mu = 3.83106e-05)
t.test(c(1.18250e-05,6.60528e-06,8.75816e-06), mu = 7.95773e-05)
pdf("/Dell/Dell13/shankj/projects/Cov/Plot/20210712/20210716/Fig2D-1.pdf",height = 3,width = 3,useDingbats = F)
ggplot(data=Mutation ,aes(x=log10(SCV2_count),y=log10(SARS_CoV2_Polymorphism),col=SNP,size=3) )+ #y=log10(as.numeric(Error_Fraction)+10^(-11))
geom_point()+scale_y_continuous(limits = c(1,3.5)) +scale_x_continuous(limits = c(0.5,2)) +
scale_color_manual(values = Self1)+
labs(x='SARS-CoV-2 mutation in Vero',y='SARS-CoV-2 mutation polymorphism')# +
#my_theme2+guides(col=F,size=F)
dev.off()
cor.test(log10(Mutation$SCV2_count),log10(Mutation$SARS_CoV2_Polymorphism))
############################################################################################
#Fig1C
############################################################################################
# Mismatch fraction (Mis/Cover) of consensus reads as a function of
# distance to the junction site.
Junction<-as.data.frame(fread("/Dell/Dell13/shankj/projects/Cov/Plot/20210609/Junction_Site_mismatch_consensus_reads.txt",stringsAsFactors = F))
pdf("/Dell/Dell13/shankj/projects/Cov/Plot/20210609/Consensus_Mismatch.pdf",height = 3,width = 3,useDingbats = F)
colnames(Junction)<-c("Dis","Mis","Cover")
ggplot(data=Junction, aes(x=(Dis),y=Mis/Cover)) + #geom_abline(intercept=0.5, slope=0,color="red")+
geom_bar(position="dodge", stat="identity",width = 0.5)+
#theme_classic()+theme(panel.background=element_rect(colour='black'))+
labs(x='Position(Junction site=0) ',y='Mismatch number')+#barplot_theme +
#theme(axis.text.x = element_text(angle = 45, hjust = 0.5, vjust = 0.5))+
scale_x_continuous(breaks = seq(-30,30,15),limits = c(-30,30))+
scale_y_continuous(limits = c(0,0.025))+my_theme2
dev.off()
###############################################################################################
#FigS1C
###############################################################################################
# STAR splice-junction table (SJ.out.tab format): chromosome, start/end,
# strand, motif, annotation flag, unique/multi-mapped read counts, overhang.
Junction<-read.table("/Dell/Dell13/shankj/projects/Cov/SCV2SJ.out.tab",sep="\t",header = F,stringsAsFactors = F)
head(Junction)
colnames(Junction)<-c("Chr","start","end","strand","intron_motif","annotated","uniquely_mapped","multiple_mapped","Overhang")
#Junction2<-separate(Junction,UMI,into = c('start','end'),sep = '-')[,1:3]
# Total read support per junction = unique + multi-mapped reads.
Junction$sum<-Junction$uniquely_mapped+Junction$multiple_mapped
nrow(filter(Junction))
cutoff<-2^5
for (i in c(1:nrow(Junction))){
if (Junction[i,10]> cutoff){
Junction[i,10]<- cutoff
}
}
pdf("FigS1C.pdf",height = 4,width = 5.5)
jpeg("FigS1C.jpg",units = "cm",height = 8, width = 11,res = 300)
jpeg("/Dell/Dell13/shankj/projects/Cov/Plot/20210702/FigS1C_nolegend.jpg",units = "cm",height = 8, width = 8,res = 300)
ggplot() +
geom_point(data=Junction, aes(x=as.numeric(start),y=-as.numeric(end),color=log2(sum)),shape=15,size=0.2)+
geom_point(data=filter(Junction,start==3108,end==28158),aes(x=as.numeric(start),y=-as.numeric(end)),color="red",size=2,shape=23)+
scale_color_continuous(type = "viridis")+my_theme2+
ylab("End")+xlab("Start")+guides(col=F)
dev.off()
mid<-Junction %>% group_by(sum) %>% dplyr::summarise(count=n())
sum(filter(mid,count<=20)$count)
|
ff39a8e8b97cd9f455e02921eedb3b05e6640655 | bc2da1dc6085d7cb83e46da2c86e58ecef2b73a7 | /data/2019-01-22/prisonHolds.R | 7fb5583e266fc8c2463d1d2b285de8f909f71165 | [
"MIT"
] | permissive | tanyaberde/tidytuesday | f2aa130c80e8aa713f1611f3b03cacf5d26385a9 | b6202565f634135b85608eaa0d75708925a71a9a | refs/heads/master | 2020-04-08T14:08:08.591005 | 2019-03-20T23:59:09 | 2019-03-20T23:59:09 | 159,423,426 | 0 | 0 | MIT | 2018-11-28T01:16:13 | 2018-11-28T01:16:13 | null | UTF-8 | R | false | false | 2,076 | r | prisonHolds.R |
# TidyTuesday 2019-01-22: Vera Institute incarceration trends.
# Plots the average proportion of a county's jail population held for other
# agencies (relative to the 15-64 population), by urbanicity and year.
# library() errors loudly if a package is missing; require() only returns
# FALSE, silently leaving the rest of the script to fail later.
library(tidyverse)
library(ggplot2)  # already attached by tidyverse; kept for explicitness

# Get the data
url1 <- 'https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2019/2019-01-22/incarceration_trends.csv'
incarc_dat <- read_csv(url1)

# Add a row ID for later joining if needed
d2 <- incarc_dat %>%
  mutate(row_id = row_number())

# Reshape the per-agency hold counts into long form and recode agency names
# into readable labels. pivot_longer() replaces the superseded gather();
# row order differs from gather()'s output, but every downstream step is a
# grouped summary or plot, so results are unchanged.
# Have other_state coded above _state_ or else case_when will mislabel
holds <- d2 %>%
  select(yfips:total_pop_15to64, urbanicity:land_area, jail_from_state_prison:jail_from_ice, row_id) %>%
  pivot_longer(cols = jail_from_state_prison:jail_from_ice,
               names_to = "agency", values_to = "pop_count") %>%
  mutate(agency = case_when(str_detect(agency, "other_state_prison") ~ "Out-of-State Prison",
                            str_detect(agency, "from_state_prison") ~ "State Prison",
                            str_detect(agency, "other_state_jail") ~ "Out-of-State Jail",
                            str_detect(agency, "from_state_jail") ~ "State Jail",
                            str_detect(agency, "_fed") ~ "All Federal Authorities",
                            str_detect(agency, "_ice") ~ "ICE or INS",
                            TRUE ~ NA_character_))

# Holds expressed as a percentage of the facility's 15-64 population.
holds2 <- holds %>%
  mutate(ratio = (pop_count/total_pop_15to64)*100)

# Summary of number of individuals held for other agencies, depending on urbanicity and year
holds_summ <- holds2 %>%
  na.omit() %>%
  group_by(year, urbanicity, agency) %>%
  summarize(average_prop = mean(ratio),
            average_total_pop = mean(total_pop_15to64),
            average_pop_count = mean(pop_count)) %>%
  ungroup()

# Plot: one line per agency, faceted by urbanicity.
g <- ggplot(holds_summ,
            aes(x = year, y = average_prop, color = agency)) +
  geom_line(stat = "identity", size = 1.1) +
  scale_color_brewer(type = "div") +
  facet_wrap(~urbanicity) +
  labs(x = "Year", y = "Proportion to facility population aged 15-64", color = "Agency") +
  ggtitle("Prisoners being held for in-state or external authorities, per urbanicity category") +
  theme_minimal(base_size = 12)
print(g)

ggsave("holds.png",
       plot = g,
       width = 9,
       height = 7)
f885371456d91d27f4d497a63db49105410ff6da | 3fc3964396f8010aae9345d37f551c4431c52ff9 | /R/read_label.R | d720dc7c2b301d6993f8214e50eb5c180f5ea632 | [] | no_license | muschellij2/freesurfer | ff96f465ebbfbb0b7ce18644be5f4c5ea753fc45 | 7d70f616e760d8d3a453a652d98756e34877fed7 | refs/heads/master | 2021-06-24T00:57:12.644687 | 2020-12-08T18:41:34 | 2020-12-08T18:41:34 | 67,370,835 | 9 | 8 | null | 2020-11-15T23:42:38 | 2016-09-04T22:12:47 | R | UTF-8 | R | false | false | 1,297 | r | read_label.R | #' @title Read Label File
#' @description Reads an \code{label} file from an individual subject
#'
#' @param file label file from Freesurfer
#'
#' @return \code{data.frame} with 5 columns:
#' \describe{
#' \item{\code{vertex_num}:}{Vertex Number}
#' \item{\code{r_coord}:}{Coordinate in RL direction}
#' \item{\code{a_coord}:}{Coordinate in AP direction}
#' \item{\code{s_coord}:}{Coordinate in SI direction}
#' \item{\code{value}:}{ Value of label (depends on file)}
#' }
#' @export
#'
#' @examples
#' if (have_fs()) {
#' file = file.path(fs_subj_dir(), "bert", "label", "lh.BA1.label")
#' if (!file.exists(file)) {
#' file = file.path(fs_subj_dir(), "bert", "label", "lh.BA1_exvivo.label")
#' }
#' out = read_fs_label(file)
#' }
read_fs_label = function(file) {
  # First line is a free-text comment header; second line declares how many
  # data rows follow; everything after that is one whitespace-separated row
  # per labeled vertex.
  raw_lines = readLines(con = file)
  file_comment = raw_lines[[1]]
  declared_n = as.numeric(raw_lines[[2]])
  data_lines = raw_lines[-(1:2)]
  # Sanity check: the body should contain exactly as many rows as declared.
  if (length(data_lines) != declared_n) {
    warning("Number of lines do not match file specification! ")
  }
  # Split each row on spaces, discarding the empty tokens produced by runs
  # of consecutive spaces, then stack the rows into a character matrix.
  tokens = lapply(strsplit(data_lines, " "), function(fields) fields[nzchar(fields)])
  mat = do.call(rbind, tokens)
  colnames(mat) = c("vertex_num", "r_coord", "a_coord", "s_coord", "value")
  # Columns are intentionally left as character (no numeric coercion), matching
  # the documented return contract; the header comment travels along as an
  # attribute.
  result = data.frame(mat, stringsAsFactors = FALSE)
  attr(result, "comment") = file_comment
  result
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.