blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cbdee02ea9a516f8fe87182d401e3431926a5208 | f257ca25070d67cb26915f83d1cfe1db0d26f21e | /bootstrap/csquare_lookup.R | eb1b3106925ad1fabf5860d39dd3b93fa920c3b5 | [
"MIT"
] | permissive | ices-taf/2021_2007-36_SpecialRequest | 9433f37c0a0f34d3dc06d904172c8308cb9d91d0 | 7fdb46342acfbb6c0bfe01f735036d906f1dab02 | refs/heads/main | 2023-08-05T19:34:58.711433 | 2021-09-24T11:38:12 | 2021-09-24T11:38:12 | 408,114,517 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 968 | r | csquare_lookup.R | #' Statistical Rectangle lookup table subset to SWE EEZ
#'
#' A list of stat squares and associated info, such as ICES area
#'
#' @name csquare_lookup
#' @references \url{https://ices.dk}
#' @format a csv file
#' @tafOriginator ICES VMS and Logbook database
#' @tafYear 2020
#' @tafAccess Public
#' @tafSource script
# utility function here:
# Build a lookup of c-squares (statistical rectangles) that fall inside the
# OSPAR region, and save the subset for downstream TAF scripts.
library(ggplot2)
library(sf)
taf.library(sfdSAR)
# get the ospar regions
# Loads the `ospar` polygon layer from the packaged .rData (verbose prints
# the object names loaded).
load(taf.data.path("shapefiles/ospar.rData"), verbose = TRUE)
# Sanity check: report any invalid geometries in the OSPAR polygons.
st_is_valid(ospar, reason = TRUE)
# C-square codes to look up; centre lat/lon are derived from the code itself.
csquares <- read.taf(taf.data.path("csquare_list/csquares.csv"))
csquares$lat <- sfdSAR::csquare_lat(csquares$csquare)
csquares$lon <- sfdSAR::csquare_lon(csquares$csquare)
# Promote to an sf point layer in WGS84 (EPSG:4326).
csquares <- st_as_sf(csquares, coords = c("lon", "lat"), crs = 4326)
msg("calculating csquares within ospar")
# Dense point-in-polygon test; keep the rows whose centre falls inside OSPAR.
idx <- which(st_within(csquares, ospar, sparse = FALSE))
msg("done calculating")
csquares_ospar <- csquares[idx, ]
# Persist the OSPAR subset for later scripts in this workflow.
save(csquares_ospar, file = "csquares_ospar.rData")
|
fe5d8deeedd31aef3471b9e233e43e3c657ac788 | 579ea905352ed45a672e76081e999cba0c58ebb1 | /scripts/track_description.R | 2d6c53d0061f75de021a608696da10e8344a20d0 | [] | no_license | Fjellrev/SeaWinds | d40a70f3fa2505d894be878acc377d6014e7da90 | 8b2e9d95be7057741a0b8be2f4dc91ce37ebcf74 | refs/heads/main | 2023-06-03T00:38:16.359754 | 2021-06-15T13:28:35 | 2021-06-15T13:28:35 | 342,537,155 | 1 | 2 | null | null | null | null | UTF-8 | R | false | false | 2,005 | r | track_description.R | ##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
##
## SeaWinds - Data processing & Analyses
##
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Attach all required packages quietly; require() returns TRUE/FALSE per
# package. NOTE(review): failures are silently ignored here, so a missing
# package only surfaces later as a "could not find function" error.
sapply(c('sf','sp','spData','tidyverse', 'data.table','rgeos','MASS','plyr', 'magrittr', 'gdistance','geosphere','raster', "doParallel", "foreach",
         'ggplot2', 'rWind','windR','adehabitatHR',"ggpubr"),
       function(x) suppressPackageStartupMessages(require(x , character.only = TRUE, quietly = TRUE)))
### utility functions ----
source("functions/FUNCTION_OverlapPoly.r")
source('functions/FUNCTION_QuickMap.r')
cosd <-function(x) cos(x*pi/180) #cos of an angle in degrees
sind <-function(x) sin(x*pi/180) #sine of an angle in degrees
# Input locations: treated kittiwake (BLKI) tracks and the base world map.
bird_path <- "data/Kittiwake_data_treated"
bird_filename <- "BLKI_tracks.rds"
map_path <- "data/baseline_data"
map_filename <- "SEAwinds_Worldmap_res-c.rds"
### Read BLKI data ----
bird_data <- readRDS(paste0(bird_path,'/', bird_filename)) %>% as.data.table
world_map <- st_as_sf(readRDS(paste0(map_path,"/",map_filename)), coords=c("lon","lat"), crs = 4326)
# Next position along each individual's (ring) track, used for segment lengths.
bird_data[, x2 := data.table::shift(x, type = 'lead'), by = ring]
bird_data[, y2 := data.table::shift(y, type = 'lead'), by = ring]
# Per-row summaries computed by re-scanning the whole table for every row.
# NOTE(review): `by = 1:nrow(bird_data)` with a full-table subset inside is
# O(n^2); grouping by ring/burst directly would be much faster -- verify
# identical results before changing.
bird_data[, n_seg := length(unique(bird_data$burst[bird_data$ring==ring])), by = 1:nrow(bird_data)]
bird_data[, dist :=distGeo(c(x, y),c(x2, y2)), by = 1:nrow(bird_data)]
bird_data[, length_id := sum(bird_data$dist[bird_data$ring==ring], na.rm=T), by = 1:nrow(bird_data)]
bird_data[, length_burst := sum(bird_data$dist[bird_data$burst==burst], na.rm=T), by = 1:nrow(bird_data)]
# Print per-colony summary stats of burst track lengths (km).
for (col in unique(bird_data$colony))
{
  cat(paste0(col, "\n"))
  #print(summary(bird_data$length_id[bird_data$colony==col]/1000))
  #print(sd(bird_data$length_id[bird_data$colony==col], na.rm = T)/1000)
  #print(summary(bird_data$n_seg[bird_data$colony==col]))
  #print(sd(bird_data$n_seg[bird_data$colony==col], na.rm = T))
  print(summary(bird_data$length_burst[bird_data$colony==col]/1000))
  print(sd(bird_data$length_burst[bird_data$colony==col], na.rm = T)/1000)
}
|
1e341ad3e98aeedc764707590c5a9489bfeee2b4 | 020bdfef944d184f248cb26cda9f42346003128f | /Analysis-on-OTT-Platforms/After and During Pandemic (Line Graph).R | c4140f8541fbe8bdbb4bd1e21daf241fc38fab57 | [] | no_license | Nemwos/Analysis-on-OTT-Platforms | e523a6552f82cde8be82b564620d60c88c44d37e | 85994e207a4397e7bde86acae7d1a04ea541434e | refs/heads/main | 2023-07-31T13:09:45.857566 | 2021-10-01T08:40:34 | 2021-10-01T08:40:34 | 412,390,726 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 380 | r | After and During Pandemic (Line Graph).R | #####After,During######
# OTT viewing time: after vs. during the pandemic (line graph) ---------------
# Compares the frequency distribution of average viewing minutes during the
# pandemic (DP column) with after the pandemic (AP column).

# Bug fix: the original code evaluated the path string on its own, without
# calling setwd(), so the working directory was never actually changed.
setwd("C:\\College\\R Project\\R Case Study")

# Survey responses; DP/AP hold average minutes during/after the pandemic.
data <- read.csv("ott data.csv")

# Frequency tables of average minutes for each period.
a <- table(data$DP)  # during pandemic
b <- table(data$AP)  # after pandemic (i.e. "now")

# Draw the "during" distribution first, then overlay the "after" series.
plot(a, type = "l", col = "Yellow", xlab = "Avg Min", ylab = "Frequency")
lines(b, type = "l", col = "Green")
legend("topright",
       legend = c("Now", "During Pandemic"),
       col = c("Green", "Yellow"),
       pch = c(4, 4)
)
|
d133e5ecd824f93a1fb487fecbdd2994b0fc319b | aee39ba8f18f1d85a9a6a6fcadd71b3f70ef1ae8 | /notes/rank.R | b51a35aa6436587d62ff236ad5942fbdcb503fc3 | [] | no_license | abarciauskas-bgse/computing-lab | 93dd817f4180a8d39633666561619b27e1a6b975 | 15756b0eb56ea4a3ccb839441f74213cc8c87e64 | refs/heads/master | 2016-08-12T08:22:40.696639 | 2015-11-24T14:58:38 | 2015-11-24T14:58:38 | 44,059,076 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 609 | r | rank.R | (r1 <- rank(x1 <- c(3, 1, 4, 15, 92)))
x2 <- c(3, 1, 4, 1, 5, 9, 2, 6, 5, 3, 5)
names(x2) <- letters[1:11]
(r2 <- rank(x2)) # ties are averaged
## rank() is "idempotent": rank(rank(x)) == rank(x) :
stopifnot(rank(r1) == r1, rank(r2) == r2)
## ranks without averaging
rank(x2, ties.method= "first") # first occurrence wins
rank(x2, ties.method= "random") # ties broken at random
rank(x2, ties.method= "random") # and again
## keep ties ties, no average
(rma <- rank(x2, ties.method= "max")) # as used classically
(rmi <- rank(x2, ties.method= "min")) # as in Sports
stopifnot(rma + rmi == round(r2 + r2)) |
dacf845a9ddaef1019d70a3f50b19cbfe98d6d45 | e2f3320198c9244d9fa33bb682386120e8324d54 | /man/match_datasets.Rd | 9111fc38c0771b4069328ecb07279ba63ff7c542 | [] | no_license | leffj/mctoolsr | 495f1d3d87425d52a4fea3632a5d94221643689e | c7c9404ee932a07d4d1768b14895a2174324a118 | refs/heads/master | 2022-09-09T06:02:50.086961 | 2022-08-03T05:26:26 | 2022-08-03T05:26:26 | 32,060,772 | 17 | 8 | null | 2022-08-03T05:26:26 | 2015-03-12T06:24:28 | HTML | UTF-8 | R | false | true | 1,265 | rd | match_datasets.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/input_output_filter.R
\name{match_datasets}
\alias{match_datasets}
\title{Match up samples from two datasets}
\usage{
match_datasets(ds1, ds2, match_taxa = FALSE)
}
\arguments{
\item{ds1}{The first dataset as loaded by \code{\link{load_taxa_table}}.}
\item{ds2}{The second dataset.}
\item{match_taxa}{[OPTIONAL] Do you want to match taxa in addition to sample
IDs? If \code{TRUE}, taxa will be removed if they are not in common between
datasets. Default = \code{FALSE}}
}
\value{
A list variable with the matched ds1 as the first element and ds2 as
the second element.
}
\description{
Function to match up sample order and optionally, taxa order
from two datasets that contain some overlapping sample IDs. Sample IDs that
are not present in both datasets will be dropped. The output is a list
containing the two filtered datasets in the same order as they were input.
}
\examples{
# This function would normally be run with two different datasets not the
# same one as in this example. For example if you had bacterial and fungal
# data for the same samples and wanted to compare the two.
match_datasets(fruits_veggies, fruits_veggies)
}
\concept{Taxa table manipulation}
|
a2e544bc35af1a57a2a56c8d656657ef020b5c68 | 8df9e457baa116ad2cce228c307aa3a7d0ed1c84 | /Plot1.R | 0b62b5ddc197f130271299642b8b95a172b71066 | [] | no_license | jackspringman/Exploratory-Data-Analysis-Course-Project-2 | 045b4b6c9e3a8cc2d59c44c78e2cd5d849194262 | ec244d3529f1c65104a8261242a5bf942746de64 | refs/heads/master | 2021-01-21T05:05:14.767135 | 2015-02-22T17:05:43 | 2015-02-22T17:05:43 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 568 | r | Plot1.R | # Read in RDS files
# Load the NEI emissions table and source classification table (RDS format).
NEI <- readRDS("R Programming/summarySCC_PM25.rds")
SCC <- readRDS("R Programming/Source_Classification_Code.rds")
# NOTE(review): SCC is read but not used anywhere in this plot.
# Aggregate PM 2.5 emissions for 1999, 2002, 2005, 2008
TotalPM25ByYear <- aggregate(Emissions ~ year, NEI, sum)
# Plot aggregated emissions as a bar chart to answer whether total US emissions
# have decreased between 1999 and 2008
png('plot1.png')
barplot(height=TotalPM25ByYear$Emissions,
        names.arg=TotalPM25ByYear$year, xlab="Year",
        ylab="Emissions (tons)", main=expression("Total PM"[2.5]*" Emissions, by Year"))
dev.off()
# Plot 1: histogram of Global Active Power for 1-2 February 2007.
#
# Reads the UCI household power consumption file (semicolon-separated),
# keeps the two target dates, coerces the measurement column to numeric,
# and writes the histogram to plot1.png in the working directory.
plot1 <- function() {
  # Load the raw data; the file must exist at this relative path.
  power <- read.csv(
    "exdata-data-household_power_consumption/household_power_consumption.txt",
    sep = ";"
  )

  # Restrict to the two dates of interest (d/m/Y strings in the file).
  power <- subset(power, power$Date %in% c("1/2/2007", "2/2/2007"))

  # The column arrives as text ("?" for missing); convert to numeric.
  power$Global_active_power <- as.numeric(as.character(power$Global_active_power))

  # Render the histogram into a PNG device, then close it.
  png("plot1.png")
  hist(power$Global_active_power,
       col = "red",
       xlab = "Global Active Power (kilowatts)",
       main = "Global Active Power")
  dev.off()
}
|
5caa21ab1aa0aea5b91fabb1ce64ac658bb6fc4f | 037ddd4a706753cf89acad97f504e6863bcc08b6 | /man/tophits.Rd | 5f3ef479e45c235fe97002d3fe60a8f8b219aeed | [
"MIT"
] | permissive | mcgml/ieugwasr | 7ebd65af51508fc6308eae8b5a3243497e046963 | a68d9e5338ef75c0c8b4a4e6a91b17b11c8d3d45 | refs/heads/master | 2021-07-16T16:47:31.560328 | 2021-06-23T16:13:32 | 2021-06-23T16:13:32 | 235,069,948 | 0 | 0 | NOASSERTION | 2020-01-20T09:58:52 | 2020-01-20T09:58:51 | null | UTF-8 | R | false | true | 1,386 | rd | tophits.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/query.R
\name{tophits}
\alias{tophits}
\title{Obtain top hits from a GWAS dataset}
\usage{
tophits(
id,
pval = 5e-08,
clump = 1,
r2 = 0.001,
kb = 10000,
pop = "EUR",
force_server = FALSE,
access_token = check_access_token()
)
}
\arguments{
\item{id}{Array of GWAS studies to query. See \code{gwasinfo} for available studies}
\item{pval}{use this p-value threshold. Default = 5e-8}
\item{clump}{whether to clump (1) or not (0). Default = 1}
\item{r2}{use this clumping r2 threshold. Default is very strict, 0.001}
\item{kb}{use this clumping kb window. Default is very strict, 10000}
\item{pop}{Super-population to use as reference panel. Default = "EUR". Options are EUR, SAS, EAS, AFR, AMR}
\item{force_server}{By default will return preclumped hits. p-value threshold 5e-8, with r2 threshold 0.001 and kb threshold 10000, using only SNPs with MAF > 0.01 in the European samples in 1000 genomes. If force_server = TRUE then will recompute using server side LD reference panel.}
\item{access_token}{Google OAuth2 access token. Used to authenticate level of access to data. By default, checks if already authenticated through \code{get_access_token} and if not then does not perform authentication}
}
\value{
Dataframe
}
\description{
By default performs clumping on the server side.
}
|
5ab349f1063125e79c0e518fb0202c474bcfe30a | 09ae0effee47cd56c95e9b1e382a18112eba890f | /plot1.R | 6ebf17db5fed02511766d88bd4d419d454544814 | [] | no_license | Vishwanathkvs/Coursera-exploratory-data-analysis-course-project-2 | 0850bacf85795c8a49d23396943352d2d94495a5 | 7f2e082f03f2d2a265aa1c80eba27c72d6bbf3ed | refs/heads/master | 2021-01-21T14:39:35.415017 | 2017-06-24T20:48:34 | 2017-06-24T20:48:34 | 95,321,917 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 822 | r | plot1.R | #Setting working directory
setwd("~/R/Coursera/exdata%2Fdata%2FNEI_data")
## This first line will likely take a few seconds. Be patient!
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# NOTE(review): SCC is loaded but not used for this particular plot.
# Have total emissions from PM2.5 decreased in the United States from 1999 to 2008?
# Using the base plotting system, make a plot showing the total PM2.5 emission from all sources
# for each of the years 1999, 2002, 2005, and 2008.
# Sum emissions across all sources within each year.
aggregatedTotalByYear <- aggregate(Emissions ~ year, NEI, sum)
png("plot1.png",width=480,height=480,units="px",bg="transparent")
barplot(
  (aggregatedTotalByYear$Emissions)/10^6,
  names.arg=aggregatedTotalByYear$year,
  xlab="Year",
  ylab="PM2.5 Emissions (10^6 Tons)",
  main="Total PM2.5 Emissions From All US Sources"
)
dev.off()
45fc7362337e7afe93f580348bcce3bcfea41e0a | 35b2adcba4aac515dc54a2eeb36bf502de8a8467 | /textanalysis.R | 6a67520b0300e702f2717261c53392ad290526ad | [] | no_license | Sara-Abrams/PathfinderCenter | 43d2dea232a5c73e7c82dcdde8dd723f8d1f85e0 | b6cd3bb9dcfd2bee6ddbaf716fddaa1fab1522d8 | refs/heads/master | 2021-05-04T01:31:21.971034 | 2019-01-25T15:46:33 | 2019-01-25T15:46:33 | 120,356,974 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,385 | r | textanalysis.R | library(dplyr)
library(tidytext)
library (ggplot2)
library(purrr)
library(tm)
library(SnowballC)
library(wordcloud)
library(RColorBrewer)
library(tidyr)
library(igraph)
library(ggraph)
library(topicmodels)
library(tidyr)
data("stop_words")
# Read one interview CSV and tag every row with its source file name.
# The file is comma-separated despite using read.csv2 (sep is overridden);
# the literal string "N/A" is mapped to NA.
read_plus <- function(flnm){
  parsed <- read.csv2(file = flnm, stringsAsFactors = FALSE, sep = ",",
                      na.strings = c("N/A"))
  # rep() handles the zero-row case the same way mutate()'s recycling does.
  parsed[["filename"]] <- rep(flnm, nrow(parsed))
  parsed
}
# Read every CSV in the working directory into one data frame, tagging each
# row with its source file via read_plus().
InterviewData <- list.files(pattern="csv") %>% map_df(~read_plus(.))
# Interviewer names -- their utterances are excluded from the analysis.
researcherName <- c("Brandon Dorr", "Chris Frias", "Vasishta Somayaji")
# Custom stopwords (mostly Porter stems and filler words), removed on top of
# the standard tidytext stop_words lexicon.
# NOTE(review): the entries ''ve ' and ''re ' below are malformed string
# literals (an empty string '' followed by a bare symbol) and make this
# expression a parse error; they were probably meant to be "'ve" / "'re".
myStopwords <- c('can', 'say','one','way','use',
'also','howev','tell','will',
'much','need','take','tend','even',
'like','particular','rather','said',
'get','well','make','ask','come','end',
'first','two','help','often','may',
'might','see','someth','thing','point',
'post','look','right','now','think',''ve ',
''re ','anoth','put','set','new','good',
'want','sure','kind','larg','yes,','day','etc',
'quit','sinc','attempt','lack','seen','awar',
'littl','ever','moreov','though','found','abl',
'enough','far','earli','away','achiev','draw',
'last','never','brief','bit','entir','brief',
'great','lot', "like", "just", "really", "yes", "lot",
"really", "day", "know", "think", "things", "going",
"realli", "want", "get", "work", "yeah", "something",
"well", "whatever", "whatev", "okay","monday","tuesday",
"wednesday", "thursday", "friday", "saturday", "sunday", "always", "alway",
"stuff", "maybe", "mayb", "got", "'ll", "'re", "'ve", "'ll", "'ll", "'re",
"re", "'ve", "'ve", "week", "high", "feel")
# Keep only interviewee utterances (drop researcher speech).
ResponseData <- InterviewData[which(!(InterviewData$speakerName %in% researcherName)), "textContent"]
# NOTE(review): the trailing ']' below is a syntax error, and ResponseData is
# a plain character vector at this point (no $speakerName column) -- this
# line appears to be leftover work in progress.
idNum <- unique(ResponseData$speakerName)]
# NOTE(review): the two incomplete assignments below ("TopicData <-" and
# "TopicData$content <-") are unfinished and will not parse; this per-speaker
# TopicData section looks abandoned.
TopicData <- 
for(i in idNum){
TopicData$ID <- i
TopicData$content <- 
}
# Build a tm corpus from interviewee utterances only.
qC <- VCorpus(VectorSource(InterviewData[which(!(InterviewData$speakerName %in% researcherName)), "textContent"]))
# Standard cleaning pipeline: lowercase, strip numbers/punctuation, remove
# English stopwords, collapse whitespace, Porter-stem, then drop the custom
# stopword list (which is itself mostly stems).
qCC <- qC %>% tm_map(., content_transformer(tolower)) %>%
tm_map(., removeNumbers) %>%
tm_map(., removeWords, stopwords("english")) %>%
tm_map(., removePunctuation) %>%
tm_map(., stripWhitespace) %>%
tm_map(., stemDocument) %>%
tm_map(., removeWords, myStopwords )
# Term-document matrix -> overall term frequencies, sorted descending.
dtm <- TermDocumentMatrix(qCC)
m <- as.matrix(dtm)
v <-sort(rowSums(m), decreasing = TRUE)
d <- data.frame(word = names(v), freq = v)
head(d, 100)
# Word cloud of terms appearing at least 40 times.
wordcloud(d$word, d$freq, min.freq = 40)
#relationship between words? Ngram?
#response <- data.frame(text = d$word)
#r_bigram <- response %>% unnest_tokens(output = bigram, input = text, token = "ngrams", n = 2)
#r_bigram_graph <- r_bigram %>% separate(bigram, c("word1", "word2"), sep = " ") %>%
# count(word1, word2, sort = TRUE) %>%
# unite(bigram, c("word1", "word2"), sep = " ") %>% graph_from_data_frame()
#ggraph(r_bigram_graph, layout = "fr") +
# geom_edge_link() +
# geom_node_point() +
# geom_node_text(aes(label=name), vjust = 1, hjust = 1)
#trigram
#qB <- data_frame(text=InterviewData[which(!(InterviewData$speakerName %in% researcherName)), "textContent"])
##Topic Modeling?
# Topic modeling: fit a 2-topic LDA on the cleaned corpus.
int_dtm <- DocumentTermMatrix(qCC)
# LDA cannot handle all-zero documents; keep only rows with at least one term.
ui <- unique(int_dtm$i)
int_dtm <- int_dtm[ui,]
int_lda <- LDA(int_dtm, k=2, control= list(seed=1234))
# Per-topic word probabilities (beta matrix) in tidy form.
int_topics <-tidy(int_lda, matrix="beta")
int_topics
# Top 10 terms per topic, ordered by probability.
int_topic_terms <- int_topics %>% group_by(topic) %>%
top_n(10, beta) %>%
ungroup() %>%
arrange(topic, -beta)
# Faceted bar chart of the top terms for each topic.
int_topic_terms %>%
mutate(term=reorder(term, beta)) %>%
ggplot(aes(term, beta, fill=factor(topic))) +
geom_col(show.legend = FALSE) +
facet_wrap( ~ topic, scales = "free") +
coord_flip ()
# Terms most distinctive between the two topics: spread betas wide and take
# the log2 ratio. NOTE(review): spread() is superseded by pivot_wider().
beta_spread <- int_topics %>%
mutate(topic = paste0("topic", topic)) %>%
spread(topic, beta) %>%
filter(topic1 > .001 | topic2 > .001) %>%
mutate( log_ratio = log2(topic2/topic1))
beta_spread
|
26a5294a801397a140017cba9d4ddfde97642e96 | af3074731acae1fdea91fc0f4b3a2a33e87b0c1e | /R/_ARCHIVE/CopyOfnwos_response_rate.R | 654f9784f14371af62bc1f8efe2fb5518fd96ce4 | [] | no_license | bbutler01/nwos | ee5eb2b11d5dd38aaaaaa822476cf0d85a11a47b | fd7d86eebaf1aef4699bf68d5a89839fefe48c64 | refs/heads/master | 2021-01-25T12:42:08.163289 | 2019-08-29T18:19:27 | 2019-08-29T18:19:27 | 123,034,933 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,001 | r | CopyOfnwos_response_rate.R | #' NWOS Response Rate
#'
#' This function calculates response rates for the NWOS.
#' @usage nwos_response_rate(index = NA, data, stratum = "FFO", point.count = "POINT_COUNT", response = "RESPONSE")
#' @param index vector of observations in data to include. If NA, the index is set to the row names of data (i.e., uses all of the rows in data)
#' @param data data frame containing stratum and point.count variables.
#' @param stratum the name of a variable in data indicating inclusion (1) and exclusion (0) in the stratum of interest.
#' @param point.count name of a variable in data indicating the number of sample points associated with each observation.
#' @param response name of the variable in data indicating response (1) and non-response (0).
#' @details
#' This function needs to be run by state.
#' @return
#' Response rate in the stratum.
#' @keywords nwos
#' @export
#' @references
#' Butler, B.J. In review. Weighting for the US Forest Service, National Woodland Owner Survey. U.S. Department of Agriculture, Forest Service, Northern Research Station. Newotwn Square, PA.
#' @examples
#' wi <- wi %>% mutate(FFO = if_else(LAND_USE == 1 & OWN_CD == 45, 1, 0), RESPONSE = if_else(RESPONSE_PROPENSITY >= 0.5, 1, 0))
#' WI_FFO_RR <- nwos_response_rate(data = wi)
nwos_response_rate <- function(index = NA, data, stratum = "FFO", point.count = "POINT_COUNT", response = "RESPONSE") {
# Default to using every row of `data`.
if(is.na(index[1])) index <- row.names(data)
# NOTE(review): `point.count == 1` compares the column-name argument against
# the number 1; it is TRUE only when the caller passes point.count = 1
# (meaning "one sample point per observation"), in which case a constant
# column of 1s is substituted -- confirm this is the intended contract.
if(point.count == 1) x <- data.frame(data[index, stratum], 1, data[index, response])
else x <- data.frame(data[index, c(stratum, point.count, response)]) # Create data frame
# Standardize column names so the dplyr pipeline below works for both branches.
names(x) <- c("stratum", "point.count", "response")
n.s <- x %>% filter(stratum %in% c(1), response %in% c(0,1)) %>% summarize(sum(point.count)) # Number of sample points in stratum
n.s.r <- x %>% filter(stratum %in% c(1), response %in% c(1)) %>% summarize(sum(point.count)) # Number of respondent sample points in stratum
as.numeric(n.s.r / n.s) # Calculate response rate
}
|
d51763d591af0a053e6ada7e57c62b14349ca5b6 | 472081e8eeef613e93fbd4ea939fe0142505545d | /man/wfgTransformation.Rd | aa61440824d08d95a901161340bd282f90f81d05 | [
"MIT"
] | permissive | tudob/wfg | 914b78ff0192ca211a7d3790fa1908113929f5d1 | 274e17243ea5ec94604f4f950af82b7141bf1cb1 | refs/heads/master | 2020-05-12T15:12:29.352884 | 2014-10-22T15:46:32 | 2014-10-22T15:46:32 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 998 | rd | wfgTransformation.Rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{wfgTransformation}
\alias{wfgTransformation}
\title{wfgTransformation - used by wfgEval to apply a transformation to part of the search-space vector}
\usage{
wfgTransformation(M, current, t_, start.index, apply.length, n, k, params)
}
\arguments{
\item{M}{The number of objectives}
\item{current}{The current transformation function specified by the user}
\item{t_}{the vector}
\item{start.index}{Specifying to which entries to apply the transformation to.}
\item{apply.length}{see start.index}
\item{n}{The search-space dimension.}
\item{k}{The number of position-dependent parameters.}
\item{params}{List of parameters to this shape}
}
\value{
The vector x to which the shape has been applied. Those entries that were changed now have their final objective value, but more shapes can follow for the later indices.
}
\description{
wfgTransformation - used by wfgEval to apply a transformation to part of the search-space vector
}
|
f57b0427dce9ca4a2dcc5bab133ba763c4c7ad02 | 6b119674bd7227f5e9a670a22e2410e41be4ea5d | /2020-09-29 Beyonce TSwift/TswiftLyrics.R | a270fa334664cd019603c62b78ec3ca3776e26c5 | [] | no_license | juliekewing/tidytuesday | ee756a6d7d7962539ba3147c7c968da457a0f42a | 838459f06bdff2e365630464e20bd2f2584735c7 | refs/heads/main | 2023-01-02T08:05:53.026550 | 2020-10-18T04:01:29 | 2020-10-18T04:01:29 | 305,001,705 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,122 | r | TswiftLyrics.R | # Setup
library(tidyverse)
library(tidytext)
# Get the Data
# Read in with tidytuesdayR package
# Install from CRAN via: install.packages("tidytuesdayR")
# This loads the readme and all the datasets for the week of interest
# Either ISO-8601 date or year/week works!
tuesdata <- tidytuesdayR::tt_load('2020-09-29')
##tuesdata <- tidytuesdayR::tt_load(2020, week = 40)
beyonce_lyrics <- tuesdata$beyonce_lyrics
tswift_lyrics <- tuesdata$taylor_swift_lyrics
sales <- tuesdata$sales
charts <- tuesdata$charts
#Data wrangling
#Order albums by release
x <- c("Taylor Swift ", "Fearless", "Speak Now ", "Red", "1989",
"reputation ", "Lover ", "folklore ")
###take the full string of lyrics, separate it into one row per word, remove stop words
tidy_lyrics <- tswift_lyrics %>%
unnest_tokens(word, Lyrics) %>%
anti_join(stop_words, by=c("word"="word"))
##let's see what words changed in usage over time
##
ts_tf_idf <- tidy_lyrics %>%
count(Album, word, sort = TRUE) %>%
bind_tf_idf(word, Album, n) %>%
group_by(word) %>%
mutate(albums_used = n(), total_freq = sum(n)) %>%
ungroup() %>%
filter(albums_used == 8)
distinct_per_album <- tidy_lyrics %>%
count(Album, word, sort = TRUE) %>%
bind_tf_idf(word, Album, n) %>%
group_by(word) %>%
mutate(albums_used = n(), total_freq = sum(n)) %>%
ungroup() %>%
group_by(Album) %>%
mutate(most_distinct_value = max(tf_idf)) %>%
filter(tf_idf == most_distinct_value)
lyrics_album <- ts_tf_idf %>%
mutate(Album = factor(Album, levels = x)) %>%
mutate(album_order = as.integer(Album))
#Calculate term frequency-inverse document frequency
#(h/t to Rosie Baillie for her awesome text analysis!)
ts_tf_idf <- tidy_lyrics %>%
count(Album, word, sort = TRUE) %>%
bind_tf_idf(word, Album, n) %>%
arrange(-tf_idf) %>%
group_by(Album) %>%
top_n(10) %>%
ungroup %>%
mutate(counter = 1) %>%
group_by(Album) %>%
mutate(ticker = cumsum(counter)) %>%
arrange(Album, n) %>%
filter(ticker <= 10) %>%
ungroup
# Order albums by release date so factor levels (and plot facets) follow
# chronology. Trailing spaces in some names match the raw data exactly.
x <- c("Taylor Swift ", "Fearless", "Speak Now ", "Red", "1989",
       "reputation ", "Lover ", "folklore ")

# Attach the chronological ordering to the tf-idf table.
# Bug fix: the original referenced the undefined object `Ablum` (typo for
# `Album`) and was followed by stray pasted fragments that broke parsing;
# both are corrected/removed here.
ts_tf_idf <- ts_tf_idf %>%
  mutate(Album = factor(Album, levels = x)) %>%
  mutate(album_order = as.integer(Album))
#Set theme
font_family <- 'Century Gothic'
background <- "#0D1821"
text_colour <- "white"
axis_colour <- "#595959"
plot_colour <- "white"
theme_style <- theme(text = element_text(family = font_family ),
rect = element_rect(fill = background),
plot.background = element_rect(fill = background, color = NA),
plot.title = element_markdown(family = font_family, vjust = 3.5, hjust = 0.5, size = 18, colour = text_colour),
plot.subtitle = element_text(family = font_family, vjust = 3, hjust = 0.5, size = 14, colour = text_colour),
plot.caption = element_markdown(family = font_family, hjust = 0.5, size = 10, colour = text_colour),
plot.margin = unit(c(1.5, 1, 1.5, 1), "cm"),
panel.background = element_rect(fill = background, color = NA),
panel.border = element_blank(),
panel.grid.major.y = element_line(colour = axis_colour, size = .1),
panel.grid.major.x = element_blank(),
panel.grid.minor.x = element_blank(),
axis.title.x = element_blank(),
axis.text.x = element_text(family = font_family, size = 6, colour= text_colour),
axis.title.y = element_blank(),
axis.text.y = element_text(family = font_family, size = 6, colour= text_colour),
axis.ticks = element_blank(),
axis.line.y = element_line(colour = axis_colour, size = .05),
axis.line.x = element_blank(),
legend.position="none",
strip.text.x = element_text(size = 12, face = 'bold', colour= text_colour),
strip.placement = "outside",
strip.background = element_blank())
theme_set(theme_classic() + theme_style)
#Create colour palette
cols = c("#EAB4EE", "#FDE2AF", "#FCFDD3", "#BEEADE", "#CAE4FC", "#CBC4F3", "#F3B8D0", "#F9CCC7")
#Create variables needed for notes
stem_placement <- 0.06
note_width <- 0.2
note_height <- 0.02
text_height <- 0.03
#Plot data
ggplot(ts_tf_idf) +
geom_segment(aes(x = ticker + stem_placement,
xend = ticker + stem_placement,
y = tf_idf,
yend = tf_idf + note_height),
size = .2,
color=plot_colour) +
geom_curve(aes(x = ticker + stem_placement,
xend = ticker + note_width,
y = tf_idf + note_height,
yend = tf_idf + note_height),
size = .2,
color = plot_colour,
curvature = 0.4) +
geom_hline(aes(yintercept = 0),
colour = axis_colour,
size = .05) +
geom_point(aes(ticker,
y=tf_idf,
colour = Album),
size=2.5) +
geom_text(data = ts_tf_idf,
aes(y = tf_idf + text_height,
x = ticker,
label = paste(ticker, ".", word)),
family = font_family,
size = 2,
hjust = "left",
color = text_colour) +
scale_y_continuous(limits = c(0, 0.08, 0.015),
sec.axis = dup_axis(name = NULL, labels = NULL)) +
scale_x_discrete(labels = word) +
scale_colour_manual(name = "Album", values = cols) +
facet_wrap(~Album, ncol=1, strip.position = "top") +
labs(title = "**Taylor Swift** | Top Ten Words Per Album",
subtitle = "Albums ordered by release",
caption = "<br/><br/>**Data Source:** Rosie Baillie & Dr. Sara Stoudt<br/>**Visualisation:** @JaredBraggins")
|
02291c270133cb44f35529a4cab1876a99f3d61d | c874e55ec73043f6b837601cc58d855d37649e59 | /mlcenzer/stats/src/generic models-Gamma glm 3-FF inverse link.R | fea5ae4146905a3bac867477d954b012dcebd060 | [] | no_license | mlcenzer/SBB-dispersal | 85c54c924b399834a798d700cabf0b2702ae0755 | 1a777370986f83186180552a09149dfba72b96d0 | refs/heads/master | 2022-12-11T10:13:32.416530 | 2022-12-03T16:23:52 | 2022-12-03T16:23:52 | 229,098,494 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,518 | r | generic models-Gamma glm 3-FF inverse link.R | library(lme4)
#generic three factor analysis with binomial data
m0<-glm(R~1, family=Gamma(link="inverse"), data=data)
m1<-glm(R~A, family=Gamma(link="inverse"), data=data)
m2<-glm(R~B, family=Gamma(link="inverse"), data=data)
m3<-glm(R~C, family=Gamma(link="inverse"), data=data)
m4<-glm(R~A+B, family=Gamma(link="inverse"), data=data)
m5<-glm(R~A+C, family=Gamma(link="inverse"), data=data)
m6<-glm(R~B+C, family=Gamma(link="inverse"), data=data)
m7<-glm(R~A+B+C, family=Gamma(link="inverse"), data=data)
m8<-glm(R~A*B, family=Gamma(link="inverse"), data=data)
m9<-glm(R~A*C, family=Gamma(link="inverse"), data=data)
m10<-glm(R~B*C, family=Gamma(link="inverse"), data=data)
m11<-glm(R~A*B + C, family=Gamma(link="inverse"), data=data)
m12<-glm(R~A*C + B, family=Gamma(link="inverse"), data=data)
m13<-glm(R~B*C + A, family=Gamma(link="inverse"), data=data)
m14<-glm(R~A*B + A*C, family=Gamma(link="inverse"), data=data)
m15<-glm(R~A*B + B*C, family=Gamma(link="inverse"), data=data)
m16<-glm(R~A*C + B*C, family=Gamma(link="inverse"), data=data)
m17<-glm(R~A*B + A*C + B*C, family=Gamma(link="inverse"), data=data)
#m18<-glm(R~A*B*C, family=Gamma(link="inverse"), data=data)
#identify top models using AIC
summary<-AIC(m1,m2,m3,m4, m5, m6, m7, m8, m9, m10, m11, m12, m13, m14, m15, m16, m17, m0)
sort(summary$AIC, index.return=TRUE)
#Run AICprobabilities in generic models folder
P<-AICprobs(summary$AIC)
sort(P, index.return=TRUE, decreasing=TRUE) #remember, we want the largest one of these
|
77731a962ed8335eb38a7bc3b48338768f1aba14 | d69b50072a174ba4f0cf7818bc1063decb85dab9 | /Walmart_Sales_in_Stormy_Weather/R_Script/Hybrid_Model.R | b73892ad3df49c01e2dbc915fa0e9bccb12de658 | [] | no_license | chriszeng8/Kaggle | be80c02f2de6579a6e69a5fb683a9c2fb2532240 | 3f15483cd3d28de6fe46d45aae8c0bfdb295cf2d | refs/heads/master | 2021-01-23T12:07:06.908784 | 2015-05-20T07:31:51 | 2015-05-20T07:31:51 | 30,375,080 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 14,646 | r | Hybrid_Model.R | setwd("/Users/emmama/Google Drive/Data Mining Project/Data")
# By Ling Cong Ma
#==============================================================================
# Import Data
#==============================================================================
weather<-read.csv("/Users/emmama/Google Drive/Data Mining Project/ProcessedData/processed_weather.csv")
training<-read.csv("joint_trainwithkey.csv")
test<-read.csv("joint_testwithkey.csv")
key<- read.csv("key.csv")
training<-training[order(training$date),]
#==============================================================================
# Initiation
#==============================================================================
#sales<-matrix(rep(0),nrow=111,ncol=45)
RMSE<-matrix(rep(0),nrow=111,ncol=45)
RMSE_PCA<-matrix(rep(0),nrow=111,ncol=45)
RMSE_L1<-matrix(rep(0),nrow=111,ncol=45)
RMSE_Step<-matrix(rep(0),nrow=111,ncol=45)
lmRMSE<-matrix(rep(0),nrow=111,ncol=45)
lmRMSE_PCA<-matrix(rep(0),nrow=111,ncol=45)
lmRMSE_L1<-matrix(rep(0),nrow=111,ncol=45)
lmRMSE_Step<-matrix(rep(0),nrow=111,ncol=45)
ListIndex<-1
ModelList<-list()
ModelList_L1<-list()
lmModelList<-list()
lmModelList_L1<-list()
# Lots of items in stores have no sales in history at all
# Therefore we already have matrix "sales" only recording the #item&#store has sales
# Model-selection sweep over every (item, store) pair that has a sales
# history. The "sales" matrix encodes each such pair as a unique integer
# code m >= 2, so looping over 2:max(sales) visits each pair once.
for (m in 2:max(sales)) {
# Recover the (item, store) cell holding code m; which(..., arr.ind = TRUE)
# returns (row, col) = (item number, store number).
# NOTE(review): this assumes code m appears in exactly one cell; with
# duplicates, index becomes a multi-row matrix and index[2] would be the row
# of the second match, not the column -- confirm codes are unique.
index<-which(sales==m,arr.ind=TRUE)
storeNo <-index[2]
# Weather station serving this store (store -> station lookup via "key").
stationNo <- key[key$store_nbr==storeNo,2]
itemNo <- index[1]
# Shorthand indices reused when writing into the RMSE result matrices.
i<-itemNo
j<-storeNo
# Progress trace.
print(paste("hello:", itemNo, storeNo))
# Response: unit sales for this item/store; predictors: the matching
# station's weather records (dropping columns 1, 2, 3 and 6).
train_sales <- subset(training,(training$store_nbr == storeNo)&(training$item_nbr == itemNo))
train_weather <- weather[(weather$date %in% train_sales$date)&(weather$station_nbr==stationNo),c(-1,-2,-3,-6)]
train<-cbind(units=train_sales$units,train_weather)
# Random ~70/30 train/validation split.
# NOTE(review): no set.seed(), so the split and all scores below are not
# reproducible; also 0.3*round(nrow(train)) was probably meant to be
# round(0.3*nrow(train)) -- confirm.
index_val<-sample(1:nrow(train), 0.3*round(nrow(train)),replace=F)
validation_fold<-train[index_val,]
train_fold<-train[-index_val,]
y_true<-validation_fold[,1]
y_train<-train_fold[,1]
#==============================================================================
# PCA
#==============================================================================
# For each family (1 = Poisson GLM, 2 = linear model), project predictors
# onto principal components, score models built on the first n_col
# components, and keep the best validation score.
for (modeltype in 1:2){
RMSE_PCAselect<-c()
pca = prcomp(train_fold[,c(-1)],center=T)
validation_fold_pca<- as.matrix(validation_fold[,-1]) %*% as.matrix(pca$rotation)
train_fold_pca<-as.matrix(train_fold[,-1])%*%as.matrix(pca$rotation)
# Using validation fold to get the optimal column amount to be reserved
for (n_col in (1:20)){
pca_select<-as.data.frame(subset(train_fold_pca,select=c(1:n_col)))
#Poisson Regression
if(modeltype==1){
selected_model<-glm(unlist(y_train)~.,data=pca_select,family=poisson())
# exp() maps the Poisson linear predictor back to the response scale.
y_hat<-round(exp(predict(selected_model, newdata = as.data.frame(validation_fold_pca))),0)
}
#Linear Regression
if(modeltype==2){
selected_model<-lm(unlist(y_train)~.,data=pca_select)
y_hat<-round(predict(selected_model, newdata = as.data.frame(validation_fold_pca)),0)
y_hat<-as.matrix(y_hat)
# Unit sales cannot be negative; clamp linear-model predictions at zero.
y_hat[y_hat<0,1]<-0
}
# Validation score is RMSE on the log1p scale (i.e. RMSLE).
RMSE_PCAselect <- c(RMSE_PCAselect,sqrt(sum((log(y_hat+1)-log(y_true+1))^2)/length(y_hat)))
}
if(modeltype==1){
RMSE_PCA[i,j]<-min(RMSE_PCAselect)
}
if(modeltype==2){
lmRMSE_PCA[i,j]<-min(RMSE_PCAselect)
}
}
#==============================================================================
# L1 Regulization
#==============================================================================
# Cross-validated glmnet fits. Note the validation predictions below use
# s=0 (no shrinkage), not the cross-validated lambda.
for (modeltype in 1:2){
#install.packages("ISLR")
#install.packages(c('glmnet',"leaps","ISLR"))
#library('glmnet')
train_fold_L1<-as.matrix(train_fold[,c(-1)])
validation_fold_L1<-validation_fold[,-1]
y_train_L1<-as.matrix(y_train)
#Poisson Regression
if(modeltype==1){
selected_model<-cv.glmnet(train_fold_L1,y_train,family="poisson",standardize=T,nfold=10)
y_hat<-round(predict(selected_model,as.matrix(validation_fold_L1),s=0,type="response"),0)
RMSE_L1[i,j] <- sqrt(sum((log(y_hat+1)-log(y_true+1))^2)/length(y_hat))
#save the model for test using
ModelList_L1[[length(ModelList_L1)+1]] <- list(selected_model)
}
#Linear Regression
if(modeltype==2){
selected_model<-cv.glmnet(train_fold_L1,y_train,family="gaussian",standardize=T,nfold=10)
y_hat<-round(predict(selected_model,as.matrix(validation_fold_L1),s=0,type="response"),0)
y_hat<-as.matrix(y_hat)
y_hat[y_hat<0,1]<-0
lmRMSE_L1[i,j] <- sqrt(sum((log(y_hat+1)-log(y_true+1))^2)/length(y_hat))
#save the model for test using
lmModelList_L1[[length(lmModelList_L1)+1]] <- list(selected_model)
}
}
#==============================================================================
# Step Forward
#==============================================================================
# Forward stepwise selection from the intercept-only model toward the full
# formula, for both families.
for (modeltype in 1:2){
#Poisson Regression
if(modeltype==1){
min.model <-glm(units~1, data=train_fold,family=poisson())
biggest <- formula(glm(units~., data=train_fold,family=poisson()))
poisson_model <- step(min.model, direction='forward', scope=biggest)
# Refit the selected formula so the stored model is a clean glm object.
selected_model<-glm(poisson_model$formula,data=train_fold,family=poisson())
y_hat<-round(exp(predict(selected_model, newdata = validation_fold[,c(-1)])),0)
RMSE_Step[i,j] <- sqrt(sum((log(y_hat+1)-log(y_true+1))^2)/length(y_hat))
#save the model for test using
ModelList[[length(ModelList)+1]] <- list(selected_model)
}
#Linear Regression
if(modeltype==2){
min.model <-lm(units~1, data=train_fold)
biggest <- formula(lm(units~., data=train_fold))
poisson_model <- step(min.model, direction='forward', scope=biggest)
selected_model<-lm(poisson_model,data=train_fold)
y_hat<-round(predict(selected_model, newdata = validation_fold[,c(-1)]),0)
y_hat<-as.matrix(y_hat)
y_hat[y_hat<0,1]<-0
lmRMSE_Step[i,j] <- sqrt(sum((log(y_hat+1)-log(y_true+1))^2)/length(y_hat))
#save the model for test using
lmModelList[[length(lmModelList)+1]] <- list(selected_model)
}
}
# NOTE(review): ListIndex is incremented here but never read anywhere; the
# model lists are grown with length()+1 instead.
ListIndex=ListIndex+1
# Best validation score per family across the three selection strategies.
RMSE[i,j]<-min(RMSE_Step[i,j],RMSE_L1[i,j],RMSE_PCA[i,j])
lmRMSE[i,j]<-min(lmRMSE_Step[i,j],lmRMSE_L1[i,j],lmRMSE_PCA[i,j])
}
#==============================================================================
# Save the intermediate result
#==============================================================================
# Persist every fitted-model list as .RData for reuse by the test pass.
for (model_list in c("ModelList", "ModelList_L1", "lmModelList", "lmModelList_L1")) {
  save(list = model_list, file = paste0(model_list, ".RData"))
}
# Persist the sales index plus each validation-score matrix as CSV.
for (score_matrix in c("sales", "RMSE", "RMSE_PCA", "RMSE_L1", "RMSE_Step",
                       "lmRMSE", "lmRMSE_PCA", "lmRMSE_L1", "lmRMSE_Step")) {
  write.csv(get(score_matrix), file = paste0(score_matrix, ".csv"))
}
#====================================================================
# test
#====================================================================
#test
# Order the test set by date, pull the weather rows for those dates, and
# initialize the prediction column to zero.
test<-test[order(test$date),]
test_weather <- weather[(weather$date %in% test$date),]
test$units<-0
# Per-strategy prediction columns from an earlier ensemble experiment, kept
# for reference but disabled.
#test$units_poPCA<-0
#test$units_poL1<-0
#test$units_poStep<-0
#test$units_lmPCA<-0
#test$units_lmL1<-0
#test$units_lmStep<-0
#test$units_avg<-0
# Prediction pass: for every (item, store) pair, pick the model family /
# selection strategy with the lowest validation RMSLE and use it to fill in
# test$units.
for (m in 2:max(sales)) {
index<-which(sales==m,arr.ind=TRUE)
item_n<-index[1]
store_n<-index[2]
stationNo <- key[key$store_nbr==store_n,2]
# Rows of the test set and of the test weather table for this pair.
subsetTestIndex<-(test$item_nbr==item_n)&(test$store_nbr==store_n)
subsetWeatherIndex<-(test_weather$station_nbr==stationNo)&(test_weather$date %in% test[subsetTestIndex,]$date)
# Position of this pair's stored models: code m maps to list slot m - 1.
# NOTE(review): this assumes models were appended in exactly the order the
# codes 2..max(sales) were visited in the training loop -- confirm.
ModelListIndex<-sales[item_n,store_n]-1
ModelListIndex_L1<-sales[item_n,store_n]-1
# Rebuild train/validation folds (a fresh random split, since no seed is
# set) for the PCA strategies, which refit their models here instead of
# reusing stored ones.
train_sales <- subset(training,(training$store_nbr == store_n)&(training$item_nbr == item_n))
train_weather <- weather[(weather$date %in% train_sales$date)&(weather$station_nbr==stationNo),c(-1,-2,-3,-6)]
train<-cbind(units=train_sales$units,train_weather)
index_val<-sample(1:nrow(train), 0.3*round(nrow(train)),replace=F)
validation_fold<-train[index_val,]
train_fold<-train[-index_val,]
y_true<-validation_fold[,1]
y_train<-train_fold[,1]
#Select the best model among (Poisson,Linear) *(PCA,L1,SteoForward)
min_RMSE<-c(RMSE_PCA[item_n,store_n],RMSE_L1[item_n,store_n],RMSE_Step[item_n,store_n],lmRMSE_PCA[item_n,store_n],lmRMSE_L1[item_n,store_n],lmRMSE_Step[item_n,store_n])
F_Model<-which.min(min_RMSE)
# Only predict when the test set actually contains rows for this pair.
if(nrow(test[subsetTestIndex,])>0){
#==============================================================================
# Poisson_PCA
#==============================================================================
#F_Model=1
if(F_Model==1){
# Re-run the PCA component sweep (15 components here vs 20 in training) and
# refit the winning Poisson model before predicting on the test rows.
RMSE_PCAselect<-c()
pca = prcomp(train_fold[,c(-1)],center=T)
validation_fold_pca<- as.matrix(validation_fold[,-1]) %*% as.matrix(pca$rotation)
train_fold_pca<-as.matrix(train_fold[,-1])%*%as.matrix(pca$rotation)
for (n_col in (1:15)){
pca_select<-as.data.frame(subset(train_fold_pca,select=c(1:n_col)))
selected_model<-glm(unlist(y_train)~.,data=pca_select,family=poisson())
y_hat<-round(exp(predict(selected_model, newdata = as.data.frame(validation_fold_pca))),0)
RMSE_PCAselect <- c(RMSE_PCAselect,sqrt(sum((log(y_hat+1)-log(y_true+1))^2)/length(y_hat)))
}
# NOTE(review): this is the Poisson branch, yet it writes into lmRMSE_PCA,
# and [i,j] are stale indices left over from the training loop rather than
# [item_n,store_n] -- almost certainly RMSE_PCA[item_n,store_n] was meant.
lmRMSE_PCA[i,j]<-min(RMSE_PCAselect)
optimal_col<-which.min(RMSE_PCAselect)
pca_select<-as.data.frame(subset(train_fold_pca,select=c(1:optimal_col)))
selected_model<-glm(unlist(y_train)~.,data=pca_select,family=poisson())
test_pca<- as.matrix(test_weather[subsetWeatherIndex,c(-1,-2,-3,-6)]) %*% as.matrix(pca$rotation)
#test_pca_select<-as.data.frame(test_pca,select=c(1:optimal_col))
test[subsetTestIndex,]$units<-round(exp(predict(selected_model, newdata = as.data.frame(test_pca))),0)
#test[subsetTestIndex,]$units_poPCA<-round(exp(predict(selected_model, newdata = as.data.frame(test_pca))),0)
}
#==============================================================================
# Poisson_L1
#==============================================================================
#F_Model=2
if(F_Model==2){
# Reuse the stored cv.glmnet fit; here s="lambda.min" (the CV-optimal
# penalty) is used, unlike the s=0 predictions during validation.
test[subsetTestIndex,]$units<-round(predict(ModelList_L1[[ModelListIndex]][[1]],as.matrix(test_weather[subsetWeatherIndex,c(-1,-2,-3,-6)]),s="lambda.min",type="response"),0)
#test[subsetTestIndex,]$units_poL1<-round(predict(ModelList_L1[[ModelListIndex]][[1]],as.matrix(test_weather[subsetWeatherIndex,c(-1,-2,-3,-6)]),s="lambda.min",type="response"),0)
}
#==============================================================================
# Poisson_Step
#==============================================================================
#F_Model=3
if(F_Model==3){
# Stored stepwise Poisson glm: predict() returns the linear predictor by
# default, hence the exp() back-transform.
test[subsetTestIndex,]$units<-round(exp(predict(ModelList[[ModelListIndex]][[1]],newdata=test_weather[subsetWeatherIndex,c(-1,-2,-3,-6)])),0)
#test[subsetTestIndex,]$units_poStep<-round(exp(predict(ModelList[[ModelListIndex]][[1]],newdata=test_weather[subsetWeatherIndex,c(-1,-2,-3,-6)])),0)
}
#==============================================================================
# Linear_PCA
#==============================================================================
#F_Model=4
if(F_Model==4){
# Linear-model analogue of the Poisson_PCA branch above.
RMSE_PCAselect<-c()
pca = prcomp(train_fold[,c(-1)],center=T)
validation_fold_pca<- as.matrix(validation_fold[,-1]) %*% as.matrix(pca$rotation)
train_fold_pca<-as.matrix(train_fold[,-1])%*%as.matrix(pca$rotation)
for (n_col in (1:15)){
pca_select<-as.data.frame(subset(train_fold_pca,select=c(1:n_col)))
selected_model<-lm(unlist(y_train)~.,data=pca_select)
y_hat<-round(predict(selected_model, newdata = as.data.frame(validation_fold_pca)),0)
y_hat<-as.matrix(y_hat)
y_hat[y_hat<0,1]<-0
RMSE_PCAselect <- c(RMSE_PCAselect,sqrt(sum((log(y_hat+1)-log(y_true+1))^2)/length(y_hat)))
}
# NOTE(review): stale [i,j] indices again (see Poisson_PCA above).
lmRMSE_PCA[i,j]<-min(RMSE_PCAselect)
optimal_col<-which.min(RMSE_PCAselect)
pca_select<-as.data.frame(subset(train_fold_pca,select=c(1:optimal_col)))
selected_model<-lm(unlist(y_train)~.,data=pca_select)
test_pca<- as.matrix(test_weather[subsetWeatherIndex,c(-1,-2,-3,-6)]) %*% as.matrix(pca$rotation)
test[subsetTestIndex,]$units<-round(predict(selected_model, newdata = as.data.frame(test_pca)),0)
#test[subsetTestIndex,]$units_lmPCA<-round(predict(selected_model, newdata = as.data.frame(test_pca)),0)
}
#==============================================================================
# Linear_L1
#==============================================================================
#F_Model=5
if(F_Model==5){
test[subsetTestIndex,]$units<-round(predict(lmModelList_L1[[ModelListIndex]][[1]],as.matrix(test_weather[subsetWeatherIndex,c(-1,-2,-3,-6)]),s="lambda.min",type="response"),0)
#test[subsetTestIndex,]$units_lmL1<-round(predict(lmModelList_L1[[ModelListIndex]][[1]],as.matrix(test_weather[subsetWeatherIndex,c(-1,-2,-3,-6)]),s="lambda.min",type="response"),0)
}
#==============================================================================
# Linear_StepForward
#==============================================================================
#F_Model=6
if(F_Model==6){
test[subsetTestIndex,]$units<-round(predict(lmModelList[[ModelListIndex]][[1]],newdata=test_weather[subsetWeatherIndex,c(-1,-2,-3,-6)]),0)
#test[subsetTestIndex,]$units_lmStep<-round(predict(lmModelList[[ModelListIndex]][[1]],newdata=test_weather[subsetWeatherIndex,c(-1,-2,-3,-6)]),0)
}
# Progress trace.
print(m)
}
}
# Clamp any remaining negative predictions to zero (column 6 holds units,
# per the commented column map below) and write the final submission file.
test[test$units<0,6]<-0
#test[test$units_poPCA<0,7]<-0
#test[test$units_poL1<0,8]<-0
#test[test$units_poStep<0,9]<-0
#test[test$units_lmPCA<0,10]<-0
#test[test$units_lmL1<0,11]<-0
#test[test$units_lmStep<0,12]<-0
#test$units_avg<-round((test$units_poPCA+test$units_poL1+test$units_poStep+test$units_lmPCA+test$units_lmL1+test$units_lmStep)/6,0)
#test<-test[,-13]
write.csv(test,"test_bestmodel_F.csv")
# NOTE(review): the three lines below look like leftover debugging. They run
# AFTER the CSV has been written, overwrite test$units using whatever
# subsetTestIndex / ModelListIndex values remain from the LAST loop
# iteration, and the exp() wrapped around type="response" predictions
# double-transforms those values. They should probably be deleted -- confirm.
test[subsetTestIndex,]$units<-round(exp(predict(ModelList[[ModelListIndex]][[1]],newdata=test_weather[subsetWeatherIndex,])),0)
test[subsetTestIndex,]$units<-round(exp(predict(ModelList_L1[[ModelListIndex_L1]][[1]],as.matrix(test_weather[subsetWeatherIndex,c(-1,-2,-3,-6)]),s="lambda.min",type="response")),0)
test[subsetTestIndex,]$units<-round(exp(predict(lmModelList_L1[[ModelListIndex]][[1]],newdata=test_weather[subsetWeatherIndex,])),0)
#text_1<-test[test$store_nbr==1,]
|
c26112a5d7eb7dce5ff543aaf0fc45dfa08749e0 | 933f7e96ff913739b22053ba81651797178ecc0a | /man/inv.f5pl.Rd | 7ef46f3c778b1e78cad4bc214d32754c0933f108 | [] | no_license | cran/ELISAtools | 8ffafdb930fe47c5c1cd619cb54644ee9f1210d6 | 67e6926484f17d9fde53237659415fa11398f2c4 | refs/heads/master | 2021-06-19T08:26:43.601807 | 2021-01-21T14:40:05 | 2021-01-21T14:40:05 | 174,589,176 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 517 | rd | inv.f5pl.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Regression.R
\name{inv.f5pl}
\alias{inv.f5pl}
\title{The inverse of the 5-parameter logistic function}
\usage{
inv.f5pl(pars, y)
}
\arguments{
\item{pars}{the parameters of the function. It has the following content: [a, d, xmid, scale, g].}
\item{y}{the value to be reverse calculated.}
}
\value{
The value of the inverse function evaluated at \code{y}.
}
\description{
The inverse function of the 5-parameter logistic (5pl). Set the parameter g
to 1 if the inverse of the 4-parameter logistic (4pl) is of interest.
}
|
1d5d219e421232026d130fc75e8f479c7c8f91ca | 3bfe4f88a0190b3dca045b8388cb6080a1cacb44 | /liftcode.R | ec42ece8bb7333fc35b0b6f32e4356039ae2e710 | [] | no_license | jakempker/lift | 0a93a380697e510af303f821ad8b2d9ed7330847 | 8fb13f05230a9185c4ca0d16438b47d510017c4d | refs/heads/master | 2021-01-13T10:18:08.570207 | 2016-08-25T00:40:22 | 2016-08-25T00:40:22 | 66,485,458 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,739 | r | liftcode.R | setwd("C:/Users/jkempke/Box Sync/Coursera/lift")
# Fetch the Weight Lifting Exercise data sets, then load them into memory.
base_url <- "https://d396qusza40orc.cloudfront.net/predmachlearn"
for (f in c("pml-training.csv", "pml-testing.csv")) {
  download.file(url = paste(base_url, f, sep = "/"), destfile = paste0("./", f))
}

library(caret)
library(dplyr)

training <- read.csv("./pml-training.csv")
testing  <- read.csv("./pml-testing.csv")
# Many columns of the test set are entirely NA; a model cannot be applied to
# them, so those variables are dropped from both sets.
# vapply() (rather than sapply()) guarantees a logical vector, and the
# result name no longer shadows base::missing().
all_na <- vapply(testing, function(x) all(is.na(x)), logical(1))
# Column names that are all-NA in the test set.
MissingVariables <- colnames(testing[all_na])
# Keep only variables that carry data, and apply the same selection to the
# training set so both sets share one column layout.
testing <- select(testing, -one_of(MissingVariables))
training <- select(training,-one_of(MissingVariables))
# Identifier and timestamp columns are dropped: they are bookkeeping fields,
# not sensor measurements, and could leak row-ordering information into the
# model. A single shared vector keeps the two select() calls in sync.
meta_columns <- c("X", "user_name", "raw_timestamp_part_1",
                  "raw_timestamp_part_2", "cvtd_timestamp",
                  "new_window", "num_window")
training <- select(training, -one_of(meta_columns))
testing  <- select(testing,  -one_of(meta_columns))
#Create a data partition of our set for training and testing.
# NOTE(review): no set.seed() before createDataPartition, so the 75/25 split
# (and every accuracy below) changes between runs.
inTrain <- createDataPartition(y=training$classe, p=0.75, list=FALSE)
# NOTE(review): the data frame "train" shadows caret::train; the calls below
# still resolve to the function (R skips non-function bindings in call
# position) but the naming is confusing.
train <- training[inTrain,]
test <- training[-inTrain,]
# Fit one classifier per algorithm: CART, random forest, boosted trees, LDA.
rpart.model <- train(classe ~ ., data=train, method='rpart')
# NOTE(review): rf.model is fit (expensive) but never used, since its
# predictions are commented out below.
rf.model <- train(classe ~ ., data=train, method='rf') #random forest
gbm.model <- train(classe ~ ., data=train, method='gbm')#boosting
lda.model <- train(classe ~ ., data=train, method='lda')#linear discriminant analysis
# Predict on the held-out 25%.
rpart.predict <- predict(rpart.model, test)
#rf.predict <- predict(rf.model, test) #tried this but took too long to run!
gbm.predict <- predict(gbm.model, test)
lda.predict <- predict(lda.model, test)
# Full confusion matrices, then the Accuracy figure alone for each model.
confusionMatrix(rpart.predict, test$classe)
confusionMatrix(gbm.predict, test$classe)
confusionMatrix(lda.predict, test$classe)
confusionMatrix(rpart.predict, test$classe)$overall["Accuracy"]
#confusionMatrix(rf.predict, test$classe)$overall["Accuracy"]
confusionMatrix(gbm.predict, test$classe)$overall["Accuracy"]
confusionMatrix(lda.predict, test$classe)$overall["Accuracy"]
# Render the accompanying R Markdown report.
library(knitr)
knit2html("lift.Rmd")
|
6fc9c60e653b99e4ae5b76dc95468cf5a72550e7 | 7193be8e52b2095d8b6472f3ef2104843dca339e | /Project4-MachineLearning/Datasaurus/Nick/TryKNN.R | e355783c799f514ad11c564a83ec253fc6e71e50 | [] | no_license | vuchau/bootcamp007_project | 4b0f37f49a1163ea6d8ee4143a5dcfdef0d352dd | ffbd0f961b18510fc72fd49770187ec1b4b013ae | refs/heads/master | 2020-03-20T13:19:58.812791 | 2017-06-08T05:13:24 | 2017-06-08T05:13:24 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,845 | r | TryKNN.R | #rm(list = ls())
library(class)
library(qdapRegex)
library(kknn)    # Weighted k-nearest-neighbour models.
library(VIM)     # Visualisation and imputation of missing values.
library(ggplot2)
library(stringr)
library(Hmisc)
library(stringi)
library(dplyr)
# NOTE(review): attaching plyr after dplyr masks several dplyr verbs
# (mutate, summarise, ...); load plyr first if dplyr semantics are wanted.
library(plyr)
library(foreach)
library(parallel)
library(doParallel)
# Parallel backend: use all logical cores but one (minimum 1).
# Fix: the cluster previously hard-coded 2 workers while cores.Number was
# computed and never applied to the cluster itself.
cores.Number = max(1,detectCores(all.tests = FALSE, logical = TRUE)-1)
cl <- makeCluster(cores.Number)
registerDoParallel(cl, cores=cores.Number)
# Machine-specific project roots: prefer the absolute laptop path, fall back
# to a home-directory checkout. (Silently stays in the current directory
# when neither exists.)
if (dir.exists('/Users/nicktalavera/Coding/NYC_Data_Science_Academy/Projects/Allstate-Kaggle---Team-Datasaurus-Rex')) {
setwd('/Users/nicktalavera/Coding/NYC_Data_Science_Academy/Projects/Allstate-Kaggle---Team-Datasaurus-Rex')
} else if (dir.exists("~/Allstate-Kaggle---Team-Datasaurus-Rex")) {
setwd("~/Allstate-Kaggle---Team-Datasaurus-Rex")
}
dataFolder = './Data/'
# Cache the (large) Kaggle CSVs in the global environment so re-sourcing the
# script during interactive work does not re-read them.
if (!exists("testData")) {
testData = read.csv(paste0(dataFolder,'test.csv'))
}
if (!exists("trainData")) {
trainData = read.csv(paste0(dataFolder,'train.csv'))
}
# Split predictors by position: columns 2-117 categorical, the remainder
# continuous -- assumes the standard Allstate column layout (the commented
# grepl("cont", ...) line below suggests the same split by name); confirm.
trainData_cat <- cbind(trainData[,2:117])
# Drop cat112; the name "NoStates" suggests it encodes states -- confirm.
trainData_catNoStates = select(trainData_cat, -cat112)
trainData_num <- cbind(trainData[,118:ncol(trainData)])
# head(trainData_num)
# trainData_num.describe()
# trainData[, grepl("cont", names(trainData))]
# head(trainData)
# The test set has no target; add a placeholder loss column.
testData$loss = NA
# Pairwise chi-square screen: for every pair of categorical columns in
# `data`, compute the p-value of a chi-square test of independence on their
# contingency table. Builds a ncol x ncol matrix column-wise (one parallel
# task per column) and masks p-values above 0.05 -- i.e. pairs with no
# evidence of association -- to Inf.
chiSquareAllColumns = function(data){
#chiResults = data.frame(matrix(ncol = ncol(data), nrow = ncol(data)))
chiResults = foreach (i = 1:length(data), .combine=cbind) %dopar% {
columnName = colnames(data)[i]
# Progress trace (printed on the worker, so it may not reach the console).
print(paste("Column Name:", columnName))
# NOTE(review): the anonymous function's argument `i` shadows the foreach
# index `i` -- it works, but renaming it would be clearer. apply() also
# coerces `data` to a matrix before the column-wise tests.
return(columnName = apply(data, 2 , function(i) chisq.test(table(data[, columnName], i ))$p.value))
}
chiResults[chiResults > 0.05] = Inf
# NOTE(review): cbind makes chiResults a matrix, so this sets the names
# attribute rather than the column names; colnames(chiResults) was probably
# intended -- confirm.
names(chiResults) = colnames(data)
return(chiResults)
}
# Run the chi-square screen over all categorical predictors and wrap the
# resulting matrix in a data frame for inspection. (The original wrapped the
# result in data.frame() twice; the second call was a redundant no-op.)
a <- data.frame(chiSquareAllColumns(trainData_cat))
head(a)
# Release the parallel workers now that the screen is finished.
stopCluster(cl)
2f8162152103d155f6176d82dec577fef4fdfd76 | 58235e8e6c953f628f25da903764432fe19feafa | /inst/examples/04-selected/server.R | ec6a33c538faa1bc8f1a68e88f20b3fc879c1161 | [
"MIT"
] | permissive | timelyportfolio/shinyTree | 6be833fb7e932ec42aa04e6c4dc08009a4438d9e | 52996823084827279a07361820c2d7edb6f9bfb1 | refs/heads/master | 2023-07-20T13:45:11.414436 | 2023-07-15T16:08:48 | 2023-07-15T16:08:48 | 124,381,230 | 2 | 0 | MIT | 2018-03-08T11:21:47 | 2018-03-08T11:21:47 | null | UTF-8 | R | false | false | 649 | r | server.R | library(shiny)
library(shinyTree)
#' Define server logic required to generate a simple tree
#' @author Jeff Allen \email{jeff@@trestletech.com}
shinyServer(function(input, output, session) {
  # (An unused local `log`, which also shadowed base::log, was removed.)
  # Render a small static tree: one leaf node plus a branch holding two
  # sub-lists of empty leaves.
  output$tree <- renderTree({
    list(
      root1 = structure("123"),
      root2 = list(
        SubListA = list(leaf1 = "", leaf2 = "", leaf3=""),
        SubListB = list(leafA = "", leafB = "")
      )
    )
  })
  # Echo the currently selected node(s), or "None" before the client has
  # reported any tree state.
  output$selTxt <- renderText({
    tree <- input$tree
    if (is.null(tree)){
      "None"
    } else{
      unlist(get_selected(tree))
    }
  })
})
3b42fcc25ca4b123e6bd8f375eed785774a10b54 | 3aa9c5d5c41eb2f47f48cc057b89b9bf1d2473cf | /bigdata-R-Azure/fundamentals/07-Matrizes.R | 322b292cc05a233766bea780fdd482c67588037b | [
"MIT"
] | permissive | RaniereRamos/data-science | dc124ec94889903f27fd5a344483486b61dd924c | a9535ac09c2f8f07faecee421be76816f85c2760 | refs/heads/master | 2022-11-05T03:56:49.182063 | 2020-06-23T14:14:07 | 2020-06-23T14:14:07 | 272,301,444 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,373 | r | 07-Matrizes.R | # Matrizes, operacoes com matrizes e matrizes nomeadas
# Note: if you have problems with accented characters, see this link:
# http://support.rstudio.com/hc/en-us/articles/200532197-Character-Encoding
# Set the working directory.
# Put the working directory you use on your computer between the quotes.
# Do not use directories with spaces in the name.
setwd("C:/Raniere/DSA/FCD/BigDataRAzure/Cap02")
getwd()
# Creating matrices ----
# Number of rows: matrix() fills the data vector column by column.
# (nrow/ncol are now spelled out in full; the original relied on partial
# argument matching via nr= / nc=.)
matrix(c(1,2,3,4,5,6), nrow = 2)
matrix(c(1,2,3,4,5,6), nrow = 3)
matrix(c(1,2,3,4,5,6), nrow = 6)
# Number of columns
matrix(c(1,2,3,4,5,6), ncol = 2)
matrix(c(1,2,3,4), ncol = 2)
# Help
?matrix
# A matrix needs a number of elements that is a multiple of the number of
# rows; otherwise the data are recycled and a warning is emitted, as here.
matrix(c(1,2,3,4,5), ncol = 2)
# Creating matrices from vectors, filling row by row with byrow = TRUE
meus_dados = c(1:10)
matrix(data = meus_dados, nrow = 5, ncol = 2, byrow = TRUE)
matrix(data = meus_dados, nrow = 5, ncol = 2)
# Slicing the matrix
mat <- matrix(c(2,3,4,5), nrow = 2)
mat
mat[1,2]
mat[2,2]
# NOTE(review): mat is 2x2, so this subscript is out of bounds and raises a
# "subscript out of bounds" error -- presumably kept as a deliberate
# demonstration, but it aborts the script when run non-interactively.
mat[1,3]
mat[ ,2]
# Creating a diagonal matrix
matriz = 1:3
matriz
diag(matriz)
# Extracting the vector from a diagonal matrix
vetor = diag(matriz)
diag(vetor)
# Transpose of the matrix
W <- matrix(c(2,4,8,12), nrow = 2, ncol = 2)
W
t(W)
U <- t(W)
U
# Obtaining the inverse of a matrix
solve(W)
# "Matrix multiplication" below is element-wise (use %*% for the algebraic
# matrix product)
mat1 <- matrix(c(2,3,4,5), nrow = 2)
mat1
mat2 <- matrix(c(6,7,8,9), nrow = 2)
mat2
mat1 * mat2
mat1 / mat2
mat1 + mat2
mat1 - mat2
# Multiplying a matrix by a vector (element-wise, with recycling)
x = c(1:4)
x
y <- matrix(c(2,3,4,5), nrow = 2)
x * y
# Naming the matrix
mat3 <- matrix(c("Terra", "Marte", "Saturno","Netuno"), nrow = 2)
mat3
dimnames(mat3) = list(c("Linha1", "Linha2"), c("Coluna1", "Coluna2"))
mat3
# Assigning row and column names at matrix-creation time
matrix(c(1,2,3,4), nrow = 2, ncol = 2, dimnames = list(c("Linha1", "Linha2"), c("Coluna1", "Coluna2")))
# Combining matrices
mat4 <- matrix(c(2,3,4,5), nrow = 2)
mat4
mat5 <- matrix(c(6,7,8,9), nrow = 2)
mat5
cbind(mat4, mat5) # Binds the matrices by column
rbind(mat4, mat5) # Binds the matrices by row
# Deconstructing a matrix (flatten back to a vector)
c(mat4)
|
85edba4968a86bb84985841fe164292d21375eac | 8397af2a24361e014d345c9c801ce019cdfabfb8 | /plot4.R | 53a3a80bf2b98a6e86f55d666d46f37eb4b14fe1 | [] | no_license | lukaszoleksy/ExData_Plotting1 | bcc2056895d4484fb6c78d9b3c72a8963da27abc | 99abbc7975b90ec5873aff818dd5976312f76320 | refs/heads/master | 2021-01-15T08:35:58.942448 | 2016-09-07T22:00:27 | 2016-09-07T22:00:27 | 67,646,579 | 0 | 0 | null | 2016-09-07T21:57:35 | 2016-09-07T21:57:34 | null | UTF-8 | R | false | false | 1,180 | r | plot4.R | startReadingFrom<-66637
# Four-panel figure for the two target days of the UCI household power
# consumption data set. The row offset startReadingFrom points at the start
# of that window; 2880 rows = 2 days of one-minute readings. The header row
# is skipped by the offset, so column names are supplied explicitly.
nRowsToUse <- 2880
colNames <- c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3")
data <- read.table("c:/temp/household_power_consumption.txt", header = FALSE, na.strings = "?", sep=";", skip=startReadingFrom, nrows=nRowsToUse, col.names = colNames)
# Combine Date and Time into POSIXlt timestamps.
datetime<- paste(data$Date, data$Time)
datetime<-strptime(datetime, "%d/%m/%Y %H:%M:%S")
# English weekday labels on the time axes (Windows-style locale name).
Sys.setlocale("LC_TIME", "English")
png(file="plot4.png")
par(mfrow=c(2,2))
# Fix: three plot() calls referenced an undefined variable "dateTime"
# (capital T); the parsed timestamps live in "datetime".
plot(datetime,data$Global_active_power, type = "l", xlab="", ylab="Global Active Power")
plot(datetime,data$Voltage, type = "l", xlab="datetime", ylab="Voltage")
plot(datetime,data$Sub_metering_1, type = "l", xlab="", ylab="Energy sub metering", col="Black")
lines(datetime,data$Sub_metering_2, col="Red")
lines(datetime,data$Sub_metering_3, col="Blue")
legend("topright",c("Sub_metering_1","Sub_metering_2", "Sub_metering_3"), lty=c(1,1,1), col=c("Black","Red","Blue"))
plot(datetime,data$Global_reactive_power, type = "l", xlab="datetime", ylab="Global_reactive_power", ylim=c(0.0,0.5), tck=0.1)
dev.off()
|
db222c6aa42efdf106d51e6728777c58ed07319a | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/mvmeta/examples/summary.mvmeta.Rd.R | c8ae0f29e17750bec348d5bdbe1fb6b6c74c3eb1 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 430 | r | summary.mvmeta.Rd.R | library(mvmeta)
### Name: summary.mvmeta
### Title: Summarizing mvmeta Models
### Aliases: summary.mvmeta print.mvmeta print.summary.mvmeta
### Keywords: models regression multivariate methods
### ** Examples
# Fit a bivariate meta-regression of the two outcomes (PD, AL) on
# publication year, with within-study (co)variances taken from columns 5-7.
model <- mvmeta(cbind(PD,AL)~pubyear,S=berkey98[5:7],data=berkey98)
# Default print method.
model
# Print with two significant digits; "digits" is spelled out in full rather
# than relying on partial argument matching ("digit").
print(model,digits=2)
# Summary with 80% confidence intervals.
summary(model,ci.level=0.80)
|
53034f2d84448f25a1798a7e019258f8b4880648 | e205d4542b2f7d13bc3c1a3bba2eae4c16cfc743 | /tests/testthat/test-13-dd-double-triangulation.R | e73cede2c03e13448285c7ee299984a48ef836e1 | [
"MIT"
] | permissive | trenchproject/TrenchR | 03afe917e19b5149eae8a76d4a8e12979c2b752f | 7164ca324b67949044827b743c58196483e90360 | refs/heads/main | 2023-08-20T11:54:26.054952 | 2023-08-04T03:52:42 | 2023-08-04T03:52:42 | 78,060,371 | 8 | 8 | NOASSERTION | 2022-09-15T21:36:08 | 2017-01-04T23:09:28 | R | UTF-8 | R | false | false | 545 | r | test-13-dd-double-triangulation.R | context("Double Triangulation")
# Validates degree_days() with method = "double.triangulation".
test_that("degree day function-double-triangulation work", {
  # The commented block below compared degree_days() against a reference CSV
  # of known degree-day values (requiring Pearson correlation >= 0.94); it
  # is kept for future re-validation but not run automatically.
  # fpath <- test_path("degree-days-double-triangle.csv")
  # doubletriang <- read.csv(fpath)
  #doubletriang$DD.Trench<-degree_days(doubletriang$Air.min,doubletriang$Air.max,12,33,"double.triangulation")
  #corrdoubletrian=cor(doubletriang$Degree.days,doubletriang$DD.Trench,method = "pearson")
  #expect_gte(corrdoubletrian, .94)
  #rm(doubletriang)
  # Spot check: one day with T_min 7 / T_max 14 and thresholds 12 and 33
  # should accumulate 0.29 degree days.
  expect_equal(degree_days(T_min=7, T_max=14, LDT=12, UDT=33, method="double.triangulation"), .29)
})
c24b08a1e72a6b53ed913b57ab0966484d8f8ed7 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/Momocs/examples/coo_area.Rd.R | 3cf8d08e4c3ab7174725e103b434f1b3a43d7db8 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 240 | r | coo_area.Rd.R | library(Momocs)
### Name: coo_area
### Title: Calculates the area of a shape
### Aliases: coo_area
### ** Examples
# Area of a single outline.
coo_area(bot[1])
# Distribution of outline areas across the bottles dataset; vapply()
# (instead of sapply()) guarantees a plain numeric vector for hist().
hist(vapply(bot$coo, coo_area, numeric(1)), breaks=10)
|
0f83dfd672cd6f269b76097db886047463b2dd2b | 3d0974bf3ab2c4d27c001513b02b1ce15a6ed745 | /plot4.R | dade0e2cece58845cedbe7b287c6f7c0c52f0589 | [] | no_license | YujiShen/ExData_Plotting1 | 5ea57e0ab249053d5242cab43ed67474109237e1 | 2d98a639e93b35bbefba3c34dab4d232274fd0b5 | refs/heads/master | 2020-12-24T20:15:47.166412 | 2014-06-06T13:39:58 | 2014-06-06T13:39:58 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,327 | r | plot4.R | #load data.table package
library(data.table)
# Read a small sample first to learn the column classes, then read the full
# file with those classes for speed; "?" marks missing values.
tab5rows <- fread("household_power_consumption.txt", header = T, sep = ";", nrow = 5, na.string = '?')
classes <- sapply(tab5rows, class)
power <- fread("household_power_consumption.txt", header = T, sep = ";", na.string = '?', colClasses = classes)
# Key on Date to enable fast binary-search subsetting.
setkey(power, Date)
# Keep only the two target days.
data <- power[c("1/2/2007", "2/2/2007")]
# Combine Date and Time into a single POSIXct column.
data[, DateTime := {temp <- paste(Date, Time); as.POSIXct(strptime(temp, "%d/%m/%Y %H:%M:%S"))}]
# Plot 4: four panels in a 2x2 grid.
png("plot4.png", width = 480, height = 480)
par(mfrow = c(2, 2))
## sub 1: global active power over time
plot(data$DateTime, data$Global_active_power, type = 'l', xlab = "", ylab = "Global Active Power")
## sub 2: voltage over time
plot(data$DateTime, data$Voltage, type='l', xlab='datetime', ylab='Voltage')
## sub 3: the three sub-metering series on one panel
plot(data$DateTime, data$Sub_metering_1, type='l', xlab='', ylab='Energy sub metering')
lines(data$DateTime, data$Sub_metering_2, col='red')
lines(data$DateTime, data$Sub_metering_3, col='blue')
## Fix: the middle legend label previously read "Sub_metering_3" twice; the
## red series is Sub_metering_2.
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = 1, col = c("black", "red", "blue"), bty = 'n')
## sub 4: global reactive power over time
plot(data$DateTime, data$Global_reactive_power, type = 'l', xlab = "datetime", ylab = "Global_reactive_power")
dev.off()
|
33c36dec735078fc247ece861f94fd7dde540c74 | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/coxmeg/R/fit_ppl.R | 1657706514b03b411dfb26549438bf167e172f97 | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 9,106 | r | fit_ppl.R |
#' Estimate HRs using PPL given a known variance component (tau)
#'
#' \code{fit_ppl} returns estimates of HRs and their p-values given a known variance component (tau).
#'
#' @section About \code{type}:
#' 'bd' is used for a block-diagonal relatedness matrix, or a sparse matrix the inverse of which is also sparse. 'sparse' is used for a general sparse relatedness matrix the inverse of which is not sparse.
#' @section About \code{solver}:
#' When \code{solver=1,3}/\code{solver=2}, Cholesky decompositon/PCG is used to solve the linear system. When \code{solver=3}, the solve function in the Matrix package is used, and when \code{solver=1}, it uses RcppEigen:LDLT to solve linear systems.
#'
#' @param tau A positive scalar. A variance component given by the user. Default is 0.5.
#' @param X A matrix of the preidctors. Can be quantitative or binary values. Categorical variables need to be converted to dummy variables. Each row is a sample, and the predictors are columns.
#' @param outcome A matrix contains time (first column) and status (second column). The status is a binary variable (1 for failure / 0 for censored).
#' @param corr A relatedness matrix. Can be a matrix or a 'dgCMatrix' class in the Matrix package. Must be symmetric positive definite or symmetric positive semidefinite.
#' @param type A string indicating the sparsity structure of the relatedness matrix. Should be 'bd' (block diagonal), 'sparse', or 'dense'. See details.
#' @param FID An optional string vector of family ID. If provided, the data will be reordered according to the family ID.
#' @param eps An optional positive value indicating the tolerance in the optimization algorithm. Default is 1e-6.
#' @param spd An optional logical value indicating whether the relatedness matrix is symmetric positive definite. Default is TRUE.
#' @param solver An optional bianry value that can be either 1 (Cholesky Decomposition using RcppEigen), 2 (PCG) or 3 (Cholesky Decomposition using Matrix). Default is NULL, which lets the function select a solver. See details.
#' @param verbose An optional logical value indicating whether to print additional messages. Default is TRUE.
#' @param order An optional integer value starting from 0. Only valid when dense=FALSE. It specifies the order of approximation used in the inexact newton method. Default is 1.
#' @return beta: The estimated coefficient for each predictor in X.
#' @return HR: The estimated HR for each predictor in X.
#' @return sd_beta: The estimated standard error of beta.
#' @return p: The p-value.
#' @return iter: The number of iterations until convergence.
#' @return ppl: The PPL when the convergence is reached.
#' @keywords Cox mixed-effects model
#' @export fit_ppl
#' @examples
#' library(Matrix)
#' library(MASS)
#' library(coxmeg)
#'
#' ## simulate a block-diagonal relatedness matrix
#' tau_var <- 0.2
#' n_f <- 100
#' mat_list <- list()
#' size <- rep(10,n_f)
#' offd <- 0.5
#' for(i in 1:n_f)
#' {
#' mat_list[[i]] <- matrix(offd,size[i],size[i])
#' diag(mat_list[[i]]) <- 1
#' }
#' sigma <- as.matrix(bdiag(mat_list))
#' n <- nrow(sigma)
#'
#' ## simulate random effexts and outcomes
#' x <- mvrnorm(1, rep(0,n), tau_var*sigma)
#' myrates <- exp(x-1)
#' y <- rexp(n, rate = myrates)
#' cen <- rexp(n, rate = 0.02 )
#' ycen <- pmin(y, cen)
#' outcome <- cbind(ycen,as.numeric(y <= cen))
#'
#' ## fit the ppl
#' re = fit_ppl(x,outcome,sigma,type='bd',tau=0.5,order=1)
#' re
fit_ppl <- function(X,outcome,corr,type,tau=0.5,FID=NULL,eps=1e-06,order=1,solver=NULL,spd=TRUE,verbose=TRUE){
  # Estimate HRs by maximising the penalised partial likelihood (PPL) of a
  # Cox mixed-effects model for a FIXED variance component `tau`.
  # Returns beta, HR = exp(beta), sd_beta, Wald p-values, the iteration
  # count and the PPL at convergence. The numerical work is delegated to
  # the package-internal routines rs_sum(), irls_ex() and irls_fast_ap();
  # this function prepares the data and picks a representation/solver.
  # Reset an invalid (negative) convergence tolerance to the default.
  if(eps<0)
  {eps <- 1e-06}
  if(!(type %in% c('bd','sparse','dense')))
  {stop("The type argument should be 'bd', 'sparse' or 'dense'.")}
  ## family structure
  # When family IDs are supplied, sort subjects by family so that a
  # block-diagonal relatedness matrix keeps its block structure.
  if(is.null(FID)==FALSE)
  {
    ord <- order(FID)
    FID <- as.character(FID[ord])
    X <- as.matrix(X[ord,,drop = FALSE])
    outcome <- as.matrix(outcome[ord,,drop = FALSE])
    corr <- corr[ord,ord,drop = FALSE]
  }else{
    X <- as.matrix(X)
    outcome <- as.matrix(outcome)
  }
  # Subjects censored before the first observed failure contribute nothing
  # to the partial likelihood; drop them from all inputs.
  min_d <- min(outcome[which(outcome[,2]==1),1])
  rem <- which((outcome[,2]==0)&(outcome[,1]<min_d))
  if(length(rem)>0)
  {
    outcome <- outcome[-rem, ,drop = FALSE]
    X <- as.matrix(X[-rem,,drop = FALSE])
    corr <- corr[-rem,-rem,drop = FALSE]
  }
  if(verbose==TRUE)
  {message(paste0('Remove ', length(rem), ' subjects censored before the first failure.'))}
  # Remove predictors that became constant (zero variance) after the
  # subject removal above; they are not estimable.
  x_sd = which(as.vector(apply(X,2,sd))>0)
  x_ind = length(x_sd)
  if(x_ind==0)
  {stop("The predictors are all constants after the removal of subjects.")}else{
    k <- ncol(X)
    if(x_ind<k)
    {
      warning(paste0(k-x_ind," predictor(s) is/are removed because they are all constants after the removal of subjects."))
      X = X[,x_sd,drop=FALSE]
      k <- ncol(X)
    }
  }
  n <- nrow(outcome)
  if(min(outcome[,2] %in% c(0,1))<1)
  {stop("The status should be either 0 (censored) or 1 (failure).")}
  # Starting values: random effects u and fixed effects beta at zero.
  u <- rep(0,n)
  beta <- rep(0,k)
  d_v <- outcome[,2]
  ## risk set matrix
  # ind column 1: permutation sorting subjects by time; column 2: its
  # inverse. rk holds min-ranks of the sorted times (ties share a rank);
  # rs_sum() builds the risk-set bookkeeping from them.
  ind <- order(outcome[,1])
  ind <- as.matrix(cbind(ind,order(ind)))
  rk <- rank(outcome[ind[,1],1],ties.method='min')
  n1 <- sum(d_v>0)
  rs <- rs_sum(rk-1,d_v[ind[,1]])
  # Determine whether corr is full rank (positive definite) or only
  # positive semidefinite (spsd).
  # NOTE(review): matrix.rank() is not provided by the packages loaded in
  # this file (it comes from e.g. fBasics) -- confirm it is available when
  # spd = FALSE.
  if(spd==FALSE)
  {
    rk_cor = matrix.rank(as.matrix(corr),method='chol')
    spsd = FALSE
    if(rk_cor<n)
    {spsd = TRUE}
    if(verbose==TRUE)
    {message(paste0('The sample size included is ',n,'. The rank of the relatedness matrix is ', rk_cor))}
  }else{
    spsd = FALSE
    rk_cor = n
    if(verbose==TRUE)
    {message(paste0('The sample size included is ',n,'.'))}
  }
  # If more than half of the entries are non-zero, fall back to the dense
  # code path regardless of the requested type.
  nz <- nnzero(corr)
  if( nz > ((as.double(n)^2)/2) )
  {type <- 'dense'}
  inv = NULL
  eigen = TRUE
  if(type=='dense')
  {
    if(verbose==TRUE)
    {message('The relatedness matrix is treated as dense.')}
    # Dense case: invert corr directly -- Cholesky when positive definite,
    # otherwise via an eigendecomposition with small eigenvalues floored.
    corr = as.matrix(corr)
    if(spsd==FALSE)
    {
      corr = chol(corr)
      corr = as.matrix(chol2inv(corr))
    }else{
      ei = eigen(corr)
      ei$values[ei$values<1e-10] = 1e-6
      corr = ei$vectors%*%diag(1/ei$values)%*%t(ei$vectors)
      # corr <- ginv(corr)
      rk_cor = n
      spsd = FALSE
    }
    inv <- TRUE
    sigma_i_s = corr
    corr <- s_d <- NULL
    si_d <- as.vector(diag(sigma_i_s))
    # Dense default solver is PCG (2); a requested Matrix-based Cholesky
    # (3) is mapped to RcppEigen Cholesky (1).
    if(is.null(solver))
    {solver = 2}else{
      if(solver==3)
      {solver = 1}
    }
  }else{
    if(verbose==TRUE)
    {message('The relatedness matrix is treated as sparse.')}
    corr <- as(corr, 'dgCMatrix')
    si_d = s_d = NULL
    if(spsd==FALSE)
    {
      # Block-diagonal: the inverse is sparse too, so precompute it.
      # General sparse: keep corr itself and store only its diagonal.
      if(type=='bd')
      {
        sigma_i_s <- Matrix::chol2inv(Matrix::chol(corr))
        inv = TRUE
        si_d <- as.vector(Matrix::diag(sigma_i_s))
      }else{
        sigma_i_s = NULL
        inv = FALSE
        s_d <- as.vector(Matrix::diag(corr))
      }
    }else{
      # Semidefinite sparse matrix: floor near-zero eigenvalues, invert.
      sigma_i_s = eigen(corr)
      if(min(sigma_i_s$values) < -1e-10)
      {
        stop("The relatedness matrix has negative eigenvalues.")
      }
      # sigma_i_s = sigma_i_s$vectors%*%(c(1/sigma_i_s$values[1:rk_cor],rep(0,n-rk_cor))*t(sigma_i_s$vectors))
      sigma_i_s$values[sigma_i_s$values<1e-10] = 1e-6
      sigma_i_s = sigma_i_s$vectors%*%diag(1/sigma_i_s$values)%*%t(sigma_i_s$vectors)
      rk_cor = n
      spsd = FALSE
      inv = TRUE
      si_d <- as.vector(Matrix::diag(sigma_i_s))
    }
    # Solver selection: Cholesky (1) for block-diagonal (avoid RcppEigen
    # for very large n), PCG (2) otherwise; solver 3 means Matrix-based
    # Cholesky (eigen = FALSE).
    if(is.null(solver))
    {
      if(type=='bd')
      {
        solver = 1
        if(n>5e4)
        {
          eigen = FALSE
        }
      }else{solver = 2}
    }else{
      if(solver==3)
      {
        eigen = FALSE
        solver = 1
      }
    }
    if(inv==TRUE)
    {
      sigma_i_s <- as(sigma_i_s,'dgCMatrix')
      if(eigen==FALSE)
      {
        sigma_i_s = Matrix::forceSymmetric(sigma_i_s)
      }
      corr <- s_d <- NULL
    }
  }
  if(verbose==TRUE)
  {
    if(inv==TRUE)
    {message('The relatedness matrix is inverted.')}
    if(type=='dense')
    {
      switch(
        solver,
        '1' = message('Solver: solve (base).'),
        '2' = message('Solver: PCG (RcppEigen:dense).')
      )
    }else{
      switch(
        solver,
        '1' = message('Solver: Cholesky decomposition (RcppEigen=',eigen,').'),
        '2' = message('Solver: PCG (RcppEigen:sparse).')
      )
    }
  }
  # Maximise the PPL by iteratively reweighted least squares; tau stays
  # fixed, and the exact determinant variant is used.
  if(type=='dense')
  {
    res <- irls_ex(beta, u, tau, si_d, sigma_i_s, X, eps, d_v, ind, rs$rs_rs, rs$rs_cs,rs$rs_cs_p,det=FALSE,detap='exact',solver=solver)
  }else{
    res <- irls_fast_ap(beta, u, tau, si_d, sigma_i_s, X, eps, d_v, ind, rs$rs_rs, rs$rs_cs,rs$rs_cs_p,order,det=FALSE,detap='exact',sigma_s=corr,s_d=s_d,eigen=eigen,solver=solver)
  }
  # Wald statistics: p-values from beta^2 / var(beta) ~ chi-square(1 df).
  res_beta = as.vector(res$beta)
  res_var = diag(as.matrix(res$v11))
  p = pchisq(res_beta^2/res_var,1,lower.tail=FALSE)
  re = list(beta=res_beta,HR=exp(res_beta),sd_beta=sqrt(res_var),p=as.vector(p),iter=res$iter,ppl=res$ll)
  return(re)
}
|
4a619cd569c3bc35f6213fa803cb0c36cbe953e6 | bcfde339f22bd8470f6eb88daa82f83e51cbaa77 | /Calculating Colley Ranking - Web Scraping and so on.R | 5895d5f8cb741722201e0d8521bf8f3a8f3ef572 | [] | no_license | sunlee0216/Data-Science-Portfolio | 5f7a0d6e5d0acdeee267b236bc7d2c6ff8ca26c9 | adc96f786d462fc0252356e83b10465275ab95da | refs/heads/master | 2020-06-22T13:33:31.735442 | 2019-08-02T19:47:43 | 2019-08-02T19:47:43 | 197,722,639 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,797 | r | Calculating Colley Ranking - Web Scraping and so on.R | # In this project, I will write a function (named "colley") that takes a year (any between 1961 and 2010)
# as input and calculates the Colley rankings using the matrix method for that year. The function should
# return a data frame with two columns, team name and colley score, and should be ordered from best to
# worst. I will get the data from web and my function should work without any data previously loaded into R.
# colley(v): calculates the Colley rankings using the matrix method for any year between 1961 and 2010
# v: the year to calculate the colley ranking of
# return value: a data frame with team name and colley score ordered from best to worst
# colley(v): Colley matrix-method rankings for a college-football season.
#
# v: the year (1961-2010) whose games are downloaded and ranked.
# Returns a data frame with columns Team and Score, ordered best to worst.
#
# Method: teams that appear both home and away and played at least 6 games
# are kept; games involving dropped teams and tied games are discarded;
# the Colley linear system C x = B is built from wins, losses and
# opponents and solved.
#
# Fixes vs. the original: seq_along()/seq_len() instead of 1:length()
# (safe on empty vectors), TRUE instead of T, no shadowing of the builtins
# `new`, `c`, `list` and `e`, preallocated B, and the opponent list is
# written once per team instead of once per game.
colley <- function(v){
  # --- download and tidy the season file --------------------------------
  v <- as.character(v)
  # sep = v glues "cf" + year + "gms.txt" into the file name.
  a <- read.fwf(paste("http://homepages.cae.wisc.edu/~dwilson/rsfc/history/howell/cf", "gms.txt", sep = v),
                c(11, 28, 3, 28, 3, 27))
  # ateam/ascore = away team/score, hteam/hscore = home team/score
  names(a) <- c("year", "ateam", "ascore", "hteam", "hscore", "location")
  a$ateam <- gsub(" ", "", as.character(a$ateam))
  a$hteam <- gsub(" ", "", as.character(a$hteam))
  # Appearance counts of each team as away (team1) and home (team2).
  team1 <- data.frame(sort(table(a$ateam)))
  team1$Var1 <- as.character(team1$Var1)
  team2 <- data.frame(sort(table(a$hteam)))
  team2$Var1 <- as.character(team2$Var1)
  # --- keep only teams that played at least 6 games ---------------------
  df <- data.frame(name = character())
  for(i in seq_along(team1$Var1)){
    for(j in seq_along(team2$Var1)){
      if(team1$Var1[i] == team2$Var1[j]){
        if(team1$Freq[i] + team2$Freq[j] > 5 || team1$Freq[i] > 5 || team2$Freq[j] > 5){
          df <- rbind(df, data.frame(name = as.character(team1$Var1[i]), stringsAsFactors = FALSE))
        }
      }
    }
  }
  df <- unique(df)
  # --- drop games against teams that played fewer than 6 games ----------
  keep_away <- data.frame()
  for(i in seq_along(a$ateam)){
    for(j in seq_along(df$name)){
      if(df$name[j] == a$ateam[i]){
        keep_away <- rbind(keep_away, data.frame(ateam = a$ateam[i], ascore = a$ascore[i],
                                                 hteam = a$hteam[i], hscore = a$hscore[i],
                                                 stringsAsFactors = FALSE))
      }
    }
  }
  keep_both <- data.frame()
  for(i in seq_along(keep_away$hteam)){
    for(j in seq_along(df$name)){
      if(df$name[j] == keep_away$hteam[i]){
        keep_both <- rbind(keep_both, data.frame(ateam = keep_away$ateam[i], ascore = keep_away$ascore[i],
                                                 hteam = keep_away$hteam[i], hscore = keep_away$hscore[i]))
      }
    }
  }
  # --- tally wins and losses, ignoring ties -----------------------------
  games <- data.frame()
  for(i in seq_along(keep_both$ateam)){
    if(keep_both$ascore[i] > keep_both$hscore[i]){
      games <- rbind(games, data.frame(wins = keep_both$ateam[i], loses = keep_both$hteam[i]))
    }
    if(keep_both$ascore[i] < keep_both$hscore[i]){
      games <- rbind(games, data.frame(wins = keep_both$hteam[i], loses = keep_both$ateam[i]))
    }
  }
  wins <- data.frame(table(games$wins))
  loses <- data.frame(table(games$loses))
  # final: team name, number of wins, number of losses (teams must appear
  # in both tables, i.e. have at least one win and one loss).
  final <- data.frame()
  for(i in seq_along(wins$Var1)){
    for(j in seq_along(loses$Var1)){
      if(wins$Var1[i] == loses$Var1[j]){
        final <- rbind(final, data.frame(teams = wins$Var1[i], wins = wins$Freq[i], loses = loses$Freq[j]))
      }
    }
  }
  # --- index teams and collect each team's opponents --------------------
  final$teams <- as.character(final$teams)
  # Environment used as a hash table: team name -> row index in `final`.
  team_index <- new.env()
  for(i in seq_along(final$teams)){
    team_index[[final$teams[i]]] <- i
  }
  games$wins <- as.character(games$wins)
  games$loses <- as.character(games$loses)
  opponents <- list()
  for(i in seq_along(final$teams)){
    opp <- c()
    for(j in seq_along(games$wins)){
      if(final$teams[i] == games$wins[j]){
        opp[length(opp) + 1] <- team_index[[games$loses[j]]]
      }
      if(final$teams[i] == games$loses[j]){
        opp[length(opp) + 1] <- team_index[[games$wins[j]]]
      }
    }
    opponents[[i]] <- opp
  }
  final[["opponents"]] <- opponents
  # --- build and solve the Colley system C x = B ------------------------
  N <- length(final$teams)
  C <- matrix(0, nrow = N, ncol = N)
  B <- numeric(N)
  for(i in seq_len(N)){
    # Diagonal: 2 + number of games; off-diagonal: -(games vs team j).
    C[i, i] <- 2 + length(final$opponents[[i]])
    B[i] <- 1 + ((final[i, 2] - final[i, 3]) / 2)
    for(j in seq_len(N)){
      if(i != j){
        C[i, j] <- -1 * (sum(final$opponents[[i]] == j))
      }
    }
  }
  x <- solve(C, B)
  solution <- data.frame("Team" = final[, 1], "Score" = x)
  solution <- solution[order(solution$Score, decreasing = TRUE), ]
  return(solution)
}
|
d5e93bb7d8a839d8afb53f79ab64a476013019a2 | 27b4436fdbab36fd3d7963df408a2d259e8fd909 | /man/extract_precip.Rd | 305c260f41c650e70ebf634a2d551fb48ef0d903 | [] | no_license | dklinges9/mcera5 | 962833c6b208d892ddb7e1ef1f9e80b9b7fa6140 | a37074a74f78439118f90c2ee76ab5bafaf7f2ed | refs/heads/master | 2023-07-23T11:09:26.529124 | 2023-07-10T23:13:41 | 2023-07-10T23:13:41 | 260,175,954 | 10 | 4 | null | 2023-09-05T13:51:41 | 2020-04-30T10:01:45 | R | UTF-8 | R | false | true | 1,823 | rd | extract_precip.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/extract_precip.R
\name{extract_precip}
\alias{extract_precip}
\title{Produces daily or hourly precipitation data for a single location ready for use
with `microclima::runauto`.}
\usage{
extract_precip(
nc,
long,
lat,
start_time,
end_time,
d_weight = TRUE,
convert_daily = TRUE
)
}
\arguments{
\item{nc}{character vector containing the path to the nc file. Use the
`build_era5_request` and `request_era5` functions to acquire an nc file with
the correct set of variables. Data within nc file must span the period
defined by start_time and end_time.}
\item{long}{longitude of the location for which data are required (decimal
degrees, -ve west of Greenwich Meridian).}
\item{lat}{latitude of the location for which data are required (decimal
degrees, -ve south of the equator).}
\item{start_time}{a POSIXlt object indicating the first hour for which data
are required.}
\item{end_time}{a POSIXlt object indicating the last hour for which data
are required.}
\item{d_weight}{logical value indicating whether to apply inverse distance
weighting using the 4 closest neighbouring points to the location defined by
`long` and `lat`. Default = `TRUE`.}
\item{convert_daily}{a flag indicating whether the user desires to convert the
precipitation vector from hourly to daily averages (TRUE) or remain as hourly
values (FALSE). Only daily precipitation will be accepted by `microclima::runauto`.}
}
\value{
a numeric vector of daily or hourly precipitation (mm).
}
\description{
`extract_precip` takes an nc file containing hourly ERA5
climate data, and for a given set of coordinates, produces an (optionally)
inverse distance weighted mean of precipitation (at daily or hourly resolution)
ready for use with `microclima::runauto`.
}
|
e21129060c7f0ebebab833314121b69bd3913b7c | de93acb5426b3d637c7f1a10d902b837fbcd6988 | /man/check_team_membership.Rd | e79a01dac312d39543442ad55da481a0e2c22021 | [
"MIT"
] | permissive | Sage-Bionetworks/dccvalidator | 40c32495bdf3b06f6601b0cc63e12e6872bca0d3 | dcdddd8c6e3c3cfc56388352501de0d947fa60b5 | refs/heads/master | 2023-04-09T04:26:45.088613 | 2022-05-05T20:59:53 | 2022-05-05T20:59:53 | 130,909,540 | 10 | 16 | NOASSERTION | 2022-12-08T07:01:16 | 2018-04-24T20:23:02 | R | UTF-8 | R | false | true | 832 | rd | check_team_membership.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/check-team-membership.R
\name{check_team_membership}
\alias{check_team_membership}
\title{Check team membership}
\usage{
check_team_membership(teams, user, syn)
}
\arguments{
\item{teams}{Team IDs to check membership in}
\item{user}{User to check (e.g. output from syn$getUserProfile())}
\item{syn}{Synapse client object}
}
\value{
A condition object indicating whether the Synapse user is a member of
the given team(s).
}
\description{
Check if a user is a member of any of the given teams.
}
\examples{
\dontrun{
syn <- synapse$Synapse()
syn$login()
user <- syn$getUserProfile("dcctravistest")
check_team_membership(teams = "3396691", user = user, syn = syn)
check_team_membership(
teams = c("3397398", "3377637"),
user = user,
syn = syn
)
}
}
|
f2bdd451c318213a1f46855b2d06f99efb999986 | f7290d41cdcf6fd40b7647b69670e09d96c8386f | /R/SS_VCM.R | aa79e20dfa2bdb70e145818e0617547de7fb8c21 | [] | no_license | Heuclin/VCGSS | ff9a3d401b59d2d5bbf7163e536016f66ac4419f | 71dd884ec402a5dfdd3e98c93ea18e1b66a1acd6 | refs/heads/main | 2022-05-01T19:42:44.321397 | 2022-04-26T13:33:23 | 2022-04-26T13:33:23 | 408,858,225 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 25,727 | r | SS_VCM.R |
#____________________________________________________________________________________________________________________
#
# Varying Coefficient Model (VCM) code
# B. Heuclin, F. Mortier, C. Trottier, M. Denis
# 22/09/2020
#
#____________________________________________________________________________________________________________________
library(mvnfast)
library(splines)
library(doParallel)
library(parallel)
library(coda)
library(stringr)
library(rlang)
library(utils)
# Evaluate the degree-n Legendre polynomial at t (t in [-1, 1]) using the
# closed form P_n(t) = 2^(-n) * sum_{k=0}^{n} choose(n, k)^2 (t-1)^(n-k) (t+1)^k.
# Vectorised over t, like the original accumulation loop.
legendre_polynome <- function(n, t){
  term <- function(k) choose(n, k)^2 * (t - 1)^(n - k) * (t + 1)^k
  Reduce(`+`, lapply(0:n, term)) / 2^n
}
# Empirical mode of a numeric sample: the grid point at which the kernel
# density estimate of x is highest. (Note: masks base::mode in this file.)
mode <- function(x) {
  dens <- stats::density(x)
  dens$x[which.max(dens$y)]
}
# Joint posterior frequencies of variable-inclusion patterns.
#
# tab:        matrix of sampled binary inclusion indicators, one MCMC
#             sample per row and one variable per column.
# start, end: column (variable) range over which the joint pattern is read.
#
# Returns a named vector of pattern frequencies sorted in decreasing
# order; each name lists the selected variable indices joined by "/"
# (offset so they refer to the original columns), or "nothing" when no
# variable is selected.
prob_jointe <- function(tab, start, end){
  patterns <- apply(tab[, start:end], 1, function(l) paste(l, collapse = ""))
  # Bug fix: divide by the actual number of samples instead of the
  # hard-coded 100 (the old "/ 100" was only a probability when the chain
  # happened to hold exactly 100 samples).
  proba.g.jointe <- sort(table(patterns) / nrow(tab), decreasing = TRUE)
  for(i in seq_along(proba.g.jointe)){
    st <- names(proba.g.jointe)[i]
    if(!grepl("1", st)){
      names(proba.g.jointe)[i] <- "nothing"
    }else{
      # Positions of the selected variables within the pattern string
      # (base strsplit replaces the stringr::str_locate_all dependency).
      idx <- which(strsplit(st, "")[[1]] == "1")
      names(proba.g.jointe)[i] <- paste(start - 1 + idx, collapse = "/")
    }
  }
  return(proba.g.jointe)
}
# Dispatch to the compiled MCMC sampler: the spike-and-slab sampler
# (ssVcmCpp) when variable selection is requested, otherwise the plain
# varying-coefficient sampler (vcmCpp). Inputs are coerced to the types
# the C++ side expects (matrix / list).
ss_vcm <- function(Y, X, settings, init, selection = TRUE, var_gp = "global")
{
  sampler <- if (selection) ssVcmCpp else vcmCpp
  sampler(as.matrix(Y), as.matrix(X), as.list(settings), as.list(init), var_gp = var_gp)
}
#' Main function to run the Bayesian varying coefficient model using spike-and-slab prior for variable selection
#'
#' @param Y n x T matrix of phenotypic observations (n is the number of individuals, T is the number of observed time points)
#' @param X n x q matrix of variables (markers), q is the number of variables
#' @param ENV n x ne matrix of environmental variables, ne is the number of environmental variables
#' @param selection boolean indicating if selection is requiered, the default is TRUE
#' @param interpolation character, indicating the approach used to estimate the curves of the effects. Possible choices are c('Legendre', 'B-spline', 'P-spline', 'RW'); the default is 'P-spline'.
#' @param df integer, only for Legendre, B-spline or P-spline approach. df is the degree of freedom parameter for these approaches, the default is floor(ncol(Y)/3)+3
#' @param order_diff interger in [1, 2, 3], order of the difference penalty, the default is 2.
#' @param niter number of iterations of the MCMC chain, the default is 10000
#' @param burnin number of initial iterations of the MCMC chain which should be discarded, the defaults to 5000
#' @param thin save only every thin-th iteration, the default is 10
#' @param rep number of repetitions, the default is one
#' @param cores number of cores for parallelisation, the default is -1 indicating that all cores minus 1 are used.
#' @param save boolean indicating if the MCMC chains should be saved, the default is TRUE
#' @param path optional (existing) path to save the MCMC chains if save = TRUE, if NULL, path = Fused_HS if selection = TRUE, Fusion_HS ortherwise
#' @param summary.fct name of the function (mean, mode or median) used to calculate the estimation of the parameters from the MCMC posterior samples, the default is mean.
#' @param scale boolean, indicating if X should be scaled, the default is TRUE
#' @param estimation boolean, indicating summary of MCMC chains should be apply to provide estimation of parameters and curve, the default is TRUE
#' @param gelman.diag boolean, indicating gelman.diag should be apply on MCMC chains, the default is TRUE
#' @param gelman.plot boolean indicating if the gelman plot should be plotted, the default is FALSE
#' @param traceplot boolean indicating if the trace plot should be plotted, the default is FALSE
#' @param ... additional arguments
#'
#' @return Return a list
#' @export
#'
#' @example R/example_1.R
#'
VCM_fct <- function(Y, X, ENV = NULL, selection = TRUE, interpolation='P-spline', df = floor(ncol(Y)/3)+3,
                    order_diff = 2, niter = 10000, burnin = 5000, thin = 10,
                    rep = 1, cores = -1, save = TRUE, path = NULL,
                    summary.fct = mean, scale = FALSE, estimation = TRUE,
                    gelman.diag = TRUE, gelman.plot=FALSE, traceplot=FALSE) #, rm_old_chain = TRUE, ...)
{
  # Driver for the Bayesian varying-coefficient model: builds the basis
  # matrices and MCMC settings, runs `rep` independent chains in parallel
  # (optionally saving each chain to disk under `path`), then optionally
  # summarises the chains (estimation.VCM) and checks convergence
  # (gelman.diag.VCM). Returns a list with data, parameters, settings and
  # (depending on the flags) the chains/estimates/diagnostics.
  require(mvnfast)
  require(splines)
  require(doParallel)
  require(parallel)
  require(coda)
  require(stringr)
  require(rlang)
  require(utils)
  require(invgamma)
  if(!interpolation %in% c('Legendre', 'B-spline', 'P-spline', 'RW')) stop("interpolation must be in the list : 'Legendre', 'B-spline', 'P-spline', 'RW'!")
  # print("--------------------------------")
  print("Bayesian Varying Coefficient Model using spike-and-slab prior")
  # n individuals, T time points, q candidate variables
  # (the trailing bare `n; q; T` are no-ops).
  n <- nrow(Y); T <- ncol(Y); q <- ncol(X); n; q; T
  print(paste0("Number of individuals: ", n))
  print(paste0("Number of time points: ", T))
  print(paste0("Number of variables: ", q))
  if(cores > parallel::detectCores()-1) warning(paste0("Number of cores can not be upper than ", parallel::detectCores()-1, " on your computer!"), immediate. = TRUE)
  if(cores == -1) cores <- max(1, parallel::detectCores()-1)
  # One parallel task per repetition.
  pars <- expand.grid(rep = 1:rep) #, fold = 1:CV)
  cores <- min(cores, nrow(pars))
  doParallel::registerDoParallel(cores = cores)
  print(paste("Nb cores = ", cores))
  # Default output directory; created with a shell call.
  if(is.null(path) & save){
    path <- "SS_VCM_results"
    print(paste0("Default path: '", path, "'." ), immediate. = TRUE)
    system(paste0("mkdir ", path))
  }
  # If the output directory already holds files, ask (interactively)
  # whether to wipe it before writing new chains.
  if(save){
    files <- system(paste0("ls ", path) , intern = TRUE)
    if(!rlang::is_empty(files)){
      warning(paste0(path, " directory is not empty !"), immediate. = TRUE)
      rm_old_chain <- utils::askYesNo("Do you want to delete all files ?" )
      if(is.na(rm_old_chain)) stop("Function have been stoped !")
      if(rm_old_chain) system(paste0("rm -rf ", path, "/* "))
    }
  }
  # Basis dimensions: intercept/environment curves lose one degree of
  # freedom to the identifiability constraint applied below.
  df_mu <- df_env <- df-1
  # NOTE(review): "AR" is not an accepted `interpolation` value (the check
  # above allows 'RW'), and `T` here is the local number of time points,
  # not TRUE -- confirm whether this branch should test for 'RW'.
  if(interpolation == "AR") df_beta <- T else df_beta <- df
  if(scale) X <- scale(X)
  # Record inputs and run parameters in the returned object.
  output <- list()
  output$data$Y <- Y
  output$data$X <- X
  output$data$ENV <- ENV
  output$parameters$n <- n
  output$parameters$q <- q
  output$parameters$T <- T
  output$parameters$interpolation = interpolation
  output$parameters$order_diff = order_diff
  output$parameters$niter = niter
  output$parameters$burnin = burnin
  output$parameters$thin = thin
  output$parameters$cores = cores
  output$parameters$rep = rep
  output$parameters$save = save
  output$parameters$path = path
  output$parameters$gelman.plot = gelman.plot
  output$parameters$traceplot = traceplot
  output$parameters$summary.fct = summary.fct
  output$parameters$df_mu <- output$parameters$df_env <- df_mu
  output$parameters$df_beta <- df_beta
  # Settings _____________________________________________________________________________
  print("Settings")
  settings <- list()
  settings$l <- df # P-spline basis dimension: number of knots + spline degree (3)
  settings$n.iter <- niter
  settings$burnin <- burnin
  settings$thin = thin
  settings$k <- 1 # number of iterations for the Metropolis-Hastings step
  settings$Sigma_m0 <- 1e6*diag(settings$l) # prior covariance matrix for m
  settings$a <- 1 # prior parameter of se2
  settings$bb <- 1 # prior parameter of se2
  settings$s <- 1 # prior parameter of lambda2
  settings$r <- 1 # prior parameter of lambda2
  settings$rho_tuning <- 0.05
  # settings$pi <- 0.1
  settings$s2_a <- 10
  # Time points rescaled to [-1, 1] for the basis construction.
  tmp_temps <- 2*((1:T)-1)/(T-1) -1
  # Bt: basis of the time-varying intercept (Legendre or B-spline).
  if(interpolation == 'Legendre'){
    settings$Bt <- matrix(1, T, settings$l);
    for(t in 1:T) for(j in 1:settings$l) settings$Bt[t, j] <- legendre_polynome(j-1, tmp_temps[t])
  }else{
    settings$Bt <- as.matrix(splines::bs(tmp_temps, degree = 3, df = settings$l, intercept = TRUE)) # B-spline basis
  }
  # B: basis of the variable effects (identity for RW, splines otherwise).
  if(interpolation == 'RW') settings$B <- diag(T)
  if(interpolation == 'B-spline' | interpolation == 'P-spline') settings$B <- as.matrix(splines::bs(tmp_temps, degree = 3, df = settings$l, intercept = TRUE)) # B-spline basis
  if(interpolation == 'Legendre'){ settings$B <- matrix(1, T, settings$l); for(t in 1:T) for(j in 1:settings$l) settings$B[t, j] <- legendre_polynome(j-1, tmp_temps[t]) }
  # Be: one basis per environmental covariate, evaluated on the covariate
  # rescaled to [-1, 1]; each basis is then centred via a QR step so its
  # columns are orthogonal to the constant (identifiability vs. alpha).
  if(!is.null(ENV)){
    settings$n_env <- ncol(ENV)
    settings$Be <- array(1, c(T, settings$l, settings$n_env))
    settings$Be_tmp <- array(NA, c(T, settings$l-1, settings$n_env))
    for(i in 1:settings$n_env){
      tmp_env <- (ENV[, i] - min(ENV[, i]))/(max(ENV[, i]) - min(ENV[, i])) *2 -1
      if(interpolation == 'Legendre'){
        for(t in 1:T) for(j in 1:settings$l) settings$Be[t, j, i] <- legendre_polynome(j-1, tmp_env[t])
      }else{
        settings$Be[, , i] <- as.matrix(splines::bs(tmp_env, degree = 3, df = settings$l, intercept = TRUE)) # B-spline basis
      }
    }
    Z=list()
    B_tilde <- settings$Be
    for ( j in (1:dim(B_tilde)[3])){
      # Z[[j]] spans the orthogonal complement of t(Be) %*% 1, so the
      # centred basis Be %*% Z sums to zero over time.
      QR = qr(t(t(rep(1, T))%*%B_tilde[ , , j]))
      QR_Q = qr.Q(QR,complete =TRUE)
      Z[[j]] = QR_Q[, 2:ncol(QR_Q)]
      settings$Be_tmp[, , j] <- settings$Be[, , j] %*% Z[[j]]
    }
    settings$Be <- settings$Be_tmp
  }else{
    # No environment: zero basis with a single dummy covariate slot.
    settings$Be <- array(0, c(T, settings$l-1, 1))
    settings$n_env <- 1
  }
  # Penalty matrices: identity (ridge) for B-spline/Legendre, difference
  # penalty of order `order_diff` for P-spline/RW (slightly regularised).
  if(interpolation == 'B-spline' | interpolation == 'Legendre') {
    settings$K <- diag(ncol(settings$B))
    settings$D <- diag(ncol(settings$B))
  }else{
    settings$D <- diff(diag(ncol(settings$B)), differences = order_diff)
    settings$K <- t(settings$D)%*%settings$D
    settings$K <- settings$K + 0.001 * diag(ncol(settings$B))
  }
  # K2: second-order difference penalty for the (l-1)-dimensional curves.
  settings$K2 <- t(diff(diag(settings$l-1), differences = 2))%*%diff(diag(settings$l-1), differences = 2)
  settings$K2 <- settings$K2 + 0.001 * diag(settings$l-1)
  # Same centring constraint applied to the intercept basis Bt.
  Z=list()
  B_tilde <- list(settings$Bt)
  QR = qr(t(t(rep(1, T))%*%B_tilde[[1]]))
  QR_Q = qr.Q(QR,complete =TRUE)
  Z[[1]] = QR_Q[, 2:ncol(QR_Q)]
  settings$Bt <- settings$Bt %*% Z[[1]]
  settings$epsilon = 1e-5
  output$settings <- settings
  # MCMC _____________________________________________________________________________
  print("MCMC sampler")
  # list_chain <- list()
  # for(k in 1:nrow(pars)){
  # One chain per repetition; each worker draws its own random starting
  # values, runs the compiled sampler, and either writes the chain to
  # disk (save = TRUE) or returns it.
  list_chain <- foreach::foreach(k = 1:nrow(pars), .verbose = FALSE) %dopar% {
    init <- list()
    init$alpha <- stats::rnorm(1, 0, 3)
    init$pi = stats::runif(1, 0.001, 0.99)
    init$m <- stats::rnorm(settings$l-1, 0, 1)
    init$e <- matrix(0, settings$l-1, settings$n_env)
    init$b <- matrix(stats::rnorm( ncol(settings$B)*q, 0, 1), ncol(settings$B), q)
    init$rho <- stats::runif(1, 0.001, 0.99) # rho, autoregressive parameter of the residual covariance matrix
    init$se2 <- abs(stats::rnorm(1, 1, 3)) # sigma^2, scalar residual variance
    init$g <- rep(1, q) # sample(0:1, q, replace = TRUE); # gamma inclusion indicators for the spike-and-slab prior
    init$tau2 <- rep(100, q) # tau2, group shrinkage parameters, length-q vector
    init$tau0 <- 100
    init$tau0_e <- rep(100, settings$n_env)
    init$xi <- matrix(1, nrow(settings$D), q)
    # init <- list()
    # init$alpha <- 0
    # init$pi = 0.5
    # init$m <- rep(0, settings$l-1)
    # init$e <- matrix(0, settings$l-1, settings$n_env)
    # init$b <- matrix(0, ncol(settings$B), q)
    # init$tau2 <- rep(100, q) # tau2, group shrinkage parameters, length-q vector
    # init$rho <- 0.5 # rho, autoregressive parameter of the residual covariance matrix
    # init$se2 <- 5 # sigma^2, scalar residual variance
    # init$g <- rep(1, q); # gamma inclusion indicators for the spike-and-slab prior
    # init$tau0 <- 100
    # init$tau0_e <- rep(100, settings$n_env)
    # init$xi <- matrix(1, nrow(settings$D), q)
    # chain <- ss_vcm(Y = Y, X = X, settings = settings, init = init, selection = selection)
    # browser()
    if(selection){
      chain <- ssVcmCpp(as.matrix(Y), as.matrix(X), as.list(settings), as.list(init), var_gp = "global")
    }else{
      chain <- vcmCpp(as.matrix(Y), as.matrix(X), as.list(settings), as.list(init), var_gp = "global")
    }
    if(save) save(chain, settings, init, file = paste0(path, "/chain_rep_", k, ".Rdata"))
    if(save) return() else return(chain)
    # list_chain[[k]] <- chain
  }
  if(!save) {output$list_chain <- list_chain ; rm(list_chain)}
  if(save) save(output, file = paste0(path, "/output.Rdata"))
  # Estimation _____________________________________________________________________________
  if(estimation){
    output <- estimation.VCM(output)
    if(save) save(output, file = paste0(path, "/output.Rdata"))
  }
  # Gelman diag _____________________________________________________________________________
  # Convergence diagnostics need at least two repetitions.
  if(rep>1 & gelman.diag) output <- gelman.diag.VCM(output, gelman.plot = gelman.plot, traceplot = traceplot) else output$gelman.diag <- NULL
  if(save) save(output, file = paste0(path, "/output.Rdata"))
  print("End")
  return(output)
}
#___________________________________________________________________________________________________________________
#' Gelman-Rubin's convergence diagnostics of the MCMC chain generated by the \code{VCM_fct} function
#'
#' This function can be applied only if the number of repetitions requested in the \code{VCM_fct} function is greater than one.
#'
#' @param object list generated by the \code{VCM_fct} function
#' @param gelman.plot boolean indicating if the gelman plot should be plotted
#' @param traceplot boolean indicating if the trace plot should be plotted
#'
#' @return return input \code{object} including Gelman Rubin's convergence diagnostics
#' @export
#'
#' @examples
gelman.diag.VCM <- function(object, gelman.plot = FALSE, traceplot = FALSE){
  # Gelman-Rubin convergence diagnostics for the MCMC chains produced by
  # VCM_fct. Requires object$parameters$rep > 1 (at least two chains).
  # Returns `object` augmented with the coda mcmc.list, the multivariate
  # diagnostic, and per-coefficient psrf values for selected variables.
  require(coda)
  # Unpack run settings / data stored by VCM_fct. Locals are renamed so
  # they no longer shadow base::rep and base::save.
  n_rep <- object$parameters$rep
  df_beta <- object$parameters$df_beta
  df_mu <- df_env <- object$parameters$df_mu
  q <- object$parameters$q
  ENV <- object$data$ENV
  path <- object$parameters$path
  save_chains <- object$parameters$save
  print("Gelman diagnostic")
  # b[[i]][[j]][[k]]: samples of basis coefficient j of variable i from
  # repetition k, kept only when the variable's marginal inclusion
  # probability exceeds 0.5 in that repetition.
  mcmc_list_chain <- b <- list()
  for(i in seq_len(q)){
    b[[i]] <- list()
    for(j in seq_len(df_beta)) b[[i]][[j]] <- list()
  }
  g <- NULL
  for(k in seq_len(n_rep)){
    # Chains are either reloaded from disk (save = TRUE) or taken from
    # the returned object; load() defines `chain` in this environment.
    if(save_chains) load(paste0(path, "/chain_rep_", k, ".Rdata")) else chain <- object$list_chain[[k]]
    prob <- colMeans(chain$g)
    g <- cbind(g, prob)
    for(i in seq_len(q)) for(j in seq_len(df_beta)) if(prob[i] > 0.5) b[[i]][[j]][[k]] <- coda::mcmc(chain$b[, j, i])
    # Assemble the always-present parameters into one mcmc object.
    tmp2 <- tmp3 <- NULL
    tmp <- do.call(cbind, chain[c("alpha", "m")]); colnames(tmp) <- c("alpha", paste0("m", 1:(df_mu)))
    if(!is.null(ENV)) {for(i in seq_len(ncol(ENV))) tmp2 <- cbind(tmp2, chain$e[, , i]); colnames(tmp2) <- paste0("e", 1:((df_env)*ncol(ENV)))}
    tmp3 <- do.call(cbind, chain[c("pi", "rho", "se2")]); colnames(tmp3) <- c("pi", "rho", "se2")
    mcmc_list_chain[[k]] <- coda::mcmc(cbind(tmp, tmp2, tmp3))
  }
  object$mcmc_list <- coda::mcmc.list(mcmc_list_chain); rm(mcmc_list_chain)
  object$gelman.diag <- coda::gelman.diag(object$mcmc_list)
  print(object$gelman.diag)
  print("Summary of the Gelman diagnostic:")
  print(summary(object$gelman.diag$psrf))
  print("mpsrf:")
  print(object$gelman.diag$mpsrf)
  if(gelman.plot) coda::gelman.plot(object$mcmc_list)
  if(traceplot) plot(object$mcmc_list)
  # psrf of each retained coefficient b[i, j], computed across the
  # repetitions in which variable i was selected (needs at least two).
  gelman_diag_b <- matrix(NA, q, df_beta)
  for(i in seq_len(q)){
    for(j in seq_len(df_beta)){
      kept <- which(sapply(b[[i]][[j]], length) > 0)
      if(length(kept) > 1) gelman_diag_b[i, j] <- coda::gelman.diag(coda::mcmc.list(b[[i]][[j]][kept]))$psrf[1]
    }
  }
  object$gelman.diag.b.psrf <- round(gelman_diag_b, 2)
  object$gelman.diag.b.psrf.median <- round(apply(gelman_diag_b, 1, stats::median, na.rm = TRUE), 2)
  tmp <- object$gelman.diag.b.psrf.median
  # Bug fix: draw the reference line only when the plot is drawn. In the
  # original one-liner the `;` put graphics::abline() OUTSIDE the if(), so
  # it ran unconditionally and errored when no plot device had been used.
  if(sum(!is.na(tmp)) > 0) {
    graphics::plot(object$gelman.diag.b.psrf.median, ylab = "median of psrf", xlab = "variables", main = "psrf of parameters 'b'")
    graphics::abline(1.1, 0, col = 2)
  }
  return(object)
}
#___________________________________________________________________________________________________________________
#' Obtain parameter estimations from many MCMC chains generated by the \code{VCM_fct} function
#'
#' @param object list generated by the \code{VCM_fct} function
#'
#' @return the input \code{object} augmented with \code{full_chain} (all chains
#'   concatenated) and \code{estimation} (posterior summaries, variable
#'   selection indicators and estimated curves)
#' @export
#'
estimation.VCM <- function(object){
  # Fail fast if 'stringr' is unavailable: require() would only warn and
  # return FALSE, postponing the error until stringr::str_locate_all() below.
  if (!requireNamespace("stringr", quietly = TRUE)) {
    stop("Package 'stringr' is required by estimation.VCM().", call. = FALSE)
  }
  print("Summary")
  rep <- object$parameters$rep            # number of MCMC repetitions (chains)
  q <- object$parameters$q                # number of candidate variables
  n_time <- object$parameters$T           # number of time points (renamed from
                                          # 'T' so the logical constant is not masked)
  ENV <- object$data$ENV                  # optional environmental covariates
  path <- object$parameters$path
  summary.fct <- object$parameters$summary.fct
  # Per-chain selection results: marginal (prob > 0.5), joint (most frequent
  # inclusion pattern) and raw marginal inclusion probabilities.
  res_selec_mar <- res_selec_joint <- res_selec_prob <- matrix(0, rep, q)
  prob_joint <- rep(NA, rep)
  tmp <- matrix(NA, rep*q, n_time)
  curve_beta <- cbind(expand.grid(chain = 1:rep, pos = 1:q), tmp)
  if(!is.null(ENV)) {
    tmp <- matrix(NA, rep*ncol(ENV), n_time)
    curve_env <- cbind(expand.grid(chain = 1:rep, pos = 1:ncol(ENV)), tmp)
  }
  tmp <- matrix(NA, rep, n_time)
  curve_mu <- cbind(expand.grid(chain = 1:rep, pos = 1), tmp)
  # Accumulators for the concatenation of all chains; all slots start at NULL
  # ('pi' included, which the original omitted) so c()/rbind() can grow them.
  full_chain <- list()
  full_chain$pi <- NULL
  full_chain$alpha <- NULL
  full_chain$m <- NULL
  full_chain$e <- NULL
  full_chain$b <- NULL
  full_chain$g <- NULL
  full_chain$rho <- NULL
  full_chain$se2 <- NULL
  g <- NULL
  for(i in 1:rep){
    # Either take the chain kept in memory or reload it from disk
    # (load() restores an object named 'chain' into this frame).
    if(!object$parameters$save ){
      chain <- object$list_chain[[i]]
    }else{
      load(paste0(path, "/chain_rep_", i, ".Rdata"))
    }
    g <- cbind(g, colMeans(chain$g))
    res_selec_mar[i, which(colMeans(chain$g[, ])>0.5)] <- 1
    # Most frequent joint inclusion pattern of this chain, encoded as a
    # "0101..." string, together with its estimated probability.
    proba.g.jointe <- sort(table(apply(chain$g[, 1:q], 1,
                                       function(l) paste(l, collapse=""))) / nrow(chain$g), decreasing = TRUE )[1]
    prob_joint[i] <- proba.g.jointe
    res_selec_joint[i, stringr::str_locate_all(names(proba.g.jointe), "1")[[1]][, 1]] <- 1
    res_selec_prob[i, ] <- colMeans(chain$g[, ])
    # Estimated coefficient curve of every variable selected marginally,
    # summarised over the iterations in which the variable was included.
    for(j in 1:q){
      if(res_selec_prob[i, j] >= 0.5){
        curve_beta[which(curve_beta$chain == i & curve_beta$pos == j), -(1:2)] <- object$settings$B %*% apply(chain$b[chain$g[, j]==1, , j], 2, summary.fct)
      }
    }
    if(!is.null(ENV)) for(j in 1:ncol(ENV)) curve_env[which(curve_env$chain == i & curve_env$pos == j), -(1:2)] <- object$settings$Be[, , j] %*% apply(chain$e[, , j], 2, summary.fct)
    curve_mu[which(curve_mu$chain == i), -(1:2)] <- object$settings$Bt %*% apply(chain$m[, ], 2, summary.fct)
    # Concatenate the draws of this chain with the previous ones.
    full_chain$pi <- c(full_chain$pi, chain$pi)
    full_chain$alpha <- c(full_chain$alpha, chain$alpha)
    full_chain$m <- rbind(full_chain$m, chain$m)
    full_chain$e <- DescTools::Abind(full_chain$e, chain$e, along = 1)
    full_chain$b <- DescTools::Abind(full_chain$b, chain$b, along = 1)
    full_chain$g <- rbind(full_chain$g, chain$g)
    full_chain$rho <- c(full_chain$rho, chain$rho)
    full_chain$se2 <- c(full_chain$se2, chain$se2)
  }
  object$full_chain <- full_chain
  object$estimation$marginal.probabilities <- g
  object$estimation$mean.marginal.probabilities <- colMeans(full_chain$g)
  object$estimation$res_selec_mar <- res_selec_mar
  object$estimation$joint_probabilities <- prob_joint
  object$estimation$res_selec_joint <- res_selec_joint
  object$estimation$res_selec_prob <- res_selec_prob
  object$estimation$curve_mu <- curve_mu
  if(!is.null(ENV)) object$estimation$curve_env <- curve_env else object$estimation$curve_env <- NULL
  object$estimation$curve_beta <- curve_beta
  object$estimation$pi <- summary.fct(full_chain$pi)
  object$estimation$rho <- summary.fct(full_chain$rho)
  object$estimation$alpha <- summary.fct(full_chain$alpha)
  object$estimation$se2 <- summary.fct(full_chain$se2)
  return(object)
}
#___________________________________________________________________________________________________________________
#' Plot the estimated functional effects
#'
#' @param object list generated by the \code{VCM_fct} function
#' @param mfrow a numerical vector of the form c(nr, nc). Subsequent figures will be drawn in an nr-by-nc array on the device by rows. The default is c(6, 7).
#' @param mar a numerical vector of the form c(bottom, left, top, right) which gives the number of lines of margin to be specified on the four sides of the plot. The default is c(1, 1, 4, 1).
#' @param plot character vector (subset of "Y", "mu", "env", "beta") naming the quantities whose curves are drawn. The default is c("Y", "mu", "env", "beta").
#' @param id variable indices for which the functional effects are plotted. By default it contains the indices of the variables whose posterior marginal probability exceeds 0.5.
#' @param add character vector (subset of "quantile", "matplot") indicating whether to add 95\% credible intervals and/or a matplot of the estimated curve of each repetition.
#' @param name optional character vector of variable names
#'
#' @return No return value; called for its side effect of drawing plots.
#' @export
#'
#' @example R/example_1.R
#'
plot_functional_effects <- function(object, mfrow = c(6, 7), mar = c(1, 1, 4, 1), plot = c("Y", "mu", "env", "beta"),
                                    id = which(object$estimation$mean.marginal.probabilities > 0.5), add = c("quantile", "matplot"),
                                    name = NULL)
{
  # Draw one panel summarising a set of estimated curves (rows = repetitions,
  # columns = time points): either a matplot of every repetition plus their
  # mean, or the mean alone; optionally with pointwise 95% credible bands.
  # 'add' is captured from the enclosing call.
  draw_panel <- function(curves, main, na.rm = FALSE) {
    if("matplot" %in% add){
      graphics::matplot(t(curves), t='l', col = "gray65", lty = 1, ylab = "estimation", xlab = "Time", main = main); graphics::abline(0, 0)
      graphics::lines(colMeans(curves, na.rm = na.rm), lwd = 2, t='l')
    }else{
      graphics::plot(colMeans(curves, na.rm = na.rm), lwd = 2, t='l', ylab = "estimation", xlab = "Time", main = main); graphics::abline(0, 0)
    }
    if("quantile" %in% add) graphics::lines(apply(curves, 2, stats::quantile, 0.025, na.rm = na.rm), lty = 3, t="l", lwd = 2)
    if("quantile" %in% add) graphics::lines(apply(curves, 2, stats::quantile, 0.975, na.rm = na.rm), lty = 3, t="l", lwd = 2)
  }
  if(is.null(name)) name <- 1:object$parameters$q
  graphics::par(mfrow = mfrow, mar = mar)
  ENV <- object$data$ENV
  if("Y" %in% plot){
    # Raw observed curves with their pointwise mean.
    Y <- object$data$Y
    graphics::matplot(t(Y), t='l', lty = 1, col = "gray65", main = "Y")
    graphics::lines(colMeans(Y), lwd = 2)
  }
  if("mu" %in% plot){
    draw_panel(object$estimation$curve_mu[, -c(1:2)], main = "mu")
  }
  if("env" %in% plot){
    if(!is.null(ENV)){
      if(is.null(colnames(ENV))) colnames(ENV) <- 1:ncol(ENV)
      for(i in 1:ncol(ENV)) {
        draw_panel(object$estimation$curve_env[which(object$estimation$curve_env$pos == i), -c(1:2)], main = paste0("env ", colnames(ENV)[i]))
      }
    }
  }
  if("beta" %in% plot){
    # Curves of the selected variables; rows of NA (repetitions in which the
    # variable was not selected) are dropped from the mean and the quantiles.
    for(i in id) {
      draw_panel(object$estimation$curve_beta[which(object$estimation$curve_beta$pos == i), -c(1:2)], main = paste0("beta ", name[i]), na.rm = TRUE)
    }
  }
}
#___________________________________________________________________________________________________________________
#' Plot convergence diagnostic of variable marginal posterior inclusion probabilities
#'
#' Draws, for every variable, the running average of its marginal posterior
#' inclusion probability as the number of MCMC repetitions grows; flat curves
#' indicate that the probabilities have stabilised.
#'
#' @param object list generated by the \code{VCM_fct} function
#'
#' @return No return value; called for its side effect of drawing a plot.
#' @export
#'
#' @example R/example_1.R
#'
plot_diagnostic_gamma <- function(object){
  rep <- object$parameters$rep
  # One row per variable, one column per repetition.
  g <- object$estimation$marginal.probabilities
  graphics::par(mfrow = c(1, 1), mar = c(4, 4, 4, 1))
  # apply(g, 1, cumsum) is (rep x q); dividing by seq_len(rep) (recycled
  # column-wise) turns the cumulative sums into running means.
  graphics::matplot(apply(g, 1, cumsum) / seq_len(rep), t="l", lty = 1, lwd = 2, xlab = "number of repetitions", ylab = "average marg. prob.", ylim = c(0, 1))
}
|
9e25c5f0014dd08823b6faad46995a5760e0ab8a | a7c4723087d16f75add07061726058060008c74e | /testCode.r | 3e4616171350976edbaef78cd30398a99556c7ab | [] | no_license | c-zhong/TriageBehaviorGraphAnalysis | 29c37aff35af2b9518c53e384c0c1188d58f6f4f | 25a492e5ab6253e1559946835f4a9f03255ba60c | refs/heads/master | 2021-05-28T21:01:06.651241 | 2015-02-25T19:16:21 | 2015-02-25T19:16:21 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 290 | r | testCode.r |
# Scratch script: builds a few small example graphs using igraph's
# graph.formula() notation ("--" denotes an undirected edge).
# NOTE(review): assumes library(igraph) has already been attached;
# graph.formula() is not referenced with igraph:: here.
g1<-graph.formula( A--B--C--D)     # path on 4 vertices
g2<-graph.formula( A--B--C, A--C)  # triangle
g3<-graph.formula( A--D, A--B, A--C)  # star centred on A
g4<-graph.formula( A--B, C--D, E)  # two disjoint edges plus an isolated vertex
# NOTE(review): 'g' is never defined in this file -- presumably one of
# g1..g4 was meant; confirm before running.
plot(g)
#linux
setwd("//home//amyamyamy//Dropbox//Work//Filter Graph Analysis//")
#windows
setwd("C://Dropbox//Work//Filter Graph Analysis//")
|
382cf5c8f7f97cfda2dec9c893f0d51b46fba6b0 | 70ef9078060688fd4fa238d717907d3ca7a3c931 | /run_jags.R | 4852eefb2d189ae2599bc45610342b7751c8bbe7 | [
"MIT"
] | permissive | claraqin/beta_regression | 7f90f486468135aed320307737f66b717f627bb8 | 0cd45bd19981b4add3ff1d8dde38a08f40de4cee | refs/heads/master | 2020-05-31T03:56:30.786751 | 2019-07-24T01:13:06 | 2019-07-24T01:13:06 | 190,090,461 | 4 | 0 | MIT | 2019-07-24T00:57:48 | 2019-06-03T22:23:04 | R | UTF-8 | R | false | false | 951 | r | run_jags.R | # Bayesian beta regression using JAGS: running the model
# NOTE(review): jags_mod below is JAGS model code evaluated by R2jags, not
# ordinary R -- '~' lines are stochastic declarations, and N, y, x1, x2 are
# supplied at run time through jags_data.
library(rjags)
library(R2jags)
# read in simulated data
jags_data <- readRDS("jags_data.rds")
# hierarchical model structure: beta likelihood with a logit link on the mean
jags_mod <- function() {
  # likelihood
  for(i in 1:N) {
    y[i] ~ dbeta(shape1[i], shape2[i])
    # mean/precision parameterisation of the beta distribution
    shape1[i] <- phi * mu[i]
    shape2[i] <- phi * (1 - mu[i])
    # inverse-logit of the linear predictor
    odds[i] <- exp(beta1 * x1[i] + beta2 * x2[i])
    mu[i] <- odds[i] / (1 + odds[i])
  }
  # priors
  # NOTE(review): in JAGS, dnorm(mean, tau) takes a *precision* tau, not a
  # standard deviation -- confirm tau = 5 is intended here.
  beta1 ~ dnorm(0,5)
  beta2 ~ dnorm(0,5)
  # NOTE(review): a normal prior puts mass on phi <= 0, which makes the beta
  # shape parameters invalid -- consider a truncated normal or gamma prior.
  phi ~ dnorm(100, 5)
}
# parameters to monitor
jags_par <- c("beta1", "beta2", "phi")
# run JAGS (4 parallel chains; system.time() reports the wall-clock cost)
system.time(
  jags_draws <- jags.parallel(data = jags_data,
                              model.file = jags_mod,
                              parameters.to.save = jags_par,
                              n.chains = 4, n.iter = 10000,
                              n.burnin = 1000)
)
# results
print(jags_draws)
plot(jags_draws)
traceplot(jags_draws)
ccfabad8dc3bd09064adc7b6f80bd980dec77d39 | c45e23e739438902c1d1e271a62b498b7a2c9cde | /MixRHLP/R/ModelMixRHLP.R | a39a597da91d6f89c5d3f7f5950ca47879a73ceb | [] | no_license | oxygenwu/MixFRHLP_R | 7354bd24ce49be1da2d8c81099309b5105887cb4 | 2f761b6a9fe1084de16a3a0922779cb0b0dedb64 | refs/heads/master | 2020-05-18T07:45:03.546979 | 2019-04-25T15:25:53 | 2019-04-25T15:25:53 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 922 | r | ModelMixRHLP.R | source("R/FData.R")
source("R/enums.R")
ModelMixRHLP <- setRefClass(
"ModelMixRHLP",
contains = "FData",
# Define the fields
fields = list(
G = "numeric",
# number of clusters
K = "numeric",
# number of regimes
p = "numeric",
# dimension of beta (order of polynomial regression)
q = "numeric",
# dimension of w (order of logistic regression)
variance_type = "numeric",
nu = "numeric" # degree of freedom
)
)
ModelMixRHLP <- function(fData, G, K, p, q, variance_type) {
if (variance_type == variance_types$homoskedastic) {
nu <<- (G - 1) + G * ((q+1) * (K-1) + K * (p+1) + 1)
}
else{
nu <<- (G - 1) + G * ((q+1) * (K-1) + K * (p+1) + K)
}
new(
"ModelMixRHLP",
Y = fData$Y,
X = fData$X,
m = fData$m,
n = fData$n,
vecY = fData$vecY,
G = G,
K = K,
p = p,
q = q,
variance_type = variance_type,
nu = nu
)
}
|
d0101545fdd3a6a2bc973f7902cc3230b7eadde7 | c787120d5e80587bf4ce62ae858d653d11743b18 | /man/create.roi.Rd | 7f9bb5ceabe9dcfeea7e2c9e3f44a71ba54a59af | [] | no_license | cran/erpR | d2105b844fce3dd3c0dc0e8b47fc33850cf2d68c | 6c33fb2cd818c0f9172cdba4253b99857eca0ac5 | refs/heads/master | 2021-01-20T12:45:06.305879 | 2014-05-14T00:00:00 | 2014-05-14T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,566 | rd | create.roi.Rd | \name{create.roi}
\alias{create.roi}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
create electrode region of interests
}
\description{
A utility function that collapses levels of an existing factor (typically electrode names) into a new vector coding the ROI (region of interest) each level belongs to.
It can be used only with data frames in long format.
}
\usage{
create.roi(datall, electrode="electrode", groups=NULL, roi.levels=NULL )
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{datall}{
a data frame containing ERP data in long format.
}
\item{electrode}{
name of the column in datall containing electrode names.
}
\item{groups}{
a list containing (in separate vectors) electrode names to create the ROI.\cr E.g. \code{list(c("Fp1", "Fp2"), c("P3", "P4"))}.
}
\item{roi.levels}{
a vector with the names of the newly created factor variable.\cr E.g. \code{c("Frontopolar", "Parietal")}.
}
}
\details{
All levels of the variable \code{electrode} that are not specified in \code{groups} will be coded as \code{NA} in the returned vector.
}
\value{
The function returns a vector with the new coded ROI variable.
}
\author{
Giorgio Arcara
}
\examples{
data(ERPsets)
datall=erp.mean(base = "Exp1_word_subj", numbers = 1:20,
win.ini = 400, win.end = 600, startmsec= -200, endmsec=1500, erplist=ERPsets)
datall$caudality=create.roi(datall, "electrode",
groups=list(c("Fp1", "Fp2"), c("P3", "P4")),
roi.levels=c("Frontopolar", "Parietal"))
table(datall$caudality, datall$electrode)
}
|
5470980b086d94f8a0c59bfca30105847933d1e9 | 29585dff702209dd446c0ab52ceea046c58e384e | /HardyWeinberg/inst/doc/HardyWeinberg.R | b2890eeaeb81c6c5697f2ceb8ab40348a4838960 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,587 | r | HardyWeinberg.R | ### R code from vignette source 'HardyWeinberg.Rnw'
### Encoding: ISO8859-1
###################################################
### code chunk number 1: HardyWeinberg.Rnw:736-737
###################################################
options(prompt = "R> ", continue = "+ ", width = 70, useFancyQuotes = FALSE)
###################################################
### code chunk number 2: HardyWeinberg.Rnw:740-742 (eval = FALSE)
###################################################
## install.packages("HardyWeinberg")
## library("HardyWeinberg")
###################################################
### code chunk number 3: HardyWeinberg.Rnw:753-754 (eval = FALSE)
###################################################
## vignette("HardyWeinberg")
###################################################
### code chunk number 4: HardyWeinberg.Rnw:768-771
###################################################
library("HardyWeinberg")
x <- c(MM = 298, MN = 489, NN = 213)
HW.test <- HWChisq(x, verbose = TRUE)
###################################################
### code chunk number 5: HardyWeinberg.Rnw:792-793
###################################################
HW.test <- HWChisq(x, cc = 0, verbose = TRUE)
###################################################
### code chunk number 6: HardyWeinberg.Rnw:801-802
###################################################
HW.lrtest <- HWLratio(x, verbose = TRUE)
###################################################
### code chunk number 7: HardyWeinberg.Rnw:810-811
###################################################
HW.exacttest <- HWExact(x, verbose = TRUE)
###################################################
### code chunk number 8: HardyWeinberg.Rnw:830-832
###################################################
set.seed(123)
HW.permutationtest <- HWPerm(x, verbose = TRUE)
###################################################
### code chunk number 9: HardyWeinberg.Rnw:846-848
###################################################
x <- c(MN = 489, NN = 213, MM = 298)
HW.test <- HWChisq(x, verbose = TRUE)
###################################################
### code chunk number 10: HardyWeinberg.Rnw:866-867
###################################################
HW.results <- HWAlltests(x, verbose = TRUE, include.permutation.test = TRUE)
###################################################
### code chunk number 11: HardyWeinberg.Rnw:878-880
###################################################
data(Markers)
Markers[1:12,]
###################################################
### code chunk number 12: HardyWeinberg.Rnw:893-897
###################################################
Xt <- table(Markers[,1])
Xv <- as.vector(Xt)
names(Xv) <- names(Xt)
HW.test <- HWChisq(Xv,cc=0,verbose=TRUE)
###################################################
### code chunk number 13: HardyWeinberg.Rnw:909-911
###################################################
set.seed(123)
Results <- HWMissing(Markers[,1], m = 50, method = "sample", verbose=TRUE)
###################################################
### code chunk number 14: HardyWeinberg.Rnw:929-931
###################################################
set.seed(123)
Results <- HWMissing(Markers[, 1:5], m = 50, verbose = TRUE)
###################################################
### code chunk number 15: HardyWeinberg.Rnw:940-942
###################################################
set.seed(123)
Results <- HWMissing(Markers[, 1:5], m = 50, statistic = "exact", verbose = TRUE)
###################################################
### code chunk number 16: HardyWeinberg.Rnw:954-956
###################################################
SNP1 <- c(A=399,B=205,AA=230,AB=314,BB=107)
HWChisq(SNP1,cc=0,x.linked=TRUE,verbose=TRUE)
###################################################
### code chunk number 17: HardyWeinberg.Rnw:961-962
###################################################
HWChisq(SNP1[3:5],cc=0)
###################################################
### code chunk number 18: HardyWeinberg.Rnw:970-971
###################################################
HWExact(SNP1,x.linked=TRUE)
###################################################
### code chunk number 19: HardyWeinberg.Rnw:976-977
###################################################
HWExact(SNP1,x.linked=TRUE,pvaluetype="midp")
###################################################
### code chunk number 20: HardyWeinberg.Rnw:982-983
###################################################
HWExact(SNP1[3:5])
###################################################
### code chunk number 21: HardyWeinberg.Rnw:988-989
###################################################
HWPerm(SNP1,x.linked=TRUE)
###################################################
### code chunk number 22: HardyWeinberg.Rnw:994-995
###################################################
HWLratio(SNP1,x.linked=TRUE)
###################################################
### code chunk number 23: HardyWeinberg.Rnw:1000-1001
###################################################
HWAlltests(SNP1,x.linked=TRUE,include.permutation.test=TRUE)
###################################################
### code chunk number 24: HardyWeinberg.Rnw:1006-1007
###################################################
AFtest(SNP1)
###################################################
### code chunk number 25: HardyWeinberg.Rnw:1033-1042
###################################################
x <- c(MM = 298, MN = 489, NN = 213)
n <- sum(x)
nM <- mac(x)
pw4 <- HWPower(n, nM, alpha = 0.05, test = "exact", theta = 4,
pvaluetype = "selome")
print(pw4)
pw8 <- HWPower(n, nM, alpha = 0.05, test = "exact", theta = 8,
pvaluetype = "selome")
print(pw8)
###################################################
### code chunk number 26: HardyWeinberg.Rnw:1115-1138
###################################################
set.seed(123)
n <- 100
m <- 100
X1 <- HWData(m, n, p = rep(0.5, m))
X2 <- HWData(m, n)
X3 <- HWData(m, n, p = rep(0.5, m), f = rep(0.5, m))
X4 <- HWData(m, n, f = rep(0.5, m))
X5 <- HWData(m, n, p = rep(c(0.2, 0.4, 0.6, 0.8), 25), pfixed = TRUE)
X6 <- HWData(m, n, exactequilibrium = TRUE)
opar <- par(mfrow = c(3, 2),mar = c(1, 0, 3, 0) + 0.1)
par(mfg = c(1, 1))
HWTernaryPlot(X1, main = "(a)", vbounds = FALSE)
par(mfg = c(1, 2))
HWTernaryPlot(X2, main = "(b)", vbounds = FALSE)
par(mfg = c(2, 1))
HWTernaryPlot(X3, main = "(c)", vbounds = FALSE)
par(mfg = c(2, 2))
HWTernaryPlot(X4, main = "(d)", vbounds = FALSE)
par(mfg = c(3, 1))
HWTernaryPlot(X5, main = "(e)", vbounds = FALSE)
par(mfg = c(3, 2))
HWTernaryPlot(X6, main = "(f)", vbounds = FALSE)
par(opar)
###################################################
### code chunk number 27: HardyWeinberg.Rnw:1163-1166 (eval = FALSE)
###################################################
## data("HapMapCHBChr1", package = "HardyWeinberg")
## HWTernaryPlot(HapMapCHBChr1, region = 1, vbounds = FALSE)
## HWTernaryPlot(HapMapCHBChr1, region = 7, vbounds = FALSE)
###################################################
### code chunk number 28: HardyWeinberg.Rnw:1199-1206 (eval = FALSE)
###################################################
## set.seed(123)
## data("HapMapCHBChr1", package = "HardyWeinberg")
## HWQqplot(HapMapCHBChr1)
## dev.off()
## set.seed(123)
## SimulatedData <- HWData(nm = 225, n = 84, p = af(HapMapCHBChr1))$Xt
## HWQqplot(SimulatedData)
|
e051c006916da49139481778887397346e75d036 | 113b03cf0597cdb6af61fe9d47268ceb5215c86a | /R/flag_suicide_types.R | 43b56a57774a1f45f9c17113b98548697f86b183 | [
"MIT"
] | permissive | mkiang/narcan | a26e529d600d9f71ae6bec83b1ae7592069dd14a | 5627c00debf4db8a939889210c9e167a2578f6e7 | refs/heads/master | 2023-04-22T09:48:55.612283 | 2023-04-19T15:39:20 | 2023-04-19T15:39:20 | 102,496,439 | 9 | 0 | null | 2018-03-13T17:09:07 | 2017-09-05T15:09:55 | R | UTF-8 | R | false | false | 2,168 | r | flag_suicide_types.R | #' Flag suicide five types: firearm, poisoning, fall, suffocation, or other
#'
#' Adds five 0/1 indicator columns (firearm, poisoning, fall, suffocation,
#' other) by applying each of the flag_suicide_* helpers in turn.
#'
#' TODO: Make this also work with ICD-9
#'
#' @param df processed MCOD dataframe
#'
#' @return the input dataframe with the five suicide_* indicator columns added
#' @importFrom dplyr mutate
#' @export
flag_suicide_types <- function(df) {
  # Same order as the original pipeline: firearm is applied first (innermost).
  flag_suicide_other(
    flag_suicide_suffocation(
      flag_suicide_fall(
        flag_suicide_poison(
          flag_suicide_firearm(df)
        )
      )
    )
  )
}
#' Flag suicide by firearm
#'
#' Flags records whose underlying cause of death (\code{ucod}) matches the
#' ICD-10 codes X72-X74.
#'
#' TODO: MAKE WORK WITH ICD-9
#'
#' @param df a processed MCOD dataframe
#'
#' @return dataframe with a 0/1 \code{suicide_firearm} column added
#' @importFrom dplyr mutate
#' @export
flag_suicide_firearm <- function(df) {
  # grepl() yields TRUE/FALSE; "+ 0" coerces the flag to numeric 0/1.
  mutate(df, suicide_firearm = grepl("X7[234]", ucod) + 0)
}
#' Flag suicide by poison
#'
#' Flags records whose underlying cause of death (\code{ucod}) matches the
#' ICD-10 codes X60-X69.
#'
#' TODO: MAKE WORK WITH ICD-9
#'
#' @param df a processed MCOD dataframe
#'
#' @return dataframe with a 0/1 \code{suicide_poison} column added
#' @importFrom dplyr mutate
#' @export
flag_suicide_poison <- function(df) {
  # "X6" followed by exactly one digit covers X60 through X69.
  mutate(df, suicide_poison = grepl("X6\\d{1}", ucod) + 0)
}
#' Flag suicide by fall
#'
#' Flags records whose underlying cause of death (\code{ucod}) matches the
#' ICD-10 code X80.
#'
#' TODO: MAKE WORK WITH ICD-9
#'
#' @param df a processed MCOD dataframe
#'
#' @return dataframe with a 0/1 \code{suicide_fall} column added
#' @importFrom dplyr mutate
#' @export
flag_suicide_fall <- function(df) {
  mutate(df, suicide_fall = grepl("X80", ucod) + 0)
}
#' Flag suicide by suffocation
#'
#' Flags records whose underlying cause of death (\code{ucod}) matches the
#' ICD-10 code X70.
#'
#' TODO: MAKE WORK WITH ICD-9
#'
#' @param df a processed MCOD dataframe
#'
#' @return dataframe with a 0/1 \code{suicide_suffocation} column added
#' @importFrom dplyr mutate
#' @export
flag_suicide_suffocation <- function(df) {
  mutate(df, suicide_suffocation = grepl("X70", ucod) + 0)
}
#' Flag suicide by other means (not poison, fall, firearm, suffocation)
#'
#' ICD-10 codes: U03, X71, X75-X79, X81-X84, Y870
#'
#' TODO: MAKE WORK WITH ICD-9
#'
#' @param df a processed MCOD dataframe
#'
#' @return dataframe with a 0/1 \code{suicide_other} column added
#' @importFrom dplyr mutate
#' @export
flag_suicide_other <- function(df) {
  # "|" in the pattern separates the alternative code groups listed above.
  mutate(df, suicide_other = grepl("U03|X7[156789]|X8[1234]|Y870",
                                   ucod) + 0)
}
|
a995069e9d3d5c33d975d8be5462750a823e9a56 | 8a768c89ea59d7f29795d3750901378828b7b2c2 | /plot3.R | 5472627847bf8c014b89bcb759b592cd0f508155 | [] | no_license | wojaw/ExData_Plotting1 | fd20baed77b2cf8da1ba6db54efc8c8611353869 | 2dbca4336bc98206b194d16f5e78a5885f5badc2 | refs/heads/master | 2020-07-15T07:23:51.025179 | 2014-09-07T23:03:27 | 2014-09-07T23:03:27 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 347 | r | plot3.R | png('plot3.png', width=480, height=480)
# Course-assignment plot: three sub-metering series drawn on one chart.
# NOTE(review): relies on objects created by earlier scripts in this repo --
# 'data' (with the Sub_metering_* columns) and 'plot2_days' (x positions of
# the day ticks); neither is defined in this file. A png device is opened by
# the png() call above, and closed by dev.off() below.
# NOTE(review): axes=F uses the reassignable alias F; prefer FALSE.
plot(data$Sub_metering_1, axes=F, xlab=NA, ylab='Energy sub metering', type='l')
lines(data$Sub_metering_2, col=2)  # red
lines(data$Sub_metering_3, col=4)  # blue
box('plot')
axis(1, at=plot2_days, labels=c('Thu','Fri','Sat'))
axis(2)
# NOTE(review): paste0('Sub_metering', 1:3) gives "Sub_metering1" etc. --
# presumably "Sub_metering_1" (with underscore) was intended; confirm.
legend('topright', lty=1, col=c(1,2,4), legend=paste0('Sub_metering', 1:3))
dev.off()
|
makeCacheMatrix <- function(xsolve = matrix())
{
  # Build a "cache-aware matrix": four accessor closures that share the matrix
  # 'xsolve' and a memoised inverse 'inv' through their enclosing environment.
  # cacheSolve() uses these accessors to avoid recomputing the inverse of an
  # unchanged matrix.
  inv <- NULL
  set <- function(y) {
    # Store a new matrix and invalidate any inverse cached for the old one.
    xsolve <<- y
    inv <<- NULL
  }
  get <- function() {
    xsolve
  }
  setsolve <- function(solve) {
    inv <<- solve
  }
  getsolve <- function() {
    inv
  }
  list(set = set, get = get,
       setsolve = setsolve,
       getsolve = getsolve)
}
cacheSolve <- function(msolve=matrix(), ...)
{
  # Return the inverse of the matrix wrapped by makeCacheMatrix(): compute it
  # with solve() on the first call, serve the memoised copy on later calls.
  # Extra arguments in '...' are forwarded to solve().
  cached <- msolve$getsolve()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  fresh <- solve(msolve$get(), ...)
  msolve$setsolve(fresh)
  fresh
}
|
02c4e08da4305181092892ac59ff6decb4ac0200 | 973cf626ce20a6facdff3e08bc7f5b7f7553faa8 | /BioStats2/q1.R | 80caeaae5d13e88be344556495b85b4890703a0f | [] | no_license | RobertCPhillips/CourseraJohnHopkinsDataScience | c9b3ca59f208e9cc0c24e40674eeffd01b329824 | e0fc46b404d76e1aa689d85b72bd7c13dea95c90 | refs/heads/master | 2021-01-23T12:17:16.871163 | 2016-01-02T15:33:38 | 2016-01-02T15:33:38 | 22,586,700 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,944 | r | q1.R | #--------------------------------------------------------------------------------
#1 - one sided test: lower bound of the rejection region at alpha = .05
#--------------------------------------------------------------------------------
n <- 100
x <- 12          # sample mean
s <- 4           # sample standard deviation
a <- .05
za <- qnorm(1-a)
se <- s/sqrt(n)
rr <- x - za*se  # reject H0 for sample means below this value
#--------------------------------------------------------------------------------
#2 - paired and unpaired comparisons of the same five subjects
#--------------------------------------------------------------------------------
n <- 5
x1 <- c(140,138,150,148,135)
x2 <- c(138,136,148,146,133)
#paired
d <- x2-x1
d_bar <- mean(d)
d_sd <- sd(d)    # every difference is -2, so d_sd (and se) is 0
se <- d_sd/sqrt(n)
#ts <- d_bar/se # se=0, implies inf?
#unpaired (pooled-variance two-sample t test)
n1 <- n2 <- n
x1b <- mean(x1)
s1 <- sd(x1)
x2b <- mean(x2)
s2 <- sd(x2)
d <- x1b - x2b
sp <- sqrt(((n1 - 1) * s1^2 + (n2 - 1) * s2^2)/(n1 + n2 - 2))
se <- sp*sqrt(1/n1 + 1/n2)
t <- d/se        # note: masks base::t() for the rest of the script
ta <- qt(.025,df=n1+n2-2)
r <- t < ta      # FALSE: statistic is not in the lower rejection region
#--------------------------------------------------------------------------------
#4 - one sided t test, paired sample, test that new has lower times
#5 - 95% ci for ratio of waiting times
#--------------------------------------------------------------------------------
n <- 5
#log hours
new <- c(0.929, -1.745, 1.677, 0.701, 0.128)
old <- c(2.233, -2.513, 1.204, 1.938, 2.533)
d <- new - old
d_bar <- mean(d)
d_sd <- sd(d)
se <- d_sd/sqrt(n)
t <- d_bar/se
p <- pt(t, df=n-1, lower.tail=TRUE)  # was lower.tail=T; TRUE cannot be masked
new2 <- exp(new)
old2 <- exp(old)
ratio <- new2/old2
ratio.ln <- log(ratio)   # identical to d = new - old on the log scale
ci <- mean(ratio.ln) + c(-1,1)*qt(.95,df=n-1)*sd(ratio.ln)/sqrt(n)
ci.a <- exp(ci)          # back-transform the CI to the ratio scale
#--------------------------------------------------------------------------------
#6 - 2 groups independent, 2 sided test, followup - baseline
#--------------------------------------------------------------------------------
n1 <- n2 <- 9
xdt <- -3        # mean change, treated group
xdp <- 1         # mean change, placebo group
st <- 1.5        # sd of change, treated group
s_placebo <- 1.8 # sd of change, placebo group (was also named 'sp'; renamed
                 # so the pooled sd below no longer shadows its own input)
sp <- sqrt(((n1 - 1) * st^2 + (n2 - 1) * s_placebo^2)/(n1 + n2 - 2))
se <- sp*sqrt(1/n1 + 1/n2)
t <- (xdt - xdp)/se
p <- pt(t,df=n1+n2-2) * 2
|
893cf762af4966bf72daaa335f55c4d56d434633 | 6cbb51fe996e65a51a8d9f2f35e3159721933f25 | /inst/shiny/ui_10_1_TSCAN.R | 4370cb3aeb63f9be6ee6f1dcad91ca184a217d41 | [
"MIT"
] | permissive | compbiomed/singleCellTK | 927fb97e257ba89cddee9a90f9cb7cb375a5c6fb | 990e89e7ccfbf663f23c793454f72fb8c6878a32 | refs/heads/master | 2023-08-11T09:17:41.232437 | 2023-07-26T20:43:47 | 2023-07-26T20:43:47 | 68,756,293 | 144 | 89 | NOASSERTION | 2023-09-06T18:22:08 | 2016-09-20T21:50:24 | R | UTF-8 | R | false | false | 17,054 | r | ui_10_1_TSCAN.R | # User Interface for TSCAN ---
# UI tree for the "Trajectory Analysis - TSCAN" tab.  This file only declares
# layout and input/output IDs; the matching server code (elsewhere in the app)
# populates choices and renders the plots.  Relies on globals defined
# elsewhere: docs.artPath, currreddim, featureChoice, nonLinearWorkflowUI.
shinyPanelTSCAN <- fluidPage(
  # Dropdown closing script ####
  # Custom-message handlers: the server sends e.g. 'close_dropDownTSCAN' to
  # close the matching settings dropdown by simulating an outside click.
  tags$script("Shiny.addCustomMessageHandler('close_dropDownTSCAN', function(x){
                $('html').click();
                });"),
  tags$script("Shiny.addCustomMessageHandler('close_dropDownTscanDE', function(x){
                $('html').click();
                });"),
  tags$script("Shiny.addCustomMessageHandler('close_dropDownTscanClusterDEG', function(x){
                $('html').click();
                });"),
  h1("Trajectory Analysis - TSCAN"),
  h5(tags$a(href = paste0(docs.artPath, "trajectoryAnalysis.html"),
            "(help)", target = "_blank")),
  inlineCSS(list(".panel-danger>.panel-heading" = "background-color:#dcdcdc; color:#000000",
                 ".panel-primary>.panel-heading" = "background-color:#f5f5f5; color:#000000; border-color:#dddddd",
                 ".panel-primary" = "border-color:#dddddd;",
                 ".panel-primary>.panel-heading+.panel-collapse>.panel-body" = "border-color:#dddddd;")),
  # Four collapsible panels: (1) pseudotime/MST calculation, (2) DE genes along
  # a path, (3) DE genes for a branched cluster, (4) free feature-expression plot.
  bsCollapse(
    id = "TSCANUI",
    open = "Calculate Pseudotime Values",
    bsCollapsePanel(
      # Collapse 1, Get MST ####
      "Calculate Pseudotime Values",
      fluidRow(
        column(
          4,
          # Left column: inputs consumed by the server when "Run" is pressed.
          panel(
            selectInput("TSCANReddim", "Select input dimension reduction:", currreddim),
            selectInput("TSCANclusterName", "Select clustering result: ",
                        "Auto generate clusters", selected = NULL),
            conditionalPanel(
              condition = 'input.TSCANclusterName == "Auto generate clusters"',
              numericInput(inputId = "seed_TSCAN",
                           label = "Seed value for reproducibility of result:",
                           value = 12345,
                           step = 1)
            ),
            actionButton("TSCANRun", "Run")
          )
        ),
        column(
          8,
          panel(
            fluidRow(
              column(
                width = 3,
                dropdown(
                  fluidRow(
                    column(
                      12,
                      fluidRow(actionBttn(inputId = "closeDropDownTSCAN",
                                          label = NULL, style = "simple",
                                          color = "danger", icon = icon("times"),
                                          size = "xs"),
                               align = "right"),
                      selectInput("TSCANVisRedDim", "Select 2D embedding for visualization:", currreddim),
                      actionBttn(
                        inputId = "TSCANPlot",
                        label = "Update",
                        style = "bordered",
                        color = "primary",
                        size = "sm"
                      )
                    )
                  ),
                  inputId = "dropDownTSCAN",
                  icon = icon("cog"),
                  status = "primary",
                  circle = FALSE,
                  inline = TRUE
                ),
              ),
              column(
                width = 9,
                fluidRow(
                  column(
                    width = 12,
                    h6(
                      "A scatter plot of the selected low-dimensionality representation of the dataset will be generated, with the calculated pseudotime colored on each dot (cell). The MST is also projected to the cells."
                    )
                  ),
                  align="center"
                )
              )
            ),
            hr(),
            shinyjqui::jqui_resizable(plotOutput("TSCANPlot"))
          )
        )
      ),
      style = "primary"
    ),
    bsCollapsePanel(
      # Collapse 2, DEG along selected path ####
      "Identify Genes Differentially Expressed For Path",
      fluidRow(
        column(
          4,
          panel(
            selectizeInput(
              inputId = "TSCANassayselect",
              label = "Select input matrix:",
              choices = NULL,
              selected = NULL,
              multiple = FALSE,
              options = NULL),
            pickerInput("pathIndexx", "Select path terminal node:",
                        choices = "", multiple = FALSE),
            pickerInput("discardCluster",
                        "Select cluster(s) to discard (OPTIONAL):",
                        choices = NULL,
                        selected = NULL,
                        multiple = TRUE,
                        options = list(
                          `none-selected-text` = "No cluster discarded"
                        )),
            actionButton("runTSCANDEG", "Run")
          )
        ),
        column(
          8,
          panel(
            fluidRow(
              column(
                width = 3,
                dropdown(
                  fluidRow(
                    column(
                      12,
                      fluidRow(
                        actionBttn(inputId = "closeDropDownTscanDE",
                                   label = NULL, style = "simple",
                                   color = "danger", icon = icon("times"),
                                   size = "xs"),
                        align = "right"
                      ),
                      selectInput("tscanDEexpPathIndex",
                                  "Select path terminal node:",
                                  choices = "", multiple = FALSE),
                      numericInput(inputId = "tscanDEHMTopGenes",
                                   label = "Number of top features for heatmap",
                                   value = 30,
                                   step = 1),
                      numericInput(inputId = "tscanDERegTopGenes",
                                   label = "Number of top features for regulation plots",
                                   value = 10,
                                   step = 1),
                      selectInput("tscanDEFeatureDisplay",
                                  "Display ID Type",
                                  c("Rownames (Default)",
                                    featureChoice)),
                      actionBttn(
                        inputId = "tscanDEPlot",
                        label = "Update",
                        style = "bordered",
                        color = "primary",
                        size = "sm"
                      )
                    )
                  ),
                  inputId = "dropDownTscanDE",
                  icon = icon("cog"),
                  status = "primary",
                  circle = FALSE,
                  inline = TRUE
                )
              ),
              column(
                width = 9,
                fluidRow(
                  column(
                    width = 12,
                    h6(
                      "Visualization on top genes that have significant expression changes along the pseudotime path of insterest."
                    )
                  ),
                  align = "center"
                )
              )
            ),
            hr(),
            tabsetPanel(
              tabPanel(
                # Tab 2.1, DEG Heatmap ####
                "Heatmap",
                panel(
                  fluidRow(
                    column(
                      width = 12,
                      h6(
                        "A heatmap of the expression of the top DE genes along the path in the cells on the path. "
                      )
                    ),
                  ),
                  hr(),
                  shinyjqui::jqui_resizable(
                    plotOutput(outputId = "heatmapPlot")
                  )
                )
              ),
              tabPanel(
                # Tab 2.2, Gene expression increasing along pseudotime ####
                "Up-regulated Genes",
                panel(
                  fluidRow(
                    column(
                      width = 12,
                      h6(
                        "A cell scatter plot showing the expression change along the pseudotime. Genes with top significance in increasing expression along the pseudotime are displayed."
                      )
                    ),
                    align="center"
                  ),
                  hr(),
                  shinyjqui::jqui_resizable(
                    plotOutput(outputId = "UpregGenesPlot")
                  )
                )
              ),
              tabPanel(
                # Tab 2.3, Gene expression decreasing along pseudotime ####
                "Down-regulated Genes",
                panel(
                  fluidRow(
                    column(
                      width = 12,
                      h6(
                        "A cell scatter plot showing the expression change along the pseudotime. Genes with top significance in decreasing expression along the pseudotime are displayed."
                      )
                    ),
                    align="center"
                  ),
                  hr(),
                  shinyjqui::jqui_resizable(
                    plotOutput(outputId = "DownregGenesPlot")
                  )
                )
              )
            )
          )
        )
      ),
      style = "primary"
    ),
    bsCollapsePanel(
      # Collapse 3, DEG between branches of selected cluster ####
      "Identify Genes Differentially Expressed For Branched Cluster",
      fluidRow(
        column(
          4,
          panel(
            selectInput("TSCANUseCluster",
                        "Select branched cluster of interest:",
                        choices = NULL),
            selectizeInput(
              inputId = "TSCANBranchAssaySelect",
              label = "Select input matrix:",
              choices = NULL,
              selected = NULL,
              multiple = FALSE,
              options = NULL),
            numericInput(inputId = "fdrThreshold_TSCAN",
                         label = "FDR less than:",
                         value = 0.05,
                         step = 0.01),
            actionButton("findDEGenes", "Run")
          )
        ),
        column(
          8,
          panel(
            fluidRow(
              column(
                width = 3,
                dropdown(
                  fluidRow(
                    actionBttn(inputId = "closeDropDownTscanClusterDEG",
                               label = NULL, style = "simple",
                               color = "danger", icon = icon("times"),
                               size = "xs"),
                    align = "right"
                  ),
                  selectInput("plotTSCANClusterDEG_useCluster",
                              "Select branched cluster of interest:",
                              choices = "", multiple = FALSE),
                  pickerInput("plotTSCANClusterDEG_pathIndex",
                              "Select Path Index:",
                              choices = NULL,
                              choicesOpt = NULL,
                              selected = NULL),
                  selectInput("plotTSCANClusterDEG_useReducedDim",
                              "Select 2D embedding for visualization:",
                              currreddim),
                  numericInput("plotTSCANClusterDEG_topN",
                               label = "Number of top features to plot:",
                               value = 4, min = 1, step = 1),
                  selectInput("plotTSCANClusterDEG_featureDisplay",
                              "Display ID Type",
                              c("Rownames (Default)",
                                featureChoice)),
                  actionBttn(
                    inputId = "plotTSCANClusterDEG",
                    label = "Update",
                    style = "bordered",
                    color = "primary",
                    size = "sm"
                  ),
                  inputId = "dropDownTscanClusterDEG",
                  icon = icon("cog"),
                  status = "primary",
                  circle = FALSE,
                  inline = TRUE
                )
              ),
              column(
                width = 9,
                fluidRow(
                  column(
                    width = 12,
                    h6(
                      "Visualization and tables of top DE genes on different branch path of the cluster of interest."
                    )
                  ),
                  align="center"
                )
              )
            ),
            hr(),
            tabsetPanel(
              tabPanel(
                # Tab 3.1, feature cluster expression scatter ####
                "Top Feature Plot",
                panel(
                  fluidRow(
                    column(
                      width = 12,
                      h6(
                        "Scatter plots on the selected low-dimension representation of cells in the selected cluster, colored by the expression of top features differentially expressed in the selected branch path. Local MST overlaid."
                      )
                    ),
                    align="center"
                  ),
                  hr(),
                  shinyjqui::jqui_resizable(
                    plotOutput(outputId = "tscanCLusterDEG")
                  )
                )
              ),
              tabPanel(
                # Tab 3.2, Datatable ####
                "Top Feature Table",
                panel(
                  fluidRow(
                    column(
                      width = 12,
                      h6(
                        "A table of top features differentially expressed in the selected branch path of the selected cluster, with statistical metrics displayed."
                      )
                    ),
                    align="center"
                  ),
                  hr(),
                  DT::dataTableOutput("tscanCLusterDEGTable")
                )
              ),
              tabPanel(
                # Tab 3.3, scatter plots of pseudotime ####
                "Pseudotime",
                panel(
                  fluidRow(
                    column(
                      width = 12,
                      h6(
                        "Scatter plots on the selected low-dimension representation of cells in the selected cluster, colored by the recomputed pseudotime value on each of the branching path. Local MST overlaid."
                      )
                    ),
                    align="center"
                  ),
                  hr(),
                  shinyjqui::jqui_resizable(
                    plotOutput(outputId = "tscanCLusterPeudo")
                  )
                )
              )
            )
          )
        )
      ),
      style = "primary"
    ),
    bsCollapsePanel(
      # Collanpse 4, free plot ####
      "Plot feature expression on trajectory",
      fluidRow(
        column(
          4,
          panel(
            selectizeInput(
              inputId = "plotTSCANDimReduceFeatures_useAssay",
              label = "Select expression matrix:",
              choices = NULL,
              selected = NULL,
              multiple = FALSE,
              options = NULL),
            selectizeInput("plotTSCANDimReduceFeatures_features",
                           label = "Select feature(s):", NULL,
                           multiple = TRUE),
            selectInput("plotTSCANDimReduceFeatures_useReducedDim",
                        "Select 2D embedding for visualization:", currreddim),
            pickerInput("plotTSCANDimReduceFeatures_useCluster",
                        "Show cluster(s) of interest:",
                        choices = NULL,
                        selected = NULL,
                        multiple = TRUE,
                        options = list(
                          `none-selected-text` = "Show all"
                        )),
            selectInput("plotTSCANDimReduceFeatures_featureDisplay",
                        "Display ID Type",
                        c("Rownames (Default)",
                          featureChoice)),
            actionButton("plotTSCANDimReduceFeatures", "Plot")
          )
        ),
        column(
          8,
          panel(
            fluidRow(
              column(
                width = 12,
                h6(
                  "Scatter plots on the selected low-dimension representation of cells in the selected cluster, colored by the expression of selected features, with the MST overlaid."
                )
              ),
              align="center"
            ),
            hr(),
            shinyjqui::jqui_resizable(
              plotOutput("TscanDimReduceFeatures")
            )
          )
        )
      ),
      style = "primary"
    )
  ),
  # Shared "what next" panel appended below the TSCAN panels.
  nonLinearWorkflowUI(id = "nlw-Traj")
)
|
c3392eded79bdd00c2876539ebb91be7ffb79aa6 | 9772d0d606d8cd06284d4dbc85ce8a6110aef08c | /R/residuals_multiflashlight.R | 3af5ff4f7c2ec01f96c949a131662c6fa063816c | [] | no_license | cran/flashlight | abeb64b4ed5efc2ecdbd0ec5f6cfac7619161838 | ffbc6ad57eab440ae6b7f5702ca16d43f534077a | refs/heads/master | 2023-05-24T21:33:00.607373 | 2023-05-10T01:40:06 | 2023-05-10T01:40:06 | 209,293,489 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 897 | r | residuals_multiflashlight.R | #' Residuals for multiflashlight
#'
#' Residuals method for an object of class "multiflashlight".
#' Pass additional elements to update the multiflashlight before calculation of
#' residuals.
#'
#' @param object An object of class "multiflashlight".
#' @param ... Arguments used to update the multiflashlight before
#' calculating the residuals.
#' @returns A named list with residuals per flashlight.
#' @export
#' @examples
#' fit_part <- lm(Sepal.Length ~ Petal.Length, data = iris)
#' fit_full <- lm(Sepal.Length ~ ., data = iris)
#' mod_full <- flashlight(model = fit_full, label = "full")
#' mod_part <- flashlight(model = fit_part, label = "part")
#' mods <- multiflashlight(list(mod_full, mod_part), data = iris, y = "Sepal.Length")
#' residuals(mods, data = head(iris))
residuals.multiflashlight <- function(object, ...) {
  # Collect the residuals of each flashlight in the multiflashlight, keeping
  # the element names of the input list; extra arguments are forwarded to
  # each per-flashlight residuals() call.
  out <- vector("list", length(object))
  names(out) <- names(object)
  for (i in seq_along(object)) {
    out[[i]] <- stats::residuals(object[[i]], ...)
  }
  out
}
|
93eb767ed7fed79baac08c48eb3053454f6c4bbe | 4cdeb4d6f2c0574f73f958a32a25263b63e568b3 | /src/diagnose/covid_deck/MAIN_fit_ELG.R | 7037dc62347259a5b517190c3e97c84b0dd872bb | [] | no_license | jiachengwang2019/tsforecast_val | 5a136a730bdad0a8e25fcbc181aa2a44c3cab868 | e1ee5b14554502e4bd3fc7da144c6c6038f97767 | refs/heads/master | 2022-11-05T20:32:05.215093 | 2020-06-29T13:28:51 | 2020-06-29T13:28:51 | 273,545,643 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,261 | r | MAIN_fit_ELG.R | library(here)
library(RMySQL)
library(dplyr)
# Project helper functions: data prep, forecasting (fit_champion_arima),
# evaluation and plotting all come from these sourced files.
source(here::here("src","common_codes","parameters_CENT.R"))
source(here::here("src","common_codes","data_prep_functions_v2.R"))
source(here::here("src","common_codes","forecasting_functions.R"))
source(here::here("src","common_codes","model_evaluation.R"))
source(here::here("src","common_codes","plotting_functions.R"))
# NOTE(review): warn = -1 silences ALL warnings for the whole session, which
# can hide real data problems during the model fits below.
options(warn = -1,digits = 3,verbose = F,error = NULL)
#::::::::::::::::::::::::::::: READ THE MODEL DETAILS
# Run configuration: stream to model, network type, and the network analysed
# in this run (net_list documents the other candidates).
prev_ver = '0'
refresh = 'LRP'
add_LS = F
str_ = 'L_Imps'
net_type = 'Cable'
net_list <- list('USA', 'BRVO', 'ENT', 'SYFY', 'OXYG', 'NBC')
net_ = 'USA'
# set-up spark connection
SparkR::sparkR.session()
sc <- sparklyr::spark_connect(method = "databricks")
# Drop SL_NT_Daypart from the weekly grouping columns defined in the sourced
# parameter file.
gen_weekly_group_by_cols = gen_weekly_group_by_cols[! gen_weekly_group_by_cols %in% c('SL_NT_Daypart')]
# NOTE(review): the commented-out connection embeds database credentials in
# source; if re-enabled, move user/password to environment variables.
#connection <- dbConnect(MySQL(), user = 'forecast_master', password = 'Line#rForec#st#1221',
#                        host = 'linear-forecasting-prod.cqwbwa84updc.us-east-1.rds.amazonaws.com', dbname = 'linear_forecasting_outputs')
#model_details <- DBI::dbGetQuery(connection,"SELECT * FROM LRPmodelDetails WHERE current_prod = 1")
# NOTE(review): this creates a plain global variable and does NOT affect
# read.csv() below -- it was probably meant as a read.csv(...,
# stringsAsFactors = FALSE) argument.
stringsAsFactors = FALSE
model_details <- read.csv(here::here('src', 'diagnose', "covid_deck", 'new_mdt_week.csv'))
model_details = model_details %>% filter(network == net_)
network = net_
# Columns kept (in order) in the final combined output written at the end.
output_cols <- c("NETWORK","DEMO","LAST_UPDATE","LATEST_BASE_DATA","FORECAST_DATA",
                 "TIME_ID","BROADCAST_YEAR","CABLE_QTR","QTRYEAR","HH_NT_DAYPART","STREAM","ACTUAL","PREDICT",
                 "LAST_ACTUAL", 'BROADCAST_WEEK', "BROADCAST_DATE", 'L_DUR') #, #"MODEL_FINAL_VERSION",
#inputpath = paste0('s3://nbcu-ds-linear-forecasting/processed/pacing/20W029/', net_ ,'_AFL_0420_200423_20W029.csv')
inputpath = paste0('s3://nbcu-ds-linear-forecasting/processed/pacing/20W030/', net_ ,'_AFL_0420_200424_20W030.csv')
raw_data_AFL <- create_raw_data(inputFilePath = inputpath,
                                network = net_type,
                                group_by_cols = 'default')
#raw_data_AFL$raw_data$L_Imps <- raw_data_AFL$raw_data$L_Imps /2
#raw_data_AFL$raw_data$L_Dur <- raw_data_AFL$raw_data$L_Dur / 2
# Sanity check: print the most recent SalesPrime date with non-missing L_Imps.
test <- raw_data_AFL$raw_data %>% filter(HH_NT_Daypart == 'SalesPrime')
tail(test$Date[!is.na(test$L_Imps)],1)
# Level-shift (COVID) configuration: add_LS toggles the LS_Cov regressor;
# OOS_start is the out-of-sample boundary; end_LS is the last week the level
# shift is assumed active (alternative dates kept in trailing comments).
add_LS = T
OOS_start = as.Date('2020-04-17') #as.Date('2020-02-01') #
end_LS = as.Date('2020-04-30') #as.Date('2020-06-30') #as.Date('2020-04-30')
# Main run: for each row of model_details (one model spec per daypart/show),
# aggregate the raw data, fit the champion ARIMA (optionally with a COVID
# level-shift regressor), normalise the output columns, then combine all
# cases and write a single CSV.
{
  all_outputs = list()
  results = list()
  # caseid=1
  for (caseid in 1:nrow(model_details)){
    data_model <- model_details[caseid,]
    # Start each case with an empty frame carrying the target column names.
    all_outputs[[caseid]] = data.frame(matrix(nrow = 0,ncol = length(output_cols)))
    names(all_outputs[[caseid]]) = output_cols
    # Coerce spec fields that may have been read as factors.
    data_model$filter_hh_nt_daypart <- as.character(data_model$filter_hh_nt_daypart)
    data_model$filter_show_name <- as.character(data_model$filter_show_name)
    data_model$changepoints <- as.character(data_model$changepoints)
    data_model$regressors <- as.character(data_model$regressors)
    if (data_model$filter_show_name != "") filter_sh = data_model$filter_show_name else filter_sh = NA
    aggdata <- create_aggregated_data (raw_data_AFL ,
                                       interactive_input = F,
                                       filter_sl_nt_daypart = NA,
                                       filter_hh_nt_daypart = data_model$filter_hh_nt_daypart ,
                                       filter_program_type = NA,
                                       filter_show_name = filter_sh,
                                       filter_date = NA,
                                       filter_half_hr = NA,
                                       filter_cols = NA,
                                       network = net_type,
                                       time_id = data_model$time_id,
                                       agg_group_by_cols = 'default')
    case_data <- aggdata$aggregated_data
    case_data = case_data[order(case_data$Week),]
    # Hard-coded in-sample end date (the data-driven version is kept above).
    #insample_end <- max(subset(case_data,Source == "Nielsen")$Date)
    insample_end <- as.Date('2020-03-31')
    # Parse the comma-separated regressor list from the spec; Labor_Day_ind is
    # always excluded.
    regtxt <- gsub(pattern = " ",replacement = "",x = data_model$regressors)
    regset <- unlist(strsplit(regtxt,split = ","))
    regset <- regset[!(regset %in% c("Labor_Day_ind" ))]
    # get the arima and seasonal orders
    arima_pdq <- as.numeric(data_model[,c("arima_p","arima_d","arima_q")])
    arima_seasonal <- list(order = as.numeric(data_model[,c("seasonal_p","seasonal_d","seasonal_q")]),
                           period = as.numeric(data_model$arima_period))
    # get the changepoint dates
    # Two accepted formats: a single "m/d/yy" (or "m/d/yyyy") date, or a
    # comma-separated list of ISO "yyyy-mm-dd" dates.
    if (is.na(data_model$changepoints) == T | str_length(data_model$changepoints) < 2){
      cp_dates <- NA
    } else {
      if (grepl("/",data_model$changepoints) == T){
        date_parts = unlist(strsplit(data_model$changepoints,split = "/"))
        if (str_length(date_parts[3]) == 2) date_parts[3] = paste(c("20",date_parts[3]),collapse = "")
        cp_dates <- as.Date(paste(date_parts[c(3,1,2)],collapse = "-"))
      } else if (grepl("-",data_model$changepoints) == T){
        cp_dates <- as.Date(unlist(strsplit(data_model$changepoints,",")))
      }
    }
    # Add level-shift dummies used as candidate regressors.  LS_Cov ramps in
    # over the first COVID weeks (0.14 -> 0.77 -> 0.95) and switches off after
    # end_LS.  NOTE(review): there is no TRUE ~ fallback, so a Week strictly
    # between 2020-03-16 and 2020-03-23 would get NA -- fine only if Week is
    # always aligned to week starts; confirm.
    case_data <- case_data %>%
      mutate(
        YoY = Broadcast_Year - 2013,
        LS_Jan_2015 = ifelse(Week >= as.Date('2015-01-01'), 1,0),
        LS_Q2_2015 = ifelse(Week >= as.Date('2015-03-30'), 1, 0),
        LS_Jan_2017 = ifelse( Week >= as.Date('2017-01-01'), 1, 0),
        Rebrand_LS_2017 = ifelse(Week >= as.Date('2017-10-01'), 1 ,0),
        LS_Sep_2018 = ifelse(Week > as.Date('2018-09-24'), 1, 0 ),
        #LS_Cov = ifelse(Week >= as.Date('2020-03-16') & Week <= end_LS, 1, 0)
        LS_Cov = case_when(
          Week < as.Date('2020-03-16') ~ 0,
          Week == as.Date('2020-03-16') ~ 0.14,
          Week == as.Date('2020-03-23') ~ 0.77,
          Week > as.Date('2020-03-23') & Week <= end_LS ~ 0.95,
          Week > end_LS ~ 0
        )
      )
    #plot(case_data$LS_Cov)
    if (add_LS == T) regset <- c(regset, 'LS_Cov')
    # get the results from fitting the best ARIMA model
    results[[caseid]] <- fit_champion_arima(data = case_data,
                                            stream = str_, #data_model$stream,
                                            agg_timescale = data_model$time_id,
                                            log_transformation = as.numeric(data_model$log_transformation),
                                            boxcox = 'auto', #data_model$boxcox,
                                            OOS_start = OOS_start,
                                            regressors = regset,
                                            changepoint_dates = cp_dates,
                                            ARIMA_order = arima_pdq,
                                            ARIMA_seasonal = arima_seasonal)
    champ = results[[caseid]]$champion_result
    # Attach run metadata and rename columns to match output_cols.
    all_outputs[[caseid]] = champ %>%
      mutate(
        LAST_UPDATE = Sys.Date(), # format(Sys.time(),tz = "America/New_York",usetz = T),
        LAST_ACTUAL = insample_end,
        QTRYEAR = paste0(Broadcast_Year, "-", Cable_Qtr),
        MODEL_TYPE = data_model$model_type,
        MODEL_FINAL_VERSION = data_model$model_final_version,
        FORECAST_DATA = data_model$forecast_data,
        LATEST_BASE_DATA = data_model$latest_base_data,
        TIME_ID = data_model$time_id,
        STREAM = str_ , # data_model$stream,
        HH_NT_Daypart = ifelse(rep("HH_NT_Daypart",nrow(champ)) %in% names(champ),HH_NT_Daypart,NA)
      ) %>%
      rename(
        BROADCAST_WEEK = Week,
        L_DUR = L_Dur
      )
    # Harmonise the date column across daily and weekly models.
    if (data_model$time_id == 'Date') {
      all_outputs[[caseid]] = all_outputs[[caseid]] %>%
        rename(
          BROADCAST_DATE = Date
        )
    }
    if (data_model$time_id == 'Week'){
      all_outputs[[caseid]] = all_outputs[[caseid]] %>%
        mutate(
          BROADCAST_DATE = BROADCAST_WEEK
        )
    }
    # The modelled stream column becomes the generic ACTUAL column.
    names(all_outputs[[caseid]])[which(names(all_outputs[[caseid]]) == data_model$stream)] = "ACTUAL"
    all_outputs[[caseid]] = setNames(all_outputs[[caseid]], toupper(names(all_outputs[[caseid]])))
  }
  full_output <- do.call(rbind,lapply(all_outputs,function(x) return(x[,output_cols])))
  #lapply(all_outputs,function(x) return(print( setdiff(output_cols,colnames(x)) )))
  # USA-specific relabel: LateNight rows are reported under SalesPrime.
  full_output$HH_NT_DAYPART[which(full_output$NETWORK == 'USA' & full_output$HH_NT_DAYPART == 'LateNight')] = 'SalesPrime'
  # Output filename encodes the scenario: level-shift run, OOS backtest, or
  # the plain LRP run (namesave stays 0 if no branch matches).
  namesave = 0
  if (add_LS == T){
    namesave = paste0(net_,'_lrp_LS',end_LS, '.csv')
    full_output <- full_output %>%
      rename(PREDICT_LS = PREDICT)
  } else if (OOS_start == as.Date('2020-04-17')) {
    namesave = paste0(net_,'_lrp_OOS.csv')
  } else if (OOS_start == as.Date('2020-02-01')) {
    namesave = paste0(net_,'_lrp.csv')
  }
  print(namesave)
  #write.csv(get_table(result$champion_result), here::here('src', 'diagnose', 'lrp_results', paste0('results_', net_, '.csv')))
  write.csv(full_output, here::here('src', 'diagnose', 'covid_deck','results', namesave))
}
|
43b7efda3cb8c4a58820c7c135051d7ff6d44f84 | 873394d6cd11b544d602a7038c6e4890073727c7 | /Lab_10.R | 10797938e99ac60ea43364b0f920354a548577d1 | [] | no_license | michaelchoie/Introduction-to-Statistical-Learning | bb44c1c48155034da0ed99af776bd0f7cb17a2c6 | fb52ee1124bfe7c0267727790a9aec4cbe26c9a8 | refs/heads/master | 2021-08-19T23:43:46.172749 | 2017-11-27T17:55:50 | 2017-11-27T17:55:50 | 106,619,417 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,053 | r | Lab_10.R | ########################################################################################
# LAB 10.4 Principal Component Analysis
########################################################################################
USArrests <- data.frame(USArrests)
summary(USArrests)
apply(USArrests, 2, var)
# Must standardize variables otherwise variable with highest mean/variance skews results
# Use prcomp() to perform PCA
# prcomp() centers to mean 0 by default; scale. = TRUE additionally rescales
# each variable to SD 1 (the argument is spelled `scale.` with a trailing dot;
# `scale = T` only worked through partial matching)
pr.out <- prcomp(USArrests, scale. = TRUE)
# center = mean prior to standardization
# scale = SD prior to standardization
# rotation = principal component loadings matrix (called rotation because if you multiply X by rotation matrix,
# you get the coordinates of the rotated coordinate system)
# x = matrix with principal component scores as columns
pr.out
dim(pr.out$x)
# scale = 0 ensures arrows are scaled to represent the loadings
# sdev = standard deviation of principal components
# use sdev to get proportion of variance explained
# cumsum() gives cumulative sum of the elements of a numeric vector
biplot(pr.out, scale = 0)
# Principal components are only defined up to sign; flip BOTH the loadings and
# the scores so the biplot matches the textbook figure.
# (Bug fix: the original only flipped `rotation` -- `pr.out$x <- pr.out$x`
# was a no-op missing the minus sign, leaving scores and loadings inconsistent.)
pr.out$rotation <- -pr.out$rotation
pr.out$x <- -pr.out$x
biplot(pr.out, scale = 0)
pr.out$sdev
pr.var <- pr.out$sdev^2       # variance explained by each component
pve <- pr.var / sum(pr.var)   # proportion of variance explained (sums to 1)
pve
plot(pve, xlab = "Principal Component", ylab = "Proportion of Variance Explained",
     ylim = c(0, 1), type = "b", pch = 15)
lines(cumsum(pve), type = "b", pch = 15, col = "blue", lty = 2)
legend(2.0, 0.8, legend = c("Proportion of Variance Explained", "Cumulative Proportion of Variance Explained"),
       col = c("black", "blue"), lty = 1:2, cex = 0.8)
########################################################################################
# LAB 10.5.1 K-Means Clustering
########################################################################################
# Create sample data with 2 distinct clusters: shift the first 25 rows so
# they form a separate cloud from the last 25.
set.seed(3)
x <- matrix(rnorm(50*2), ncol = 2)
x[1:25, 1] <- x[1:25, 1] + 3
x[1:25, 2] <- x[1:25, 2] - 4
# Use kmeans() to perform the clustering algorithm
# cluster assignments stored in km.out$cluster
# nstart = how many random assignment sets that K-means performs on (recommended 20 or 50)
# tot.withinss = total within cluster sum of squares
# withinss = individual within cluster sum of squares
km.out <- kmeans(x, 2, nstart = 20)
km.out$cluster
# +1 shifts the cluster labels off color 1 (black) in the base palette.
plot(x, col = (km.out$cluster + 1), main = "K-Means Clustering Results with K = 2",
     xlab = "", ylab = "", pch = 20, cex = 2)
# Refit with K = 3 (one more cluster than the data truly has).
set.seed(2)
km.out <- kmeans(x, 3, nstart = 20)
km.out
# Compare a single random start against 20 starts: the multi-start fit should
# achieve a total within-cluster sum of squares at least as small.
set.seed(4)
km.out <- kmeans(x, 3, nstart = 1)
km.out$tot.withinss
km.out <- kmeans(x, 3, nstart = 20)
km.out$tot.withinss
########################################################################################
# LAB 10.5.2 Hierarchical Clustering
########################################################################################
# hclust() performs hierarchical clustering algo
# dist() computes inter-observation Euclidean distance matrix
# method = "linkage" [i.e. complete, average, single]
# the height of each fusion represents how different the two merged groups are
hc.complete <- hclust(dist(x), method = "complete")
hc.average <- hclust(dist(x), method = "average")
hc.single <- hclust(dist(x), method = "single")
# Draw the three dendrograms side by side for comparison.
par(mfrow = c(1,3))
plot(hc.complete, main = "Complete Linkage", xlab = "", sub = "", cex = .9)
plot(hc.average, main = "Average Linkage", xlab = "", sub = "", cex = .9)
plot(hc.single, main = "Single Linkage", xlab = "", sub = "", cex = .9)
# cutree() to see cluster labels given a cut of the dendrogram
cutree(hc.complete, 2)
cutree(hc.average, 2)
cutree(hc.single, 2)
cutree(hc.single, 4)
# scale() to scale features prior to clustering
xsc <- scale(x)
plot(hclust(dist(xsc), method = "complete"), main = "Hierarchical Clustering with Scaled Features")
# Correlation-based distance instead of Euclidean:
# as.dist() to convert square symmetric matrix into form hclust() recognizes as distance matrix (for correlation distance)
# data must have >= 3 features since absolute correlation b/w any 2 observations w/ measurements on 2 features = 1
# (i.e. two points on a plane always lie exactly on a line, so their
# correlation is always +1/-1)
# t() = transpose (cor() works column-wise, so transpose to correlate observations)
x <- matrix(rnorm(30 * 3), ncol = 3)
dd <- as.dist(1 - cor(t(x)))
plot(hclust(dd, method="complete"), main="Complete Linkage with Correlation-Based Distance", xlab="", sub="")
########################################################################################
# LAB 10.6.1 PCA on NCI60 Data
########################################################################################
# NCI60: gene-expression matrix (rows = cell lines, columns = genes) plus the
# cancer-type label of each cell line.
library(ISLR)
nci.labs <- NCI60$labs
nci.data <- NCI60$data
dim(nci.data)
nci.labs[1:4]
table(nci.labs)
pr.out <- prcomp(nci.data, scale = T)
# rainbow() creates vector with contiguous colors
Cols <- function(vec) {
  # Assign one rainbow() hue per distinct value of `vec` and return a color
  # for each element, so repeated values share the same color.
  palette <- rainbow(length(unique(vec)))
  idx <- as.numeric(as.factor(vec))
  palette[idx]
}
# Plot points of principal components colored by cancer types
# Left: PC1 vs PC2; right: PC1 vs PC3.
par(mfrow = c(1,2))
plot(pr.out$x[, 1:2], col = Cols(nci.labs), pch = 19, xlab = "Z1", ylab = "Z2")
plot(pr.out$x[, c(1,3)], col = Cols(nci.labs), pch = 19, xlab = "Z1", ylab = "Z2")
summary(pr.out)
# Plot proportion of variance explained
plot(pr.out)
# Better to plot proportion of variance and cumulative proportion of variance for each principal component
# This is called a scree plot
# Elbow at around 7th principal component - suggests little benefit examining more than that
pve <- pr.out$sdev^2/sum(pr.out$sdev^2)
par(mfrow = c(1,2))
plot(pve, type = "o", ylab = "PVE", xlab = "Principal Component", col = "blue")
plot(cumsum(pve), type = "o", ylab = "Cumulative PVE", xlab = "Principal Component", col = "brown3")
# PVE explained directly via summary(pr.out)$importance[2,]
# Cumulative PVE explained via summary(pr.out$importance[3,])
########################################################################################
# LAB 10.6.2 Clustering on Observations of NCI60 Data
########################################################################################
# Standardize genes before clustering, then compare the three linkage methods.
sd.data <- scale(nci.data)
par(mfrow = c(1,3))
data.dist <- dist(sd.data)
plot(hclust(data.dist), labels = nci.labs, main = "Complete Linkage", xlab = "", sub = "", ylab = "")
plot(hclust(data.dist, method = "average"), main = "Average Linkage", xlab = "", sub = "", ylab = "")
plot(hclust(data.dist, method = "single"), main = "Single Linkage", xlab = "", sub = "", ylab = "")
# Cut the complete-linkage tree into 4 clusters and cross-tabulate against the
# known cancer types.
hc.out <- hclust(dist(sd.data))
hc.clusters <- cutree(hc.out, 4)
table(hc.clusters, nci.labs)
par(mfrow = c(1,1))
plot(hc.out, labels = nci.labs)
# Height 139 is where the tree splits into the 4 clusters used above.
abline(h = 139, col = "red")
# How does hierarchical clustering compare to k-means when K=4?
set.seed(2)
km.out <- kmeans(sd.data, 4, nstart = 20)
km.clusters <- km.out$cluster
table(km.clusters, hc.clusters)
# Perform hierarchical clustering only on first few principal components
hc.out <- hclust(dist(pr.out$x[, 1:5]))
plot(hc.out, labels = nci.labs, main = "Hierarchical Clustering on First Five Score Vectors")
table(cutree(hc.out, 4), nci.labs)
|
905594a04aa71f193c6ae84a0dffad424e83d047 | 1d176135777b85272c99ac0b9edf3a41f5a72a29 | /Figure_6/compare_strains/read_phaster_results.r | 5f849d84270c23a60e96908ac778cdb66d1cb3de | [] | no_license | paraslonic/GCBPaperCode | 429a0528b1a78fbdbb35946bc789972d822433f7 | 7760cd72d4ea480f5b1b5233c2fb3a312ceaa587 | refs/heads/master | 2022-06-21T02:10:20.169991 | 2020-06-18T10:35:45 | 2020-06-18T10:35:45 | 224,661,526 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,308 | r | read_phaster_results.r | phaster.o157 <- read.delim("phaster/summary_o157.txt", head = T, sep = "\t")
# The preceding read.delim() call has already loaded the o157 PHASTER summary
# into `phaster.o157` as a raw table; the helpers below replace the five
# copy-pasted parse blocks and (re)assign all strain matrices uniformly.

# Split "start-end" region strings and return a 2 x n integer matrix
# (row 1 = region starts, row 2 = region ends) -- the same shape the original
# as.data.frame(strsplit(...)) + apply(...) pipeline produced, which the
# downstream figure code expects.
parse_region_positions <- function(positions) {
  parts <- strsplit(as.character(positions), split = "-")
  apply(as.data.frame(parts), c(1, 2), as.integer)
}

# Read one PHASTER summary file ("phaster/summary_<strain>.txt", tab-delimited
# with a header row) and parse its REGION_POSITION column.
read_phaster_regions <- function(strain) {
  summary_tbl <- read.delim(file.path("phaster", paste0("summary_", strain, ".txt")),
                            header = TRUE, sep = "\t")
  parse_region_positions(summary_tbl$REGION_POSITION)
}

# One prophage-region matrix per E. coli strain, used by the Figure 6 code.
phaster.o157   <- read_phaster_regions("o157")
phaster.iai1   <- read_phaster_regions("iai1")
phaster.k12    <- read_phaster_regions("k12")
phaster.lf82   <- read_phaster_regions("lf82")
phaster.umn026 <- read_phaster_regions("umn026")
|
f0f4135a5e3738eaf962e4627a03845ba4e82a9b | 5226f717b17677c2de63188482b5424e9666112d | /man/Moran.I.calc.Rd | 8e84785cc7333436762b2c25be49fe4e36c1ba54 | [] | no_license | ballengerj/FishyR | 04c7fa0934f1979347d699bd002d9a7dc7dbda80 | 9c33bb8f491c4ec481ef0b60988deb5c94d292f4 | refs/heads/master | 2022-06-17T15:24:25.366847 | 2022-06-08T21:52:55 | 2022-06-08T21:52:55 | 54,901,542 | 1 | 1 | null | null | null | null | UTF-8 | R | false | true | 810 | rd | Moran.I.calc.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spatial_correlation_functions.R
\name{Moran.I.calc}
\alias{Moran.I.calc}
\title{Moran's I Test for Spatial Autocorrelation Among Residuals}
\usage{
Moran.I.calc(data)
}
\arguments{
\item{data}{numeric data frame with three columns. First column should
represent the "x"-coordinates, 2nd column represents the "y"-coordinates,
and the 3rd column represents a numeric vector of model residuals}
}
\value{
A list containing the elements \code{observed}, \code{expected},
\code{sd}, & \code{p.value}
}
\description{
\code{Moran.I.calc} computes Moran's I autocorrelation coefficient of the
model residuals (the third column of \code{data}), using a weight matrix
defined as the inverse of the distance matrix of the spatial coordinates.
}
\seealso{
\code{\link[ape]{Moran.I}}
}
|
d15662ae838eded5a46b01af2ce2fa342058a32c | b712e229f1cedbf19cb8e7fcfcc3d59f336429de | /Market_State_Estimation_v1.R | 59bf1bc787f914e7d562618aeb2baed4c1e5159c | [] | no_license | Ani-07/Portfolio_Optimization_with_ML | 904e9798abaabf89af1d2e7851e203a8a07ed7b9 | 3767212bf8fb33b960141ff4af790cf12edf0f3b | refs/heads/master | 2022-12-25T13:56:35.183545 | 2020-10-02T03:31:40 | 2020-10-02T03:31:40 | 297,230,111 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,632 | r | Market_State_Estimation_v1.R |
library(quantmod)
library(TTR)
library(tree)
# Obtain Stock data of Apple July 30th, 2002 and Dec 31st, 2019
# Build a weekly feature table for one ticker: each row pairs a weekly return
# with technical indicators (5/14-day RSI, two MACDs, 4-day RSI averages)
# sampled from a trading day at least ~7 days before the return date, so the
# features do not look ahead of the return they are meant to predict.
# Returns a data.frame with columns Rtn_Date, Return, RSI_Date, RSI_1, RSI_2,
# MACD_1, MACD_2, Avg_RSI_1, Avg_RSI_2.
RSI_data <- function(ticker){
  # "apple" is a historical name -- it holds the OHLCV series for any ticker.
  # apple[,4] is assumed to be the close price (quantmod OHLCV column order --
  # confirm for tickers whose symbols contain extra columns).
  apple <- getSymbols(ticker, from = "2002-07-30", to = "2019-12-31", auto.assign = F)
  weekly.rtn <- weeklyReturn(apple, type = "arithmetic")
  rsi_5 = RSI(apple[,4], n = 5)
  rsi_14 = RSI(apple[,4], n = 14)
  macd_5_10 = MACD(apple[,4],nFast = 5, nSlow = 10)
  macd_12_26 = MACD(apple[,4],nFast = 12, nSlow = 26)
  # Create a Table with RSI and weekly return data
  RSI_Return = data.frame()
  # Drop the first two (partial / indicator-warmup) weeks.
  weekly.rtn <- weekly.rtn[-c(1,2),]
  # Iterating an xts index yields numeric day counts, so date arithmetic
  # below (i-7, a-1, ...) is plain subtraction.
  for(i in index(weekly.rtn)){
    name = ticker
    rtn_ind = which(index(weekly.rtn) == i)
    rtn = weekly.rtn$weekly.returns[rtn_ind]
    # Target indicator date: one week before the return date; the branches
    # below walk it back further until it lands on a trading day.
    a = i-7
    # NOTE(review): `a-3 %in% index(rsi_5)` parses as `a - (3 %in% index(rsi_5))`
    # because %in% binds tighter than binary minus, so this condition is a
    # nonzero numeric and effectively always TRUE.  The intended test was
    # presumably `(a-3) %in% ...` or simply `a %in% ...` (compare the
    # parenthesized branches below); when `a` is not a trading day,
    # which(...) returns integer(0) and the row is filled with zero-length
    # values -- verify against the NA filtering at the end.
    if (a-3 %in% index(rsi_5)){
      ind = which(index(rsi_5) == a)
      tmp_rsi_1 = rsi_5$rsi[ind]
      tmp_rsi_2 = rsi_14$rsi[ind]
      # 4-day trailing means of each RSI ending at `ind`.
      avg_rsi_1 = mean(rsi_5$rsi[(ind-3):ind])
      avg_rsi_2 = mean(rsi_14$rsi[(ind-3):ind])
      tmp_macd_1 = macd_5_10$macd[ind]
      tmp_macd_2 = macd_12_26 $macd[ind]
    }else if ((a-4) %in% index(rsi_5)){
      #print("a")
      # Shift back one calendar day and sample the indicators there.
      a = a-1
      ind = which(index(rsi_5) == a)
      tmp_rsi_1 = rsi_5$rsi[ind]
      tmp_rsi_2 = rsi_14$rsi[ind]
      avg_rsi_1 = mean(rsi_5$rsi[(ind-3):ind])
      avg_rsi_2 = mean(rsi_14$rsi[(ind-3):ind])
      tmp_macd_1 = macd_5_10$macd[ind]
      tmp_macd_2 = macd_12_26 $macd[ind]
      #tmp_obv = apple_obv$obv[ind]
    }else if ((a-2) %in% index(rsi_5)){
      # Shift back two calendar days.
      a = a-2
      ind = which(index(rsi_5) == a)
      tmp_rsi_1 = rsi_5$rsi[ind]
      tmp_rsi_2 = rsi_14$rsi[ind]
      avg_rsi_1 = mean(rsi_5$rsi[(ind-3):ind])
      avg_rsi_2 = mean(rsi_14$rsi[(ind-3):ind])
      tmp_macd_1 = macd_5_10$macd[ind]
      tmp_macd_2 = macd_12_26 $macd[ind]
      #tmp_obv = apple_obv$obv[ind]
    }else if ((a-3) %in% index(rsi_5)){
      # Shift back three calendar days (e.g. long weekends).
      a = a-3
      ind = which(index(rsi_5) == a)
      tmp_rsi_1 = rsi_5$rsi[ind]
      tmp_rsi_2 = rsi_14$rsi[ind]
      avg_rsi_1 = mean(rsi_5$rsi[(ind-3):ind])
      avg_rsi_2 = mean(rsi_14$rsi[(ind-3):ind])
      tmp_macd_1 = macd_5_10$macd[ind]
      tmp_macd_2 = macd_12_26 $macd[ind]
    }else{
      #print("c")
      # No usable trading day found: record NAs (rows dropped below).
      tmp_rsi_1 = NA
      tmp_macd_1 = NA
      tmp_rsi_2 = NA
      tmp_macd_2 = NA
      avg_rsi_1 = NA
      avg_rsi_2 = NA
      #tmp_obv = 0
    }
    #print(avg_rsi)
    RSI_Return = rbind(RSI_Return,c(i,rtn,a,tmp_rsi_1,tmp_rsi_2,tmp_macd_1,tmp_macd_2,
                                    avg_rsi_1,avg_rsi_2))
  }
  colnames(RSI_Return) <- c("Rtn_Date","Return","RSI_Date","RSI_1", "RSI_2",
                            "MACD_1","MACD_2","Avg_RSI_1","Avg_RSI_2")
  # Dates were stored as numerics during rbind; convert back to Date class.
  RSI_Return$Rtn_Date <- as.Date(RSI_Return$Rtn_Date)
  RSI_Return$RSI_Date <- as.Date(RSI_Return$RSI_Date)
  # Drop rows where the slowest indicator (MACD 12/26) is still NA.
  # NOTE(review): if no row has NA, `rows` is integer(0) and
  # `RSI_Return[-rows,]` selects ZERO rows (negative indexing with an empty
  # vector); guard with `if (length(rows) > 0)` before subsetting.
  d = is.na(RSI_Return$MACD_2)
  rows = which(d == 1)
  RSI_Return <- RSI_Return[-rows,]
  return(RSI_Return)
}
#################################################################################################
# Build one combined indicator table for all tickers, recording for each
# ticker which rows of RSI_database belong to it (start/end row indices are
# later consumed positionally as start_end[i, 1] and start_end[i, 2]).
tickers <- c("MSFT","GOOG","AAPL","FB","PYPL","AMZN","INTC","HPQ","DELL","SNE","IBM")
start_end <- data.frame()
RSI_database <- data.frame()
for (symbol in tickers) {
  first_row <- nrow(RSI_database) + 1
  RSI_database <- rbind(RSI_database, RSI_data(symbol))
  last_row <- nrow(RSI_database)
  start_end <- rbind(start_end, list(first_row, last_row))
}

# Quick look at how each indicator correlates with the following week's return.
cor(RSI_database$Return, RSI_database$RSI_1)
cor(RSI_database$Return, RSI_database$RSI_2)
cor(RSI_database$Return, RSI_database$MACD_1)
cor(RSI_database$Return, RSI_database$MACD_2)
cor(RSI_database$Return, RSI_database$Avg_RSI_1)
cor(RSI_database$Return, RSI_database$Avg_RSI_2)
#########################################################################
# Create fresh table for predicting return
# ML_data: assemble a modelling table for `ticker` from rows [start_i, end_i]
# of the indicator table `db` (as produced by RSI_data). For each weekly
# return date it locates the last trading day of the *previous* week and
# records the target return together with lagged daily returns, volumes, RSI
# and MACD features observed at that day.
#
# Arguments:
#   ticker  - symbol understood by quantmod::getSymbols
#   db      - data.frame with at least Rtn_Date and Return columns
#   start_i - first row of `db` belonging to this ticker
#   end_i   - last row of `db` belonging to this ticker
# Returns a data.frame with one row per weekly return date.
ML_data <- function(ticker, db,start_i,end_i){
apple <- getSymbols(ticker, from = "2002-07-30", to = "2019-12-31", auto.assign = F)
daily_ret = dailyReturn(apple[,4])
RSI_Return = db[start_i:(end_i),]
rsi_5 = RSI(apple[,4], n = 5)
rsi_14 = RSI(apple[,4], n = 14)
macd_5_10 = MACD(apple[,4],nFast = 5, nSlow = 10)
macd_12_26 = MACD(apple[,4],nFast = 12, nSlow = 26)
return_forecast = data.frame()
for (i in RSI_Return$Rtn_Date){
target = RSI_Return$Return[which(RSI_Return$Rtn_Date == i)]
print(as.Date(i))
# Find the most recent trading day at least one week before the return date.
# BUG FIX: the original only tried offsets 7, 8 and 9 and, when a longer
# market closure produced a bigger gap, silently reused a stale `end` from the
# previous iteration (or failed on the first one). Search a wider window and
# fail loudly if no trading day is found.
end = NA_integer_
for (offset in 7:14){
if ((i-offset) %in% index(daily_ret)){
end = which(index(daily_ret) == (i-offset))
break
}
}
if (is.na(end)){
stop("ML_data: no trading day found within 14 days before ", as.Date(i))
}
print(end)
# Features come from the 6 trading days ending at `end`.
start = (end - 5)
print(start)
week_date = index(daily_ret)[end]
avg = round(mean(daily_ret$daily.returns[start:end]),6)
var = round(var(daily_ret$daily.returns[start:end]),6)
# Last three daily returns before the target week.
t_1 = daily_ret$daily.returns[end]
t_2 = daily_ret$daily.returns[end-1]
t_3 = daily_ret$daily.returns[end-2]
# Last five daily trading volumes (column 5 of the OHLCV object).
v_1 = apple[,5][end]
v_2 = apple[,5][end-1]
v_3 = apple[,5][end-2]
v_4 = apple[,5][end-3]
v_5 = apple[,5][end-4]
tmp_rsi_1 = rsi_5$rsi[end]
tmp_rsi_2 = rsi_14$rsi[end]
avg_rsi_1 = mean(rsi_5$rsi[(end-3):end])
avg_rsi_2 = mean(rsi_14$rsi[(end-3):end])
tmp_macd_1 = macd_5_10$macd[end]
tmp_macd_2 = macd_12_26 $macd[end]
return_forecast = rbind(return_forecast,c(i,target,week_date,avg,var,t_1,t_2,t_3,
v_1,v_2,v_3,v_4,v_5,tmp_rsi_1,tmp_rsi_2,
tmp_macd_1,tmp_macd_2, avg_rsi_1,avg_rsi_2))
}
colnames(return_forecast) <- c("Rtn_Date","Target","Week_date","Average","Variance",
"t_1", "t_2","t_3","v_1","v_2","v_3","v_4","v_5","RSI_1",
"RSI_2","MACD_1","MACD_2","Avg_RSI_1","Avg_RSI_2")
return_forecast$Rtn_Date <- as.Date(return_forecast$Rtn_Date)
return(return_forecast)
}
########################################################################################
# Build the final machine-learning table: one block of feature rows per
# ticker, using the row ranges recorded in `start_end`, then write it to CSV.
tickers <- c("MSFT","GOOG","AAPL","FB","PYPL","AMZN","INTC","HPQ","DELL","SNE","IBM")
ML_database <- data.frame()
for (idx in seq_along(tickers)) {
  symbol <- tickers[idx]
  print(symbol)
  ticker_rows <- ML_data(symbol, RSI_database, start_end[idx, 1], start_end[idx, 2])
  print(dim(ticker_rows))
  print(colnames(ticker_rows))
  ML_database <- rbind(ML_database, ticker_rows)
}

# Week_date comes back numeric from the c() call inside ML_data; restore Date class.
ML_database$Week_date <- as.Date(ML_database$Week_date)
write.csv(ML_database, "Return_Data.csv", row.names = FALSE)
|
691cf00652574baeffa74436434353992cf31a09 | 25cb70237f3350bb4b13cb7261bc6bd7593be7d7 | /hawaii_seir_model/test_hawaii/hawaii_test_EH.R | fdeeabf5a948f76b35b4ff5ddf7fb7f5577a0c8c | [] | no_license | mbutler808/Zool719-covid19 | 91de53ee3622023a256b308686f58dc514ed5613 | 65700d6a9e23f71612fa792006c09ab28d0b577c | refs/heads/main | 2023-04-18T21:29:13.810358 | 2021-05-05T08:46:03 | 2021-05-05T08:46:03 | 331,101,143 | 0 | 0 | null | 2021-05-05T08:46:04 | 2021-01-19T20:28:14 | HTML | UTF-8 | R | false | false | 14,785 | r | hawaii_test_EH.R | require(tidyverse)
require(pomp)
require(ggplot2)
#input vaccine data
vax <- read.csv("Hawaii_vaccine_data.csv", fileEncoding = "UTF-8-BOM")
COUNTY = "Honolulu"
data = read.csv("../hawaii_covid_cases.csv")
# Keep only the daily case counts for the chosen county.
data = data %>%
select(-c(New.Positive.Tests, Total.Test.Encounters)) %>%
filter(County == COUNTY)
# Drop the first (County) column, leaving Date and the daily case count.
data <- data[-1]
# ---- Time window t1: start of the series up to March 23, 2020 ----
data_t1 <- data[1:23,] #from start until March 23
#start to end of first lockdown
data_t1$Date <- as.Date(data_t1$Date, format = "%m/%d/%Y")
size <- dim(data_t1)[1]
data_t1$day <- c(1:size)
names(data_t1) <- c("date", "C", "day")
# C becomes the cumulative case count within this window.
data_t1$C <- cumsum(data_t1$C)
ggplot(data_t1, aes(x = date, y = C)) + geom_line() +
ylab("Total Cases") + ggtitle("Daily Confirmed Cases of COVID-19 in", paste(COUNTY))
###Vax plot not showing line :/
# ggplot(vax, aes(full_vax, percent_full)) + geom_line()
# ggplot(vax, aes(x=date, y=percent_full)) + geom_line() + theme(axis.text.x = element_text(angle = 45, hjust = 1))
# SEIR state, parameter and observation names shared by the pomp models below.
covid_statenames = c("S", "E", "I", "R")
covid_paramnames = c("Beta", "mu_EI", "rho", "mu_IR", "N", "eta", "k")
covid_obsnames = "C"
# Measurement model (C snippets): Poisson likelihood, negative-binomial simulator.
covid_dmeasure_t1 = "lik = dpois(C, rho*I + 1e-6, give_log);"
covid_rmeasure_t1 = "C = rnbinom(rho*I, k);"
# Stochastic SEIR step: binomial draws for S->E, E->I and I->R transitions.
covid_rprocess_t1 = "
double dN_SE = rbinom(S, 1-exp(-Beta*I/N*dt));
double dN_EI = rbinom(E, 1-exp(-mu_EI*dt));
double dN_IR = rbinom(I, 1-exp(-mu_IR*dt));
S -= dN_SE;
E += dN_SE - dN_EI;
I += dN_EI - dN_IR;
R += dN_IR;
"
#Beta = contact rate
#mu_EI = incubation rate
#rho = reporting rate
#mu_IR = recovery/removed rate
#k = overdispersion in the counts process
#eta = number of susceptible (estimated)
# Initial compartment sizes for t1.
covid_rinit_t1 = "
S = 2500;
E = 1;
I = 1;
R = 0;
"
covid_t1 <- pomp(data = data_t1, times = "day", t0 = 0,
rprocess = euler(step.fun = Csnippet(covid_rprocess_t1), delta.t = 1/7),
rmeasure = Csnippet(covid_rmeasure_t1),
dmeasure = Csnippet(covid_dmeasure_t1),
partrans = parameter_trans(
log=c("Beta","mu_EI","mu_IR", "k", "rho")),
obsnames = covid_obsnames,
statenames = covid_statenames,
paramnames = covid_paramnames,
rinit = Csnippet(covid_rinit_t1)
)
#Beta = contact rate
#mu_EI = incubation rate
#rho = reporting rate
#mu_IR = recovery/removed rate
#k = overdispersion in the counts process
#eta = number of susceptible (estimated)
# Simulate 20 trajectories at hand-picked parameters and overlay the data.
# NOTE(review): eta is passed but unused by the snippets above; N only enters
# through the force of infection Beta*I/N.
sims_t1 = covid_t1 %>%
simulate(params = c(Beta = 20, mu_EI = 0.15, mu_IR = .05, k = 0.4,
rho = 2, eta = 0.5, N = 150000),
nsim = 20, format = "data.frame", include = TRUE)
ggplot(sims_t1, aes(x = day, y = C, group = .id, color = .id=="data")) +
geom_line() + guides(color=FALSE)
sims_t1
# Average compartment sizes across simulations at day 20 to seed the next window.
# NOTE(review): grep("20", ...) is a substring match on day numbers; only safe
# here because no other day in 1..23 contains "20".
ends <- grep("20",sims_t1$day)
sims_t1_end <- sims_t1[ends,]
t1_s <- round(mean(sims_t1_end$S, na.rm = T))
t1_e <- round(mean(sims_t1_end$E, na.rm = T))
t1_i <- round(mean(sims_t1_end$I, na.rm = T))
t1_r <- round(mean(sims_t1_end$R, na.rm = T))
########################
#set t1.5
# March 23, 2020 to April 23, 2020
# NOTE(review): rows 23:54 span roughly March 23 - April 23; the inline
# "until May 31" comment below looks stale -- confirm against the raw data.
data_t1.5 <- data[23:54,] #from start until May 31
#start to end of first lockdown
data_t1.5$Date <- as.Date(data_t1.5$Date, format = "%m/%d/%Y")
size <- dim(data_t1.5)[1]
data_t1.5$day <- c(1:size)
names(data_t1.5) <- c("date", "C", "day")
# NOTE(review): unlike the later windows, t1.5 does not carry over the running
# total from t1 before taking cumsum -- verify this is intentional.
data_t1.5$C <- cumsum(data_t1.5$C)
ggplot(data_t1.5, aes(x = date, y = C)) + geom_line() +
ylab("Total Cases") + ggtitle("Daily Confirmed Cases of COVID-19 in", paste(COUNTY))
# Re-declare the shared model pieces (same snippets as the *_t1 versions,
# but without the suffix); these are reused by the t2/t3/t4 windows below.
covid_statenames = c("S", "E", "I", "R")
covid_paramnames = c("Beta", "mu_EI", "rho", "mu_IR", "N", "eta", "k")
covid_obsnames = "C"
covid_dmeasure = "lik = dpois(C, rho*I + 1e-6, give_log);"
covid_rmeasure = "C = rnbinom(rho*I, k);"
covid_rprocess = "
double dN_SE = rbinom(S, 1-exp(-Beta*I/N*dt));
double dN_EI = rbinom(E, 1-exp(-mu_EI*dt));
double dN_IR = rbinom(I, 1-exp(-mu_IR*dt));
S -= dN_SE;
E += dN_SE - dN_EI;
I += dN_EI - dN_IR;
R += dN_IR;
"
#Beta = contact rate
#mu_EI = incubation rate
#rho = reporting rate
#mu_IR = recovery/removed rate
#k = overdispersion in the counts process
#eta = number of susceptible (estimated)
# Initial compartments for t1.5 (seeded from the t1 end-of-window averages).
covid_rinit_t1.5 = "
S = 2478;
E = 11;
I = 10;
R = 4;
"
# covid_rinit_t1.5 = "
# int t1_s;
# S = &t1_s;
# E = 11;
# I = 10;
# R = 4;
# "
covid_t1.5 <- pomp(data = data_t1.5, times = "day", t0 = 0,
rprocess = euler(step.fun = Csnippet(covid_rprocess), delta.t = 1/7),
rmeasure = Csnippet(covid_rmeasure),
dmeasure = Csnippet(covid_dmeasure),
partrans = parameter_trans(
log=c("Beta","mu_EI","mu_IR", "k", "rho")),
obsnames = covid_obsnames,
statenames = covid_statenames,
paramnames = covid_paramnames,
rinit = Csnippet(covid_rinit_t1.5)
)
#Beta = contact rate
#mu_EI = incubation rate
#rho = reporting rate
#mu_IR = recovery/removed rate
#k = overdispersion in the counts process
#eta = number of susceptible (estimated)
sims_t1.5 = covid_t1.5 %>%
simulate(params = c(Beta = 15, mu_EI = 0.03, mu_IR = .2, k = 0.42,
rho = 1.2, eta = 0.3, N = 15000),
nsim = 20, format = "data.frame", include = TRUE)
ggplot(sims_t1.5, aes(x = day, y = C, group = .id, color = .id=="data")) +
geom_line() + guides(color=FALSE)
sims_t1.5
# Average compartment sizes at day 20 (substring match; see note in the t1 block).
ends <- grep("20",sims_t1.5$day)
sims_t1.5_end <- sims_t1.5[ends,]
t_s <- round(mean(sims_t1.5_end$S, na.rm = T))
t_e <- round(mean(sims_t1.5_end$E, na.rm = T))
t_i <- round(mean(sims_t1.5_end$I, na.rm = T))
t_r <- round(mean(sims_t1.5_end$R, na.rm = T))
########################
#set t2
#June 1, 2020 to July 31, 2020
#end of stay at home order, beaches/restaurants open
#close again
data_t2 <- data[93:153,]
data_t2$Date <- as.Date(data_t2$Date, format = "%m/%d/%Y")
size <- dim(data_t2)[1]
data_t2$day <- c(1:size)
names(data_t2) <- c("date", "C", "day")
#create cummulative case count for entire data set to include cases from previous time point
data_sum <- data
data_sum$Date <- as.Date(data_sum$Date, format = "%m/%d/%Y")
size <- dim(data_sum)[1]
data_sum$day <- c(1:size)
names(data_sum) <- c("date", "C", "day")
data_sum$C <- cumsum(data_sum$C)
# Seed day 1 with the running total carried over from before this window.
data_t2$C[1] <- sum(data_sum$C[92:93])
data_t2$C <- cumsum(data_t2$C)
ggplot(data_t2, aes(x = date, y = C)) + geom_line() +
ylab("Total Cases") + ggtitle("Daily Confirmed Cases of COVID-19 in", paste(COUNTY))
# Initial compartments for t2.
covid_rinit_t2 = "
S = 1334;
E = 945;
I = 94;
R = 130;
"
### There is no covid_rprocess_t2, _t3, etc. so I replaced them all with covid_rprocess_t1
covid_t2 <- pomp(data = data_t2, times = "day", t0 = 0,
rprocess = euler(step.fun = Csnippet(covid_rprocess_t1), delta.t = 1/7),
rmeasure = Csnippet(covid_rmeasure),
dmeasure = Csnippet(covid_dmeasure),
partrans = parameter_trans(
log=c("Beta","mu_EI","mu_IR", "k", "rho")),
obsnames = covid_obsnames,
statenames = covid_statenames,
paramnames = covid_paramnames,
rinit = Csnippet(covid_rinit_t2)
)
#Beta = contact rate
#mu_EI = incubation rate
#rho = reporting rate
#mu_IR = recovery/removed rate
#k = overdispersion in the counts process
#eta = number of susceptible (estimated)
sims_t2 = covid_t2 %>%
simulate(params = c(Beta = 1, mu_EI = 0.01, mu_IR = .04, k = 0.42,
rho = 6, eta = 0.3, N = 15000),
nsim = 20, format = "data.frame", include = TRUE)
ggplot(sims_t2, aes(x = day, y = C, group = .id, color = .id=="data")) +
geom_line() + guides(color=FALSE)
sims_t2
# End-of-window compartment averages (t_s..t_r are overwritten by each window).
ends <- grep("20",sims_t2$day)
sims_t2_end <- sims_t2[ends,]
t_s <- round(mean(sims_t2_end$S, na.rm = T))
t_e <- round(mean(sims_t2_end$E, na.rm = T))
t_i <- round(mean(sims_t2_end$I, na.rm = T))
t_r <- round(mean(sims_t2_end$R, na.rm = T))
########################
#set t3
#August 1, 2020 to October 15, 2020
#Limited social gatherings
#second stay at home order on August 27
#Safe travels start October 15
data_t3 <- data[154:229,]
data_t3$Date <- as.Date(data_t3$Date, format = "%m/%d/%Y")
size <- dim(data_t3)[1]
data_t3$day <- c(1:size)
names(data_t3) <- c("date", "C", "day")
# Carry over the running total from the end of t2 (rows 153:154 bracket the boundary).
data_t3$C[1] <- sum(data_sum$C[153:154])
data_t3$C <- cumsum(data_t3$C)
ggplot(data_t3, aes(x = date, y = C)) + geom_line() +
ylab("Total Cases") + ggtitle("Daily Confirmed Cases of COVID-19 in", paste(COUNTY))
# Initial compartments for t3.
covid_rinit_t3 = "
S = 1115;
E = 978;
I = 169;
R = 241;
"
covid_t3 <- pomp(data = data_t3, times = "day", t0 = 0,
rprocess = euler(step.fun = Csnippet(covid_rprocess_t1), delta.t = 1/7),
rmeasure = Csnippet(covid_rmeasure),
dmeasure = Csnippet(covid_dmeasure),
partrans = parameter_trans(
log=c("Beta","mu_EI","mu_IR", "k", "rho")),
obsnames = covid_obsnames,
statenames = covid_statenames,
paramnames = covid_paramnames,
rinit = Csnippet(covid_rinit_t3)
)
#Beta = contact rate
#mu_EI = incubation rate
#rho = reporting rate
#mu_IR = recovery/removed rate
#k = overdispersion in the counts process
#eta = number of susceptible (estimated)
sims_t3 = covid_t3 %>%
simulate(params = c(Beta = 7, mu_EI = 0.01, mu_IR = .01, k = 0.42,
rho = 13, eta = 0.3, N = 15000),
nsim = 20, format = "data.frame", include = TRUE)
ggplot(sims_t3, aes(x = day, y = C, group = .id, color = .id=="data")) +
geom_line() + guides(color=FALSE)
sims_t3
ends <- grep("20",sims_t3$day)
sims_t3_end <- sims_t3[ends,]
t_s <- round(mean(sims_t3_end$S, na.rm = T))
t_e <- round(mean(sims_t3_end$E, na.rm = T))
t_i <- round(mean(sims_t3_end$I, na.rm = T))
t_r <- round(mean(sims_t3_end$R, na.rm = T))
########################
#set t4
#October 16, 2020 to December 15, 2020
#Start of Safe travels program to first administered vaccine
data_t4 <- data[230:290,]
data_t4$Date <- as.Date(data_t4$Date, format = "%m/%d/%Y")
size <- dim(data_t4)[1]
data_t4$day <- c(1:size)
names(data_t4) <- c("date", "C", "day")
# Seed day 1 with the running total carried over from the end of t3.
# BUG FIX: the original used data_sum$C[153:154], copy-pasted from the t3
# block; t4 starts at row 230, so the carry-over must use rows 229:230
# (matching the [92:93] / [153:154] / [290:291] pattern of the other windows).
data_t4$C[1] <- sum(data_sum$C[229:230])
data_t4$C <- cumsum(data_t4$C)
ggplot(data_t4, aes(x = date, y = C)) + geom_line() +
ylab("Total Cases") + ggtitle("Daily Confirmed Cases of COVID-19 in", paste(COUNTY))
# Initial compartments for t4.
covid_rinit_t4 = "
S = 2000;
E = 250;
I = 200;
R = 200;
"
covid_t4 <- pomp(data = data_t4, times = "day", t0 = 0,
rprocess = euler(step.fun = Csnippet(covid_rprocess_t1), delta.t = 1/7),
rmeasure = Csnippet(covid_rmeasure),
dmeasure = Csnippet(covid_dmeasure),
partrans = parameter_trans(
log=c("Beta","mu_EI","mu_IR", "k", "rho")),
obsnames = covid_obsnames,
statenames = covid_statenames,
paramnames = covid_paramnames,
rinit = Csnippet(covid_rinit_t4)
)
#Beta = contact rate
#mu_EI = incubation rate
#rho = reporting rate
#mu_IR = recovery/removed rate
#k = overdispersion in the counts process
sims_t4 = covid_t4 %>%
simulate(params = c(Beta = 4.5, mu_EI = 0.01, mu_IR = .04, k = 0.42,
rho = 15, eta = 0.3, N = 15000),
nsim = 20, format = "data.frame", include = TRUE)
ggplot(sims_t4, aes(x = day, y = C, group = .id, color = .id=="data")) +
geom_line() + guides(color=FALSE)
sims_t4
# Compartment averages at day 20 (substring match on day numbers; only day 20
# matches "20" among 1..61).
ends <- grep("20",sims_t4$day)
sims_t4_end <- sims_t4[ends,]
t_s <- round(mean(sims_t4_end$S, na.rm = T))
t_e <- round(mean(sims_t4_end$E, na.rm = T))
t_i <- round(mean(sims_t4_end$I, na.rm = T))
t_r <- round(mean(sims_t4_end$R, na.rm = T))
########################
#set t5
#December 16, 2020 to March 28, 2021
#First vaccine administration to present
####Added vaccine data here
data_t5 <- data[291:393,]
data_t5$Date <- as.Date(data_t5$Date, format = "%m/%d/%Y")
#shorten vax data to match dataframe size
# vax1 <- head(vax, -12)
size <- dim(data_t5)[1]
data_t5$day <- c(1:size)
#merge vax dataframe with cases dataframe
# data_t5 <- cbind(data_t5, new_col = vax1$percent_partial)
names(data_t5) <- c("date", "C", "day")
# Carry over the running total from the end of t4.
data_t5$C[1] <- sum(data_sum$C[290:291])
data_t5$C <- cumsum(data_t5$C)
ggplot(data_t5, aes(x = date, y = C)) + geom_line() +
ylab("Total Cases") + ggtitle("Daily Confirmed Cases of COVID-19 in", paste(COUNTY))
# Initial compartments for t5.
# NOTE(review): R = 20000 exceeds the N = 15000 passed to simulate() below --
# confirm these initial conditions are intended.
covid_rinit_t5 = "
S = 3000;
E = 200;
I = 200;
R = 20000;
"
covid_t5 <- pomp(data = data_t5, times = "day", t0 = 0,
rprocess = euler(step.fun = Csnippet(covid_rprocess_t1), delta.t = 1/7),
rmeasure = Csnippet(covid_rmeasure_t1),
dmeasure = Csnippet(covid_dmeasure_t1),
partrans = parameter_trans(
log=c("Beta","mu_EI","mu_IR", "k", "rho")),
obsnames = covid_obsnames,
statenames = covid_statenames,
paramnames = covid_paramnames,
rinit = Csnippet(covid_rinit_t5)
)
# Print the assembled model specification for inspection.
spy(covid_t5)
#Beta = contact rate
#mu_EI = incubation rate
#rho = reporting rate
#mu_IR = recovery/removed rate
#k = overdispersion in the counts process
#eta = number of susceptible (estimated)
# NOTE(review): dvax is not in covid_paramnames and is not used by any snippet.
sims_t5 = covid_t5 %>%
simulate(params = c(Beta = 1, mu_EI = 0.0001, mu_IR = .0025, k = 0.3,
rho = 75, eta = 0.3, N = 15000, dvax=0),
nsim = 20, format = "data.frame", include = TRUE)
ggplot(sims_t5, aes(x = day, y = C, group = .id, color = .id=="data")) +
geom_line() + guides(color=FALSE)
# Particle-filter log-likelihood comparison at two Beta values.
# NOTE(review): `covid` is not defined anywhere in this script (presumably one
# of the covid_t* objects was intended) -- these calls will fail as written.
pf <- replicate(n=20,logLik(pfilter(covid, Np = 500,
params = c(Beta = 7.75, mu_EI = 0.001, mu_IR = .04, k = 0.42,
rho = 400, eta = 0.2, N = 15000),
partrans = parameter_trans(
log = c("Beta", "mu_EI", "mu_IR", "k", "rho")),
dmeasure = Csnippet(covid_dmeasure),
statenames = covid_statenames,
paramnames = covid_paramnames)))
beta7.75 <- logmeanexp(pf, se =T)
pf <- replicate(n=20,logLik(pfilter(covid, Np = 500,
params = c(Beta = 8, mu_EI = 0.001, mu_IR = .04, k = 0.42,
rho = 400, eta = 0.2, N = 15000),
partrans = parameter_trans(
log = c("Beta", "mu_EI", "mu_IR", "k", "rho")),
dmeasure = Csnippet(covid_dmeasure),
statenames = covid_statenames,
paramnames = covid_paramnames)))
beta8 <- logmeanexp(pf, se =T)
|
8b8103d08a6dee410c6aeafaaed44464e433e7f2 | 4e50d2345a2cfeb3c9ecb02187f88e753d1ed83c | /bin/02.taxonomy/species.barplot.r | deb43e96e3bdb4cd4ac3865fc04ee01c86aeda1a | [] | no_license | ms201420201029/real_metagenome_pipeline | 7c7b54e5e8a798933387f960256ebb849e9c2200 | e8f0b188f21975305565d06e4fefe4f4c9adc1f7 | refs/heads/master | 2020-04-05T06:41:34.349662 | 2018-12-04T05:48:52 | 2018-12-04T05:48:52 | 156,646,750 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,304 | r | species.barplot.r | args = commandArgs(T)
profile <- read.table(args[1], header = T, check.names = F);
number <- args[2]
level <- args[3]
## reorder
#neworder = as.character(unlist(read.table("species/top20/20150717/bar/sample.order.list")))
#profile = profile[, neworder, drop = F]
## unknown change
#colnames(profile) = gsub("M", "m", colnames(profile))
## reorder taxonomy
rowsums <- rowSums(profile);
profile <- as.matrix(profile[order(-rowsums), , drop = F]);
for (i in colnames(profile)){
order <- rev(order(profile[, i]));
number <- min(number, length(order));
profile[-(order[1:number]), i] <- 0;
}
Others <- 1 - apply(profile, 2, sum);
profile <- rbind(profile, Others);
profile <- profile[which(apply(profile, 1, sum) > 0), , drop = F];
## picture parameters
palette <- c("red", "gray", "cornflowerblue", "chartreuse3",
"yellow", "honeydew4", "indianred4", "khaki",
"lightblue1", "lightseagreen", "lightslateblue", "magenta",
"blue", "orange2", "purple", "black");
color <- colorRampPalette(palette, interpolate = "spline", space = "Lab");
space <- 0.5;
width <- 2;
## calculate size
## draw picture
pdf(args[4], height = 15, width = 15);
layout(matrix(c(1, 2), nrow = 2));
par(oma = c(2, 2, 2, 2),
mar = c(5, 5, 5, 5));
#barplot(table, col = colorvector(colornumber), xaxt = "n", space = spa, width = width)
if (ncol(profile) > number){
barplot(profile, col = color(nrow(profile)), space = space, width = width, las = 2)
}else {
barplot(profile, col = color(nrow(profile)), space = space, width = width)
}
#text(seq(from = width - 0.3,length = ncol(profile), by=2 * space + width),par("usr")[3, drop = F] - 0.15,srt=90,adj=0.5,labels=gsub("^Q", "", colnames(table)),xpd=T,font=1,cex=2, pos = 1)
mtext(paste(number, "Main", level, "in Each Sample"), side = 3, line = 1, cex = 2)
## legend
par(mar = c(5, 5, 2, 2));
plot(0, type = "n", xaxt = "n", yaxt = "n", bty ="n", xlab = "", ylab = "");
legend("top", pch = 15, col = rev(color(nrow(profile))), legend = rev(rownames(profile)), bty = "n", pt.cex = 2, ncol = 3);
#text(seq(from = spa+0.5*width - 0.1,length = ncol(table), by=spa + width),par("usr")[3, drop = F] - 0.06,srt=90,adj=0.5,labels=new_order,xpd=T,font=1,cex=0.9, pos = 1)
#mtext(title,side=3,line=1)
dev.off();
|
4772407d01b7c8572b4966d02c3783450528da5d | fd42672a6013783e5348bb313c3ae9c4f5d224f8 | /R/create_dataset.R | 629f23239b5cf737a73e464c1bb72a07220c8162 | [] | no_license | cran/openblender | 5d3d202cb7623210daec0647b74459cc9f687728 | 3d7a297710830f648b06347c9cfff73ac6c5f22a | refs/heads/master | 2020-07-18T05:25:17.689857 | 2020-06-19T11:00:02 | 2020-06-19T11:00:02 | 206,186,000 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,137 | r | create_dataset.R | #'@title Request to the API, depending on the action provided
#'@description Prepare the creation of a Dataset in 'OpenBlender' API. This function is not used by users.
#'@param json_parametros Request parameters that contains the dataset structure
#'@param url Url selected
#'@return Dataset's id just created, using \link{dameRespuestaLlamado}.
#'@keywords internal
create_dataset <- function(json_parametros, url) {
action <- "API_createDataset"
if ("dataframe" %in% attributes(json_parametros)$names) {
nom_obs <- "dataframe"
} else {
nom_obs <- "observations"
}
obj <- comprobarJSONaDF(json_parametros[nom_obs])
if (!obj$valido) {
return(obj$msj)
}
n_filas <- nrow(obj$df_nuevo)
tam_pedazo_ini <- 1000
insert_observations <- TRUE
json_particion <- json_parametros
if ("insert_observations" %in% attributes(json_parametros)$names) {
if (json_parametros$insert_observations == 1 || json_parametros$insert_observations == "on") {
insert_observations <- TRUE
} else {
insert_observations <- FALSE
}
}
if ("test_call" %in% attributes(json_parametros)$names && (json_parametros$test_call == 1 || json_parametros$test_call == "on")) {
test_call <- 1
} else {
test_call <- FALSE
}
if (test_call == 1) {
message("This is a TEST CALL, set \"test_call\"=\"off\" or remove to execute service.")
}
respuesta0 <- NULL
#Creación del dataset
if (!test_call && (n_filas > tam_pedazo_ini)) {
if (insert_observations) {
start <- Sys.time()
json_particion[nom_obs] <- toJSON(obj$df_nuevo[sample(nrow(obj$df_nuevo), tam_pedazo_ini), ], dataframe = "columns")
json_particion_molde <- json_particion
json_particion_molde$insert_observations <- 0
data <- list(action = action, json = json_particion_molde)
respuesta <- dameRespuestaLlamado(url, data)
if (!"id_dataset" %in% attributes(respuesta)$names) {
return(respuesta)
}
respuesta0 <- respuesta
json_particion$id_dataset <- respuesta$id_dataset
message(paste("Dataset created succesfully, id:", json_particion$id_dataset))
message("Starting upload..")
stop <- Sys.time()
segundos <- as.integer(ceiling(stop - start))
tam_pedazo <- as.integer(round((600 / segundos), digits = 0))
action <- "API_insertObservationsFromDataFrame"
rownames(obj$df_nuevo) <- 1:n_filas
for (i in seq(0, n_filas, by = tam_pedazo)) {
tryCatch({
if ((n_filas - i) < tam_pedazo) {
tam_pedazo <- (n_filas - i)
}
df_nuevo <- obj$df_nuevo[(i + 1) : (i + tam_pedazo), ]
json_particion[nom_obs] <- toJSON(df_nuevo, dataframe = "columns")
data <- list(action = action, json = json_particion)
respuesta <- dameRespuestaLlamado(url, data)
#Imprimir avance
avance <- round(((i + tam_pedazo) / n_filas) * 100, digits = 2)
if (avance > 100) {
message("100%")
message("Wrapping Up..")
} else {
message(paste(avance, "%"))
Sys.sleep(2)
}
}, error = function(e) {
message("Some observations might not have been uploaded.")
})
}
} else {
df_nuevo <- obj$df_nuevo[sample(nrow(obj$df_nuevo), tam_pedazo_ini), ]
rownames(df_nuevo) <- 0:(nrow(df_nuevo) - 1)
json_particion[nom_obs] <- toJSON(df_nuevo, dataframe = "columns")
data <- list(action = action, json = json_particion)
respuesta <- dameRespuestaLlamado(url, data)
return(respuesta)
}
} else {
if (n_filas > tam_pedazo_ini) {
tam_pedazo_ini <- tam_pedazo_ini
} else {
tam_pedazo_ini <- n_filas
}
df_nuevo <- obj$df_nuevo[sample(nrow(obj$df_nuevo), tam_pedazo_ini), ]
json_particion[nom_obs] <- toJSON(df_nuevo, dataframe = "columns")
data <- list(action = action, json = json_particion)
respuesta <- dameRespuestaLlamado(url, data)
return(respuesta)
}
return(respuesta0)
}
|
046761659111effd3e2edba03a636a19fa9c4568 | 0913ef989631d5fbb6461667563bb13c102d22c8 | /man/ss_section.Rd | 3ac665451817f2b34c63dfa86d2f331a57b28fe7 | [] | no_license | oucru-biostats/C306 | 57c8b501a106384f101d20c54bdcbc54d99c8bdf | 7d30d14b081ba64b32fc47ac985bc45ad2672f70 | refs/heads/master | 2022-05-01T09:33:44.355345 | 2022-04-28T11:42:23 | 2022-04-28T11:42:23 | 203,103,489 | 0 | 2 | null | 2019-11-07T05:46:23 | 2019-08-19T05:21:54 | R | UTF-8 | R | false | true | 450 | rd | ss_section.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ssformat.R
\name{ss_section}
\alias{ss_section}
\title{Designation of section title rows for custom sstable}
\usage{
ss_section(sstable, rows)
}
\arguments{
\item{sstable}{a data frame following sstable's grammar}
\item{rows}{a numeric vector}
}
\value{
a matrix of class ss_tbl
}
\description{
This function sets the designated rows as section title rows of an sstable
}
|
aaa75395c8f2c4ccd33af1358cdec854196623c3 | dfd5e44293393f83019bb905bb96cb1d6353903a | /master_data/raw_species_papers/599_Noe_and_Zedler_2001/599_Noe_and_Zelder_2001_diversity_calcs.R | df3e745863e7f458d725573754ad284e2ce07fda | [] | no_license | jdunic/local-marine-meta | efe61516e0492d0bfa9532e76b256b639aa92689 | b5ba23d901f8d3e785a35b60dbe93f40966d1434 | refs/heads/master | 2021-03-19T09:33:43.926716 | 2019-02-20T22:00:56 | 2019-02-20T22:00:56 | 43,651,540 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,329 | r | 599_Noe_and_Zelder_2001_diversity_calcs.R | library(tidyr)
library(dplyr)
library(vegan)
data <- read.csv('/Users/jillian/Desktop/Meta-analysis_papers/Papers to QC/599_Noe_and_Zelder_2001_Table1.csv', stringsAsFactors = FALSE, na.strings = '')
data <- select(data, -Species.classification)
data[is.na(data)] <- "9999 (9999)"
# Parse each "mean (sd)" column into separate numeric <col>_mean / <col>_sd
# columns, returning one two-column data frame per input column.
# (The first column of `data` is skipped -- it is not a "mean (sd)" column.)
# FIX: the callback previously declared `function(name = x)`, a default
# argument referencing an undefined `x`; it only worked because lapply always
# supplies `name`. The misleading default is removed.
# NOTE(review): tidyr::extract_() and dplyr::select_() are deprecated
# standard-evaluation verbs; kept here to avoid changing behavior.
mean_sd_df <- lapply(names(data[, -1]), FUN = function(name) {
  mean_col <- paste(name, 'mean', sep = '_')
  sd_col <- paste(name, 'sd', sep = '_')
  mean_sd <-
    data %>%
    extract_(col = name,
             into = c(mean_col, sd_col),
             regex = "(\\d*)\\ \\((\\d*)\\)", convert = TRUE) %>%
    select_(mean_col, sd_col)
  return(mean_sd)
})
# Bind the per-column parse results into one wide data frame and zero out the
# 9999 sentinel values that stood in for missing cells.
mean_sd_df <- as.data.frame(mean_sd_df)
mean_sd_df[mean_sd_df == 9999] <- 0
# Keep only the parsed means (drop the *_sd columns).
mean_df <- select(mean_sd_df, contains('mean'))
# correct for density - go back to the number of individuals they actually
# counted in their four 0.04 m^2 sub plots (4 x 0.04 m^2 = 0.16 m^2)
mean_df <- mean_df / (1 / 0.16)
# Diversity summaries, one value per column of mean_df (MARGIN = 2):
# abundance (column totals)
as.data.frame(colSums(mean_df))
# richness
as.data.frame(specnumber(mean_df, MARGIN = 2))
# shannon
as.data.frame(diversity(mean_df, index = "shannon", MARGIN = 2))
# simpson
as.data.frame(diversity(mean_df, index = "simpson", MARGIN = 2))
# Pielou (evenness = Shannon / log(richness))
as.data.frame(diversity(mean_df, index = "shannon", MARGIN = 2)/log(specnumber(mean_df, MARGIN = 2)))
a1e750027eae8c389748a2afd53025086a6eb931 | 2d6eb3d08d5cb8d5ae075dd2c1e113d0996c3009 | /man/gitlabr_0_7_renaming.Rd | 60f7cfb1acfbe8c7293e56f21640d47da763d675 | [] | no_license | jirkalewandowski/gitlabr | 4c040c1e320c2cfb852de5dad79bb18e6bbebd61 | c64979c71c8d2e2be4a00f031e560da7c085892f | refs/heads/master | 2021-01-11T15:58:19.068337 | 2019-03-14T19:08:05 | 2019-03-14T19:08:05 | 79,971,979 | 23 | 9 | null | 2019-06-05T03:34:39 | 2017-01-25T01:23:36 | R | UTF-8 | R | false | true | 377 | rd | gitlabr_0_7_renaming.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/legacy_headers.R
\docType{data}
\name{gitlabr_0_7_renaming}
\alias{gitlabr_0_7_renaming}
\title{renamings from gitlabr version 0.6.4 to 0.7}
\format{A data frame with 33 rows and 2 variables}
\description{
List of old and new function names. Used internally by
\code{\link{update_gitlabr_code}}
}
|
336197ff7bc7922cc2f7ba54a1951d76708d1714 | f6150b8fe6f9dc44be22cd470969afacb44efe51 | /figs/flavorsMS/mv_spam_coefs.r | 34df250f382e46de1baf94931ce005409a1e461a | [] | no_license | qdread/nasabio | 83e83a4d0e64fc427efa7452033eb434add9b6ee | 7c94ce512ae6349d84cb3573c15be2f815c5758d | refs/heads/master | 2021-01-20T02:02:53.514053 | 2019-12-28T15:22:53 | 2019-12-28T15:22:53 | 82,062,690 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 21,003 | r | mv_spam_coefs.r | # Fixed effect coefficient plots from spatial mixed models (multivariate version)
# QDR/NASABioxgeo/28 May 2018
### THESE ARE THE ACTUAL PUBLICATION FIGURES FOR THE "FLAVORS" MANUSCRIPT!
# Modified 24 Dec 2019: make figures legible in grayscale.
# Modified 20 Dec 2019: also output PDFs
# Modified 08 May 2019: add WAIC to summary stats table
# Modified 08 Jan 2019: change the blue color scheme and fix the mismatch in the spatial var color scheme.
# Modified 18 June: Add the new null models.
# Modified 15 November: Make a few visual changes to figs 5 and 6 for better readability.
# Load and combine data ---------------------------------------------------
# Model outputs (coefficients, predictions, fit statistics, WAIC, and k-fold
# RMSEs) exported by the multivariate spatial model fits, read from a local
# Dropbox folder.
fp <- '~/Dropbox/Q/projects/nasabiodiv/modelfits' # Local
model_coef <- read.csv(file.path(fp, 'multivariate_spatial_coef.csv'), stringsAsFactors = FALSE)
model_pred <- read.csv(file.path(fp, 'multivariate_spatial_pred.csv'), stringsAsFactors = FALSE)
model_rmse <- read.csv(file.path(fp, 'multivariate_spatial_rmse.csv'), stringsAsFactors = FALSE)
model_r2 <- read.csv(file.path(fp, 'multivariate_spatial_r2.csv'), stringsAsFactors = FALSE)
model_coef_var <- read.csv(file.path(fp, 'multivariate_spatial_coef_variation_corrected.csv'), stringsAsFactors = FALSE)
model_waic <- read.csv(file.path(fp, 'multivariate_spatial_waic.csv'), stringsAsFactors = FALSE)
kfold_rmse <- read.csv(file.path(fp, 'multivariate_kfold_rmse.csv'), stringsAsFactors = FALSE)
library(dplyr)
library(ggplot2)
library(reshape2)
library(purrr)
# Lookup tables mapping raw predictor/response column names to display labels.
prednames50 <- c('elevation_5k_tri_50_mean', 'bio1_5k_50_mean', 'geological_age_5k_50_diversity', 'soil_type_5k_50_diversity', 'bio12_5k_50_mean', 'dhi_gpp_5k_tri_50_mean')
geo_names <- c('elevation diversity','temperature mean','geol. age diversity','soil diversity','precip. mean','GPP diversity')
# Display order for predictors: climate means first, then diversity terms.
geo_names_order <- c('temperature mean', 'precip. mean', 'elevation diversity', 'GPP diversity', 'geol. age diversity', 'soil diversity')
bio_titles <- c('alpha TD', 'beta TD', 'gamma TD', 'alpha PD', 'beta PD', 'gamma PD', 'alpha FD', 'beta FD', 'gamma FD')
bio_names <- c("alpha_richness", "beta_td_sorensen_pa", "gamma_richness",
               "alpha_phy_pa", "beta_phy_pa", "gamma_phy_pa",
               "alpha_func_pa", "beta_func_pa", "gamma_func_pa")
# Two-color palette used for the bird/tree contrast (grayscale-legible).
twocolors <- c('black', 'gray60')
# Combine full-model and k-fold RMSEs.
# Include RMSE from each fold so we can see variability due to folds.
# Edited 02 May 2019: correct the data wrangling code because the output of the new k-fold is slightly different.
# k-fold columns are prefixed with 'kfold_' so they can sit beside the
# full-model columns; 'flavor' is the second word of the response label
# (TD/PD/FD), relabelled to taxonomic/phylogenetic/functional.
all_rmse <- kfold_rmse %>%
  rename_if(is.numeric, ~ paste0('kfold_', .x)) %>%
  right_join(model_rmse) %>%
  left_join(model_r2 %>% select(-fold) %>% rename(r2 = Estimate, r2_error = Est.Error, r2_q025 = Q2.5, r2_q975 = Q97.5)) %>%
  mutate(response = factor(bio_titles[match(response, bio_names)], levels = bio_titles),
         flavor = map_chr(strsplit(as.character(response), ' '), 2) %>%
           factor(levels = c('TD','PD','FD'), labels = c('taxonomic', 'phylogenetic', 'functional')))
# Reshape coefficient plot and relabel it
# (fixed effects only; wide format with one column per posterior statistic).
all_coef <- model_coef %>%
  filter(effect == 'fixed', !parameter %in% 'Intercept') %>%
  dcast(taxon + rv + model + response + parameter ~ stat) %>%
  mutate(predictor = factor(geo_names[match(parameter, prednames50)], levels = geo_names_order),
         response = factor(bio_titles[match(response, bio_names)], levels = bio_titles),
         flavor = map_chr(strsplit(as.character(response), ' '), 2) %>%
           factor(levels = c('TD','PD','FD'), labels = c('taxonomic', 'phylogenetic', 'functional')))
# Relabel data frame of spatial variability metrics
# (its response names lack underscores, hence the gsub on bio_names).
model_coef_var <- model_coef_var %>%
  mutate(predictor = factor(geo_names[match(parameter, prednames50)], levels = geo_names_order),
         response = factor(bio_titles[match(response, gsub('_', '', bio_names))], levels = bio_titles),
         flavor = map_chr(strsplit(as.character(response), ' '), 2) %>%
           factor(levels = c('TD','PD','FD'), labels = c('taxonomic', 'phylogenetic', 'functional'))) %>%
  rename(coef_var = Estimate)
# Coefficient plots -------------------------------------------------
fpfig <- '~/google_drive/NASABiodiversityWG/Figures/multivariate_maps_figs/07may2019' # updated file path to put new figs in a separate location
# Add some color to indicate which ones' credible intervals are not zero
# Also shade the climate mean region with a gray rectangle
# (nonzero = 95% credible interval excludes zero)
coefdat_bbs <- all_coef %>%
  filter(taxon == 'bbs') %>%
  mutate(nonzero = Q2.5 > 0 | Q97.5 < 0)
# Birds: full-model coefficients, faceted by radius (rows) x flavor (cols).
coefplot_bbs <- ggplot(coefdat_bbs %>% filter(model=='full')) +
  geom_rect(xmin=0, xmax=2.5, ymin=-Inf, ymax=Inf, fill = 'gray90') +
  geom_hline(yintercept = 0, linetype = 'dotted', color = 'slateblue', size = 1) +
  geom_errorbar(aes(x = predictor, ymin = Q2.5, ymax = Q97.5, color = nonzero), width = 0) +
  geom_point(aes(x = predictor, y = Estimate, color = nonzero)) +
  facet_grid(rv ~ flavor) +
  scale_y_continuous(name = 'coefficient estimate', limits = c(-0.73, 0.73), expand = c(0,0)) +
  scale_color_manual(values = c('black', 'red')) +
  theme_bw() +
  theme(strip.background = element_rect(fill=NA),
        panel.grid = element_blank(),
        axis.text.x = element_text(angle = 45, hjust = 1),
        legend.position = 'none')
coefdat_fia <- all_coef %>%
  filter(taxon == 'fia') %>%
  mutate(nonzero = Q2.5 > 0 | Q97.5 < 0)
# Trees: same layout as the bird plot above.
coefplot_fia <- ggplot(coefdat_fia %>% filter(model=='full')) +
  geom_rect(xmin=0, xmax=2.5, ymin=-Inf, ymax=Inf, fill = 'gray90') +
  geom_hline(yintercept = 0, linetype = 'dotted', color = 'slateblue', size = 1) +
  geom_errorbar(aes(x = predictor, ymin = Q2.5, ymax = Q97.5, color = nonzero), width = 0) +
  geom_point(aes(x = predictor, y = Estimate, color = nonzero)) +
  facet_grid(rv ~ flavor) +
  scale_color_manual(values = c('black', 'red')) +
  scale_y_continuous(name = 'coefficient estimate', limits = c(-0.73, 0.73), expand = c(0,0)) +
  theme_bw() +
  theme(strip.background = element_rect(fill=NA),
        panel.grid = element_blank(),
        axis.text.x = element_text(angle = 45, hjust = 1),
        legend.position = 'none')
ggsave(file.path(fpfig, 'BBS_multivariate_coef.png'), coefplot_bbs, height = 8, width = 8, dpi = 300)
ggsave(file.path(fpfig, 'FIA_multivariate_coef.png'), coefplot_fia, height = 8, width = 8, dpi = 300)
# Edit 6 June: sideways coefficient plot.
coef_bbs_sideways <- coefplot_bbs +
  coord_flip() +
  theme(axis.text.x = element_text(angle=0, hjust=0.5))
coef_fia_sideways <- coefplot_fia +
  coord_flip() +
  theme(axis.text.x = element_text(angle=0, hjust=0.5))
ggsave(file.path(fpfig, 'BBS_multivariate_coef_sideways.png'), coef_bbs_sideways, height = 8, width = 8, dpi = 300)
ggsave(file.path(fpfig, 'FIA_multivariate_coef_sideways.png'), coef_fia_sideways, height = 8, width = 8, dpi = 300)
# Edit 25 July 2019: change legend position, add scale shape, resize so things are bigger
# Edit 18 June: plot of coefficients with both, to compare.
# THIS IS FIG 5 IN THE MANUSCRIPT.
# Birds and trees on one plot, dodged; shape/color distinguish taxa and
# linetype/size/fill flag coefficients whose CI excludes zero.
coefdat_both <- all_coef %>%
  filter(model == 'full') %>%
  mutate(nonzero = Q2.5 > 0 | Q97.5 < 0)
pd <- position_dodge(width = 0.12)
coefplot_both <- ggplot(coefdat_both %>% mutate(taxon = factor(taxon, labels = c('birds','trees')))) +
  geom_rect(xmin=0, xmax=2.5, ymin=-Inf, ymax=Inf, fill = 'gray95') +
  geom_hline(yintercept = 0, linetype = 'dotted', color = 'slateblue', size = 1) +
  geom_errorbar(aes(x = predictor, ymin = Q2.5, ymax = Q97.5, color = taxon, group = taxon, linetype = nonzero), width = 0, position=pd) +
  geom_point(aes(x = predictor, y = Estimate, size = nonzero, color = taxon, fill = nonzero, group = taxon, shape = taxon), position=pd) +
  facet_grid(rv ~ flavor) +
  scale_color_manual(values = twocolors) +
  scale_fill_manual(values = c('black', 'indianred1')) +
  scale_size_manual(values = c(1.5, 2)) +
  scale_shape_manual(values = c(24, 21)) +
  scale_linetype_manual(values = c(3, 1)) +
  scale_y_continuous(name = 'coefficient estimate', limits = c(-0.73, 0.73), expand = c(0,0)) +
  theme_bw() +
  theme(strip.background = element_rect(fill=NA),
        panel.grid = element_blank(),
        axis.text.x = element_text(angle = 45, hjust = 1),
        legend.position = 'none')
# Flipped version with an inset legend (only the taxon shape legend kept).
coef_both_sideways <- coefplot_both +
  coord_flip() +
  theme(axis.text.x = element_text(angle=0, hjust=0.5),
        legend.background = element_rect(color = 'black'),
        legend.position = c(0.92,0.93)) +
  guides(shape = guide_legend(override.aes = list(fill = twocolors)), fill = 'none', size = 'none', linetype = 'none')
ggsave('~/google_drive/NASABiodiversityWG/FlavorsOfDiversityPaper/figures_pdf/fig5.pdf', coef_both_sideways, height = 8, width = 8)
ggsave(file.path(fpfig, 'both_multivariate_coef_sideways.png'), coef_both_sideways, height = 8, width = 8, dpi = 300)
# Plot of spatial variability ---------------------------------------------
# Bar plot of the spatial variability of each predictor-diversity
# relationship, birds vs trees dodged side by side.
# Note: `pd` is reassigned here and reused by later plot sections.
pd = position_dodge(width = 0.5)
coefvar_plot <- ggplot(model_coef_var %>%
                         filter(!is.na(predictor)) %>%
                         mutate(taxon = factor(taxon,levels=c('fia','bbs'),labels=c('trees','birds')),
                                response = map_chr(strsplit(as.character(response), ' '), 1))) +
  geom_rect(xmin=0, xmax=2.5, ymin=-Inf, ymax=Inf, fill = 'gray90') +
  geom_col(aes(x = predictor, y = coef_var, fill = taxon, group = taxon), position = pd, width = 0.5) +
  geom_errorbar(aes(x = predictor, ymin = q025, ymax = q975, group = taxon), position = pd, width = 0.15, color = 'gray20') +
  facet_grid(response ~ flavor) +
  scale_fill_manual(values = rev(twocolors)) +
  guides(fill = guide_legend(reverse = TRUE)) +
  scale_y_continuous(name = 'spatial variability of relationship', limits = c(0, 1.16), expand = c(0,0)) +
  theme_bw() +
  theme(strip.background = element_rect(fill = NA),
        panel.grid = element_blank(),
        axis.text.x = element_text(angle = 45, hjust = 1),
        legend.position = c(0.92, 0.92))
coefvar_sideways <- coefvar_plot +
  coord_flip() +
  theme(axis.text.x = element_text(angle=0, hjust=0.5),
        legend.background = element_rect(color = 'black'))
ggsave(file.path(fpfig, 'both_multivariate_coefvar_sideways.png'), coefvar_sideways, height = 8, width = 8, dpi = 300)
# Plot showing RMSEs --------------------------------------------------------
# Fitted (black) vs k-fold CV (red) relative RMSE for the full model, nudged
# apart horizontally; R-squared printed along the bottom of each panel.
pn1 <- position_nudge(x = -0.06, y = 0)
pn2 <- position_nudge(x = 0.06, y = 0)
# rmseplot for both
# Comparison of RMSE and R-squared among models.
rmseplot_both <- all_rmse %>%
  filter(model == 'full') %>%
  ggplot(aes(x = response)) +
  facet_grid(. ~ taxon, labeller = labeller(taxon = c(bbs = 'birds', fia = 'trees'))) +
  geom_errorbar(aes(ymin = RMSE_q025_relative, ymax = RMSE_q975_relative), width = 0, position = pn1) +
  geom_errorbar(aes(ymin = kfold_RMSE_q025_relative, ymax = kfold_RMSE_q975_relative), width = 0, color = 'red', position = pn2) +
  geom_point(aes(y = RMSE_mean_relative), position = pn1) +
  geom_point(aes(y = kfold_RMSE_mean_relative), color = 'red', position = pn2) +
  geom_text(aes(label = round(r2, 2)), y = -Inf, vjust = -0.2, fontface = 3, size = 3) +
  theme_bw() +
  scale_y_continuous(limits = c(0, 0.65), expand = c(0,0), name = 'relative root mean squared error') +
  theme(axis.text.x = element_text(angle = 45, hjust = 1),
        strip.background = element_rect(fill = NA))
# Save the plots
ggsave(file.path(fpfig, 'both_performance_multivariate.png'), rmseplot_both, height = 4, width = 7, dpi = 300)
# Edit 18 June: plot comparing RMSEs and R-squared for the 3 model types
# Edit 08 Aug: add geodiversity-only to this
# Relabel the four model variants with descriptive names. Note this
# overwrites all_rmse$model in place, so later sections see the new labels.
all_rmse <- all_rmse %>%
  mutate(model = factor(model, levels=c('space','climate','geo','full'), labels=c('space only', 'space+climate','space+geodiversity','space+climate+geodiversity')))
pd <- position_dodge(width = 0.05)
# Fitted relative RMSE by model type, birds only.
rmseplot_bymodel_bird <- all_rmse %>%
  filter(taxon == 'bbs') %>%
  ggplot(aes(x = rv, color = model, group = model)) +
  facet_grid(. ~ flavor, switch = 'x') +
  geom_errorbar(aes(ymin = RMSE_q025_relative, ymax = RMSE_q975_relative), width = 0, position = pd) +
  geom_point(aes(y = RMSE_mean_relative), position = pd) +
  theme_bw() +
  scale_y_continuous(limits = c(0, 0.32), expand = c(0,0), name = 'relative root mean squared error') +
  scale_x_discrete(name = 'response') +
  ggtitle('birds') +
  theme(strip.background = element_blank(),
        strip.placement = 'outside',
        panel.spacing = unit(0, 'lines'),
        legend.position = 'none')
# Fitted relative RMSE by model type, trees only (carries the shared legend).
rmseplot_bymodel_tree <- all_rmse %>%
  filter(taxon == 'fia') %>%
  ggplot(aes(x = rv, color = model, group = model)) +
  facet_grid(. ~ flavor, switch = 'x') +
  geom_errorbar(aes(ymin = RMSE_q025_relative, ymax = RMSE_q975_relative), width = 0, position = pd) +
  geom_point(aes(y = RMSE_mean_relative), position = pd) +
  theme_bw() +
  scale_y_continuous(limits = c(0, 0.32), expand = c(0,0), name = 'relative root mean squared error') +
  scale_x_discrete(name = 'response') +
  ggtitle('trees') +
  theme(strip.background = element_blank(),
        strip.placement = 'outside',
        panel.spacing = unit(0, 'lines'),
        legend.position = c(0.5, 0.2),
        legend.background = element_rect(color = 'black'),
        legend.text = element_text(size = 6.5))
# Same pair of plots but using the k-fold cross-validation RMSE columns.
kfold_rmseplot_bymodel_bird <- all_rmse %>%
  filter(taxon == 'bbs') %>%
  ggplot(aes(x = rv, color = model, group = model)) +
  facet_grid(. ~ flavor, switch = 'x') +
  geom_errorbar(aes(ymin = kfold_RMSE_q025_relative, ymax = kfold_RMSE_q975_relative), width = 0, position = pd) +
  geom_point(aes(y = kfold_RMSE_mean_relative), position = pd) +
  theme_bw() +
  scale_y_continuous(limits = c(0, 0.7), expand = c(0,0), name = 'CV relative root mean squared error') +
  scale_x_discrete(name = 'response') +
  ggtitle('birds') +
  theme(strip.background = element_blank(),
        strip.placement = 'outside',
        panel.spacing = unit(0, 'lines'),
        legend.position = 'none')
kfold_rmseplot_bymodel_tree <- all_rmse %>%
  filter(taxon == 'fia') %>%
  ggplot(aes(x = rv, color = model, group = model)) +
  facet_grid(. ~ flavor, switch = 'x') +
  geom_errorbar(aes(ymin = kfold_RMSE_q025_relative, ymax = kfold_RMSE_q975_relative), width = 0, position = pd) +
  geom_point(aes(y = kfold_RMSE_mean_relative), position = pd) +
  theme_bw() +
  scale_y_continuous(limits = c(0, 0.7), expand = c(0,0), name = 'CV relative root mean squared error') +
  scale_x_discrete(name = 'response') +
  ggtitle('trees') +
  theme(strip.background = element_blank(),
        strip.placement = 'outside',
        panel.spacing = unit(0, 'lines'),
        legend.position = c(0.5, 0.2),
        legend.background = element_rect(color = 'black'),
        legend.text = element_text(size = 6.5))
# RMSE plot as 2-way facet
# Update 20 May 2019: add R-squared.
# This is fig 6 in the REVISED manuscript.
# Edit 25 July 2019: include higher jitter and make points bigger
# Both taxa in one figure: taxon on facet rows, flavor on columns.
pd <- position_dodge(width = 0.15)
rmseplot_bymodel_2wayfacet <- all_rmse %>%
  ggplot(aes(x = rv, color = model, group = model)) +
  facet_grid(taxon ~ flavor, switch = 'x', labeller = labeller(taxon = c(bbs = 'birds', fia = 'trees'))) +
  geom_errorbar(aes(ymin = RMSE_q025_relative, ymax = RMSE_q975_relative), width = 0, position = pd) +
  geom_point(aes(y = RMSE_mean_relative), position = pd, size = 2) +
  geom_text(aes(y = -Inf, label = round(r2, 2)), data = all_rmse %>% filter(model == 'space+climate+geodiversity'), color = 'black', fontface = 'italic', vjust = -0.2) +
  theme_bw() +
  scale_y_continuous(limits = c(0, 0.315), name = 'relative root mean squared error', expand = c(0,0)) +
  scale_x_discrete(name = 'response') +
  theme(strip.background = element_blank(),
        strip.placement = 'outside',
        panel.spacing = unit(0, 'lines'),
        legend.position = 'bottom',
        legend.background = element_rect(color = 'black'),
        legend.text = element_text(size = 8))
# k-fold CV counterpart of the 2-way facet plot (distinct point shapes).
kfold_rmseplot_bymodel_2wayfacet <- all_rmse %>%
  ggplot(aes(x = rv, color = model, group = model, shape = model)) +
  facet_grid(taxon ~ flavor, switch = 'x', labeller = labeller(taxon = c(bbs = 'birds', fia = 'trees'))) +
  geom_errorbar(aes(ymin = kfold_RMSE_q025_relative, ymax = kfold_RMSE_q975_relative), width = 0, position = pd) +
  geom_point(aes(y = kfold_RMSE_mean_relative), position = pd, size = 2) +
  theme_bw() +
  scale_shape_manual(values = 21:24) +
  scale_y_continuous(limits = c(0, 0.7), name = 'CV relative root mean squared error', expand = c(0,0)) +
  scale_x_discrete(name = 'response') +
  theme(strip.background = element_blank(),
        strip.placement = 'outside',
        panel.spacing = unit(0, 'lines'),
        legend.position = 'bottom',
        legend.background = element_rect(color = 'black'),
        legend.text = element_text(size = 8),
        panel.grid.major.x = element_blank())
# Just compare the predictive power of the full model, putting birds and trees on the same one.
# THIS IS FIG 6 IN THE MANUSCRIPT (or it was in the old version)
# Note: uses the `pd` dodge object set in the 2-way facet section above.
rmseplot_taxacolor <- all_rmse %>%
  filter(model == 'space+climate+geodiversity') %>%
  mutate(taxon = factor(taxon,labels=c('birds','trees'))) %>%
  ggplot(aes(x = rv)) +
  facet_grid(. ~ flavor, switch = 'x') +
  geom_errorbar(aes(ymin = RMSE_q025_relative, ymax = RMSE_q975_relative, color = taxon, group = taxon), width = 0, position = pd) +
  geom_point(aes(y = RMSE_mean_relative, color = taxon, group = taxon), position = pd, size = 2) +
  theme_bw() +
  scale_color_manual(values = twocolors) +
  scale_y_continuous(limits = c(0, 0.31), expand = c(0,0), name = 'relative root mean squared error') +
  scale_x_discrete(name = 'response') +
  theme(strip.background = element_blank(),
        strip.placement = 'outside',
        panel.spacing = unit(0, 'lines'),
        legend.position = c(0.91, 0.8),
        legend.background = element_rect(color = 'black'))
# Save plots
# Two-panel (bird over tree) composites via gridExtra, then single figures.
library(gridExtra)
png(file.path(fpfig, 'both_fittedrmse_allmodels.png'), height = 8, width = 7, res = 400, units = 'in')
grid.arrange(rmseplot_bymodel_bird, rmseplot_bymodel_tree, nrow = 2)
dev.off()
png(file.path(fpfig, 'both_lolormse_allmodels.png'), height = 8, width = 7, res = 400, units = 'in')
grid.arrange(kfold_rmseplot_bymodel_bird, kfold_rmseplot_bymodel_tree, nrow = 2)
dev.off()
ggsave(file.path(fpfig, 'both_fittedrmse_fullonly.png'), rmseplot_taxacolor, height = 4, width = 7, dpi = 400)
ggsave(file.path(fpfig, 'both_fittedrmse_allmodels_2wayfacet.png'), rmseplot_bymodel_2wayfacet, height = 6, width = 7, dpi = 400)
ggsave(file.path(fpfig, 'both_lolormse_allmodels_2wayfacet.png'), kfold_rmseplot_bymodel_2wayfacet, height = 6, width = 7, dpi = 400)
ggsave('~/google_drive/NASABiodiversityWG/FlavorsOfDiversityPaper/figures_pdf/fig6.pdf', kfold_rmseplot_bymodel_2wayfacet, height = 6, width = 7)
# Table of fit stats ------------------------------------------------------
# By taxon, diversity level, flavor, model.
# Include RMSE, kfold RMSE, and R2 (with CIs)
# Also do a version with relative RMSEs to see if it's any better
# Each statistic is formatted as "mean [lower,upper]" for the supplement.
fit_table <- all_rmse %>%
  mutate(taxon = factor(taxon, labels = c('birds','trees')),
         RMSE = paste0(round(RMSE_mean,2), ' [', round(RMSE_q025,2), ',', round(RMSE_q975,2), ']'),
         kfold_RMSE = paste0(round(kfold_RMSE_mean,2), ' [', round(kfold_RMSE_q025,2), ',', round(kfold_RMSE_q975,2), ']'),
         Rsquared = paste0(round(r2,2), ' [', round(r2_q025,2), ',', round(r2_q975,2), ']')) %>%
  select(taxon, rv, flavor, model, RMSE, kfold_RMSE, Rsquared) %>%
  arrange(taxon, rv, flavor, model)
write.csv(fit_table, file = '~/google_drive/NASABiodiversityWG/FlavorsOfDiversityPaper/supptable_fitstats.csv', row.names = FALSE)
# Same table using the *_relative RMSE columns instead of the raw ones.
fit_table_relative <- all_rmse %>%
  mutate(taxon = factor(taxon, labels = c('birds','trees')),
         RMSE = paste0(round(RMSE_mean_relative,2), ' [', round(RMSE_q025_relative,2), ',', round(RMSE_q975_relative,2), ']'),
         kfold_RMSE = paste0(round(kfold_RMSE_mean_relative,2), ' [', round(kfold_RMSE_q025_relative,2), ',', round(kfold_RMSE_q975_relative,2), ']'),
         Rsquared = paste0(round(r2,2), ' [', round(r2_q025,2), ',', round(r2_q975,2), ']')) %>%
  select(taxon, rv, flavor, model, RMSE, kfold_RMSE, Rsquared) %>%
  arrange(taxon, rv, flavor, model)
write.csv(fit_table_relative, file = '~/google_drive/NASABiodiversityWG/FlavorsOfDiversityPaper/supptable_fitstats_withrelativeRMSEs.csv', row.names = FALSE)
# WAIC table added 08 May 2019
waic_table <- model_waic %>%
  mutate(model = factor(model, levels=c('space','climate','geo','full'), labels=c('space only', 'space+climate','space+geodiversity','space+climate+geodiversity')),
         taxon = factor(taxon, labels = c('birds', 'trees')),
         WAIC = round(WAIC, 1),
         WAIC_SE = round(WAIC_SE, 1)) %>%
  select(taxon, rv, model, WAIC, WAIC_SE) %>%
  arrange(taxon, rv, model)
write.csv(waic_table, file = '~/google_drive/NASABiodiversityWG/FlavorsOfDiversityPaper/supptable_WAICs.csv', row.names = FALSE)
|
790273e99b84b64e4d15c88b4dbb1d1539574d89 | cef3b5e2588a7377281a8f627a552350059ca68b | /paws/man/connect_describe_quick_connect.Rd | 078ea61171ca3da3b6f4e6168619e3acc9654265 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | sanchezvivi/paws | b1dc786a9229e0105f0f128d5516c46673cb1cb5 | 2f5d3f15bf991dcaa6a4870ed314eb7c4b096d05 | refs/heads/main | 2023-02-16T11:18:31.772786 | 2021-01-17T23:50:41 | 2021-01-17T23:50:41 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 761 | rd | connect_describe_quick_connect.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/connect_operations.R
\name{connect_describe_quick_connect}
\alias{connect_describe_quick_connect}
\title{This API is in preview release for Amazon Connect and is subject to
change}
\usage{
connect_describe_quick_connect(InstanceId, QuickConnectId)
}
\arguments{
\item{InstanceId}{[required] The identifier of the Amazon Connect instance.}
\item{QuickConnectId}{[required] The identifier for the quick connect.}
}
\description{
This API is in preview release for Amazon Connect and is subject to
change.
Describes the quick connect.
}
\section{Request syntax}{
\preformatted{svc$describe_quick_connect(
InstanceId = "string",
QuickConnectId = "string"
)
}
}
\keyword{internal}
|
521309f741a22f0d9ef892d16957f2bbf4c4aa42 | e6e19f449aee70d3dbb56ac405ba657cb602cc1e | /_build/jupyter_execute/RL_implementacion.r | b7bfce989164f5b548935accec850530dd94f726 | [] | no_license | david-dlta/libroBasicosML | ec2217eb94c7057e28ed97dc65a34bd219fab786 | 89bca2dd2cbb5601c0a4a3cfb973e62b3e0c05e7 | refs/heads/master | 2023-01-20T00:48:23.018369 | 2020-11-19T19:03:34 | 2020-11-19T19:03:34 | 309,460,050 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 91 | r | RL_implementacion.r | #Paquetes necesarios
# Preview the first ten rows of the built-in mtcars data set.
head(mtcars, n = 10)

# Placeholder for a linear-regression fitting routine; not yet implemented,
# so it currently returns NULL.
RegresionLinealFit <- function() {
  NULL
}
|
bfa6279e3074c73b27431980a9393e39b6447c52 | 57a791c35698e39313c97598d7d528843a0b0f1f | /02-event-studies.R | 1bde0f89b6bb6fb35b4eb8871ed18d731c044825 | [] | no_license | arnsbarger/opioid-mine-proj | 87d290723c551c382c36ba87ef80e2f4c1278cf3 | b4385c7b12606243d07a4987ee6df7555cd83ccf | refs/heads/master | 2023-08-04T15:47:32.994278 | 2021-09-17T17:22:25 | 2021-09-17T17:22:25 | 235,850,624 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 257 | r | 02-event-studies.R | library(eventstudies)
# https://r-forge.r-project.org/scm/viewvc.php/*checkout*/pkg/vignettes/eventstudies.Rnw?revision=339&root=eventstudies
# have a clean data script and source it here...
# for now, I'm picking up at the end of 01-randomness-tables.R
|
782ec956de61d2289761169944eb0bef0ea24fd6 | 4abb96d54155309109db47b9b39e183484999864 | /runStanModel.R | 668dbf2ea88fbfa4c571d07ea2fa0d122e1cfae2 | [] | no_license | diogro/arabidopsis_gmatrix | 210926d8b8a69b54b2e56dc33b883e6544e7b630 | c2856b82581694fe55bdee8886cf2ead94bc43d5 | refs/heads/master | 2021-01-23T22:53:27.578931 | 2020-10-22T13:35:44 | 2020-10-22T13:35:44 | 20,691,702 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,338 | r | runStanModel.R | library(ggplot2)
library(reshape2)
library(plyr)
library(dplyr)
library(lme4)
library(rstan)
library(gridExtra)
library(gtools)
library(glmer2stan)
library(MCMCglmm)
# Load the raw phenotype data (semicolon-delimited) and keep only the columns
# used downstream, renamed to short lower-case names.
raw_arabi_data <- read.csv2("./raw_data.csv")
arabi_data <- select(raw_arabi_data, ID, RIL, Block, Partner, HEIGHT, WEIGHT, SILIQUEN, NODEN, BOLT3)
names(arabi_data) <- c("ID", "RIL", "block", "partner", "height", "weight", "silique", "branch", "flower")
# Treat missing flowering values as zero, then drop incomplete rows and rows
# with zero flowering or non-positive height.
arabi_data$flower[is.na(arabi_data$flower)] <- 0
arabi_data = arabi_data[complete.cases(arabi_data),]
arabi_data = arabi_data[arabi_data$flower > 0,]
arabi_data = arabi_data[arabi_data$height > 0,]
# Alternative weight transformations tried previously (kept for reference):
#arabi_data$weight <- scale(log(arabi_data$weight))
#arabi_data$weight <- scale(sqrt(arabi_data$weight))
arabi_data$weight <- sqrt(arabi_data$weight)
# Log-transform and standardize nonzero silique counts; zeros stay zero.
mask_0 = arabi_data$silique == 0
arabi_data$silique[!mask_0] <- scale(log(arabi_data$silique[!mask_0]))
# Quick exploratory checks of the transformed data.
plot(silique~weight, arabi_data)
plot(silique~height, arabi_data)
table(arabi_data$flower, arabi_data$partner)
# Long format for faceted histograms of every trait by partner treatment.
m_arabi_data = melt(arabi_data, id.vars = c('partner', 'block', 'ID', 'RIL'))
ggplot(m_arabi_data, aes(x = value, color = partner)) +
  geom_histogram() + theme_classic() + theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
  facet_wrap(~variable, ncol = 5, scale = "free")
###################
## Silique
###################
# Assemble the data list for the silique Stan model. Note: the block index is
# passed under the name 'bloob' -- this must match the data declaration in
# silique.stan (TODO confirm against the .stan file).
N = dim(arabi_data)[1]
silique = arabi_data$silique
partnerNONE = as.integer(as.factor(arabi_data$partner)) - 1
RIL = as.integer(as.factor(arabi_data$RIL))
block = as.integer(as.factor(arabi_data$block))
N_RIL = length(unique(arabi_data$RIL))
N_block = length(unique(arabi_data$block))
silique_data<- list(N = N,
                    silique = silique,
                    partnerNONE = partnerNONE,
                    RIL = RIL,
                    bloob = block,
                    N_RIL = N_RIL,
                    N_block = N_block)
silique_stan_model = stan(file = './silique.stan', data = silique_data, chain=1)
sm = extract(silique_stan_model, permuted = TRUE)
# Posterior-predictive simulation: for every posterior draw, rebuild the
# linear predictor (RIL intercept + RIL-by-partner offset + block effect) and
# simulate one silique value per observation.
# NOTE(review): this reads sm$vary_block while the weight section reads
# wm$vary_bloob -- verify the varying-effect name in each .stan file.
replicates = dim(sm$vary_RIL)[1]
silique_sim = array(0, c(replicates, N))
for(i in 1:replicates){
  vary <- sm$vary_RIL[i, RIL,1] +
    sm$vary_RIL[i, RIL,2] * partnerNONE +
    sm$vary_block[i, block]
  glm <- vary + sm$Intercept[i]
  for ( j in 1:N )
    silique_sim[i,j] = rnorm(1, glm[j], sm$sigma[i])
}
# Posterior-predictive checks: simulated min/mean/max/sd vs observed (red).
par(mfrow=c(2, 3))
hist((apply(silique_sim, 1, min))) ; abline(v = min(((arabi_data$silique))), col = "red")
hist((apply(silique_sim, 1, mean))); abline(v = mean(((arabi_data$silique))), col = "red")
hist((apply(silique_sim, 1, max))) ; abline(v = max(((arabi_data$silique))), col = "red")
hist((apply(silique_sim, 1, sd))) ; abline(v = sd(((arabi_data$silique))), col = "red")
hist(silique_sim[1,]); hist(arabi_data$silique)
extractHerit = function(x) diag(cov(cbind(x[,1], x[,1]+x[,2])))
# Per-treatment heritability from the posterior: genetic (RIL) variances
# scaled by the residual term.
# NOTE(review): this divides variances by sm$sigma (a standard deviation),
# whereas the lmer-based h2 below uses the residual variance sc^2 -- confirm
# which scale is intended.
herit_silique = t(apply(sm$vary_RIL, 1, extractHerit)/rbind(sm$sigma, sm$sigma))
dimnames(herit_silique) = list(NULL, c("L", "NONE"))
colMeans(herit_silique)
boxplot(herit_silique)
# Frequentist cross-check: lmer with a separate RIL variance per partner.
silique_model = lmer(silique ~ 1 + (0 + partner|RIL) + (1|block),
                     data = arabi_data)
summary(silique_model)
# h2 = RIL variance / (RIL + block + residual variance), per treatment.
varRIL = diag(VarCorr(silique_model)$RIL)
varRep = rep(VarCorr(silique_model)$block[1], 2)
varRes = rep(attributes(VarCorr(silique_model))$sc^2, 2)
(h2 = varRIL/(varRIL + varRep + varRes))
###################
## Weight
###################
# Generate (but do not sample) the Stan model code for weight via glmer2stan,
# then write it to disk so it can be run with rstan below.
the_formula <- list(weight ~ 1 + (partner|RIL) + (1|block))
weight_model = glmer2stan(the_formula, data=arabi_data,
                          family="gaussian",
                          sample = FALSE, calcDIC = FALSE)
write(weight_model$model, file = "weight.stan")
# Data list for weight.stan; the block index is passed as 'bloob' as in the
# other model sections.
N = dim(arabi_data)[1]
weight = arabi_data$weight
partnerNONE = as.integer(as.factor(arabi_data$partner)) - 1
RIL = as.integer(as.factor(arabi_data$RIL))
block = as.integer(as.factor(arabi_data$block))
N_RIL = length(unique(arabi_data$RIL))
N_block = length(unique(arabi_data$block))
weight_data <- list(N = N,
                    weight = weight,
                    partnerNONE = partnerNONE,
                    RIL = RIL,
                    bloob = block,
                    N_RIL = N_RIL,
                    N_block = N_block)
weight_stan_model = stan(file = './weight.stan', data = weight_data, chain=1)
wm = extract(weight_stan_model, permuted = TRUE)
# Posterior-predictive simulation (same scheme as the silique section).
# NOTE(review): the block effect is read here as wm$vary_bloob, but the
# silique/height sections read vary_block -- verify against each .stan file.
replicates = dim(wm$vary_RIL)[1]
weight_sim = array(0, c(replicates, N))
for(i in 1:replicates){
  vary <- wm$vary_RIL[i, RIL,1] +
    wm$vary_RIL[i, RIL,2] * partnerNONE +
    wm$vary_bloob[i, block]
  glm <- vary + wm$Intercept[i]
  for ( j in 1:N )
    weight_sim[i,j] = rnorm(1, glm[j], wm$sigma[i])
}
# Posterior-predictive checks: simulated min/mean/max/sd vs observed (red).
par(mfrow=c(2, 3))
hist((apply(weight_sim, 1, min))) ; abline(v = min(((arabi_data$weight))), col = "red")
hist((apply(weight_sim, 1, mean))); abline(v = mean(((arabi_data$weight))), col = "red")
hist((apply(weight_sim, 1, max))) ; abline(v = max(((arabi_data$weight))), col = "red")
hist((apply(weight_sim, 1, sd))) ; abline(v = sd(((arabi_data$weight))), col = "red")
hist(weight_sim[1,]); hist(arabi_data$weight)
# Per-draw genetic variances for the (L, L + NONE offset) treatment effects;
# identical redefinition of the helper used in the silique section.
extractHerit = function(x) diag(cov(cbind(x[,1], x[,1]+x[,2])))
herit_weight = t(apply(wm$vary_RIL, 1, extractHerit)/rbind(wm$sigma, wm$sigma))
dimnames(herit_weight) = list(NULL, c("L", "NONE"))
colMeans(herit_weight)
boxplot(herit_weight)
# Frequentist cross-check for weight (ML fit, NA rows dropped).
weight_model = lmer(weight ~ 1 + (0 + partner|RIL) + (1|block), data = arabi_data, REML = FALSE, na.action = 'na.omit')
summary(weight_model)
# h2 = RIL variance / (RIL + block + residual variance), per treatment.
varRIL = diag(VarCorr(weight_model)$RIL)
varRep = rep(VarCorr(weight_model)$block[1], 2)
varRes = rep(attributes(VarCorr(weight_model))$sc^2, 2)
(h2 = varRIL/(varRIL + varRep + varRes))
###################
## Height
###################
# Generate (but do not sample) the Stan model code for height via glmer2stan
# and write it to disk for rstan.
the_formula <- list(height ~ 1 + (partner|RIL) + (1|block))
height_model = glmer2stan(the_formula, data=arabi_data,
                          family="gaussian",
                          sample = FALSE, calcDIC = FALSE)
write(height_model$model, file = "height.stan")
# Data list for height.stan; the block index is passed as 'bloob' as in the
# other model sections.
N = dim(arabi_data)[1]
# FIX: arabi_data$height is an atomic vector here (height is never scale()d
# in this script), so the previous `arabi_data$height[,1]` would fail with
# "incorrect number of dimensions". Use the column directly, matching the
# silique and weight sections.
height = arabi_data$height
partnerNONE = as.integer(as.factor(arabi_data$partner)) - 1
RIL = as.integer(as.factor(arabi_data$RIL))
block = as.integer(as.factor(arabi_data$block))
N_RIL = length(unique(arabi_data$RIL))
N_block = length(unique(arabi_data$block))
height_data <- list(N = N,
                    height = height,
                    partnerNONE = partnerNONE,
                    RIL = RIL,
                    bloob = block,
                    N_RIL = N_RIL,
                    N_block = N_block)
height_stan_model = stan(file = './height.stan', data = height_data, chain=1)
#print(height_stan_model)
hm = extract(height_stan_model, permuted = TRUE)
replicates = dim(hm$vary_RIL)[1]
height_sim = array(0, c(replicates, N))
for(i in 1:replicates){
vary <- hm$vary_RIL[i, RIL,1] +
hm$vary_RIL[i, RIL,2] * partnerNONE +
hm$vary_block[i, block]
glm <- vary + hm$Intercept[i]
for ( j in 1:N )
height_sim[i,j] = rnorm(1, glm[j], hm$sigma[i])
}
par(mfrow=c(2, 3))
hist((apply(height_sim, 1, min))) ; abline(v = min(((arabi_data$height))), col = "red")
hist((apply(height_sim, 1, mean))); abline(v = mean(((arabi_data$height))), col = "red")
hist((apply(height_sim, 1, max))) ; abline(v = max(((arabi_data$height))), col = "red")
hist((apply(height_sim, 1, sd))) ; abline(v = sd(((arabi_data$height))), col = "red")
hist(height_sim[1,]); hist(arabi_data$height)
extractHerit = function(x) diag(cov(cbind(x[,1], x[,1]+x[,2])))
herit_height = t(apply(hm$vary_RIL, 1, extractHerit)/rbind(hm$sigma, hm$sigma))
dimnames(herit_height) = list(NULL, c("L", "NONE"))
colMeans(herit_height)
boxplot(herit_height)
height_model = lmer(height ~ 1 + (0 + partner|RIL) + (1|block), data = arabi_data)
summary(height_model)
varRIL = diag(VarCorr(height_model)$RIL)
varRep = rep(VarCorr(height_model)$block[1], 2)
varRes = rep(attributes(VarCorr(height_model))$sc^2, 2)
(h2 = varRIL/(varRIL + varRep + varRes))
multi_model = lmer(value ~ variable + (0 + variable:partner|RIL) + (variable|block), data = m_arabi_data)
summary(multi_model)
VarCorr(multi_model)
|
62660ecee914b33e598f626e98aa3dcecdb95ff1 | cec0ceec4b86c70ea2ee6253200aa609db0395f2 | /debug_runner.R | 8766480088b682c31c388dfff31c388453520edd | [] | no_license | TheHuessy/Viewer | a8aed038742de0a3a6bcc8bd0359deebcaecee69 | ab9854a54ca5f177380ed29af37225f574512794 | refs/heads/master | 2023-04-24T12:04:01.433663 | 2021-05-17T20:19:14 | 2021-05-17T20:19:14 | 295,907,312 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 527 | r | debug_runner.R | library(shiny)
# Launch the Viewer Shiny app on a fixed LAN address and port so other
# devices on the network can reach it.
options(shiny.host = "192.168.0.113", shiny.port = 6519)

# Path notes:
# - Inside RStudio the kernel starts in Documents on this machine, so the
#   app would be launched as 'Jupyter Notebooks/Mobile Culler/Swiper'.
# - Run standalone from the command prompt instead, which needs the relative
#   path below. On Windows this also requires toggling user access control
#   (not ideal); this should not be an issue on Linux.
runApp("Viewer")
|
5d22ee2c28ce5c3ea736d1b733cec6777abac88f | 154e28504bee14a1f994b52c989715771611bc61 | /01-preprocessing.R | 7c46c495e093986889f95d07963dfd86ccef2763 | [] | no_license | dfalbel/deep-autoencoder-netflix | 0fd88bcb288ba6a5853cc26d9e48e688c96a4138 | 936def06a86d08048eb79574e83f8c7a9dae9d96 | refs/heads/master | 2021-09-11T12:31:00.796247 | 2018-04-06T20:51:08 | 2018-04-06T20:51:08 | 119,589,441 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,255 | r | 01-preprocessing.R | library(readr)
library(purrr)
library(lubridate)
library(dplyr)
library(Matrix)
# Untar dataset -----------------------------------------------------------
# untar("nf_prize_dataset.tar.gz", exdir = "data-raw")
# untar("data-raw/download/training_set.tar", exdir = "data-raw")
# Files -------------------------------------------------------------------
files <- dir("data-raw/training_set/", full.names = TRUE)
# Read one Netflix-prize movie file: the first line holds the movie id
# ("<id>:"); every following line is a "uid,rating,date" record. Returns a
# tibble of ratings with the movie id attached as column `mid`.
parse_movie <- function(file) {
  header <- read_lines(file, n_max = 1)
  id <- parse_number(header)
  ratings <- read_csv(
    file,
    skip = 1,
    col_names = c("uid", "rating", "date"),
    col_types = cols(
      uid = col_integer(),
      rating = col_integer(),
      date = col_date(format = "")
    )
  )
  ratings$mid <- id
  ratings
}
df <- map_df(files, parse_movie)
# Creating 3 datasets -----------------------------------------------------
# 1) Netflix - 3 months
# 2) Netflix - 6 months
# 3) Netflix - 1 year
# 4) Netflix - FULL
# defining functions to split data and to transform to a sparse matrix
# Split the ratings data frame `df` into a training window and held-out sets.
#
# `x` (train) holds all ratings between `min_date` and `max_date`; `y` holds
# ratings from the following month, restricted to users and movies already
# seen in training. `y` is then split at random 50/50 into test and
# validation halves. Prints set sizes and returns
# list(x, y_val, y_test).
#
# NOTE(review): no seed is set before sample.int(), so the test/validation
# split is not reproducible across runs unless the caller sets one.
split_netflix_data <- function(df, min_date, max_date) {
  x <- df %>%
    filter(between(date, min_date, max_date))
  # Next-month ratings, limited to users/movies present in the training window.
  y <- df %>%
    filter(
      between(date, max_date + days(1), max_date + months(1) - days(1)),
      uid %in% unique(x$uid),
      mid %in% unique(x$mid)
    )
  # split test and validation randomly
  ind_validation <- sample.int(nrow(y), nrow(y)/2)
  y_test <- y[-ind_validation,]
  y_val <- y[ind_validation,]
  cat("Dataset Info \n")
  cat("min_date = ", as.character(min_date), " max_date = ", as.character(max_date), "\n")
  cat("Train -> #Users ", length(unique(x$uid)), " #Ratings ", nrow(x), "\n")
  cat("Test  -> #Users ", length(unique(y_test$uid)), " #Ratings ", nrow(y_test), "\n")
  cat("Valid -> #Users ", length(unique(y_val$uid)), " #Ratings ", nrow(y_val), "\n")
  list(x = x, y_val = y_val, y_test = y_test)
}
# Convert the output of split_netflix_data() into sparse user-by-movie rating
# matrices suitable for the autoencoder.
#
# Users and movies are re-indexed to dense 1..n ids based on the training set,
# so all three matrices share the same row/column space. For validation and
# test, only users that actually have a held-out rating are kept, and the
# matching rows of the training matrix are returned as their inputs.
# Returns list(train = list(x, y), val = list(x, y), test = list(x, y)),
# where for train x == y (autoencoder reconstruction target).
to_sparse <- function(netflix_data) {
  x <- netflix_data$x
  y_val <- netflix_data$y_val
  y_test <- netflix_data$y_test
  # Dense 1..n ids derived from the training set.
  uids <- data_frame(
    uid = unique(x$uid),
    user_id = row_number(uid)
  )
  mids <- data_frame(
    mid = unique(x$mid),
    movie_id = row_number(mid)
  )
  x <- x %>%
    left_join(uids, by = "uid") %>%
    left_join(mids, by = "mid")
  y_val <- y_val %>%
    left_join(uids, by = "uid") %>%
    left_join(mids, by = "mid")
  y_test <- y_test %>%
    left_join(uids, by = "uid") %>%
    left_join(mids, by = "mid")
  x <- sparseMatrix(x$user_id, x$movie_id, x = x$rating)
  # validation data: keep only users with at least one held-out rating, and
  # pair each with their training-history row from x.
  y_val <- sparseMatrix(y_val$user_id, y_val$movie_id, x = y_val$rating)
  ind_ratings <- which(rowSums(y_val) > 0)
  y_val <- y_val[ind_ratings,]
  x_val <- x[ind_ratings,]
  # test data: same construction as validation.
  y_test <- sparseMatrix(y_test$user_id, y_test$movie_id, x = y_test$rating)
  ind_ratings <- which(rowSums(y_test) > 0)
  y_test <- y_test[ind_ratings,]
  x_test <- x[ind_ratings,]
  list(
    train = list(x = x, y = x),
    val = list(x = x_val, y = y_val),
    test = list(x = x_test, y = y_test)
  )
}
# Build the 3-month dataset (Sep-Nov 2005) and persist it for the model runs.
netflix3m <- split_netflix_data(df, ymd("2005-09-01"), ymd("2005-11-30"))
netflix3m <- to_sparse(netflix3m)
saveRDS(netflix3m, "data/netflix3m.rds")
|
95f3cc7c30b2a41c59ec9d2d881afc73d41c4fcb | c87f87a4956b76fadf40374c9da3d0aba93fe6d4 | /R/update_relevants.R | 44ce4680fe28d917bd61f0c48b6b46771a61342d | [] | no_license | zackarno/kobold | 20c012fb8595726ad8b1ab2a2aad827de781715f | 9bb0a93e1f4853fb6038e0b9d25cfdaedf4d7c26 | refs/heads/master | 2021-07-11T20:23:00.993698 | 2021-03-04T13:33:42 | 2021-03-04T13:33:42 | 236,489,190 | 2 | 0 | null | 2020-01-27T12:48:06 | 2020-01-27T12:48:05 | null | UTF-8 | R | false | false | 6,843 | r | update_relevants.R | #' Update relevant logic referencing a different sheet
#'
#' @importFrom glue glue glue_collapse
#' @importFrom dplyr filter select matches mutate
#' @importFrom rlang sym !! := is_empty
#' @importFrom stringr str_detect
#'
#' @noRd
separate_relevants <- function(rel_sheet, var_sheet, q_name, relevant, env) {
select_multiple <- str_detect(c(filter(env$object$survey, name == q_name)$type), "^.*(select_multiple|select multiple)")
if (select_multiple) {
l_name <- filter(env$object$survey, name == q_name)$list_name
choices <- filter(env$object$choices, list_name == l_name)$name
search_rgx <- glue("(\\b{q_name})(\\.|\\/)({choices}\\b)")
search_rgx <- glue_collapse(search_rgx, sep = "|")
binary_names <- unique(names(env$object[[var_sheet]] %>%
select(matches(search_rgx))))
}
# Get the UUID from the main sheet to connect to separate sheets
if (!is.na(match("uuid", names(env$object[[var_sheet]])))) {
chg_uuid <- filter(env$object[[rel_sheet]], !(!!convert_relevant(relevant)))$uuid
if (!is_empty(chg_uuid)) {
env$object[[var_sheet]] <- mutate(env$object[[var_sheet]],
!!q_name := ifelse(uuid %in% chg_uuid,
NA,
!!sym(q_name)))
if (select_multiple) {
for (i in 1:length(binary_names)) {
env$object[[var_sheet]] <- mutate(env$object[[var_sheet]],
!!binary_names[i] := ifelse(uuid %in% chg_uuid,
NA,
!!sym(binary_names[i])))
}
}
}
} else if (!is.na(match("index", names(env$object[[var_sheet]])))) {
sheet_chain <- filter(env$object$data_sheets, sheets == var_sheet)$parent
while (sheet_chain[1] != rel_sheet) {
parent <- filter(env$object$data_sheets, sheets == sheet_chain[1])$parent
sheet_chain <- append(sheet_chain, parent, before = 0)
}
sheet_chain <- append(sheet_chain, var_sheet)
chg_index <- filter(env$object[[rel_sheet]], !(!!convert_relevant(relevant)))$index
i <- 2
while (i <= length(sheet_chain)) {
if (is_empty(chg_index)) {
i <- length(sheet_chain) + 1
} else {
chg_index <- filter(env$object[[sheet_chain[i]]], parent_index %in% chg_index)$index
i <- i + 1
}
}
if (!is_empty(chg_index)) {
env$object[[var_sheet]] <- mutate(env$object[[var_sheet]],
!!q_name := ifelse(index %in% chg_index,
NA,
!!sym(q_name)))
if (select_multiple) {
for (i in 1:length(binary_names)) {
env$object[[var_sheet]] <- mutate(env$object[[var_sheet]],
!!binary_names[i] := ifelse(index %in% chg_index,
NA,
!!sym(binary_names[i])))
}
}
}
}
}
#' Update relevant logic referencing the same sheet
#'
#' Blanks question `q_name` on `sheet` (and, for select multiples, its 0/1
#' dummy columns) for every row where the relevance condition `relevant` is
#' FALSE. Modifies `env$object` in place.
#'
#' @importFrom dplyr filter mutate select matches %>%
#' @importFrom glue glue glue_collapse
#' @importFrom rlang sym !! :=
#'
#' @noRd
same_relevants <- function(sheet, q_name, relevant, env) {
  select_multiple <- str_detect(c(filter(env$object$survey, name == q_name)$type), "^.*(select_multiple|select multiple)")
  if (select_multiple) {
    # Locate the per-choice dummy columns ("q.choice" or "q/choice").
    l_name <- filter(env$object$survey, name == q_name)$list_name
    choices <- filter(env$object$choices, list_name == l_name)$name
    search_rgx <- glue("(\\b{q_name})(\\.|\\/)({choices}\\b)")
    search_rgx <- glue_collapse(search_rgx, sep = "|")
    binary_names <- unique(names(env$object[[sheet]] %>%
                                   select(matches(search_rgx))))
    # seq_along() (not 1:length()) so an empty match set skips the loop
    # instead of indexing binary_names[1] on a zero-length vector.
    for (i in seq_along(binary_names)) {
      env$object[[sheet]] <- mutate(env$object[[sheet]],
                                    !!binary_names[i] := ifelse(!(!!convert_relevant(relevant)),
                                                                NA,
                                                                !!sym(binary_names[i])))
    }
  }
  env$object[[sheet]] <- mutate(env$object[[sheet]],
                                !!q_name := ifelse(!(!!convert_relevant(relevant)),
                                                   NA,
                                                   !!sym(q_name)))
}
#' Determine variable for relevant logic updating
#'
#' Dispatches a single survey row to the right updater. For a group/repeat
#' row it recurses into every question of that group; for a plain question it
#' inspects which sheet(s) the relevance expression references and calls
#' same_relevants() or separate_relevants() accordingly. Modifies
#' `env$object` in place.
#'
#' @importFrom glue glue
#' @importFrom stringr str_which str_detect str_match_all
#' @importFrom dplyr filter
#' @importFrom purrr pmap map2
#' @importFrom rlang warn
#'
#' @noRd
relevant_determiner <- function(q_name, type, relevant, env) {
  group_rgx <- "^.*(begin_group|begin group|begin repeat|begin_repeat)"
  group <- str_detect(type, group_rgx)
  end_group_rgx <- "^.*(end_group|end group|end repeat|end_repeat)"
  end_group <- str_detect(type, end_group_rgx)

  # Ensure variables within groups get relevants ------------
  if (group) {
    group_name <- glue("\\b{q_name}\\b")
    # NOTE: inside filter(), `group` refers to the survey sheet's `group`
    # column, not the local logical flag of the same name above.
    var_rows <- filter(env$object$survey, str_detect(group, (!!group_name)) & !str_detect(type, (!!group_rgx)) & sheet %in% env$object$data_sheets$sheets & !is.na(name))
    vars <- var_rows$name
    types <- var_rows$type
    # Apply the group's relevance condition to every question it contains.
    map2(vars, types, relevant_determiner, relevant, env)
  } else if (!end_group) {
    # Extract every ${variable} referenced by the relevance expression and
    # find which data sheet(s) those variables live on.
    srch_term <- "\\$\\{(.*?)\\}"
    relevant_vars <- str_match_all(relevant, srch_term)[[1]][,2]
    relevant_vars <- unique(relevant_vars)
    rel_indices <- match(relevant_vars, env$object$survey$name)
    rel_sheets <- env$object$survey$sheet[rel_indices]
    rel_sheets <- unique(rel_sheets)
    var_sheet <- filter(env$object$survey, name == q_name)$sheet
    if (length(rel_sheets) > 1) {
      # Cross-sheet conditions spanning several sheets are not supported.
      warn(glue("Can't correct for {q_name} relevant logic since it references two or more data sheets."))
    } else if (var_sheet == rel_sheets) {
      same_relevants(var_sheet, q_name, relevant, env)
    } else {
      separate_relevants(rel_sheets, var_sheet, q_name, relevant, env)
    }
  }
}
#' Update data based on XLSForm relevant logic
#'
#' Walks every survey question that has a relevance condition and lives on a
#' known data sheet, blanking answers wherever the condition is FALSE.
#' Returns the updated object.
#'
#' @importFrom rlang current_env
#' @importFrom purrr pmap
#' @importFrom dplyr filter
#'
#' @export
relevant_updater <- function(object) {
  # Capture this call's environment so the helpers can mutate `object` in place.
  here <- current_env()
  to_update <- filter(
    object$survey,
    !is.na(relevant),
    sheet %in% object$data_sheets$sheets
  )
  pmap(
    list(to_update$name, to_update$type, to_update$relevant),
    relevant_determiner,
    here
  )
  object
}
|
d760d886a893da889310d57e880ac169dd92bb91 | bbbc74bb47beeb260b9a7b2e6f3307a4ccaccaa6 | /others/SDM/landraces_classification/classification_algorithms/explore_and_classify_occurrence_data_statistical_approach.R | 18c9c1da307b8d8140d14bb23ceb40f3d93cece6 | [] | no_license | CIAT-DAPA/gap_analysis_landraces | a534c92478fba0174be2c0821dd4c1e5b90dc4d7 | 19d6ca622cf4c5d93c2e895074aed7e86d541746 | refs/heads/master | 2021-11-10T17:02:09.417139 | 2021-11-02T14:42:33 | 2021-11-02T14:42:33 | 95,039,342 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 43,269 | r | explore_and_classify_occurrence_data_statistical_approach.R | # Explore and classify occurrence data
# A. Mendez & H. Achicanoy
# CIAT, 2017
# R options
# NOTE(review): warn = -1 silences all warnings and rm(list = ls()) wipes the
# caller's workspace -- both are risky script-wide side effects; consider
# removing them once the script is stable.
options(warn = -1); options(scipen = 999); g <- gc(reset = T); rm(list = ls())
# Resolve network paths for the current OS (Linux cluster mount vs Windows UNC).
OSys <- Sys.info()[1]
OSysPath <- switch(OSys, "Linux" = "/mnt", "Windows" = "//dapadfs")
root <- switch(OSys, "Linux" = "/mnt/workspace_cluster_9", "Windows" = "//dapadfs/Workspace_cluster_9")
# Load packages
suppressMessages(if(!require(tidyverse)){install.packages("tidyverse");library(tidyverse)}else{library(tidyverse)})
suppressMessages(if(!require(rgdal)){install.packages("rgdal");library(rgdal)}else{library(rgdal)})
suppressMessages(if(!require(sp)){install.packages("sp");library(sp)}else{library(sp)})
suppressMessages(if(!require(raster)){install.packages("raster");library(raster)}else{library(raster)})
suppressMessages(if(!require(ncdf4)){install.packages("ncdf4");library(ncdf4)}else{library(ncdf4)})
suppressMessages(if(!require(rasterVis)){install.packages("rasterVis");library(rasterVis)}else{library(rasterVis)})
suppressMessages(if(!require(htmlwidgets)){install.packages("htmlwidgets");library(htmlwidgets)}else{library(htmlwidgets)})
suppressMessages(if(!require(compiler)){install.packages("compiler");library(compiler)}else{library(compiler)})
suppressMessages(if(!require(leaflet)){install.packages("leaflet");library(leaflet)}else{library(leaflet)})
suppressMessages(if(!require(highcharter)){install.packages("highcharter");library(highcharter)}else{library(highcharter)})
suppressMessages(if(!require(plotly)){install.packages("plotly");library(plotly)}else{library(plotly)})
suppressMessages(if(!require(d3heatmap)){install.packages("d3heatmap");library(d3heatmap)}else{library(d3heatmap)})
suppressMessages(if(!require(cluster)){install.packages("cluster");library(cluster)}else{library(cluster)})
suppressMessages(if(!require(FactoMineR)){install.packages("FactoMineR");library(FactoMineR)}else{library(FactoMineR)})
suppressMessages(if(!require(factoextra)){install.packages("factoextra");library(factoextra)}else{library(factoextra)})
suppressMessages(if(!require(Rtsne)){install.packages("Rtsne");library(Rtsne)}else{library(Rtsne)})
suppressMessages(if(!require(InformationValue)){install.packages("InformationValue");library(InformationValue)}else{library(InformationValue)})
suppressMessages(if(!require(corrplot)){install.packages("corrplot");library(corrplot)}else{library(corrplot)})
suppressMessages(if(!require(caTools)){install.packages("caTools");library(caTools)}else{library(caTools)})
suppressMessages(if(!require(caret)){install.packages("caret");library(caret)}else{library(caret)})
suppressMessages(if(!require(shiny)){install.packages("shiny");library(shiny)}else{library(shiny)})
suppressMessages(if(!require(miniUI)){install.packages("miniUI");library(miniUI)}else{library(miniUI)})
suppressMessages(if(!require(assertthat)){install.packages("assertthat");library(assertthat)}else{library(assertthat)})
suppressMessages(if(!require(nnet)){install.packages("nnet");library(nnet)}else{library(nnet)})
# Load data: accession-level genotypic + climate table built upstream; rows
# are keyed by accession ID.
genotypic_climate <- readRDS(paste0(root, "/gap_analysis_landraces/Input_data/_datosAndres/acp/data4modeling.RDS"))
rownames(genotypic_climate) <- genotypic_climate$ID
# Descriptive analysis
# source("descriptive_analysis4cleanedDB.R")
# Shiny app for selection of variables: interactive gadget that lets the
# analyst tick which columns of genotypic_climate to keep before modeling.
ui <- miniPage(
gadgetTitleBar("Variable Selector"),
miniContentPanel(padding = 0,
checkboxGroupInput("vars", "Select Vars", choices = names(genotypic_climate ), selected = names(genotypic_climate))
)
)
server <- function(input, output, session){
observeEvent(input$done, {
# NOTE(review): overwrites the global genotypic_climate via <<- when the user
# clicks Done -- intentional for this interactive workflow, but a side effect
# on the global environment.
genotypic_climate <<- genotypic_climate[,input$vars]
stopApp(genotypic_climate)
})
}
runGadget(shinyApp(ui, server),viewer = dialogViewer("Select Vars", width = 600, height = 600))
##################################################
#################################################
#####----------Main function------------#########
#################################################
#################################################
# NOTE(review): leftover debugging assignment -- this global `y` is shadowed
# by the `y` parameter of genepool_predicted() below and has no effect on the
# final call; safe to delete once confirmed.
y = "Race.interpreted.ACID"
# Train classifiers for common-bean genepool (Andean vs Mesoamerican) and,
# optionally, race, on the labelled subset of `data_gen`, then predict the
# labels of accessions where they are missing.
#
# Arguments:
#   data_gen - accession table (morphology, protein, climate covariates) with
#              an ID rowname, `Analysis`, `To.use.ACID` and the label columns.
#   y        - character vector of response columns: y[1] = genepool label,
#              y[2] (optional, may be NA) = race label.
#   area     - value of `Analysis` used to filter training rows.
#
# Four models are fit per response (bagged FDA, logistic/multinomial GLM,
# random forest, radial SVM) with caret LGOCV (80/20, single split).
# Returns a list with the predicted rows, per-model resampling accuracies
# and the (recoded) input data; the structure depends on whether y[2] was
# supplied.
#
# NOTE(review): heavy use of eval(parse(paste0(...))) to inject the response
# column name -- behavior is preserved here as-is; a tidy-eval rewrite would
# be a larger, riskier change.
genepool_predicted <- function(data_gen = genotypic_climate, y = c("Genepool.interpreted.ACID","Race.interpreted.ACID"), area = "Americas"){
  # ---------------------------------------------------------------- #
  # Train models
  # ---------------------------------------------------------------- #
  cat("\n>>>> Starting training process\n\n")
  # Coerce the dummy colour/protein indicator columns to factors.
  if( length(grep("Color_",names(data_gen) ) )!= 0 ){
    data_gen[,grep("Color_",names(data_gen) )]<- data_gen[,grep("Color_",names(data_gen) )] %>% mutate_all(., funs(as.factor(.)))
  }
  if( length(grep("Protein_",names(data_gen) ) )!= 0 ){
    data_gen[,grep("Protein_",names(data_gen) )]<- data_gen[,grep("Protein_",names(data_gen) )] %>% mutate_all(., funs(as.factor(.)))
  }
  # Function to exclude correlated variables before to model
  # (drops numeric columns with pairwise |r| > 0.75, keeps the response).
  colinearity <- function(genepool_data,i=1){
    numeric <- genepool_data[,sapply(genepool_data, is.numeric)]
    descrCor <- cor(numeric)
    highlyCorDescr <- findCorrelation(descrCor, cutoff = .75)
    numeric <- numeric[,-highlyCorDescr]
    vec <- which(names(genepool_data) %in% names(numeric))
    genepool_data <- eval(parse(text = paste0("data.frame(", y[i], "=", "genepool_data$", y[i], ",", "numeric", ")")))
    return(genepool_data)
  }
  # Function to get just numeric variables before to model
  only_numeric <- function(genepool_data,i=1 ){
    genepool_data2 <- genepool_data[,sapply(genepool_data, is.numeric)]
    genepool_data <- eval(parse(text = paste0("data.frame(", y[i], "=", "genepool_data$", y[i], ",", "genepool_data2", ")")))
    return(genepool_data)
  }
  # Process response variable: normalise "N/A" to NA and fold the Spanish
  # landrace label into Andean before refactoring.
  eval(parse(text = paste0("data_gen$", y[1], " <- as.character(data_gen$", y[1], ")")))
  eval(parse(text = paste0("data_gen$", y[1], "[which(data_gen$", y[1], " == 'N/A')] <- NA")))
  if(length(grep("Spain_Andean_I", eval(parse(text = paste0("data_gen$", y[1]))))) != 0){
    eval(parse(text = paste0("data_gen$", y[1], "[which(data_gen$", y[1], " =='Spain_Andean_I')] <- 'Andean'")))
  }
  eval(parse(text = paste0("data_gen$", y[1], " <- factor(data_gen$", y[1], ")")))
  # Apply filters: keep only the requested analysis area and usable rows.
  row.names(data_gen) <- data_gen$ID
  genepool_data <- data_gen %>%
    dplyr::filter(., Analysis == area & To.use.ACID == 1) %>% `rownames<-`(.$ID) %>%
    dplyr::select(., -ID, -Analysis, -To.use.ACID)
  data_gen<- data_gen %>% dplyr::select(., -ID, -Analysis, -To.use.ACID)
  # Arrange and let just completed data for the training process
  genepool_data<- genepool_data[complete.cases(genepool_data),]
  # Select response variable: when modeling genepool, drop the finer labels.
  if( y[1] == "Genepool.interpreted.ACID" ){
    if(assertthat::has_name( genepool_data, "Race.interpreted.ACID")){ genepool_data$Race.interpreted.ACID <- NULL}
    if(assertthat::has_name( genepool_data, "Subgroup.interpreted.ACID")){ genepool_data$Subgroup.interpreted.ACID <- NULL}
  }
  if(assertthat::has_name(genepool_data, "Genepool.protein")){ genepool_data$Genepool.protein[which(genepool_data$Genepool.protein == "N/A")] <- NA }
  genepool_data <- genepool_data[complete.cases(genepool_data),]
  # Refactor categorical predictors so unused levels are dropped.
  if(assertthat::has_name(genepool_data, "Growth.habit")){genepool_data$Growth.habit <- factor(genepool_data$Growth.habit)}
  if(assertthat::has_name(genepool_data, "Seed.shape")){genepool_data$Seed.shape <- factor(genepool_data$Seed.shape)}
  if(assertthat::has_name(genepool_data, "Seed.brightness")){genepool_data$Seed.brightness <- factor(genepool_data$Seed.brightness)}
  if(assertthat::has_name(genepool_data, "Genepool.protein")){
    genepool_data$Genepool.protein <- as.character(genepool_data$Genepool.protein)
    genepool_data$Genepool.protein <- factor(genepool_data$Genepool.protein)
  }
  # Identify and exclude variables with low frequencies and variance close to 0
  nzv <- nearZeroVar(genepool_data)
  genepool_data <- genepool_data[,-nzv]
  # Define parameters to train models: one 80/20 leave-group-out split.
  set.seed(825); ctrol2 <- trainControl(method = "LGOCV", p = 0.8, number = 1, savePredictions = T)
  # In case of imbalance: ctrol2 <- trainControl(method = "LGOCV", p = 0.8, number = 1, savePredictions = T, sampling = "down")
  ##########################################
  # Model 1
  # Bagged Flexible Discriminant Analysis
  ##########################################
  cat("Running FDA ...\n")
  data_in <- only_numeric(genepool_data,i=1)
  eval(parse(text = paste0("FDA <- train(", y[1], " ~ ., data = data_in, method = 'bagFDA', trControl = ctrol2)"))) # FDA training
  cat("finishing FDA ...\n")
  ##########################################
  # Model 2
  # GLM: Logistic Regression Model
  ##########################################
  cat("Running GLM ...\n")
  vf <- colinearity(genepool_data,i=1)
  # make.names() on factor columns so glm's dummy coding gets syntactic levels.
  pos <- which(sapply(vf, is.factor))
  for(i in 1:length(pos)){
    vf[,pos[i]] <- make.names((vf[,pos[i]]))
  }
  eval(parse(text = paste0("glmFit1 <- train(", y[1], " ~ ., data = vf, method = 'glm', family = 'binomial', trControl = ctrol2)"))) # GLM training
  cat("finishing GLM ...\n")
  ##########################################
  # Model 3
  # Random Forest
  ##########################################
  cat("Running Random Forest ...\n")
  grid <- expand.grid(mtry = round((ncol(genepool_data)-4)/3))
  eval(parse(text = paste0("Rforest <- train(", y[1], " ~ ., data = genepool_data, method = 'rf', tuneGrid = grid, importance = TRUE, ntree = 2000, metric = 'Accuracy', trControl = ctrol2)"))) # RF training
  cat("finishing Rforest ...\n")
  ##########################################
  # Model 4
  # Support Vector Machines
  ##########################################
  cat("Running Support Vector Machine ...\n\n")
  eval(parse(text = paste0("svmFit <- train(", y[1], " ~ ., data = genepool_data, method = 'svmRadial', tuneLength = 9, trControl = ctrol2, importance = T)")))
  cat("finishing SVM ...\n")
  # ---------------------------------------------------------------- #
  # Predict new cases
  # ---------------------------------------------------------------- #
  cat(">>>> Starting predicting process\n\n")
  # Rows whose genepool label is missing: these get predictions.
  genepool_na <- data_gen[!complete.cases(eval(parse(text = paste0("data_gen$", y[1])))),]
  if( y[1] == "Genepool.interpreted.ACID" ){
    if(assertthat::has_name( genepool_na, "Race.interpreted.ACID")){ genepool_na$Race.interpreted.ACID <- NULL}
    if(assertthat::has_name( genepool_na, "Subgroup.interpreted.ACID")){ genepool_na$Subgroup.interpreted.ACID <- NULL}
  }
  # genepool_na <- genepool_na[, names(genepool_data)]
  genepool_na$Genepool.protein <- as.character(genepool_na$Genepool.protein)
  genepool_na$Genepool.protein[which(genepool_na$Genepool.protein == "N/A")] <- NA
  genepool_na$Genepool.protein <- factor(genepool_na$Genepool.protein)
  # Drop a level never seen in training so predict() does not fail.
  genepool_na$Growth.habit[which(genepool_na$Growth.habit == "Climbing-Determinate")] <- NA
  genepool_na$Growth.habit <- factor(genepool_na$Growth.habit)
  genepool_na <- genepool_na[complete.cases(genepool_na[,-which( names(genepool_na) == y[1] )]),]
  model_type <- c("FDA", "glmFit1", "Rforest", "svmFit")
  # One prediction vector per trained model, matched by its caret method.
  predictions <- lapply(model_type, function(x){
    model <- eval(parse(text = x ))
    ifelse(model$method == "glm" | model$method == "rf", tp <- "response", tp <- "class")
    if(model$method == "rf"){ pred <- predict(model, newdata = genepool_na[,-which(names(genepool_na) == y[1])]) }
    if(model$method == "svmRadial"){ pred <- predict(model, newdata = genepool_na[,-which(names(genepool_na) == y[1])]) }
    if(model$method == "bagFDA"){ pred <- predict(model$finalModel, newdata = genepool_na[,names(data_in)] ,type = tp) }
    if(model$method == "glm"){
      # Refit a plain glm so class probabilities can be thresholded at 0.5.
      vf_p <- genepool_na[,names(vf)[-which(names(vf) == y[1])]]
      pos <- which(sapply(vf_p, is.factor))
      if(length(pos)!=0){
        for(i in 1:length(pos)){
          vf_p[,pos[i]] <- make.names((vf_p[,pos[i]]))
        }
      }
      g1 <- glm(factor(Genepool.interpreted.ACID) ~ ., data = vf, family = binomial(link = "logit"))
      pred <- predict(g1, newdata = na.omit(vf_p), type = "response")
      pred <- ifelse(pred < 0.5, "Andean", "Mesoamerican")
      pred <- as.factor(pred)
    }
    return(pred)
  })
  names(predictions) <- model_type
  # Resampling accuracies: FDA from its out-of-bag table, the rest from the
  # 2x2 resampled confusion matrices stored by caret.
  accu.FDA <- mean(FDA$finalModel$oob[,1])
  accu.glmFit1 <- mean(apply(gd <- data.frame(glmFit1$resampledCM[,1:4]), 1, function(x){
    (x[1] + x[4]) /sum(x)
  }))
  accu.Rforest <- mean(apply(gd <- data.frame(Rforest$resampledCM[,1:4]), 1, function(x){
    (x[1] + x[4]) /sum(x)
  }))
  accu.svm <- mean(apply(gd <- data.frame(svmFit$resampledCM[,1:4]), 1, function(x){
    (x[1] + x[4]) /sum(x)
  }))
  accuracy <- c(accu.FDA, accu.glmFit1, accu.Rforest, accu.svm)
  names(accuracy) <- model_type
  #------------------------------------------------------ ^ -------------------------------
  #### predictions for races
  #------------------------------------------------------ ^ -------------------------------
  if( !is.na( y[2]) ){
    cat("Starting predictions proccess to beans race... \n")
    # Same response preprocessing as above, now for the race label; Chile is
    # folded into Peru.
    eval(parse(text = paste0("data_gen$", y[2], " <- as.character(data_gen$", y[2], ")")))
    eval(parse(text = paste0("data_gen$", y[2], "[which(data_gen$", y[2], " == 'N/A')] <- NA")))
    if(length(grep("Chile", eval(parse(text = paste0("data_gen$", y[2]))))) != 0){
      eval(parse(text = paste0("data_gen$", y[2], "[which(data_gen$", y[2], " =='Chile')] <- 'Peru'")))
    }
    eval(parse(text = paste0("data_gen$", y[2], " <- factor(data_gen$", y[2], ")")))
    genepool_data <- data_gen
    genepool_data <- genepool_data[ complete.cases(genepool_data) , ]
    if(assertthat::has_name(genepool_data, "Subgroup.interpreted.ACID")){genepool_data$Subgroup.interpreted.ACID <- NULL}
    if(assertthat::has_name(genepool_data, "Genepool.protein")){ genepool_data$Genepool.protein[which(genepool_data$Genepool.protein == "N/A")] <- NA }
    genepool_data <- genepool_data[complete.cases(genepool_data),]
    if(assertthat::has_name(genepool_data, "Growth.habit")){genepool_data$Growth.habit <- factor(genepool_data$Growth.habit)}
    if(assertthat::has_name(genepool_data, "Seed.shape")){genepool_data$Seed.shape <- factor(genepool_data$Seed.shape)}
    if(assertthat::has_name(genepool_data, "Seed.brightness")){genepool_data$Seed.brightness <- factor(genepool_data$Seed.brightness)}
    if(assertthat::has_name(genepool_data, "Genepool.protein")){
      genepool_data$Genepool.protein <- as.character(genepool_data$Genepool.protein)
      genepool_data$Genepool.protein <- factor(genepool_data$Genepool.protein)
    }
    # Identify and exclude variables with low frequencies and variance close to 0
    nzv <- nearZeroVar(genepool_data)
    genepool_data <- genepool_data[,-nzv]
    # Define parameters to train models
    set.seed(825); ctrol2 <- trainControl(method = "LGOCV", p = 0.8, number = 1, savePredictions = T)
    # In case of imbalance: ctrol2 <- trainControl(method = "LGOCV", p = 0.8, number = 1, savePredictions = T, sampling = "down")
    ##########################################
    # Model 1
    # Bagged Flexible Discriminant Analysis
    ##########################################
    cat("Running FDA ...\n")
    data_in <- only_numeric(genepool_data,i=2)
    # Add the genepool label as an extra predictor for the race models.
    data_in<- eval( parse( text= paste0( "data.frame( data_in,", y[1], "= genepool_data$",y[1], ")") ) )
    eval(parse(text = paste0("FDA.race <- train(", y[2], " ~ ., data = data_in, method = 'bagFDA', trControl = ctrol2)"))) # FDA training
    cat("finishing FDA ...\n")
    ##########################################
    # Model 2
    # GLM: Logistic Regression Model
    ##########################################
    cat("Running multinom glm ...\n")
    vf <- colinearity(genepool_data,i=2)
    vf<- eval( parse( text= paste0( "data.frame( vf,", y[1], "= genepool_data$",y[1], ")") ) )
    # vf$Race.interpreted.ACID<-c(0,1,2,3,4)[vf$Race.interpreted.ACID]
    # vf$Genepool.interpreted.ACID<-c(0,1)[vf$Genepool.interpreted.ACID]
    # Durango-Jalisco as the multinomial reference level.
    vf$Race.interpreted.ACID<-relevel(genepool_data$Race.interpreted.ACID,ref="Durango-Jalisco")
    set.seed(1200)
    #genepool_data<- colinearity(genepool_data)
    # 6-fold CV of nnet::multinom via modelr; per-fold confusion + accuracy.
    folds<-modelr::crossv_kfold(vf,k=6)
    multi<- eval ( parse ( text= paste0( "folds %>% mutate(.,model=purrr::map(train, ~ nnet::multinom(", y[2],"~. , data=. ) ) )" ) ) )
    multi<- multi %>% dplyr::mutate(.,tested= purrr::map2(model,test, ~predict(.x,newdata=.y) ) )
    multi<- eval (parse( text= paste0(" multi %>% dplyr::mutate(., cm=purrr::map2(test,tested, ~table(data.frame(.x)$",y[2],",.y) ) ) %>% mutate(., accuracy=purrr::map(cm, function(x){ sum(diag(x))/sum(x)} ) )" )))
    #select the best model#
    multi.model<- multi[which(unlist(multi$accuracy)==max(unlist(multi$accuracy))),"model"]$model
    mean(unlist(multi$accuracy))
    ##########################################
    # Model 3
    # Random Forest
    ##########################################
    cat("Running Random Forest ...\n")
    grid <- expand.grid(mtry = round((ncol(genepool_data)-4)/3))
    eval(parse(text = paste0("Rforest.race <- train(", y[2], " ~ ., data = genepool_data, method = 'rf', tuneGrid = grid, importance = TRUE, ntree = 2000, metric = 'Accuracy', trControl = ctrol2)"))) # RF training
    cat("finishing Rforest ...\n")
    ##########################################
    # Model 4
    # Support Vector Machines
    ##########################################
    cat("Running Support Vector Machine ...\n\n")
    eval(parse(text = paste0("svmFit.race <- train(", y[2], " ~ ., data = genepool_data[,-which( names(genepool_data)==", 'y[1]' ,") ], method = 'svmRadial', tuneLength = 9, trControl = ctrol2, importance = T)")))
    cat("finishing SVM ...\n")
    cat(">>>> Starting predicting process for race...\n\n")
    #genepool_na <- data_gen[!complete.cases(eval(parse(text = paste0("data_gen$", y[2])))),]
    # Attach the RF-predicted genepool to the unlabeled rows, so race models
    # can use genepool as a predictor.
    df<-as.data.frame(data_gen[, y[2] ])
    row.names(df)<- row.names(data_gen)
    genepool_na_race<- base::merge(df,data.frame(genepool_na, predictions$Rforest) , by = "row.names" )
    genepool_na_race<-genepool_na_race %>% dplyr::select(., -Row.names,-Genepool.interpreted.ACID)
    row.names(genepool_na_race)<-row.names(data.frame(genepool_na, predictions$Rforest))
    names(genepool_na_race)[ which( names(genepool_na_race)== "data_gen[, y[2]]" ) ]<-y[2]
    names(genepool_na_race)[ which( names(genepool_na_race)== "predictions.Rforest" ) ]<- y[1]
    if(assertthat::has_name(genepool_na_race, "Subgroup.interpreted.ACID")){ genepool_na_race$Subgroup.interpreted.ACID <-NULL}
    if(assertthat::has_name(genepool_na_race, "Genepool.protein")){genepool_na_race$Genepool.protein <- as.character(genepool_na_race$Genepool.protein)
      genepool_na_race$Genepool.protein[which(genepool_na_race$Genepool.protein == "N/A")] <- NA
      genepool_na_race$Genepool.protein <- factor(genepool_na_race$Genepool.protein)
    }
    if(assertthat::has_name(genepool_na_race,"Growth.habit") ) {genepool_na_race$Growth.habit[which(genepool_na_race$Growth.habit == "Climbing-Determinate")] <- NA
      genepool_na_race$Growth.habit <- factor(genepool_na_race$Growth.habit)
    }
    genepool_na_race <- genepool_na_race[complete.cases(genepool_na_race[,-which(names(genepool_na_race) == y[2] & names(genepool_na_race) == "Genepool.interpreted.ACID" )]),]# predictions with the Rforest
    model_type <- c("FDA.race", "multi.model", "Rforest.race", "svmFit.race")
    predictions_race <- lapply(model_type, function(x){
      cat(paste("Predicting",x,"\n"))
      model <- eval(parse(text = x ))
      # caret fits carry $method; the raw multinom model does not (else branch).
      if( !is.null( model$method) ){
        if(model$method == "rf"){ pred <- predict(model, newdata = genepool_na_race[,-which(names(genepool_na_race) == y[2])] ) }
        if(model$method == "svmRadial"){ pred <- predict(model, newdata = genepool_na_race[,-which(names(genepool_na_race) == y[2])]) }
        if(model$method == "bagFDA"){ pred <- predict(model, newdata = genepool_na_race[, names(data_in)[-which(names(genepool_na_race) == y[2])] ] ,type = "raw") }
      }else{
        vf_p <- genepool_na_race[,names(vf)[-which(names(vf) == y[2])]]
        pos <- which(sapply(vf_p, is.factor))
        if(length(pos)!=0){
          for(i in 1:length(pos)){
            vf_p[,pos[i]] <- make.names((vf_p[,pos[i]]))
          }
        }
        pred <- predict(model, newdata = na.omit(vf_p), type = "class")
      }
      return(pred)
    } )
    #### REVIEW THIS PART: THE ACCURACIES APPEAR TO BE MISCALCULATED
    names(predictions_race)<-model_type
    data_predicted_race <- data.frame(genepool_na_race, predictions_race)
    accu.FDA.race <- mean(FDA.race$finalModel$oob[,1])
    accu.multinom <- mean(unlist(multi$accuracy))
    # Rebuild the k-class confusion matrix from caret's flattened resampledCM.
    n.lev<-length( eval (parse( text = paste0("levels(genepool_data$",y[2],")" ) )) )
    accu.Rforest.race <- sum(unlist(diag(matrix(Rforest.race$resampledCM[1:(n.lev*n.lev)],n.lev,n.lev,byrow = T))))/ sum( unlist(matrix(Rforest.race$resampledCM[1:(n.lev*n.lev)],n.lev,n.lev,byrow = T)))
    accu.svm.race <- sum(unlist(diag(matrix(svmFit.race$resampledCM[1:(n.lev*n.lev)],n.lev,n.lev,byrow = T))))/ sum( unlist(matrix(svmFit.race$resampledCM[1:(n.lev*n.lev)],n.lev,n.lev,byrow = T)))
    accuracy_race <- c(accu.FDA.race, accu.multinom, accu.Rforest.race, accu.svm.race)
    names(accuracy_race) <- model_type
  }
  if( !is.null(y[1]) ){
    if(length(y)==2){
      return( list(data_predicted_genepool = data.frame(genepool_na, predictions), accuracy.genepool = accuracy, data_predicted_race= data.frame(genepool_na_race, predictions_race), accuracy.race = accuracy_race ,data = data_gen) )
    }else{
      return(list(data_predicted = data.frame(genepool_na, predictions), models_accuracy = accuracy, data = data_gen))
      # NOTE(review): unreachable -- placed after return(); move it before the
      # return if the message is wanted.
      cat(">>>> Process done\n")
    }
  }else{
    stop("ERROOOOOORRRRR")
  }
}
# Run the full pipeline and persist the result.
predictions <- genepool_predicted(data_gen = genotypic_climate, c("Genepool.interpreted.ACID","Race.interpreted.ACID"), area = "Americas")
# Element 2 is accuracy.genepool (named vector of per-model accuracies).
df<-predictions[[2]]
predictions
saveRDS(predictions, "/home/hachicanoy/genepool_predictions.RDS")
# Consensus genepool: per-row majority vote across the four model columns.
# NOTE(review): Mode() is not base R -- presumably defined elsewhere in the
# project (or from a package such as DescTools); verify it is in scope.
data_gen$Genepool.predicted <- NA
data_gen$Genepool.predicted[match(rownames(predictions[[1]]), rownames(data_gen))] <- as.character(apply(X = predictions[[1]][,c("FDA", "glmFit1", "Rforest", "svmFit")], MARGIN = 1, function(x){Mode(x)}))
# Map example: plot the SVM-predicted genepool over the Americas.
shp_wld <- rgdal::readOGR(dsn = "/home/hachicanoy", layer = "all_countries")
proj4string(shp_wld) <- CRS("+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0")
shp_wld$CONTINENT <- iconv(shp_wld$CONTINENT, from = "UTF-8", to = "latin1")
shp_wld <- shp_wld[shp_wld@data$CONTINENT == "North America" | shp_wld@data$CONTINENT == "South America",]
shp_wld <- fortify(shp_wld)
ggplot() +
geom_polygon(data = shp_wld, aes(long, lat, group = group)) +
geom_point(data = predictions[[1]], aes(x = Longitude, y = Latitude, fill = svmFit, colour = svmFit)) +
coord_cartesian(xlim = c(-180, 0)) + theme_bw()
###########################################################
###### MULTINOMIAL LOGISTIC REGRESSION "nnet" package#####
###########################################################
# Exploratory scratch section: standalone re-run of the race models outside
# genepool_predicted(), on the .lit (literature) labels.
# NOTE(review): only_numeric()/colinearity() used below are defined INSIDE
# genepool_predicted() and are not available at top level -- this section will
# not run as-is; hoist those helpers out of the function first.
install.packages("nnet")
library("nnet")
genepool_data <- data_gen
genepool_data <- genepool_data[complete.cases(genepool_data$Race.interpreted.ACID),]
genepool_data$Race.interpreted.lit<-factor(genepool_data$Race.interpreted.lit)
genepool_data$Race.interpreted.lit<-relevel(genepool_data$Race.interpreted.lit,ref="Durango-Jalisco")
set.seed(1200)
#genepool_data<- colinearity(genepool_data)
# 6-fold CV of nnet::multinom; per-fold confusion matrix and accuracy.
folds<-modelr::crossv_kfold(genepool_data,k=6)
multi<- folds %>% mutate(.,model=purrr::map(train, ~ nnet::multinom( Race.interpreted.lit~. , data=. ) ) )
multi<- multi %>% dplyr::mutate(.,tested= purrr::map2(model,test, ~predict(.x,newdata=.y) ) )
multi<- multi %>% dplyr::mutate(., cm=purrr::map2(test,tested, ~table(data.frame(.x)$Race.interpreted.lit,.y) ) ) %>% mutate(., accuracy=purrr::map(cm, function(x){ sum(diag(x))/sum(x)} ) )
#select the best model#
multi.model<- multi[which(unlist(multi$accuracy)==max(unlist(multi$accuracy))),"model"]$model
##global accuracy
mean(unlist(multi$accuracy))
#### via an FDA
genepool_data$Genepool.lit<- as.numeric(genepool_data$Genepool.lit)
#as.numeric(genepool_data$Race.interpreted.lit )
# Numeric predictors only, with the race label re-attached as the response.
genepool_data2<-genepool_data[,sapply(genepool_data, is.numeric)]
genepool_data2<-data.frame(Race.interpreted.lit=genepool_data$Race.interpreted.lit,genepool_data2)
genepool_data2$Race.interpreted.lit<-factor(genepool_data2$Race.interpreted.lit)
genepool_data2<- genepool_data2[complete.cases(genepool_data2$Race.interpreted.lit),]
head(genepool_data2)
set.seed(825)
ctrol2<-trainControl(method="LGOCV",p=0.8,number=1,savePredictions = T)
FDA.race<-train(Race.interpreted.lit~.,data=genepool_data2,method="bagFDA",trControl = ctrol2)
# Out-of-bag accuracy of the bagged FDA.
mean(FDA.race$finalModel$oob[,1])
genepool_data$Genepool.lit<- factor(genepool_data$Genepool.lit)
vf<- only_numeric(genepool_data )
vf$Genepool.lit<-as.numeric(vf$Genepool.lit)
vf<-data.frame(Race.interpreted.lit=genepool_data$Race.interpreted.lit,vf)
genepool_data$Race.interpreted.lit<-factor(genepool_data$Race.interpreted.lit)
genepool_data$Genepool.lit<-factor(genepool_data$Genepool.lit)
gam<-caret::train( Race.interpreted.lit ~.,data=vf , method="nnet", trcontrol=ctrol2,
seed = 1)
gam$results
hist(vf$bio_19)
hist(log(scale(vf$bio_19,center = T, scale = T)) )
###### DIMENSIONALITY REDUCTION ###########
# Resolve cluster paths per OS and load the bean climate covariates.
OSys <- Sys.info()[1]
OSysPath <- switch(OSys, "Linux" = "/mnt", "Windows" = "//dapadfs")
root <- switch(OSys, "Linux" = "/mnt/workspace_cluster_9", "Windows" = "//dapadfs/Workspace_cluster_9")
suppressMessages(library(Rtsne))
library(dplyr)
suppressMessages(if(!require(corrplot)){install.packages("corrplot");library(corrplot)}else{library(corrplot)})
biophysicalVars <- readRDS(paste0(root, "/gap_analysis_landraces/Input_data/_occurrence_data/_ciat_data/Bean/BEAN-GRP-COORDINATES-CLIMATE.RDS"))
names(biophysicalVars)
# Drop the first column (presumably an ID column — verify against the RDS).
biophysicalVars<-biophysicalVars[,-1]
colinearity <- function(genepool_data, tol = 0.75) {
  # Detect bioclimatic variables (bio_1:bio_19) that are highly correlated
  # (|r| > tol) with another variable and drop them, keeping every column
  # that precedes bio_1 in the original ordering untouched.
  #
  # Args:
  #   genepool_data: data frame containing columns bio_1 through bio_19.
  #   tol: pairwise-correlation cutoff passed to caret::findCorrelation().
  # Returns:
  #   The input data frame with redundant bio_* columns removed.
  numeric <- genepool_data %>% dplyr::select(., bio_1:bio_19)
  # pairwise.complete.obs keeps the correlation matrix finite when some
  # climate cells are NA; identical to the default for complete data.
  descrCor <- cor(numeric, use = "pairwise.complete.obs")
  highlyCorDescr <- findCorrelation(descrCor, cutoff = tol)
  # BUG FIX: findCorrelation() returns integer(0) when no pair exceeds the
  # cutoff, and numeric[, -integer(0)] selects ZERO columns (dropping every
  # variable). Only subset when there is something to remove; drop = FALSE
  # keeps a data frame even if a single column survives.
  if (length(highlyCorDescr) > 0) {
    numeric <- numeric[, -highlyCorDescr, drop = FALSE]
  }
  genepool_data <- cbind(
    genepool_data %>% dplyr::select(1:(which(names(genepool_data) == "bio_1") - 1)),
    numeric
  )
  return(genepool_data)
}
# Inspect correlations after decorrelating, then run t-SNE at three
# perplexities on the (deduplicated, complete-case) climate matrix.
colinearity(biophysicalVars,tol=0.75)
M<-cor(colinearity(biophysicalVars,tol=0.75), use = "complete.obs")
corrplot(M)
hist(M)
biophysicalVars <-colinearity(biophysicalVars,tol=0.5)
bio_tsne1 <- Rtsne(biophysicalVars[complete.cases(biophysicalVars),] %>% unique, dims = 2, perplexity = 400, verbose = TRUE, max_iter = 2000,pca=TRUE)
bio_tsne2 <- Rtsne(biophysicalVars[complete.cases(biophysicalVars),] %>% unique, dims = 2, perplexity = 25, verbose = TRUE, max_iter = 2000,pca=TRUE)
bio_tsne3 <- Rtsne(biophysicalVars[complete.cases(biophysicalVars),] %>% unique, dims = 2, perplexity = 10, verbose = TRUE, max_iter = 2000,pca=TRUE)
# Side-by-side embeddings to eyeball the effect of perplexity.
par(mfrow=c(1,3))
plot(bio_tsne1$Y, pch = 20, main = "tsne for biophysical variables")
plot(bio_tsne2$Y, pch = 20, main = "tsne for biophysical variables")
plot(bio_tsne3$Y, pch = 20, main = "tsne for biophysical variables")
bio_tsne1$M
M<-cor(biophysicalVars, use = "complete.obs")
corrplot(M)
hist(M)
# Sanity-check of the t-SNE + clustering pipeline on an external dataset
# (local cancer file); k-means and hierarchical clusters (k = 4) are both
# attached to the 2-D embedding, then the k-means partition is plotted.
cancer<- read.table("C:/Users/ACMENDEZ/Desktop/cancer.txt",sep=",")
cancer.dat<-cancer[,-(1:2)]
can1<- Rtsne(cancer %>% unique, dims = 2, perplexity = 30, verbose = TRUE, max_iter = 1000,pca=TRUE)
tsn<-as.data.frame(can1$Y)
tsn.clust<-tsn
cluster<-kmeans( scale( tsn ),4)
tsn.clust$kmeans<-factor(cluster$cluster)
clust.h<-stats::hclust(dist(scale(tsn) ) )
tsn.clust$hierar<-factor(cutree(clust.h,4) )
plot(clust.h)
ggplot(tsn.clust,aes_string(x="V1",y="V2",color="kmeans") )+geom_point(size=0.25) + guides(colour=guide_legend(override.aes=list(size=6))) +
xlab("") + ylab("") +
ggtitle("") +
theme_light(base_size=20) +
theme(axis.text.x=element_blank(),
axis.text.y=element_blank(),
legend.direction = "horizontal",
legend.position = "bottom",
legend.box = "horizontal") +
scale_colour_brewer(palette = "Accent")
###################
# Pull the CIAT bean landrace table from Google Sheets and normalise the
# column names to the fixed vocabulary used below.
ciat <- gs_ls("Bean_landrace_name_table")
ciat <- gs_title("Bean_landrace_name_table")
ciat %>% gs_browse(ws = "Pvulgaris_CIATdb")
ciat <- ciat %>% gs_read(ws = "Pvulgaris_CIATdb")
names(ciat) <- c("ID", "Source", "Cleaned.by", "Accession.number", "Synonyms", "Common.names",
"Interpreted.name.csosa", "To.use.ACID", "Common.name.ACID",
"Genepool.ACID", "Genepool.literature.ACID","Race_interpreted_ACID",
"Race.literature.ACID", "Subgroup.interpreted.ACID", "Subgroup.literature.ACID",
"Reference.ACID", "TEST.vernacular", "Name.literature.vernacular",
"Genepool.literature.vernacular", "Race.interpreted.vernacular", "Race.literature.vernacular",
"Subgroup.literature.vernacular", "Reference.vernacular", "Genus", "Species", "Subspecies", "Variety",
"Biological.status", "Material.type", "CORE.collection", "Country", "Department", "County", "Place",
"Altitude", "Latitude", "Longitude", "Lat.geo", "Lon.geo", "Coord.status", "Collection.date", "Name",
"Name2", "Institution", "Country3", "Receipt.date", "Growth.habit", "Seed.color",
"Seed.shape", "Seed.brightness", "Seed.weight", "Protein", "Genepool.WEIGHT.fix",
"Genepool.protein", "Race.protein", "Responsible11")
ciat <- ciat %>% filter(Coord.status != "No coords") # 16038
# Backfill missing lat/lon from the georeferenced columns where available.
ciat$Latitude[which(!is.na(ciat$Lat.geo) & is.na(ciat$Latitude))] <- ciat$Lat.geo[which(!is.na(ciat$Lat.geo) & is.na(ciat$Latitude))]
ciat$Longitude[which(!is.na(ciat$Lon.geo) & is.na(ciat$Longitude))] <- ciat$Lon.geo[which(!is.na(ciat$Lon.geo) & is.na(ciat$Longitude))]
# ------------------------------------ #
# Include altitude records from SRTM
# ------------------------------------ #
# Identify coordinates without altitude data
which(!is.na(ciat$Latitude) & is.na(ciat$Altitude)) %>% length
ciat %>% dplyr::filter(!is.na(Latitude) & is.na(Altitude)) %>% dplyr::select(Longitude, Latitude) %>% head
srtm <- raster::raster(paste0(OSysPath, "/data_cluster_4/observed/gridded_products/srtm/Altitude_30s/alt"))
srtm.vals <- raster::extract(x = srtm,
y = ciat %>% dplyr::filter(!is.na(Latitude) & is.na(Altitude)) %>% dplyr::select(Longitude, Latitude))
# Density plots before and after update altitude records
ciat %>% ggplot(aes(x = Altitude)) + geom_density() # Before
srtm.vals %>% data.frame %>% ggplot(aes(x = .)) + geom_density() # SRTM values
ciat$Altitude[which(!is.na(ciat$Latitude) & is.na(ciat$Altitude))] <- srtm.vals
rm(srtm.vals, srtm)
# Filter accessions (altitude, usable flag, complete phenotype fields), join
# climate covariates by ID, decorrelate, and embed with t-SNE coloured by race.
ciat <- ciat %>% filter(Altitude <= 3500)
biophysicalVars
ciat<-ciat[complete.cases(ciat$Genepool.literature.ACID),]
ciat <- ciat %>% filter(To.use.ACID == 1)
ciat <- ciat %>% dplyr::filter(!is.na(Longitude) & !is.na(Altitude) &
!is.na(Growth.habit) & !is.na(Seed.color) &
!is.na(Seed.shape) & !is.na(Seed.brightness) &
!is.na(Seed.weight) & !is.na(Protein) &
!is.na(Genepool.protein))
ciat.bio<-left_join(x=ciat,y=biophysicalVars,by="ID")
ciat.bio<-as.data.frame(ciat.bio)
# Recode the one-off "Spain_Andeanean_I" label into "Andean" when present.
if(all( (ciat.bio$Genepool.ACID == "Spain_Andeanean_I" )==FALSE )==FALSE ){
ciat.bio$Genepool.ACID[which(ciat.bio$Genepool.ACID=="Spain_Andeanean_I")]<-"Andean"
}
ciat.bio$Genepool.ACID<-factor(ciat.bio$Genepool.ACID)
table(ciat.bio$Race.literature.ACID)
table(ciat$Race.literature.ACID)
ciat.bio <- ciat.bio %>% dplyr::select(.,ID,Race.interpreted.ACID,Seed.weight,Altitude,Latitude.x,Longitude.x,aridityIndexThornthwaite:bio_19)
# Change depending on the race
ciat.bio<-ciat.bio[complete.cases(ciat.bio),]
row.names(ciat.bio)<-ciat.bio$ID
M<-cor(ciat.bio[,-1], use = "complete.obs")
corrplot(M)
hist(M)
# Detect highly correlated variables and remove them from the dataset.
# NOTE(review): same integer(0) footgun as colinearity() — if no pair exceeds
# the cutoff, numeric[,-highlyCorDescr] drops every column.
numeric<-ciat.bio[,sapply(ciat.bio,is.numeric)]
numeric<-numeric[,-1]
numeric<-na.omit(numeric)
descrCor<-cor(numeric)
highlyCorDescr <- findCorrelation(descrCor, cutoff = 0.85)
numeric <- numeric[,-highlyCorDescr]
row.names(numeric)<-row.names(ciat.bio)
numeric<-data.frame(Genepool.ACID=ciat.bio$Race.interpreted.ACID,numeric)# change depending on the race
#-----RACE
levels(factor(numeric$Genepool.ACID))
numeric$Genepool.ACID[which(numeric$Genepool.ACID=="N/A")]<-NA
numeric<-na.omit(numeric)
numeric$Genepool.ACID<-factor(numeric$Genepool.ACID)
#--------end RACE
numeric<- numeric[,] %>% unique
bio_tsne3 <- Rtsne(numeric[,], dims = 2, perplexity =40, verbose = TRUE, max_iter = 1500,pca=TRUE)
plot(bio_tsne3$Y, pch = 20, main = "tsne for biophysical variables")
row.names(bio_tsne3$Y)<-row.names(numeric)
tsn.clust<-data.frame(bio_tsne3$Y,Genepool.ACID=numeric$Genepool.ACID)
ggplot(tsn.clust,aes_string(x="X1",y="X2",color="Genepool.ACID") )+geom_point(size=1.8) + guides(colour=guide_legend(override.aes=list(size=6))) +
xlab("") + ylab("") +
ggtitle("") +
theme_light(base_size=20) +
theme(axis.text.x=element_blank(),
axis.text.y=element_blank(),
legend.direction = "horizontal",
legend.position = "bottom",
legend.box = "horizontal")
################ DBSCAN CLUSTERING
# Density-based clustering of the 2-D t-SNE embedding.
install.packages("dbscan")
install.packages("factoextra")
library(dbscan)
library(factoextra)
set.seed(123)
cl<- dbscan::dbscan(tsn.clust[,1:2],eps=1.9, MinPts=10 )
cl
fviz_cluster(cl, data = tsn.clust[,1:2], stand = FALSE,
ellipse = F, show.clust.cent = F,
geom = "point",palette = "jco", ggtheme = theme_classic())
###HIERARCHICAL CLUSTERING
# NOTE(review): this overwrites tsn.clust from the previous section.
tsn<-as.data.frame(bio_tsne3$Y)
tsn.clust<-tsn
cluster<-kmeans( scale( tsn ),4)
tsn.clust$kmeans<-factor(cluster$cluster)
clust.h<-stats::hclust(dist(scale(tsn) ) )
barplot(clust.h$height)
tsn.clust$hierar<-factor(cutree(clust.h,4) )
plot(clust.h)
ggplot(tsn.clust,aes_string(x="V1",y="V2",color="hierar") )+geom_point(size=1.85) + guides(colour=guide_legend(override.aes=list(size=6))) +
xlab("") + ylab("") +
ggtitle("") +
theme_light(base_size=20) +
theme(axis.text.x=element_blank(),
axis.text.y=element_blank(),
legend.direction = "horizontal",
legend.position = "bottom",
legend.box = "horizontal") +
scale_colour_brewer(palette = "Accent")
### RANDOM FOREST
# Compare RF accuracy with and without the two t-SNE coordinates as features.
ciat.tsne<- base::merge(bio_tsne3$Y,numeric,by="row.names" ,all.x=TRUE )
row.names(ciat.tsne)<- ciat.tsne$Row.names
ciat.tsne<-ciat.tsne[,-1]
set.seed(250)
trcont<- trainControl(method="LGOCV",p=0.8,number=1,savePredictions = T)
grid <- expand.grid(mtry = round((ncol(ciat.tsne)-4)/3))
## with TSNE
rforest_1<-train(Genepool.ACID~., data=ciat.tsne ,method="rf",tuneGrid=grid, importance=T, ntree=2000, metric="Accuracy", trControl= trcont)
# (cell1 + cell4)/total — diagonal of a flattened 2x2 confusion matrix;
# NOTE(review): this only measures accuracy correctly for a 2-class response.
accu.rforest_1<- mean(apply(gd<-data.frame(rforest_1$resampledCM[,1:4]),1,function(x){
(x[1] + x[4]) /sum(x)
}) )
#### without TSNE
rforest_2<-train(Genepool.ACID~., data=ciat.tsne[,-(1:2)] ,method="rf",tuneGrid=grid, importance=T, ntree=2000, metric="Accuracy", trControl= trcont)
accu.rforest_2<- mean(apply(gd<-data.frame(rforest_2$resampledCM[,1:4]),1,function(x){
(x[1] + x[4]) /sum(x)
}) )
##### PRINCIPAL COMPONENTS ANALYSIS
pca<-PCA(ciat.bio[,-(1:3)],ncp=4)
View(pca$var$cos2)
df<-data.frame(pca$ind$coord[,1:2],ciat.bio$Race.interpreted.ACID)
plot(pca)
ggplot(df,aes_string(x="Dim.1",y="Dim.2",color="ciat.bio.Race.interpreted.ACID", shape="ciat.bio.Race.interpreted.ACID")) + geom_point(size=1.85) + guides(colour=guide_legend(override.aes=list(size=6))) +
xlab("") + ylab("") +
ggtitle("") +
theme_light(base_size=20) +
theme(axis.text.x=element_blank(),
axis.text.y=element_blank(),
legend.direction = "horizontal",
legend.position = "bottom",
legend.box = "horizontal") +
scale_colour_brewer(palette = "Accent")
nrow(ciat.bio)
nrow(pca$ind$coord)
df<-data.frame(pca$ind$coord, ciat.bio$Genepool.ACID )
# Overlay PC2 density per genepool: Mesoamerican (black) vs Andean (red).
plot(density(df[which(df$ciat.bio.Genepool.ACID=="Mesoamerican"),2]),ylim=c(0,0.2))
lines(density(df[which(df$ciat.bio.Genepool.ACID=="Andean"),2]),col="red")
vect<-which( names(ciat.bio)%in%names(numeric) )
ciat.bio[,sapply(ciat.bio,is.character)]
####### ACP 2 #####
# PCA of the modeling dataset augmented with distance rasters. Rasters are
# loaded and extracted via eval(parse()) metaprogramming — one dynamically
# named raster_<file> object per "dist" file, appended to dmodel as a column.
OSys <- Sys.info()[1]
OSysPath <- switch(OSys, "Linux" = "/mnt", "Windows" = "//dapadfs")
root <- switch(OSys, "Linux" = "/mnt/workspace_cluster_9", "Windows" = "//dapadfs/Workspace_cluster_9")
# genep_1<- readRDS("//dapadfs/Workspace_cluster_9/gap_analysis_landraces/Input_data/_datosAndres/acp/genepool_predictions.RDS")
# race_1<-readRDS("//dapadfs/Workspace_cluster_9/gap_analysis_landraces/Input_data/_datosAndres/acp/predictions_race.RDS")
# beancordH<-readRDS("//dapadfs/Workspace_cluster_9/gap_analysis_landraces/Input_data/_datosAndres/acp/BEAN-GRP-COORDINATES-HUMAN-FACTORS.RDS")
# beancordC<-readRDS("//dapadfs/Workspace_cluster_9/gap_analysis_landraces/Input_data/_datosAndres/acp/BEAN-GRP-COORDINATES-CLIMATE.RDS")
dmodel<- readRDS("//dapadfs/Workspace_cluster_9/gap_analysis_landraces/Input_data/_datosAndres/acp/data4modeling.RDS")
files<-dir(file.path("//dapadfs/Workspace_cluster_9/gap_analysis_landraces/Input_data/raster_sdm/2_5m"))
files<-files[grep("dist",files)]
for(i in 1:length(files )){
# Build and evaluate: raster_<file> <- raster('<path>')
eval( parse( text = paste0('raster_',files[i],'<-raster(',"'", file.path(paste0( root,"//gap_analysis_landraces/Input_data/raster_sdm/2_5m","/",files[i],"'" ) ),')' ) ) )
# Extract the raster value at each accession's coordinates into a new column
# named after the file (extension stripped).
eval( parse( text= paste0("dmodel <- data.frame( dmodel,", substr(files[i],1, nchar(files[i])-4 ) ,"= extract(", 'raster_',files[i], ", cbind(dmodel$Longitude,dmodel$Latitude) ,df=T)[,2] )" ) ) )
}
dmodel<-dmodel[, -which(names(dmodel)== "Distance.to.GP1" )]
dmodel<- dmodel[complete.cases(dmodel$Genepool.predicted),]
dmodel<- dmodel[complete.cases(dmodel$Race.predicted),]
dmodel<- dmodel %>% dplyr::select( ., Altitude:Longitude ,annualPET:dist_toGP1 )
dmodel<- na.omit(dmodel)
# all(is.na(dmodel.acp))==FALSE
dmodel.acp <- dmodel %>% dplyr::select( .,-Analysis ,-Genepool.predicted, -Race.predicted )
M<-cor(dmodel.acp)
corrplot(M)
plot(dmodel.acp$aridityIndexThornthwaite,dmodel.acp$Physical.area)
highlyCorDescr <- findCorrelation(M, cutoff = .70)
names(dmodel.acp)[highlyCorDescr]
#dmodel.acp<- dmodel.acp [ , highlyCorDescr]
# Low-correlation variables are treated as supplementary (not used to build
# the components).
acp<- PCA( dmodel.acp , quanti.sup = which( names(dmodel.acp) %in% names(dmodel.acp)[-highlyCorDescr] ) )
plot(acp)
names(dmodel)
df<-data.frame(acp$ind$coord[,1:2],gen.pred=dmodel$Genepool.predicted )
plot(acp)
ggplot(df,aes_string(x="Dim.1",y="Dim.2",color="gen.pred", shape="gen.pred")) + geom_point(size=1.85) + guides(colour=guide_legend(override.aes=list(size=6))) +
xlab("") + ylab("") +
ggtitle("") +
theme_light(base_size=20) +
theme(axis.text.x=element_blank(),
axis.text.y=element_blank(),
legend.direction = "horizontal",
legend.position = "bottom",
legend.box = "horizontal") +
scale_colour_brewer(palette = "Accent")
####### CWR ACP #####
# Same raster-extraction + PCA workflow for crop wild relative (CWR/GP1) points.
cwr<- read.csv2("//dapadfs/Workspace_cluster_9/gap_analysis_landraces/Input_data/_occurrence_data/_gp1_data/GP1_points.csv",sep="|")
str(cwr)
cwr<- data.frame(Latitude=as.numeric(as.character(cwr$latitude)), Longitude=as.numeric(as.character(cwr$longitude) ) )
cwr<- cwr[ complete.cases(cwr) ,]
# NOTE(review): if no latitude equals 0, -which(...) is -integer(0) and this
# drops EVERY row. Works only while at least one 0-latitude point exists.
cwr<- cwr[-which(cwr$Latitude==0), ]
files<-dir(file.path("//dapadfs/Workspace_cluster_9/gap_analysis_landraces/Input_data/raster_sdm/2_5m"))
#files<-files[grep("dist",files)]
for(i in 1:length(files )){
# Each iteration loads one raster, extracts values at the CWR coordinates,
# and removes the raster again; failures are reported but do not stop the loop.
tryCatch( {
eval( parse( text = paste0('raster_',files[i],'<-raster(',"'", file.path(paste0( root,"//gap_analysis_landraces/Input_data/raster_sdm/2_5m","/",files[i],"'" ) ),')' ) ) )
eval( parse( text= paste0("cwr <- data.frame( cwr,", substr(files[i],1, nchar(files[i])-4 ) ,"= extract(", 'raster_',files[i], ", cbind(cwr$Longitude,cwr$Latitude) ,df=T)[,2] )" ) ) )
eval(parse(text= paste0("rm(", "raster_", files[i], ")" ) ) )
}, error=function(e){ cat("ERROR :",conditionMessage(e), "\n") } )
cat(paste0("procesando:"," ",i , "\n") )
}
cwr<-na.omit(cwr)
#dmodel.acp <- dmodel %>% dplyr::select( .,-Analysis ,-Genepool.predicted, -Race.predicted )
M<-cor(cwr)
corrplot(M)
highlyCorDescr <- findCorrelation(M, cutoff = .70)
names(cwr)[highlyCorDescr]
acp<- PCA( cwr , quanti.sup = which( names(cwr) %in% names(cwr)[-highlyCorDescr] ) )
plot(acp)
names(dmodel)
# NOTE(review): df mixes CWR component scores with dmodel's predictions —
# lengths will differ unless the two tables align; verify before plotting.
df<-data.frame(acp$ind$coord[,1:2],gen.pred=dmodel$Genepool.predicted )
plot(acp)
ggplot(df,aes_string(x="Dim.1",y="Dim.2",color="gen.pred", shape="gen.pred")) + geom_point(size=1.85) + guides(colour=guide_legend(override.aes=list(size=6))) +
xlab("") + ylab("") +
ggtitle("") +
theme_light(base_size=20) +
theme(axis.text.x=element_blank(),
axis.text.y=element_blank(),
legend.direction = "horizontal",
legend.position = "bottom",
legend.box = "horizontal") +
scale_colour_brewer(palette = "Accent")
###### PCA of landraces and CWR ###
# Stack landrace rows (with predicted genepool) on top of CWR rows (labelled
# "CWR") and run a joint PCA, colouring the scores by group.
names(dmodel)
names(cwr)
cwr_landra <- dmodel %>% dplyr::select(.,names(cwr),Genepool.predicted) %>% bind_rows(.,cwr)
cwr_landra<- data.frame(cwr_landra,stringsAsFactors=FALSE)
str(cwr_landra)
cwr_landra$Genepool.predicted<-as.character(cwr_landra$Genepool.predicted)
# CWR rows have no prediction: tag them explicitly.
cwr_landra$Genepool.predicted[is.na(cwr_landra$Genepool.predicted)] <- "CWR"
cwr_landra$Genepool.predicted<-as.factor(cwr_landra$Genepool.predicted)
cwr_landra<-na.omit(cwr_landra)
# Exclude the last column (the group label) from the PCA itself.
acp<-PCA(cwr_landra[,-ncol(cwr_landra)] )
df<-data.frame(acp$ind$coord[,1:2],gen.pred=cwr_landra$Genepool.predicted )
plot(acp)
ggplot(df,aes_string(x="Dim.1",y="Dim.2",color="gen.pred", shape="gen.pred")) + geom_point(size=1.85) + guides(colour=guide_legend(override.aes=list(size=6))) +
xlab("") + ylab("") +
ggtitle("") +
theme_light(base_size=20) +
theme(axis.text.x=element_blank(),
axis.text.y=element_blank(),
legend.direction = "horizontal",
legend.position = "bottom",
legend.box = "horizontal") +
scale_colour_brewer(palette = "Accent")
# stray non-R line (captive-portal redirect URL captured into the file) — commented out so the script parses:
# http://1.2.0.1/reg.php?ah_goal=politicas.html&ah_login=true&url=E2B8F3578D88E9BF2388F2468A984E8A8C28109A19
|
3d7e124c1591643aaa93ca4dc6bbb60f4b630acc | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/ddpcr/examples/reset.Rd.R | ed3395a50d00c0ff35d4ecc31796579e51d494fe | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 270 | r | reset.Rd.R | library(ddpcr)
### Name: reset
### Title: Reset a plate
### Aliases: reset
### ** Examples
## Not run:
##D plate <- new_plate(sample_data_dir(), type = plate_types$custom_thresholds)
##D plate <- reset(plate, type=plate_types$fam_positive_pnpp)
## End(Not run)
|
b5c2b2eaca3966726c81c26b541c0dead2fcd76f | 1a62112ebb392399e96157a1f27d92a6179b208d | /man/eq_create_label.Rd | 086498176963f15f6d4ec68ccea37a7429583326 | [] | no_license | jianweilu/mycapstone | 1e473e55b2483ac103def004fdafff3ba7156889 | 554153b8cf8dc0fd4335eb35688d683aba63cd11 | refs/heads/master | 2023-03-26T11:11:31.313564 | 2021-03-23T03:42:19 | 2021-03-23T03:42:19 | 350,567,769 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 898 | rd | eq_create_label.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/1capstoneproject2.R
\name{eq_create_label}
\alias{eq_create_label}
\title{Create pop-up text for map markers.
Generates HTML-formatted text for popups on map markers.}
\usage{
eq_create_label(eq_clean = NULL)
}
\arguments{
\item{eq_clean}{The clean earthquake data in a tbl_df object.}
}
\value{
A character vector containing HTML-formatted popup text to be used in a leaflet visualization.
}
\description{
Creates pop-up text for map markers by generating HTML-formatted label
strings for use in leaflet popups.
}
\examples{
\dontrun{
filename<-system.file("data","earthquakes_data.txt.zip",package="capstone")
eq_location_clean(eq_clean_data(eq_data_read(filename))) \%>\%
dplyr::filter(COUNTRY == "MEXICO" & lubridate::year(datetime) >= 1980) \%>\%
dplyr::mutate(popup_text = eq_create_label(.)) \%>\%
eq_map(annot_col = "popup_text")
}
}
|
04ef30e3f48004c71c99a5ebad928e8bb75e565a | 79a6beed8e70869b1053cdf85fc13c50c58ffe7e | /MLSplayers-dirty/cluster-MLS-players-diana2.R | 6c65cf357fa9897145e0d76c74c1e3a2cf84d365 | [] | no_license | mimburgi/SoccerStuff | 07cfe200f056d9257d28a2735d68f8ccd6573808 | 5c50a239f4b7f58be7cd0837a378d8e852d2cbee | refs/heads/master | 2022-11-27T21:29:17.312796 | 2020-08-05T01:57:04 | 2020-08-05T01:57:04 | 281,275,496 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 5,235 | r | cluster-MLS-players-diana2.R | library(dplyr)
library(ggplot2)
source('soccer_util_fxns.R')
library(caret)
## data read in and preproc ####
chain<-read.csv('./2019/ASA_PlayerxGChain_per96table.csv', stringsAsFactors = F)
A3pass<-read.csv('./2019/ASApassingtable-attackthird.csv', stringsAsFactors = F)
M3pass<-read.csv('./2019/ASApassingtable-middlethird.csv', stringsAsFactors = F)
D3pass<-read.csv('./2019/ASApassingtable-defthird.csv', stringsAsFactors = F)
shoot<-read.csv('./2019/ASAshootertable.csv', stringsAsFactors = F)
totalpass<-read.csv('./2019/ASApassingtable-total.csv', stringsAsFactors = F)
chain<-subset(chain, Minutes > 1200)
A3pass<-subset(A3pass, Min > 1200)
M3pass<-subset(M3pass, Min > 1200)
D3pass<-subset(D3pass, Min > 1200)
shoot<-subset(shoot, Min > 1200)
totalpass<-subset(totalpass, Min > 1200)
teams<-chain$Team
#trim to only per 90s
A3pass<-select(A3pass, matches(".96|Player|Pos"))
M3pass<-select(M3pass, matches(".96|Player|Pos"))
D3pass<-select(D3pass, matches(".96|Player|Pos"))
shoot<-select(shoot, matches(".96|Player|Pos|Dist"))
chain<-select(chain, matches(".96|Player|Pos|Team|xB."))
totalpass<-select(totalpass, matches(".96|Player|Pos"))
allplayers<-c(chain$Player, A3pass$Player, M3pass$Player, D3pass$Player, shoot$Player, totalpass$Player)
allplayers<-unique(allplayers)
dat<-data.frame(Player=allplayers,
Pos=as.character(rep(NA, length(allplayers))),
stringsAsFactors = F)
# Merge per-player metrics from each source table into dat, one player at a
# time. Pos is (re)assigned from whichever table contains the player; a player
# absent from a table simply leaves that table's columns NA for now.
for (player in allplayers){
#add Position and Minutes
if(player %in% shoot$Player){
#dat$InShooter[dat$Player==player]<-1
dat$Pos[dat$Player==player]<-shoot$Pos[shoot$Player==player]
dat$shots[dat$Player==player]<-shoot$Shots.96[shoot$Player==player]
dat$KP[dat$Player==player]<-shoot$KeyP.96[shoot$Player==player]
dat$xG[dat$Player==player]<-shoot$xG.96[shoot$Player==player]
dat$xA[dat$Player==player]<-shoot$xA.96[shoot$Player==player]
dat$xPlace[dat$Player==player]<-shoot$xPlace.96[shoot$Player==player]
dat$ShotDist[dat$Player==player]<-shoot$Dist[shoot$Player==player]
dat$KPDist[dat$Player==player]<-shoot$Dist.key[shoot$Player==player]
}
if(player %in% A3pass$Player){
#dat$InA3[dat$Player==player]<-1
dat$Pos[dat$Player==player]<-A3pass$Pos[A3pass$Player==player]
dat$A3Passes[dat$Player==player]<-A3pass$Passes.96[A3pass$Player==player]
}
if(player %in% M3pass$Player){
#dat$InM3[dat$Player==player]<-1
dat$Pos[dat$Player==player]<-M3pass$Pos[M3pass$Player==player]
dat$M3Passes[dat$Player==player]<-M3pass$Passes.96[M3pass$Player==player]
}
if(player %in% D3pass$Player){
#dat$InD3[dat$Player==player]<-1
dat$Pos[dat$Player==player]<-D3pass$Pos[D3pass$Player==player]
dat$D3Passes[dat$Player==player]<-D3pass$Passes.96[D3pass$Player==player]
}
if(player %in% chain$Player){
#dat$InChain[dat$Player==player]<-1
dat$Pos[dat$Player==player]<-chain$Pos[chain$Player==player]
dat$percChain[dat$Player==player]<-chain$TeamChain.[chain$Player==player]
dat$xGChain[dat$Player==player]<-chain$xGChain.96[chain$Player==player]
dat$xB[dat$Player==player]<-chain$xB.96[chain$Player==player]
dat$Team[dat$Player==player]<-chain$Team[chain$Player==player]
dat$ShotChainPerc[dat$Player==player]<-chain$PlayerShot.[chain$Player==player]
dat$KPChainPerc[dat$Player==player]<-chain$PlayerKP.[chain$Player==player]
dat$xBperc[dat$Player==player]<-chain$xB.[chain$Player==player]
}
if(player %in% totalpass$Player){
dat$Vertical[dat$Player==player]<-totalpass$Vertical.96[totalpass$Player==player]
dat$PassPct[dat$Player==player]<-totalpass$PassPct.96[totalpass$Player==player]
dat$PassDistance[dat$Player==player]<-totalpass$Distance.96[totalpass$Player==player]
dat$TouchPerc[dat$Player==player]<-totalpass$Touch..96[totalpass$Player==player]
dat$xPassPerc[dat$Player==player]<-totalpass$xPassPct.96[totalpass$Player==player]
dat$Passes[dat$Player==player]<-totalpass$Passes.96[totalpass$Player==player]
}
}
# Derive share/rate metrics, scale the chosen features, cluster with DIANA
# (divisive hierarchical), and label the 4 clusters with player archetypes.
dat[is.na(dat)]<-0 #assuming missing vals mean zeros
dat<-subset(dat, Pos != "GK")
dat$percA3pass<-dat$A3Passes/dat$Passes
dat$percM3pass<-dat$M3Passes/dat$Passes
dat$percD3pass<-dat$D3Passes/dat$Passes
# Ratios only computed above a small volume threshold to avoid noisy rates.
dat$xAper[dat$KP > .2]<-dat$xA[dat$KP > .2]/dat$KP[dat$KP > .2]
dat$xGper[dat$shots > .2]<-dat$xG[dat$shots > .2]/dat$shots[dat$shots > .2]
## tsne ####
# (section header is historical — this version clusters with diana, not t-SNE)
initials<-c('shots', 'KP', 'xGChain', 'xBperc', 'ShotChainPerc')
trimmed<-select(dat, one_of(initials))
#
# scalefn<-select(dat, one_of(initials)) %>% preProcess(method=c('BoxCox', 'scale', 'center'))
# scaled<-predict(scalefn, select(dat, one_of(initials)))
#
# Standardise and winsorise extreme low outliers at -3 SD.
scaled<-select(dat, one_of(initials)) %>% scale() %>% as.data.frame()
scaled[scaled < -3]<- -3
scaled[is.na(scaled)]<-0 #assuming missing vals mean zeros
res.hc<-diana(scaled)
plot(res.hc, which.plots = 2)
#
#
# fviz_screeplot(FactoMineR::PCA(scaled))
#
# set.seed(100)
# kmm<-kmeans(scaled, 3)
# fviz_cluster(kmm, scaled)
clusters<-cutree(res.hc, 4)
# NOTE(review): plotclusters() presumably comes from soccer_util_fxns.R.
plotclusters(scaled, clusters)
clusters<-as.factor(clusters)
# Archetype names assigned by cluster number — order depends on the dendrogram.
levels(clusters)<-c('shuttler', 'creator', 'buildup', 'finisher')
dat$maintype<-clusters
## positional breakdown ####
dat$Pos<-as.factor(dat$Pos)
summary(dat$Pos[dat$maintype=='shuttler'])
10c5d8d1d8c6d0e98727c840dd3c48ed03779608 | 68ff38466550b1c1938d949e444a0603a18bf853 | /R/to_lang2.R | 7f824a11c7ada578376279fb5e5da77a0e389a6a | [
"MIT"
] | permissive | zumbov2/deeplr | 4ca3fec34999cce718a2e7f53e956bce8e4b3adf | b9206467e7b0a116b29476107bddcb84215c4de2 | refs/heads/master | 2022-12-21T06:45:38.311513 | 2022-12-19T20:11:37 | 2022-12-19T20:11:37 | 134,332,386 | 24 | 5 | null | 2022-12-19T20:11:38 | 2018-05-21T22:39:57 | R | UTF-8 | R | false | false | 24,825 | r | to_lang2.R | #' Translate texts into English using DeepL API Free
#'
#' \code{toEnglish2} translates a text from an available language into English
#' using DeepL API Free. Use \code{available_languages2} to list all supported languages.
#' An authentication key is required to use this service. With the DeepL API Free package,
#' developers can translate up to 500,000 characters per month for free.
#'
#' @param text character vector to be translated. Only UTF8-encoded plain text is supported.
#' An element can contain several sentences, but should not exceed 30kbytes.
#' @param source_lang language of the text to be translated. If parameter \code{is.null},
#' the API guesses the language of the source. If input is of length 1, the same source
#' language is applied to all elements.
#' @param split_sentences if \code{TRUE}, the translation engine splits the input into sentences.
#' If only one sentence is translated, it is recommended to set to \code{FALSE} to prevent
#' the engine from unintentionally splitting the sentence.
#' @param preserve_formatting if \code{TRUE}, the translation engine tries to preserve some aspects
#' (e.g. punctuation at the beginning and end of the sentence, upper/lower case at the beginning
#' of the sentence) of the formatting.
#' @param get_detect if \code{TRUE}, the language detected for the source text is included in
#' the response.
#' @param auth_key Authentication key.
#'
#' @details To get an authentication key, you need to register for a DeepL API Free
#' account (\url{https://www.deepl.com/pro#developer}).
#'
#' @return If \code{get_detect} is set to \code{FALSE} a \code{character vector} containing the
#' translation is returned. Otherwise, a (\code{tibble}) is returned with the following columns:
#' \itemize{
#' \item \code{translation} the translated text.
#' \item \code{source_lang} detected or specified language of the input text.
#' }
#'
#' @references \href{https://www.deepl.com/pro#developer}{DeepL API documentations}
#'
#' @export
#'
#' @examples
#' \dontrun{
#' # Translate a single text
#' toEnglish2("Hallo Welt!", auth_key = "my_key")
#'
#' # Translate multiple texts and return the detected language
#' texts <- c("Me llamo Fred.", "Je suis médecin.", "Ich komme aus der Schweiz.")
#' toEnglish2(texts, get_detect = T, auth_key = "x")
#'
#' }
#'
#'
toEnglish2 <- function(text, source_lang = NULL, split_sentences = TRUE, preserve_formatting = FALSE,
get_detect = FALSE, auth_key = "your_key") {
translate2(text = text, target_lang = "EN", source_lang = source_lang, split_sentences = split_sentences,
preserve_formatting = preserve_formatting, get_detect = get_detect, auth_key = auth_key)
}
#' Translate texts into German using DeepL API Free
#'
#' \code{toGerman2} sends one or more texts to the DeepL API Free service and
#' returns their German translations. Use \code{available_languages2} to list
#' every language that can serve as the source. An authentication key is
#' required; the free plan covers up to 500,000 translated characters per month.
#'
#' @param text character vector to be translated. Only UTF8-encoded plain text
#'     is supported. An element can contain several sentences, but should not
#'     exceed 30kbytes.
#' @param source_lang language of the text to be translated. If parameter
#'     \code{is.null}, the API guesses the language of the source. If input is
#'     of length 1, the same source language is applied to all elements.
#' @param split_sentences if \code{TRUE}, the translation engine splits the
#'     input into sentences. For single-sentence input, set this to
#'     \code{FALSE} to keep the engine from splitting the sentence by mistake.
#' @param preserve_formatting if \code{TRUE}, the engine tries to keep some
#'     aspects of the original formatting (e.g. punctuation at the beginning
#'     and end of the sentence, upper/lower case at the start of the sentence).
#' @param get_detect if \code{TRUE}, the language detected for the source text
#'     is included in the response.
#' @param auth_key Authentication key.
#'
#' @details To get an authentication key, you need to register for a DeepL API Free
#' account (\url{https://www.deepl.com/pro#developer}).
#'
#' @return If \code{get_detect} is set to \code{FALSE}, a \code{character vector}
#' containing the translations is returned. Otherwise, a (\code{tibble}) is
#' returned with the following columns:
#' \itemize{
#' \item \code{translation} the translated text.
#' \item \code{source_lang} detected or specified language of the input text.
#' }
#'
#' @references \href{https://www.deepl.com/pro#developer}{DeepL API documentations}
#'
#' @export
#'
#' @examples
#' \dontrun{
#' # Translate a single text
#' toGerman2("Hello world!", auth_key = "my_key")
#'
#' # Translate multiple texts and return the detected language
#' texts <- c("Me llamo Fred.", "Je suis médecin.", "I'm from Brisbane.")
#' toGerman2(texts, get_detect = T, auth_key = "x")
#'
#' }
#'
#'
toGerman2 <- function(text, source_lang = NULL, split_sentences = TRUE, preserve_formatting = FALSE,
                      get_detect = FALSE, auth_key = "your_key") {

  # Delegate to the generic translator with the target fixed to German.
  translate2(
    text = text,
    target_lang = "DE",
    source_lang = source_lang,
    split_sentences = split_sentences,
    preserve_formatting = preserve_formatting,
    get_detect = get_detect,
    auth_key = auth_key
  )
}
#' Translate texts into French using DeepL API Free
#'
#' \code{toFrench2} translates a text from an available language into French
#' using DeepL API Free. Use \code{available_languages2} to list all supported languages.
#' An authentication key is required to use this service. With the DeepL API Free package,
#' developers can translate up to 500,000 characters per month for free.
#'
#' @param text character vector to be translated. Only UTF8-encoded plain text is supported.
#' An element can contain several sentences, but should not exceed 30kbytes.
#' @param source_lang language of the text to be translated. If parameter \code{is.null},
#' the API guesses the language of the source. If input is of length 1, the same source
#' language is applied to all elements.
#' @param split_sentences if \code{TRUE}, the translation engine splits the input into sentences.
#' If only one sentence is translated, it is recommended to set to \code{FALSE} to prevent
#' the engine from unintentionally splitting the sentence.
#' @param preserve_formatting if \code{TRUE}, the translation engine tries to preserve some aspects
#' (e.g. punctuation at the beginning and end of the sentence, upper/lower case at the beginning
#' of the sentence) of the formatting.
#' @param get_detect if \code{TRUE}, the language detected for the source text is included in
#' the response.
#' @param auth_key Authentication key.
#'
#' @details To get an authentication key, you need to register for a DeepL API Free
#' account (\url{https://www.deepl.com/pro#developer}).
#'
#' @return If \code{get_detect} is set to \code{FALSE} a \code{character vector} containing the
#' translation is returned. Otherwise, a (\code{tibble}) is returned with the following columns:
#' \itemize{
#' \item \code{translation} the translated text.
#' \item \code{source_lang} detected or specified language of the input text.
#' }
#'
#' @references \href{https://www.deepl.com/pro#developer}{DeepL API documentations}
#'
#' @export
#'
#' @examples
#' \dontrun{
#' # Translate a single text
#' toFrench2("Hallo Welt!", auth_key = "my_key")
#'
#' # Translate multiple texts and return the detected language
#' texts <- c("Me llamo Fred.", "I'm a doctor.", "Ich komme aus der Schweiz.")
#' toFrench2(texts, get_detect = T, auth_key = "x")
#'
#' }
#'
#'
toFrench2 <- function(text, source_lang = NULL, split_sentences = TRUE, preserve_formatting = FALSE,
                      get_detect = FALSE, auth_key = "your_key") {
  # Thin wrapper: delegate to the generic translator with French ("FR")
  # fixed as the target language; all other options pass through unchanged.
  translate2(
    text = text,
    target_lang = "FR",
    source_lang = source_lang,
    split_sentences = split_sentences,
    preserve_formatting = preserve_formatting,
    get_detect = get_detect,
    auth_key = auth_key
  )
}
#' Translate texts into Italian using DeepL API Free
#'
#' \code{toItalian2} translates a text from an available language into Italian
#' using DeepL API Free. Use \code{available_languages2} to list all supported languages.
#' An authentication key is required to use this service. With the DeepL API Free package,
#' developers can translate up to 500,000 characters per month for free.
#'
#' @param text character vector to be translated. Only UTF8-encoded plain text is supported.
#' An element can contain several sentences, but should not exceed 30kbytes.
#' @param source_lang language of the text to be translated. If parameter \code{is.null}, the API
#' guesses the language of the source. If input is of length 1, the same source language is
#' applied to all elements.
#' @param split_sentences if \code{TRUE}, the translation engine splits the input into sentences.
#' If only one sentence is translated, it is recommended to set to \code{FALSE} to prevent
#' the engine from unintentionally splitting the sentence.
#' @param preserve_formatting if \code{TRUE}, the translation engine tries to preserve some aspects
#' (e.g. punctuation at the beginning and end of the sentence, upper/lower case at the beginning
#' of the sentence) of the formatting.
#' @param get_detect if \code{TRUE}, the language detected for the source text is included in
#' the response.
#' @param auth_key Authentication key.
#'
#' @details To get an authentication key, you need to register for a DeepL API Free
#' account (\url{https://www.deepl.com/pro#developer}).
#'
#' @return If \code{get_detect} is set to \code{FALSE} a \code{character vector} containing the
#' translation is returned. Otherwise, a (\code{tibble}) is returned with the following columns:
#' \itemize{
#' \item \code{translation} the translated text.
#' \item \code{source_lang} detected or specified language of the input text.
#' }
#'
#' @references \href{https://www.deepl.com/pro#developer}{DeepL API documentations}
#'
#' @export
#'
#' @examples
#' \dontrun{
#' # Translate a single text
#' toItalian2("Hallo Welt!", auth_key = "my_key")
#'
#' # Translate multiple texts and return the detected language
#' texts <- c("Me llamo Fred.", "Je suis médecin.", "Ich komme aus der Schweiz.")
#' toItalian2(texts, get_detect = T, auth_key = "x")
#'
#' }
#'
#'
toItalian2 <- function(text, source_lang = NULL, split_sentences = TRUE, preserve_formatting = FALSE,
                       get_detect = FALSE, auth_key = "your_key") {
  # Thin wrapper: delegate to the generic translator with Italian ("IT")
  # fixed as the target language; all other options pass through unchanged.
  translate2(
    text = text,
    target_lang = "IT",
    source_lang = source_lang,
    split_sentences = split_sentences,
    preserve_formatting = preserve_formatting,
    get_detect = get_detect,
    auth_key = auth_key
  )
}
#' Translate texts into Spanish using DeepL API Free
#'
#' \code{toSpanish2} translates a text from an available language into Spanish
#' using DeepL API Free. Use \code{available_languages2} to list all supported languages.
#' An authentication key is required to use this service. With the DeepL API Free package,
#' developers can translate up to 500,000 characters per month for free.
#'
#' @param text character vector to be translated. Only UTF8-encoded plain text is supported.
#' An element can contain several sentences, but should not exceed 30kbytes.
#' @param source_lang language of the text to be translated. If parameter \code{is.null},
#' the API guesses the language of the source. If input is of length 1, the same source
#' language is applied to all elements.
#' @param split_sentences if \code{TRUE}, the translation engine splits the input into sentences.
#' If only one sentence is translated, it is recommended to set to \code{FALSE} to prevent
#' the engine from unintentionally splitting the sentence.
#' @param preserve_formatting if \code{TRUE}, the translation engine tries to preserve some aspects
#' (e.g. punctuation at the beginning and end of the sentence, upper/lower case at the beginning
#' of the sentence) of the formatting.
#' @param get_detect if \code{TRUE}, the language detected for the source text is included in
#' the response.
#' @param auth_key Authentication key.
#'
#' @details To get an authentication key, you need to register for a DeepL API Free
#' account (\url{https://www.deepl.com/pro#developer}).
#'
#' @return If \code{get_detect} is set to \code{FALSE} a \code{character vector} containing the
#' translation is returned. Otherwise, a (\code{tibble}) is returned with the following columns:
#' \itemize{
#' \item \code{translation} the translated text.
#' \item \code{source_lang} detected or specified language of the input text.
#' }
#'
#' @references \href{https://www.deepl.com/pro#developer}{DeepL API documentations}
#'
#' @export
#'
#' @examples
#' \dontrun{
#' # Translate a single text
#' toSpanish2("Hallo Welt!", auth_key = "my_key")
#'
#' # Translate multiple texts and return the detected language
#' texts <- c("My name is Fred.", "Je suis médecin.", "Ich komme aus der Schweiz.")
#' toSpanish2(texts, get_detect = T, auth_key = "x")
#'
#' }
#'
#'
toSpanish2 <- function(text, source_lang = NULL, split_sentences = TRUE, preserve_formatting = FALSE,
                       get_detect = FALSE, auth_key = "your_key") {
  # Thin wrapper: delegate to the generic translator with Spanish ("ES")
  # fixed as the target language; all other options pass through unchanged.
  translate2(
    text = text,
    target_lang = "ES",
    source_lang = source_lang,
    split_sentences = split_sentences,
    preserve_formatting = preserve_formatting,
    get_detect = get_detect,
    auth_key = auth_key
  )
}
#' Translate texts into Japanese using DeepL API Free
#'
#' \code{toJapanese2} translates a text from an available language into Japanese
#' using DeepL API Free. Use \code{available_languages2} to list all supported languages.
#' An authentication key is required to use this service. With the DeepL API Free package,
#' developers can translate up to 500,000 characters per month for free.
#'
#' @param text character vector to be translated. Only UTF8-encoded plain text is supported.
#' An element can contain several sentences, but should not exceed 30kbytes.
#' @param source_lang language of the text to be translated. If parameter \code{is.null}, the API
#' guesses the language of the source. If input is of length 1, the same source language is
#' applied to all elements.
#' @param split_sentences if \code{TRUE}, the translation engine splits the input into sentences.
#' If only one sentence is translated, it is recommended to set to \code{FALSE} to prevent
#' the engine from unintentionally splitting the sentence.
#' @param preserve_formatting if \code{TRUE}, the translation engine tries to preserve some aspects
#' (e.g. punctuation at the beginning and end of the sentence, upper/lower case at the beginning
#' of the sentence) of the formatting.
#' @param get_detect if \code{TRUE}, the language detected for the source text is included in
#' the response.
#' @param auth_key Authentication key.
#'
#' @details To get an authentication key, you need to register for a DeepL API Free
#' account (\url{https://www.deepl.com/pro#developer}).
#'
#' @return If \code{get_detect} is set to \code{FALSE} a \code{character vector} containing the
#' translation is returned. Otherwise, a (\code{tibble}) is returned with the following columns:
#' \itemize{
#' \item \code{translation} the translated text.
#' \item \code{source_lang} detected or specified language of the input text.
#' }
#'
#' @references \href{https://www.deepl.com/pro#developer}{DeepL API documentations}
#'
#' @export
#'
#' @examples
#' \dontrun{
#' # Translate a single text
#' toJapanese2("Hallo Welt!", auth_key = "my_key")
#'
#' # Translate multiple texts and return the detected language
#' texts <- c("My name is Fred.", "Je suis médecin.", "Ich komme aus der Schweiz.")
#' toJapanese2(texts, get_detect = T, auth_key = "x")
#'
#' }
#'
#'
toJapanese2 <- function(text, source_lang = NULL, split_sentences = TRUE, preserve_formatting = FALSE,
                        get_detect = FALSE, auth_key = "your_key") {
  # Thin wrapper: delegate to the generic translator with Japanese ("JA")
  # fixed as the target language; all other options pass through unchanged.
  translate2(
    text = text,
    target_lang = "JA",
    source_lang = source_lang,
    split_sentences = split_sentences,
    preserve_formatting = preserve_formatting,
    get_detect = get_detect,
    auth_key = auth_key
  )
}
#' Translate texts into Russian using DeepL API Free
#'
#' \code{toRussian2} translates a text from an available language into Russian
#' using DeepL API Free. Use \code{available_languages2} to list all supported languages.
#' An authentication key is required to use this service. With the DeepL API Free package,
#' developers can translate up to 500,000 characters per month for free.
#'
#' @param text character vector to be translated. Only UTF8-encoded plain text is supported.
#' An element can contain several sentences, but should not exceed 30kbytes.
#' @param source_lang language of the text to be translated. If parameter \code{is.null},
#' the API guesses the language of the source. If input is of length 1, the same source
#' language is applied to all elements.
#' @param split_sentences if \code{TRUE}, the translation engine splits the input into sentences.
#' If only one sentence is translated, it is recommended to set to \code{FALSE} to prevent
#' the engine from unintentionally splitting the sentence.
#' @param preserve_formatting if \code{TRUE}, the translation engine tries to preserve some aspects
#' (e.g. punctuation at the beginning and end of the sentence, upper/lower case at the beginning
#' of the sentence) of the formatting.
#' @param get_detect if \code{TRUE}, the language detected for the source text is included in
#' the response.
#' @param auth_key Authentication key.
#'
#' @details To get an authentication key, you need to register for a DeepL API Free
#' account (\url{https://www.deepl.com/pro#developer}).
#'
#' @return If \code{get_detect} is set to \code{FALSE} a \code{character vector} containing the
#' translation is returned. Otherwise, a (\code{tibble}) is returned with the following columns:
#' \itemize{
#' \item \code{translation} the translated text.
#' \item \code{source_lang} detected or specified language of the input text.
#' }
#'
#' @references \href{https://www.deepl.com/pro#developer}{DeepL API documentations}
#'
#' @export
#'
#' @examples
#' \dontrun{
#' # Translate a single text
#' toRussian2("Hallo Welt!", auth_key = "my_key")
#'
#' # Translate multiple texts and return the detected language
#' texts <- c("My name is Fred.", "Je suis médecin.", "Ich komme aus der Schweiz.")
#' toRussian2(texts, get_detect = T, auth_key = "x")
#'
#' }
#'
#'
toRussian2 <- function(text, source_lang = NULL, split_sentences = TRUE, preserve_formatting = FALSE,
                       get_detect = FALSE, auth_key = "your_key") {
  # Thin wrapper: delegate to the generic translator with Russian ("RU")
  # fixed as the target language; all other options pass through unchanged.
  translate2(
    text = text,
    target_lang = "RU",
    source_lang = source_lang,
    split_sentences = split_sentences,
    preserve_formatting = preserve_formatting,
    get_detect = get_detect,
    auth_key = auth_key
  )
}
#' Translate texts into Chinese using DeepL API Free
#'
#' \code{toChinese2} translates a text from an available language into Chinese
#' using DeepL API Free. Use \code{available_languages2} to list all supported languages.
#' An authentication key is required to use this service. With the DeepL API Free package,
#' developers can translate up to 500,000 characters per month for free.
#'
#' @param text character vector to be translated. Only UTF8-encoded plain text is supported.
#' An element can contain several sentences, but should not exceed 30kbytes.
#' @param source_lang language of the text to be translated. If parameter \code{is.null},
#' the API guesses the language of the source. If input is of length 1, the same source
#' language is applied to all elements.
#' @param split_sentences if \code{TRUE}, the translation engine splits the input into sentences.
#' If only one sentence is translated, it is recommended to set to \code{FALSE} to prevent
#' the engine from unintentionally splitting the sentence.
#' @param preserve_formatting if \code{TRUE}, the translation engine tries to preserve some aspects
#' (e.g. punctuation at the beginning and end of the sentence, upper/lower case at the beginning
#' of the sentence) of the formatting.
#' @param get_detect if \code{TRUE}, the language detected for the source text is included in
#' the response.
#' @param auth_key Authentication key.
#'
#' @details To get an authentication key, you need to register for a DeepL API Free
#' account (\url{https://www.deepl.com/pro#developer}).
#'
#' @return If \code{get_detect} is set to \code{FALSE} a \code{character vector} containing the
#' translation is returned. Otherwise, a (\code{tibble}) is returned with the following columns:
#' \itemize{
#' \item \code{translation} the translated text.
#' \item \code{source_lang} detected or specified language of the input text.
#' }
#'
#' @references \href{https://www.deepl.com/pro#developer}{DeepL API documentations}
#'
#' @export
#'
#' @examples
#' \dontrun{
#' # Translate a single text
#' toChinese2("Hallo Welt!", auth_key = "my_key")
#'
#' # Translate multiple texts and return the detected language
#' texts <- c("My name is Fred.", "Je suis médecin.", "Ich komme aus der Schweiz.")
#' toChinese2(texts, get_detect = T, auth_key = "x")
#'
#' }
#'
#'
toChinese2 <- function(text, source_lang = NULL, split_sentences = TRUE, preserve_formatting = FALSE,
                       get_detect = FALSE, auth_key = "your_key") {
  # Thin wrapper: delegate to the generic translator with Chinese ("ZH")
  # fixed as the target language; all other options pass through unchanged.
  translate2(
    text = text,
    target_lang = "ZH",
    source_lang = source_lang,
    split_sentences = split_sentences,
    preserve_formatting = preserve_formatting,
    get_detect = get_detect,
    auth_key = auth_key
  )
}
#' Translate texts into Portuguese using DeepL API Free
#'
#' \code{toPortuguese2} translates a text from an available language into Portuguese
#' using DeepL API Free. Use \code{available_languages2} to list all supported languages.
#' An authentication key is required to use this service. With the DeepL API Free package,
#' developers can translate up to 500,000 characters per month for free.
#'
#' @param text character vector to be translated. Only UTF8-encoded plain text is supported.
#' An element can contain several sentences, but should not exceed 30kbytes.
#' @param source_lang language of the text to be translated. If parameter \code{is.null},
#' the API guesses the language of the source. If input is of length 1, the same source
#' language is applied to all elements.
#' @param split_sentences if \code{TRUE}, the translation engine splits the input into sentences.
#' If only one sentence is translated, it is recommended to set to \code{FALSE} to prevent
#' the engine from unintentionally splitting the sentence.
#' @param preserve_formatting if \code{TRUE}, the translation engine tries to preserve some aspects
#' (e.g. punctuation at the beginning and end of the sentence, upper/lower case at the beginning
#' of the sentence) of the formatting.
#' @param get_detect if \code{TRUE}, the language detected for the source text is included in
#' the response.
#' @param auth_key Authentication key.
#'
#' @details To get an authentication key, you need to register for a DeepL API Free
#' account (\url{https://www.deepl.com/pro#developer}).
#'
#' @return If \code{get_detect} is set to \code{FALSE} a \code{character vector} containing the
#' translation is returned. Otherwise, a (\code{tibble}) is returned with the following columns:
#' \itemize{
#' \item \code{translation} the translated text.
#' \item \code{source_lang} detected or specified language of the input text.
#' }
#'
#' @references \href{https://www.deepl.com/pro#developer}{DeepL API documentations}
#'
#' @export
#'
#' @examples
#' \dontrun{
#' # Translate a single text
#' toPortuguese2("Hallo Welt!", auth_key = "my_key")
#'
#' # Translate multiple texts and return the detected language
#' texts <- c("My name is Fred.", "Je suis médecin.", "Ich komme aus der Schweiz.")
#' toPortuguese2(texts, get_detect = T, auth_key = "x")
#'
#' }
#'
#'
toPortuguese2 <- function(text, source_lang = NULL, split_sentences = TRUE, preserve_formatting = FALSE,
                          get_detect = FALSE, auth_key = "your_key") {
  # Thin wrapper: delegate to the generic translator with Portuguese ("PT")
  # fixed as the target language; all other options pass through unchanged.
  translate2(
    text = text,
    target_lang = "PT",
    source_lang = source_lang,
    split_sentences = split_sentences,
    preserve_formatting = preserve_formatting,
    get_detect = get_detect,
    auth_key = auth_key
  )
}
|
4023dcbb9d19583b7e1083b5f023ba751692f91c | a89dd7bfee6da1f2192d33e9bc3903feee601fa8 | /Project2_004773895_404753334_704775693 (1)/code/q7.R | 1a02b9b33e48c1a4de80151faec7ff23ef2fdae8 | [] | no_license | jameszrx/EE232E-Network-and-Flows | 1e2c7aa3e6d47857a832b89f036f0b8e9ab60bf1 | 995098166614a5e565aeb63d3b49b4125d044ecf | refs/heads/master | 2020-09-19T18:08:58.969016 | 2017-06-16T02:29:15 | 2017-06-16T02:29:15 | 94,496,055 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,745 | r | q7.R | library(igraph)
library(stringr)
library(data.table)
library(plyr)
# NOTE: graph.data.frame / E / V / vcount / neighbors / fastgreedy.community /
# sizes / modularity come from igraph, attached at the top of this script.

print("start reading data")
edges_list <- fread(input="C:/Users/James/Desktop/EE232/Project_2/edge_list_file_15actor_singletab.txt", sep = "\t", header = FALSE)

print("start constructing network")
movie_network <- graph.data.frame(edges_list, directed = FALSE)

print("assign weights to network")
E(movie_network)$weight <- cbind(unlist(edges_list[,V3]))

print("deleting used data object edges_list")
rm(edges_list)

print("finding community structure")
commu_mv <- fastgreedy.community(movie_network)
print(length(commu_mv))
comm_size <- sizes(commu_mv)
print(comm_size)  # BUG FIX: was print(commu_size), an undefined name
print(modularity(commu_mv))

# Build a per-vertex rating vector; 0 where a movie has no known rating.
# NOTE(review): `movie_rating` is never created in this script -- it is
# presumably loaded beforehand (V1 = movie title, V2 = rating); confirm.
rating_list <- c()  # BUG FIX: was used in rbind() without initialization
for (i in seq_len(vcount(movie_network))) {  # BUG FIX: was vcount(g); `g` undefined
  movie_name <- str_trim(V(movie_network)$name[i])  # BUG FIX: V(g) -> V(movie_network)
  if (movie_name %in% movie_rating$V1) {
    rating <- movie_rating$V2[which(movie_rating$V1 == movie_name)]
  } else {
    rating <- 0
  }
  rating_list <- rbind(rating_list, rating)
}

# Movies whose rating we want to predict from their neighbors' ratings.
mov_list <- c("Batman v Superman: Dawn of Justice (2016)" ,"Mission: Impossible - Rogue Nation (2015)" ,"Minions (2015)")
weight_list <- NULL    # kept from the original script; currently unused
neighbor_list <- NULL
mov_list <- str_trim(mov_list)
movie_in_graph <- str_trim(V(movie_network)$name)  # BUG FIX: V(g) -> V(movie_network)

# Predict each movie's rating as the mean rating of its rated neighbors.
prediction_of_rating_list <- c()
for (movie in mov_list) {
  tmp_ind <- which(movie_in_graph == movie)
  neighbor_list <- str_trim(neighbors(movie_network, tmp_ind)$name)
  index <- c()
  for (j in seq_along(neighbor_list)) {  # seq_along: safe when no neighbors
    if (neighbor_list[j] %in% movie_rating$V1) {
      index[j] <- which(movie_in_graph == neighbor_list[j])
    }
  }
  # `index` has NA gaps for unrated neighbors; is.finite() drops them below.
  rating_movie_neighbor <- rating_list[index]
  rating_movie_neighbor <- rating_movie_neighbor[is.finite(rating_movie_neighbor)]
  rating_pred_i <- mean(rating_movie_neighbor)
  prediction_of_rating_list <- append(prediction_of_rating_list, rating_pred_i)
}
|
f168cacccb156eab008f9f095a1258094d158a92 | 19e775dd94828a0323ecae33983f9066bc2f20bb | /barchart_backtest.R | 3945a3cf72190eca0fee73072bdc993be93d2654 | [] | no_license | connerpharmd/Test_fin_stuff | 69f5aec903449d44ba8b6b50297e064cdcaf34d3 | 19f5a62568620711fb84cdc09bb86f89ee44b8b8 | refs/heads/master | 2020-06-10T06:27:32.615068 | 2019-06-25T04:37:18 | 2019-06-25T04:37:18 | 193,607,960 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,400 | r | barchart_backtest.R | ##
##
##
# Load dependencies; library() (unlike require()) stops with an error when a
# package is missing instead of silently returning FALSE.
library(quantmod)
library(jsonlite)
library(data.table)
# Format a numeric value as a dollar string, rounded to 2 decimal places with
# thousands separators, e.g. fmt.doll(1234.567) -> "$1,234.57".
# (Trailing zeros are not padded: fmt.doll(1234.5) -> "$1,234.5".)
fmt.doll <- function(x){
  # FIX: spell out FALSE instead of the reassignable shorthand F, and use
  # paste0() rather than paste(..., sep = "").
  paste0("$", format(round(x, 2), big.mark = ",", scientific = FALSE))
}
source.file.dir <- "home"
##
## NOTE:
## sometimes you have to change name.index to "stocks-options..."
## other times "options..." works fine
##
source.path.dat <- "C:\\Users\\hornr\\OneDrive\\Documents\\R_stuff\\2018.02.stock_analysis\\output.barchart"

all.files <- list.files(path = source.path.dat)
all.files <- all.files[order(all.files)]

# BUG FIX: the original loop `for(i in 1:length(all.files)){}` had an EMPTY
# body, so the per-folder code below ran only once, with `i` left at the last
# index. The whole per-folder workflow now lives inside the loop.
for (i in seq_along(all.files)) {
  path.1 <- paste0(source.path.dat, "\\", all.files[i])

  rundate <- substr(all.files[i], start = 1, 10) # create run date from filename
  rundate <- as.Date(rundate, "%Y.%m.%d")

  files.1 <- list.files(path.1) # need to develop way to handle null *.csv files
  file.index <- grep(pattern = "^aaa.barch.summary.csv", files.1)
  if (length(file.index) == 0) next  # no summary file -> skip this folder
  path.2 <- paste0(path.1, "\\", files.1[file.index[1]])
  rawdat <- read.csv(file = path.2, stringsAsFactors = FALSE)

  # BUG FIX: the original indexed with grep("/", s1) where `s1` was never
  # defined; the date strings containing "/" live in rawdat$Time itself.
  s2 <- rawdat$Time[grep("/", rawdat$Time)]
  tickerdate <- max(as.Date(s2, "%m/%d/%Y"))
  expdate <- as.Date(rawdat$Exp.Date, "%m/%d/%Y")

  # convert dollar flows (strings with thousands separators) for analysis
  rawdat$dollar.flows.nice <- as.numeric(gsub(rawdat$dollar.flows.nice, pattern = ",", replacement = ""))
  rawdat$OTM <- rawdat$Strike - rawdat$Price
  rawdat$pct.OTM <- rawdat$OTM / rawdat$Price * 100

  # sub data.frame of tickers where the call is OTM, the percent profit from
  # premium is between 60% and 80%, and at least $1MM flowed in
  consider.ticker <- rawdat[rawdat$OTM > 0 & rawdat$pct.prof.prem < .8 & rawdat$pct.prof.prem > .6 & rawdat$dollar.flows.nice > 1000000, ]

  # nested loop to check for only ONE call being bought
  ticker.drilldown <- unique(consider.ticker$Symbol)
  final.index <- c()  # BUG FIX: was 0, mixing a numeric into what becomes
                      # a character vector of ticker symbols
  for (j in seq_along(ticker.drilldown)) {  # BUG FIX: inner loop reused `i`
    ticker <- ticker.drilldown[j]
    ticker.file <- files.1[grep(pattern = paste0("^", ticker, ".*", ".csv"), files.1)]
    xpath.1 <- paste0(path.1, "\\", ticker.file)
    if (nrow(read.csv(xpath.1)) < 2) {
      final.index <- c(final.index, ticker)
    }
  }
  # final.index now holds only tickers where a single call was bought.
  # TODO (from the original notes):
  #  - select only final.index tickers from rawdat
  #  - move on to the next folder if final.index is empty
  #  - otherwise write a smaller data.frame with the important columns
}
|
2a1365fcdca4a622901b1c138a71c1a965128a9f | a8ab4bfe4a9473f73ab7864ac98b625989dbb3b3 | /man/IQR.Rd | 45b4fbc96e56e68ea2ea29e89288c1553ab9d0cd | [] | no_license | WeigeHuangEcon/ccfa | 05955111940c0d59a6224c65efa80d261edb04fe | bf88d858e605e373b883a146cf05e1ad3dc292da | refs/heads/master | 2021-06-04T19:03:31.679532 | 2020-12-14T18:57:35 | 2020-12-14T18:57:35 | 96,262,831 | 9 | 0 | null | null | null | null | UTF-8 | R | false | true | 345 | rd | IQR.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helper-functions.R
\name{IQR}
\alias{IQR}
\title{IQR}
\usage{
IQR(edf, t1, t2)
}
\arguments{
\item{edf}{an ecdf}
\item{t1}{upper quantile}
\item{t2}{lower quantile}
}
\value{
scalar interquantile range
}
\description{
compute interquantile range from ecdf object
}
|
bdd69d96254a18f001d8416f7f1cbeeb5ac885e3 | 806ca2e424e11a5b7eab6c947fc47b5aee8a6c84 | /R/tsdb.R | 5960de398399abfd6190e220ade1650926bd2a1a | [] | no_license | dfyj/wm | cefcb9f3c11bd37bef69e4a9e2a24b93f180bb60 | ecf27eb4349400717a8fdee16754c253c8543ec7 | refs/heads/master | 2020-03-24T22:34:01.845300 | 2019-01-26T04:02:12 | 2019-01-26T04:02:12 | 143,091,352 | 0 | 2 | null | 2018-08-01T02:52:48 | 2018-08-01T02:04:38 | R | UTF-8 | R | false | false | 1,700 | r | tsdb.R | .tsdb_root <- function(){
'C:/db/tsdb'
}
#' Parse a Tsdb symbol into its components
#'
#' @param symbol character scalar of the form '<db>_<dir>@@<name>',
#'   e.g. 'eq_000300.SH@@close'
#' @return named character vector c(db, dir, name) for a valid symbol,
#'   NA otherwise
#'
#' @examples
#' symbol <- 'eq_000300.SH@@close'
#' .tsdb_parse_symbol(symbol)
.tsdb_parse_symbol <- function(symbol) {
  m <- stringr::str_match(symbol, '([a-zA-Z0-9]+)_(.+)@(.+)')
  # str_match() yields a 1x4 matrix whose first cell is NA when no match.
  if (is.na(m[[1]])) {
    return(NA)
  }
  # Drop the full-match cell; name the three capture groups.
  setNames(m[-1], c('db', 'dir', 'name'))
}
#' Returns file path of Tsdb symbol
#' @param symbol Tsdb symbol, e.g. 'eq_000300.SH@@close'
#' @return path to the symbol's .rds file under the Tsdb root
#' @export
#' @examples
#' symbol <- 'eq_000300.SH@@close'
#' .tsdb_path(symbol)
.tsdb_path <- function(symbol) {
  # BUG FIX: the original read `.tsdb_path <- .tsdb_path <- function(...)`,
  # a duplicated (paste-error) assignment.
  parts <- c(.tsdb_root(), .tsdb_parse_symbol(symbol))
  # do.call replaces the deprecated purrr::invoke; paste0 replaces
  # sprintf('%s%s', ., '.rds').
  paste0(do.call(file.path, as.list(parts)), ".rds")
}
#' Update Tsdb data
#'
#' Serializes \code{data} to the .rds file that backs \code{symbol}
#' (via the project's \code{rds_write}).
#'
#' @param data data to store for the symbol
#' @param symbol Tsdb symbol, e.g. 'eq_000300.SH@@close'
#' @export
#' @examples
#' tsdb_update(data, symbol)
tsdb_update <- function(data, symbol) {
  rds_write(data, .tsdb_path(symbol))
}
#' Load Tsdb data
#'
#' Optional \code{...} arguments are coerced to character and joined with '/'
#' to form an xts-style date-range subset (e.g. '2018-01-01/2018-01-10').
#' With no \code{...} arguments the full series is returned.
#'
#' @param symbol Tsdb symbol, e.g. 'eq_000300.SH@@close'
#' @param ... optional date-range endpoints (character or Date)
#' @export
#' @examples
#' symbol <- 'eq_000300.SH@@close'
#' tsdb(symbol)
#' tsdb(symbol, '2018-01-01', '2018-01-10')
#' tsdb(symbol, as.Date('2018-01-01'), '2018-01-10')
#' tsdb(symbol, '2018-01')
tsdb <- function(symbol, ...) {
  data <- rds_read(.tsdb_path(symbol))
  parts <- list(...)
  if (length(parts) == 0) {
    return(data)
  }
  # BUG FIX: the original piped a list into an unqualified str_c(), which
  # errors under stringr >= 1.4 and relied on stringr being attached; base
  # paste() on the flattened character vector is dependency-free.
  date_range <- paste(unlist(lapply(parts, as.character)), collapse = "/")
  data[date_range]
}
#' Tsdb symbol for equities
#'
#' @param code equity code, e.g. '000300.SH'
#' @param field field name, e.g. 'close'
#' @return character symbol of the form 'eq_<code>@@<field>'
#' @export
#' @examples
#' code <- '000300.SH'
#' field <- 'close'
#' tsdb_eq_symbol(code, field)
tsdb_eq_symbol <- function(code, field) {
  paste0("eq_", code, "@", field)
}
#' Tsdb symbol for funds
#'
#' @param code fund code, e.g. 'XT1527428.XT'
#' @param field field name, e.g. 'nav_adj'
#' @return character symbol of the form 'fund_<code>@@<field>'
#' @export
#' @examples
#' code <- 'XT1527428.XT'
#' field <- 'nav_adj'
#' tsdb_fund_symbol(code, field)
tsdb_fund_symbol <- function(code, field) {
  paste0("fund_", code, "@", field)
}
|
4b4a94d1e04556e54d29319ac55dd2c1f6c1da0e | af1dcafc1ba3cdc3bfeff520177dfa84f337d6d0 | /Charpter_16.R | 66ff98409f5282197be96717ea4a98e3aaaa1186 | [] | no_license | JiahaoWongg/-R-in-Action- | aa4b89e45ab8639ff8dc0c9d96ba4f699a928503 | 0037a1b257e7cabf1d7c29c6817f8557274bd0fc | refs/heads/master | 2020-12-11T12:30:48.035045 | 2020-01-14T13:52:45 | 2020-01-14T13:52:45 | 233,850,436 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,293 | r | Charpter_16.R | install.packages("cluster")
# --- Setup: one-time installs and library loads ------------------------------
# NOTE(review): install.packages() on every run is wasteful -- consider
# commenting these out after first use. library("ggplot2") is loaded twice.
install.packages("NbClust")
install.packages("flexclust")
install.packages("fMultivar")
install.packages("rattle")
library("ggplot2")
library("cluster")
library("NbClust")
library("flexclust")
library("fMultivar")
library("rattle")
library("ggplot2")

# --- Hierarchical clustering of the nutrient data (flexclust) ----------------
data(nutrient, package="flexclust")
head(nutrient, 4)
# Euclidean distance matrix on the raw data; peek at the first 4x4 corner.
d <- dist(nutrient)
as.matrix(d)[1:4,1:4]
# Reload, normalize row names, and standardize columns before clustering.
data(nutrient, package="flexclust")
row.names(nutrient) <- tolower(row.names(nutrient))
nutrient.scaled <- scale(nutrient)
d <- dist(nutrient.scaled)
# Average-linkage hierarchical clustering on the scaled distances.
fit.average <- hclust(d, method="average")
pdf(file = "test.pdf")
plot(fit.average, hang=-1, cex=.8, main="Average Linkage Clustering")
dev.off()
# Let NbClust's 26 criteria vote on the number of clusters.
nc <- NbClust(nutrient.scaled, distance="euclidean",
              min.nc=2, max.nc=15, method="average")
table(nc$Best.n[1,])
# NOTE(review): this overwrites the previous test.pdf; "Numer" is a typo in
# the xlab below (runtime string, left untouched here).
pdf(file = "test.pdf")
barplot(table(nc$Best.n[1,]),
        xlab="Numer of Clusters", ylab="Number of Criteria",
        main="Number of Clusters Chosen by 26 Criteria")
dev.off()
# Cut the tree into 5 clusters and profile them (medians per cluster).
clusters <- cutree(fit.average, k=5)
table(clusters)
aggregate(nutrient, by=list(cluster=clusters), median)
aggregate(as.data.frame(nutrient.scaled), by=list(cluster=clusters),median)
pdf(file = "test.pdf")
plot(fit.average, hang=-1, cex=.8,
     main="Average Linkage Clustering\n5 Cluster Solution")
rect.hclust(fit.average, k=5)
dev.off()
wssplot <- function(data, nc=15, seed=1234){
  # Scree-style plot for choosing k in k-means: total within-group sum of
  # squares versus the number of clusters.
  #
  # data: numeric matrix/data frame of observations
  # nc:   largest number of clusters to try
  # seed: RNG seed, reset before each kmeans() call for reproducibility
  wss <- numeric(nc)
  # k = 1: all points in one cluster, so WSS = (n - 1) * sum of column variances.
  wss[1] <- (nrow(data) - 1) * sum(apply(data, 2, var))
  for (k in 2:nc) {
    set.seed(seed)
    wss[k] <- sum(kmeans(data, centers = k)$withinss)
  }
  plot(1:nc, wss, type = "b",
       xlab = "Number of Clusters",
       ylab = "Within groups sum of squares")
}
# --- K-means and PAM clustering of the wine data (rattle) --------------------
data(wine, package="rattle")
head(wine)
# Standardize all chemistry columns (drop column 1, the cultivar label).
df <- scale(wine[-1])
# Scree-style plot of within-group sum of squares vs number of clusters.
pdf(file = "test1.pdf")
wssplot(df)
dev.off()
# Let NbClust's 26 criteria vote on the number of clusters.
set.seed(1234)
devAskNewPage(ask=TRUE)
nc <- NbClust(df, min.nc=2, max.nc=15, method="kmeans")
table(nc$Best.n[1,])
pdf(file = "test2.pdf")
barplot(table(nc$Best.n[1,]),
        xlab="Number of Clusters", ylab="Number of Criteria",
        main="Number of Clusters Chosen by 26 Criteria")
dev.off()
# K-means with k = 3 and 25 random starts; profile the resulting clusters.
set.seed(1234)
fit.km <- kmeans(df, 3, nstart=25)
fit.km$size
fit.km$centers
aggregate(wine[-1], by=list(cluster=fit.km$cluster), mean)
# Partitioning around medoids (robust alternative to k-means) + cluster plot.
pdf(file = "test.pdf")
set.seed(1234)
fit.pam <- pam(wine[-1], k=3, stand=TRUE)
fit.pam$medoids
clusplot(fit.pam, main="Bivariate Cluster Plot")
dev.off()
95c6178c4918461fadfd48e5c8337329bbcc1fdd | c3dcc9c43c5868ca272a93839a08f80c316820eb | /man/formatOutputData.Rd | eccb440a7bf797f49784200df6d48772278a8e24 | [
"Apache-2.0"
] | permissive | gregorbj/FSDM | 46df63010dc7a9514a8e7d0ff37edb3404e716e4 | 63f8c517e8b931fcb0d45f3e4b01ae6f16dbefdc | refs/heads/master | 2021-06-03T14:19:26.172954 | 2021-05-21T17:30:26 | 2021-05-21T17:30:26 | 70,544,520 | 5 | 3 | Apache-2.0 | 2021-02-11T21:55:29 | 2016-10-11T01:39:44 | R | UTF-8 | R | false | true | 1,057 | rd | formatOutputData.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fsdm.R
\name{formatOutputData}
\alias{formatOutputData}
\title{Create data frame of selected scenarios and concepts to plot and save}
\usage{
formatOutputData(ModelDir, ModelName, Sc, Vn)
}
\arguments{
\item{ModelDir}{a string identifying the path to the models folder in which}
\item{ModelName}{a string representation of the model name.}
\item{Sc}{a vector of the names of scenarios to include.}
\item{Vn}{the variable names for the concepts to include.}
}
\value{
A data frame having columns identifying the scenario, concept,
iteration, scaled values, and rescaled values.
}
\description{
\code{formatOutputData} makes a data frame of the summary results for selected
scenarios and selected concepts.
}
\details{
This function creates a data frame of model results for selected scenarios
and selected concepts. The data frame is in 'flat' format where values are
in one column and the corresponding concept names, scenario names, and
iterations are in separate columns.
}
|
424747fde65ecdc0b5a0b8c005ba68ceb61bff95 | b5527e19861d9118617b1b5b9f0f44d0ee615dd8 | /tests/testthat/test-recom.R | b8310e40707560a4cbe0b9abde83f894992664b0 | [] | no_license | tedconf/recommenderlab | 1deaafd1b541a8a142ff683c5a42961a4938e66c | 17f620efbaf97ef0bda44076ed660004529358c7 | refs/heads/master | 2022-04-08T21:47:14.486739 | 2019-12-17T19:29:40 | 2019-12-17T19:29:40 | 109,738,323 | 1 | 0 | null | 2017-11-06T19:48:03 | 2017-11-06T19:14:40 | R | UTF-8 | R | false | false | 2,594 | r | test-recom.R | library("testthat")
library("recommenderlab")

data("MovieLense")

### test all real rating recommenders
methods <- unique(sapply(recommenderRegistry$get_entries(
  dataType="realRatingMatrix"), "[[", "method"))

# Restrict to users and movies with more than 100 ratings each.
# BUG FIX: the second filter was applied to the full `MovieLense` object,
# which silently discarded the row (user) filter from the line above.
MovieLense100 <- MovieLense[rowCounts(MovieLense) > 100, ]
MovieLense100 <- MovieLense100[, colCounts(MovieLense100) > 100]

train <- MovieLense100[1:20]
test1 <- MovieLense100[101]
test3 <- MovieLense100[101:103]
for (method in methods) {
  context(paste("Algorithm:", method))
  cat("Algorithm:", method, "\n")

  rec <- Recommender(train, method = method)

  ## predict() defaults to a top-N list; one test user yields one list
  ## entry with exactly 10 recommendations
  topn <- as(predict(rec, test1, n = 10), "list")
  expect_identical(length(topn), 1L)
  expect_identical(length(topn[[1]]), 10L)

  ## three test users yield three top-10 lists
  topn <- as(predict(rec, test3, n = 10), "list")
  expect_identical(length(topn), 3L)
  expect_equal(as.integer(sapply(topn, length)), c(10L, 10L, 10L))

  ## type = "ratings" leaves NAs in place of already-known ratings
  ratings1 <- predict(rec, test1, n = 10, type = "ratings")
  expect_gt(sum(is.na(as(ratings1, "matrix"))), 0L)

  ## full rating matrix; RERECOMMEND cannot produce one. No NA-count
  ## expectation here: the result may still contain NAs.
  if (method != "RERECOMMEND") {
    predict(rec, test1, n = 10, type = "ratingMatrix")
  }

  ratings3 <- predict(rec, test3, n = 10, type = "ratings")
  expect_gt(sum(is.na(as(ratings3, "matrix"))), 0L)

  if (method != "RERECOMMEND") {
    predict(rec, test3, n = 10, type = "ratingMatrix")
  }
}
### test all binary recommenders
methods <- unique(sapply(recommenderRegistry$get_entries(
dataType="binaryRatingMatrix"), "[[", "method"))
MovieLense100_bin <- binarize(MovieLense100, minRating = 3)
train <- MovieLense100_bin[1:50]
test1 <- MovieLense100_bin[101]
test3 <- MovieLense100_bin[101:103]
for(m in methods) {
context(paste("Algorithm:", m))
cat("Algorithm:", m, "\n")
rec <- Recommender(train, method = m)
rec
### default is top-N list
pre <- predict(rec, test1, n = 10)
pre
l <- as(pre, "list")
expect_identical(length(l), 1L)
expect_identical(length(l[[1]]), 10L)
pre <- predict(rec, test3, n = 10)
pre
l <- as(pre, "list")
expect_identical(length(l), 3L)
expect_equal(as.integer(sapply(l, length)), c(10L, 10L, 10L))
### AR and RERECOMMEND cannot do it
#if(m != "AR" && m != "RERECOMMEND") {
# pre <- predict(rec, test1, n = 10, type = "ratings")
# pre <- predict(rec, test1, n = 10, type = "ratingMatrix")
#}
}
|
d1c281b1263da5ead49fa155d83fd71b7c5a1026 | ebc224256513e968a325510648cf415004044822 | /R/ex_node.R | 4c90e8c9b394d8d37f8ef5fcfdf04a350658b6e7 | [] | no_license | cran/halfcircle | a118e3513c4ed9ef105eaf765e2b22096eb5aae3 | cc8c312e8e9dc36acd7c88e768249f7cf5433475 | refs/heads/master | 2020-04-04T11:53:46.872315 | 2018-11-02T17:30:11 | 2018-11-02T17:30:11 | 155,907,036 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 757 | r | ex_node.R | #' country attributes
#'
#' A dataset describing the 154 countries that participate in the trade
#' network, with geographic, demographic, and economic attributes.
#'
#' @format A data frame with 154 rows and 8 variables:
#' \describe{
#' \item{country}{name of the exporting country}
#' \item{x}{longitude of the center of the country}
#' \item{y}{latitude of the center of the country}
#' \item{pop_total}{total population}
#' \item{gdpc}{Gross Domestic Product per capita, in dollars}
#' \item{area_cultivation}{total area of land in cultivation use, in ha}
#' \item{water_total}{total volume of usable water, in cubic meters}
#' \item{income_level}{income category, one of 5 levels}
#' }
#' @docType data
#' @source \url{http://fao.org/faostat/}
#' @usage data(ex_node)
"ex_node"
|
3be64e40a7a5261bd7e6e32314919f1bc94ec681 | 4ef1abc89cd63293ad7da8c799492aff5ae5a666 | /man/addBarcodes.Rd | 12bafbf95fed88c900b3b7ed95126e8eeaa8bcc4 | [
"MIT"
] | permissive | bengalengel/OmicNavigator | 07ae25f23b8162e3fdee9b7b6cad7f84b4190e32 | 2edaf7204afe9d37467be474ef39ed40ca2d393f | refs/heads/main | 2023-04-17T22:53:51.470685 | 2021-04-26T14:51:43 | 2021-04-26T14:51:43 | 348,800,839 | 0 | 0 | NOASSERTION | 2021-03-17T17:46:59 | 2021-03-17T17:46:58 | null | UTF-8 | R | false | true | 1,424 | rd | addBarcodes.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add.R
\name{addBarcodes}
\alias{addBarcodes}
\title{Add barcode plot metadata}
\usage{
addBarcodes(study, barcodes)
}
\arguments{
\item{study}{An OmicNavigator study created with \code{\link{createStudy}}}
\item{barcodes}{The metadata variables that describe the barcode plot.
The input object is a list of lists (one per model). Each sublist must
contain the element \code{statistic}, which is the column name in the
results table to use to construct the barcode plot. Each sublist may
additionally contain any of the following optional elements:
1) \code{absolute} - Should the statistic be converted to its absolute
value (default is \code{TRUE}).
2) \code{logFoldChange} - The column name in the results table that contains
the log fold change values.
3) \code{labelStat} - The x-axis label to describe the statistic.
4) \code{labelLow} - The left-side label to describe low values of the statistic.
5) \code{labelHigh} - The right-side label to describe high values of the statistic.
6) \code{featureDisplay} - The feature variable to use to label the barcode plot
on hover.
To share metadata across multiple models, use the modelID "default".}
}
\description{
The app can display a barcode plot of the enrichment results for a given
annotation term. The metadata in `barcodes` instructs the app how to create
and label the barcode plot.
}
|
0e5c79253ba929b432e4837b22f7cd9b453791e7 | 3da3895c22be687f0a079877e1c52e9dea283e96 | /R Programming/Week 3/lapply.R | 2f79a21c1b25ef9e8c67b68a7f2b332c8a720a77 | [] | no_license | meethariprasad/Data-Science | b165a04031a5efb266e9e79e0074547e3930e062 | 70abd81773b78bf4e597fb6afd1ed879af5a28d6 | refs/heads/master | 2021-01-17T23:23:09.036056 | 2020-03-30T08:38:30 | 2020-03-30T08:38:30 | 84,219,764 | 0 | 0 | null | 2017-03-07T16:10:42 | 2017-03-07T16:10:42 | null | UTF-8 | R | false | false | 63 | r | lapply.R | library(datasets)
## Quick look at the built-in iris dataset (datasets package loaded above).
data(iris)                    # attach the iris data frame to the workspace
?iris                         # open the help page (useful in interactive sessions only)
summary(iris$Sepal.Length)    # min / quartiles / mean / max of sepal length
|
de8c2145a822f1f48bf79d2afcf5fe319836d3e8 | 247106f2776815e5f14d520d2ea822f06490c228 | /R/mnist/hello_keras_mnist.R | 14a7315669e55dab6f47db1ee97330e12c7d8bb5 | [] | no_license | GiulSposito/HelloKeras | 532797320f1e61a2561e009c2b3ed57ee1c6d460 | 3105ff7c15d6c1f7aedacf3d9a034f6d64bdd9a9 | refs/heads/master | 2020-04-13T03:05:58.911956 | 2019-12-04T22:07:03 | 2019-12-04T22:07:03 | 162,921,614 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,444 | r | hello_keras_mnist.R | library(keras)
## MNIST digit classification with a small dense network.
## Assumes `library(keras)` was loaded above; requires a working backend,
## so this script is not runnable in isolation.
mnist <- dataset_mnist()
x.train <- mnist$train$x
y.train <- mnist$train$y
x.test <- mnist$test$x
y.test <- mnist$test$y
## flatten each image into a length-784 feature vector (rows = samples)
dim(x.train) <- c(nrow(x.train), 784)
dim(x.test ) <- c(nrow(x.test ), 784)
## scale pixel values into [0, 1]
x.train <- x.train/255
x.test <- x.test/255
## one-hot encode the 10 digit classes
y.train <- to_categorical(y.train,10)
y.test <- to_categorical(y.test,10)
head(y.train)
head(x.train)
## model: 784 -> 256 -> 128 -> 10, with dropout after each hidden layer
model <- keras_model_sequential()
model %>%
layer_dense(units=256, activation="relu", input_shape = c(784)) %>%
layer_dropout(rate=0.4) %>%
layer_dense(units=128, activation = "relu") %>%
layer_dropout(rate=0.3) %>%
layer_dense(units=10, activation = "softmax")
model %>% summary()
model %>% compile(
loss = "categorical_crossentropy",
optimizer = optimizer_rmsprop(),
metrics = "accuracy"
)
## train for 30 epochs, holding out 20% of the training data for validation;
## system.time() reports how long the fit took
system.time(
history <- model %>% fit(
x.train, y.train, epochs=30, batch_size=128,
validation_split=0.2
)
)
## (exploration of per-layer activations, kept for reference)
# layers.outputs <- lapply(model$layers[1:5], function(layer) layer$output)
#
# activation.model <- keras_model(inputs = model$input, outputs = layers.outputs)
#
# activations <- activation.model %>%
#   predict(x.test)
#
# fl.activation <- activations[[1]]
# dim(fl.activation)
#
# array_reshape(fl.activation[3,], c(16,16)) %>%
#   image(axes=F)
library(caret)
## confusion matrix of predicted vs. true digit labels on the test set
model %>%
predict_classes(x.test) %>%
as.integer() %>%
as.factor() %>%
confusionMatrix(reference=as.factor(as.vector(mnist$test$y, mode="integer")))
|
d7de9d2ea089c6685b6babcebae26ffef3b76518 | 91a77be68e5ad1aa16e9a2681ba6fb090c118e4d | /R/scores.R | 0140801bd616fbc64e97c20e085610634ed944d1 | [] | no_license | cran/qdap | e42f194e98a38eb02084eb6ac92dd587024b8540 | 5f032a6a8bf41255cd2547b11325ed457a02a72a | refs/heads/master | 2023-05-25T03:10:36.324940 | 2023-05-11T05:10:02 | 2023-05-11T05:10:02 | 17,698,836 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 524 | r | scores.R | #' Generic Scores Method
#'
#' Access the scores data.frame from selected qdap outputs. This is an S3
#' generic; the actual work is done by class-specific \code{scores.*} methods.
#'
#' @param x A qdap object (list) with a dataframe of scores (e.g.,
#' \code{\link[qdap]{fry}}, \code{\link[qdap]{formality}}).
#' @param \ldots Arguments passed to the scores method of other classes.
#' @export
#' @seealso \code{\link[qdap]{counts}}
#' @seealso \code{\link[qdap]{proportions}}
#' @seealso \code{\link[qdap]{preprocessed}}
#' @return Returns a data.frame of scores.
scores <-
function(x, ...){
UseMethod("scores")
}
|
db3c3223cc87653cb309cee8eba52728e9e9cf98 | 37ac6d63b7329dc036eb7cd948f60910ff3bedec | /cachematrix.R | 0f5dc2436108857dc4993b5d9adf061b6e982b4f | [] | no_license | peschleifer/ProgrammingAssignment2 | 7721612b453623ef61d7e8fb7ce84ed2a9cf53bc | 2cf0429a083adda7c2d35a58b3b64d8a47ea1cd3 | refs/heads/master | 2021-01-15T13:14:24.603015 | 2015-04-26T19:40:59 | 2015-04-26T19:40:59 | 34,363,267 | 0 | 0 | null | 2015-04-22T02:11:52 | 2015-04-22T02:11:52 | null | UTF-8 | R | false | false | 1,193 | r | cachematrix.R | ## Creates matrices that are able to cache their own inverse
## and provides a function to calculate the inverse, using the cached inverse when it is valid
## Creates a 'special' matrix that can cache its inverse (can be extended to other operations)
## Creates the functions used for this special matrix
# Construct a matrix wrapper able to memoise its own inverse.
#
# Returns a list of four accessor closures sharing one environment:
#   set(y)          replace the stored matrix and invalidate the cache
#   get()           return the stored matrix
#   setInverse(v)   store a computed inverse in the cache
#   getInverse()    return the cached inverse, or NULL when not yet computed
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL
  list(
    set = function(y) {
      x <<- y
      cached <<- NULL  # new matrix, so any old inverse is stale
    },
    get = function() x,
    setInverse = function(inverse) cached <<- inverse,
    getInverse = function() cached
  )
}
## This will find the invers of the special matrix, using the cached value when it is valid
## Will raise an error if the matrix is singular
# Return the inverse of a special matrix 'x' (as built by makeCacheMatrix),
# reusing a previously cached inverse when available and computing it with
# solve() otherwise. Extra arguments are forwarded to solve(); a singular
# matrix therefore raises the usual solve() error.
cacheSolve <- function(x, ...) {
  hit <- x$getInverse()
  if (is.null(hit)) {
    # cache miss: compute, then remember the result for next time
    hit <- solve(x$get(), ...)
    x$setInverse(hit)
  } else {
    message("getting cached data")
  }
  hit
}
|
69797cdc8f903b00c5a9b6ac3b5c7281e366125c | 3005450e4faa88b0b364da0e4e6f101cd59d5d1a | /man/nirm_ms.Rd | 9c226972c7327ca46b78df27cec15fd72e6f9b40 | [] | no_license | kentjin97/nirm | 11c791599ca0bf48dc827c45e37b8b7a5717ae7c | 14e33a5fec225c7f05c8271597911edba014cca9 | refs/heads/master | 2020-04-06T08:08:50.841624 | 2018-11-13T00:30:37 | 2018-11-13T00:30:37 | 157,296,560 | 2 | 2 | null | null | null | null | UTF-8 | R | false | true | 507 | rd | nirm_ms.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nirm_ms.R
\name{nirm_ms}
\alias{nirm_ms}
\title{title}
\usage{
nirm_ms(dataset, nsample, nitem, ndim = 2, nset, nsamp_max, nitem_max,
ntotal_max, niter = 30000, nburn = 5000, nthin = 5, nprint = 100,
jump_beta = 0.2, jump_theta = 1, jump_z = 0.05, pr_mean_beta = 0,
pr_sd_beta = 10, pr_mean_theta = 0, pr_sd_theta = 10,
pr_mean_z = 0, prior_a = 0.001, prior_b = 0.001, option = TRUE,
cores = 1)
}
\description{
title
}
|
0c59b8cf9e836f94acc32aa508db94926cb9a405 | 8f8d7c9f128841985bba9f7ecf4b57cc853ec410 | /Tutorial 1/ex1.R | c06051b623ed5a6b5457b77997269c55f8cddbad | [] | no_license | oshadhi-vanodhya/Business-Intelligence | cd632c977e46eb728a6685c7092b754a12eab6d0 | be47e630bb9d6e95b08f86ce01d889c8c0d8e35f | refs/heads/master | 2020-08-07T00:08:47.652268 | 2019-10-06T17:51:45 | 2019-10-06T17:51:45 | 213,213,578 | 3 | 2 | null | null | null | null | UTF-8 | R | false | false | 218 | r | ex1.R | x = 5
# Basic assignment: `=`, `<-`, and `->` all bind a value to a name;
# the conventional form is the left arrow, used consistently below.
y <- 10
z <- 15          # originally written as `15 -> z`
# Simple arithmetic on x (assigned above) and y.
a <- x + y
b <- x - y
c <- x / y
d <- x * y
2^3              # exponentiation; autoprints at top level
# Modulo (%%) and integer division (%/%); each result overwrites `a`.
a <- 22 %% 7
a <- 20 %% 3
a <- 27 %/% 3
a <- 22 %/% 3
# A comparison yields a logical value.
flag <- x > y
flag
# Vectors and 1-based indexing.
x <- 1:10
x[5]
x <- 101:110
x[6]
# Membership tests with %in%.
y %in% x
102 %in% x
my_list <- c(4, 3, 4, 56, 78, 100)
a5bfe0459961c5fe2a8b051c52c14930655c7da6 | 02754f51d7970c6c76097084e8fa1a75bd7bf8dc | /week2a-svm/example1-func.R | 959ddd2c03f95c11a90d3587f9ced2c59ad3a790 | [] | no_license | tutrunghieu/html2015b | eea165eaddc2953ae7097446c393e5433307febd | 10e0933d4d5d7b5fd5a4348ac90929eb3ffbad85 | refs/heads/master | 2016-09-03T07:01:02.903848 | 2015-10-17T15:11:01 | 2015-10-17T15:11:01 | 42,796,729 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 850 | r | example1-func.R | rm(list = ls()); # to remove variable
## Tutorial script: fit a straight-line and a quadratic model to a small
## CSV dataset and compare their total absolute prediction error.
library(caret); # for machine learning
library(e1071); # to use svm
data(iris); # to load the iris dataset
# print( iris ); # to print the whole data table
print( head(iris) ); # to print just the first rows of the data table
print( iris[5:15, ] ); # to print just selected rows of the table
## NOTE(review): hard-coded absolute Windows path -- adjust for your machine
D <- read.csv("C:/Users/henrytu/Desktop/example1.csv", header=TRUE);
print(D);
## simple linear model: y ~ x
M <- lm(y ~ x, D);
print(M);
y <- D$y; # the ground-truth values in the dataset
y1 <- predict(M, D); # the values predicted by the model M
e1 <- abs(y - y1); # absolute error per observation
print( data.frame(y, y1, e1));
print( sum(e1) ); # total absolute error of the linear fit
## add a squared term and refit: quadratic model y ~ x + x^2
D$xx <- D$x * D$x;
print(D);
M <- lm(y ~ x + xx, D);
print(M);
y <- D$y; # the ground-truth values in the dataset
y1 <- predict(M, D); # the values predicted by the model M
e1 <- abs(y - y1);
print( data.frame(y, y1, e1));
print( sum(e1) ); # total absolute error of the quadratic fit
|
8a7559db55152b7d8488be8bf2476e23aaa7656d | 241555f63dd8a2d315b9efe0d1dcb2f031263b9a | /decisiontree.R | 2add15c45bac3ddb31c687a3d3a25516a5bc4307 | [] | no_license | saikiran2893/Datascience- | 0c72ff4b87fe811cd98709dcb6ac7b94f198911f | 1513cc814d96c2ce13b5d7fb24cf90d4474f87ca | refs/heads/master | 2021-03-22T04:08:59.455230 | 2018-02-08T15:39:50 | 2018-02-08T15:39:50 | 120,771,361 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,611 | r | decisiontree.R | setwd("G:/R_Training/DEcision Tree")
getwd()
## C5.0 decision-tree walkthrough on the German credit dataset:
## explore, split train/test, fit, evaluate, then boost.
credit <- read.csv("credit.csv",stringsAsFactors = TRUE)
View(credit)
str(credit)
## table() counts the number of occurrences per level --
## useful to explore categorical variables
table(credit$checking_balance)
table(credit$savings_balance)
## to explore numeric variables
summary(credit$months_loan_duration)
summary(credit$amount)
summary(credit)
## check for outliers; removing outlier observations helps build a good model
boxplot(credit$amount)
boxplot(credit$months_loan_duration)
## class counts of the dependent variable
table(credit$default)
set.seed(1234)
## 900 of the 1000 rows go into the training set
train_sample <- sample(1000,900)
train_sample
## creating the training sample from the sampled row numbers
credit_train <- credit[train_sample,]
credit_test <- credit[-train_sample,]
View(credit_test)
View(credit_train)
## checking the dependent-variable counts in train and test
table(credit_train$default)
table(credit_test$default)
## check the class distribution is similar in train and test;
## if not, rerun the sampling without set.seed
prop.table(table(credit_train$default))
prop.table(table(credit_test$default))
## TRAINING A MODEL
## we use the C5.0 algorithm in the C50 package to train the decision tree;
## column 17 is the target, so it is dropped from the predictors
library("C50")
names(credit_train)
credit_model <- C50::C5.0(credit_train[-17],credit_train$default)
summary(credit_model)
## evaluating model performance: apply the model to the test data
credit_prediction <- predict(credit_model,credit_test)
## this creates a vector of predicted class values which we can compare
## to the actual class values
library(gmodels)
CrossTable(credit_test$default,credit_prediction, prop.chisq=FALSE, prop.r=FALSE,
prop.c=FALSE, dnn=c("Actual Default","Predicted Default"))
CrossTable(credit_test$default,credit_prediction,dnn=c("Actual Default","Predicted Default"))
mean(credit_test$default==credit_prediction)      # overall accuracy
## IMPROVING the model: boost the accuracy of the decision tree.
## Boosting combines a number of weak learners into a team;
## trials = 10 is a common default
credit_boost10 <- C50::C5.0(credit_train[-17],credit_train$default,trials = 10)
summary(credit_boost10)
## predict with the new boosted model
credit_boos_predict <- predict(credit_boost10,credit_test)
mean(credit_test$default==credit_boos_predict)    # boosted accuracy
CrossTable(credit_test$default,credit_boos_predict, prop.chisq=FALSE, prop.r=FALSE,
prop.c=FALSE, dnn=c("Actual Default","Predicted Default"))
## possible next step: drop least-used predictors and rebuild the model
|
7aa3723fa9290ae67c200ec1668e8504f44dd555 | 4c6865757cb84a64dbb49aad3c4560571de78308 | /tests/testthat/test_createNanoStringSet.R | a2eb8c7ad72dae8343ff4d81c2f90ff870efc3d8 | [] | no_license | Shedimus/NanoStringDiff | c4a1dd448ceb73591268ca8b8c622241641e8935 | a4ceb4e5ba14e004568a65c60daeb01f56e116c9 | refs/heads/master | 2020-09-21T04:38:06.896828 | 2019-12-02T15:31:14 | 2019-12-02T15:31:14 | 224,680,499 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,117 | r | test_createNanoStringSet.R | ## generate count matrix
## Unit test for createNanoStringSet(): simulate the four count matrices plus
## phenotype data, construct a NanoStringSet, and check that every slot
## round-trips unchanged.
set.seed(1)  # make the simulated counts reproducible across test runs
## endogenous genes: 25 genes x 4 samples of Poisson counts
endogenous <- matrix(rpois(100, 50), 25, 4)
sample.names <- paste0("Sample", 1:4)
colnames(endogenous) <- sample.names
## positive controls: 6 spike-ins at decreasing concentrations
positive <- matrix(rpois(24, c(128, 32, 8, 2, 0.5, 0.125) * 30), 6, 4)
## bug fix: was paste("Sample", 1:4), which yields "Sample 1" (with a space)
## and so disagrees with the "Sample1" names used by every other matrix
colnames(positive) <- sample.names
## negative controls: 8 probes at background-level counts
negative <- matrix(rpois(32, 10), 8, 4)
colnames(negative) <- sample.names
## housekeeping controls: 3 genes
housekeeping <- matrix(rpois(12, 100), 3, 4)
colnames(housekeeping) <- sample.names
## generate phenotype data
designs <- data.frame(group = c("Control", "Control", "Treatment", "Treatment"),
                      gender = c("Male", "Female", "Female", "Male"),
                      age = c(20, 40, 39, 37))
## input data to create a "NanoStringSet" object
NanoStringData <- createNanoStringSet(endogenous, positive,
                                      negative, housekeeping, designs)
## every slot must contain exactly the matrix it was built from
expect_true(all(exprs(NanoStringData) == endogenous))
expect_true(all(positiveControl(NanoStringData) == positive))
expect_true(all(negativeControl(NanoStringData) == negative))
expect_true(all(housekeepingControl(NanoStringData) == housekeeping))
expect_true(all(colnames(exprs(NanoStringData)) == sample.names))
|
90bfdd927ed2ae9e94ce7ec0328dd41fd2d9a983 | 70affdb293361dc91363a394a09f557a0730b30c | /dataCompareR/man/listObsNotVerbose.Rd | 67f20caf0a9de6531f9a2610763bd762d7934c9d | [
"Apache-2.0"
] | permissive | Lextuga007/dataCompareR | cb1f4885660af97ef9088a4379eafe58ba190dfd | 2824fecd6746b65a44092d2499bdd30e29cceebd | refs/heads/master | 2022-05-07T14:23:18.028950 | 2018-09-07T09:28:41 | 2018-09-07T09:28:41 | 254,414,896 | 1 | 0 | Apache-2.0 | 2020-04-09T15:51:36 | 2020-04-09T15:51:35 | null | UTF-8 | R | false | true | 580 | rd | listObsNotVerbose.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/out_outputStructureFunctions.R
\name{listObsNotVerbose}
\alias{listObsNotVerbose}
\title{listObsNotVerbose}
\usage{
listObsNotVerbose(i, x, uniquevarlist, nObs)
}
\arguments{
\item{i}{The position of the element we want to compare}
\item{x}{An dataCompareR object}
\item{uniquevarlist}{A list of the variables in the compare}
\item{nObs}{How many observations to return}
}
\value{
A list of mismatching observations from start/end of mismatches
}
\description{
Return a summary of mismatching data
}
|
0ecd840ccae47711ff7980d3cad853ce63d55836 | 11b95acbf2553de662c028a9de2b019e423d1055 | /HMG_Analysis/gene_counts_ggtree_barplt.R | 4e697fa51dd8018230774838a16ab7395e8d2c6a | [] | no_license | stajichlab/Chytrid_Coelomomyces_Project | 91b9779355e841525a511224dddcef794b08c649 | 44e8d1d5db60300c47f1e8edbc2a197d8ce477db | refs/heads/main | 2023-04-27T17:33:23.066860 | 2023-04-19T21:36:01 | 2023-04-19T21:36:01 | 386,721,342 | 1 | 2 | null | 2023-01-13T04:32:47 | 2021-07-16T17:52:06 | HTML | UTF-8 | R | false | false | 2,056 | r | gene_counts_ggtree_barplt.R | ##################
## Plot a phylogenetic tree of Chytrid strains (ggtree), colour tips by
## lifestyle, and attach a per-strain gene-count bar panel with facet_plot().
library(dplyr)
library(rvcheck)
library(ggplot2)
library(ggtree)
library(treeio)
library(ggstance)
library(ggtreeExtra)
library(RColorBrewer)
## per-strain gene counts (CSV with a Strain and a Gene_Count column)
geneCopies <- read.table("Chytrid_gene_counts.csv", header=TRUE, sep=",", row.names = NULL)
geneCopies
## tip annotation; fix: was `stringsAsFactor=F`, which only worked through
## partial argument matching -- spelled out as `stringsAsFactors = FALSE`
tip_metadata <- read.table("Chytrid_metadata.tab", sep="\t", header=TRUE, check.names=FALSE, stringsAsFactors = FALSE)
tip_metadata
tree <- read.tree("Chytrid_tree")
tipnames <- tree$tip.label
tipnames
## drop tips that have no gene-count entry
to_drop <- setdiff(tree$tip.label, geneCopies$Strain)
to_drop
straintree <- drop.tip(tree, to_drop)
tipnames <- straintree$tip.label
tipnames
## full tree, circular layout, labels hidden (size 0)
p0 <- ggtree(tree, layout="circular") +
geom_tiplab(size=0, color="black")
p0
## pruned tree, rectangular layout, with visible tip labels
p1 <- ggtree(straintree, layout="rectangular") +
geom_tiplab(size=3, color="black")
p1
## attach metadata (%<+%) and colour tip points by lifestyle
p <- p1 %<+% tip_metadata + geom_tippoint(aes(color=Lifestyle), size=3) +
scale_color_brewer(palette = "Dark2")
plot(p)
#tip_metadata <- read.table("Lineages.tab", sep="\t", header=TRUE,check.names=FALSE, stringsAsFactor=F)
#tip_metadata
#p <- p1 %<+% tip_metadata + geom_tippoint(aes(color=Lineage), size=2)
#plot(p)
## restrict the gene counts to strains still present in the pruned tree
difftable <- setdiff(geneCopies$Strain, straintree$tip.label)
geneCopiesFilter <- filter(geneCopies, geneCopies$Strain %in% straintree$tip.label)
geneCopiesFilter
dd <- data.frame(id=straintree$tip.label, value=(geneCopiesFilter$Gene_Count))
dd
geneCounts <- data.frame(geneCopiesFilter)
geneCounts
# Define the number of colors you want
#nb.cols <- 21
#mycolors <- colorRampPalette(brewer.pal(8, "Set3"))(nb.cols)
# Create a ggplot with 21 colors
# Use scale_fill_manual
## bar panel of total CDS counts, uncoloured
ptbl <- facet_plot(p, panel = 'Total CDS', data = geneCopiesFilter, geom = geom_barh, mapping = aes(x=Gene_Count),
stat = "identity") + theme_tree2(legend.position=c(.875, .70))
ptbl
## bar panel of total CDS counts, bars filled by lifestyle
ptbl <- facet_plot(p, panel = 'Total_CDS', data = geneCopiesFilter, geom = geom_barh, mapping = aes(x=Gene_Count, group = label, fill=Lifestyle),
stat = "identity") + scale_fill_brewer(palette = "Dark2") + theme_tree2(legend.position=c(.875, .70))
ptbl
303cbddeca375242ee0d897a8fcfb6f57e4b9978 | 8a32ea28dc84c6423221dc0d3d2d44dc38d05cfa | /R/readUniProtExport.R | 110593c13db44944e6ae55fe212b18304d9e9110 | [] | no_license | cran/wrProteo | d5932a2f48d8e1ff0397691b0e8027ca360b9b7f | 348376e6931d279200da2cc1ed85f8c57ec516f4 | refs/heads/master | 2023-09-05T12:04:30.055590 | 2023-08-18T10:10:02 | 2023-08-18T11:31:04 | 236,959,541 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,614 | r | readUniProtExport.R | #' Read protein annotation as exported from UniProt batch-conversion
#'
#' This function allows reading and importing protein-ID conversion results from \href{https://www.uniprot.org/uploadlists/}{UniProt}.
#' To do so, first copy/paste your query IDs into \href{https://www.uniprot.org/uploadlists/}{UniProt} 'Retrieve/ID mapping' field called '1. Provide your identifiers' (or upload as file), verify '2. Select options'.
#' In a typical case of 'enst000xxx' IDs you may leave default settings, ie 'Ensemble Transcript' as input and 'UniProt KB' as output. Then, 'Submit' your search and retreive results via
#' 'Download', you need to specify a 'Tab-separated' format ! If you download as 'Compressed' you need to decompress the .gz file before running the function \code{readUCSCtable}
#' In addition, a file with UCSC annotation (Ensrnot accessions and chromosomic locations, obtained using \code{\link{readUCSCtable}}) can be integrated.
#' @details
#' In a typicall use case, first chromosomic location annotation is extracted from UCSC for the species of interest and imported to R using \code{\link{readUCSCtable}} .
#' However, the tables provided by UCSC don't contain Uniprot IDs. Thus, an additional (batch-)conversion step needs to get added.
#' For this reason \code{\link{readUCSCtable}} allows writing a file with Ensemble transcript IDs which can be converted tu UniProt IDs at the site of \href{https://www.uniprot.org/uploadlists/}{UniProt}.
#' Then, UniProt annotation (downloaded as tab-separated) can be imported and combined with the genomic annotation using this function.
#' @param UniProtFileNa (character) name (and path) of file exported from Uniprot (tabulated text file inlcuding headers)
#' @param deUcsc (data.frame) object produced by \code{readUCSCtable} to be combined with data from \code{UniProtFileNa}
#' @param targRegion (character or list) optional marking of chromosomal locations to be part of a given chromosomal target region,
#' may be given as character like \code{chr11:1-135,086,622} or as \code{list} with a first component characterizing the chromosome and a integer-vector with start- and end- sites
#' @param useUniPrCol (character) optional declaration which colums from UniProt exported file should be used/imported (default 'EnsID','Entry','Entry.name','Status','Protein.names','Gene.names','Length').
#' @param silent (logical) suppress messages
#' @param debug (logical) display additional messages for debugging
#' @param callFrom (character) allow easier tracking of message(s) produced
#' @return This function returns a data.frame (with columns $EnsID, $Entry, $Entry.name, $Status, $Protein.names, $Gene.names, $Length; if \code{deUcsc} is integrated plus: $chr, $type, $start, $end, $score, $strand, $Ensrnot, $avPos)
#' @seealso \code{\link{readUCSCtable}}
#' @examples
#' path1 <- system.file("extdata",package="wrProteo")
#' deUniProtFi <- file.path(path1,"deUniProt_hg38chr11extr.tab")
#' deUniPr1a <- readUniProtExport(deUniProtFi)
#' str(deUniPr1a)
#'
#' ## Workflow starting with UCSC annotation (gtf) files :
#' gtfFi <- file.path(path1,"UCSC_hg38_chr11extr.gtf.gz")
#' UcscAnnot1 <- readUCSCtable(gtfFi)
#' ## Results of conversion at UniProt are already available (file "deUniProt_hg38chr11extr.tab")
#' myTargRegion <- list("chr1", pos=c(198110001,198570000))
#' myTargRegion2 <-"chr11:1-135,086,622" # works equally well
#' deUniPr1 <- readUniProtExport(deUniProtFi,deUcsc=UcscAnnot1,
#' targRegion=myTargRegion)
#' ## Now UniProt IDs and genomic locations are both available :
#' str(deUniPr1)
#' @export
readUniProtExport <- function(UniProtFileNa, deUcsc=NULL, targRegion=NULL, useUniPrCol=NULL, silent=FALSE, debug=FALSE, callFrom=NULL) {
## read annotation exported from https://www.uniprot.org/uploadlists/ upload Ensemble Transcript => UniprotKB => export
## targRegion : list('chr1',pos=c(198110001,198570000)) or 'chr11:1-135,086,622'
## message prefix used throughout, tracking the calling function
fxNa <- wrMisc::.composeCallName(callFrom,newNa="readUniProtExport")
## normalize 'silent' and 'debug' to single TRUE/FALSE values
if(!isTRUE(silent)) silent <- FALSE
if(isTRUE(debug)) silent <- FALSE else debug <- FALSE
## keep only the first file name; error on empty input
if(length(UniProtFileNa) >1) UniProtFileNa <- UniProtFileNa[1] else {if(length(UniProtFileNa) < 1) stop(" argument 'UniProtFileNa' seems empty")}
chFi <- file.exists(UniProtFileNa)
if(!chFi) stop(" file '",UniProtFileNa,"' not found !")
chExt <- length(grep("\\.gz$", UniProtFileNa, fixed=FALSE, perl=FALSE)) >0  # TRUE if the file looks gzip-compressed
chPa <- try(find.package("utils"),silent=TRUE)
if(inherits(chPa, "try-error")) stop("Package 'utils' not found ! Please install first")
## main
## first attempt: regular tab-delimited read; on failure fall back to
## wrMisc::readVarColumns() for tables with a variable number of columns
deUniProt <- try(utils::read.delim(UniProtFileNa,stringsAsFactors=FALSE), silent=TRUE)
errMsg1 <- " seems not to be in UniProt 'tab-separated' format (does not contain sufficent number of columns) !"
if(inherits(deUniProt, "try-error")) {
deUniProt <- try(wrMisc::readVarColumns(if(chExt) unz(UniProtFileNa) else UniProtFileNa,callFrom=fxNa), silent=TRUE)
if(inherits(deUniProt, "try-error")) stop("Can't read file '",UniProtFileNa,"' - please check format !") else {
if(!silent) message(fxNa,"Managed to read file using readVarColumns()") }
if(ncol(deUniProt) <9) stop("file ",UniProtFileNa,errMsg1)
colnames(deUniProt)[1:9] <- c("EnsTraID","xx","UniprotID",colnames(deUniProt)[c(2:7)]) # initial colnames by readVarColumns are shifted
}
if(ncol(deUniProt) <7) stop("file ",UniProtFileNa,errMsg1) # check if (in)sufficient number of columns
if(nrow(deUniProt) <2 && !silent) message(fxNa," CAUTION, file '",UniProtFileNa,"' contains only ",nrow(deUniProt)," lines !")
## correct colnames (UniProt exports name the first two columns 'yourlist.*' and 'isomap.*')
chCol <- c(grep("yourlist.",colnames(deUniProt)[1]) >0, grep("isomap.",colnames(deUniProt)[2]) >0, "Entry" %in% colnames(deUniProt))
if(chCol[1]) colnames(deUniProt)[1] <- "EnsTraID"
if(chCol[2]) colnames(deUniProt)[2] <- "xx" # this column contains almost no information
colnames(deUniProt)[3] <- "UniProtID"
## combine with data initially/previously read from Ucsc
multID <- NULL
colnames(deUniProt) <- sub(" ",".",colnames(deUniProt))
if(length(useUniPrCol) <1) useUniPrCol <- c("EnsTraID","UniProtID","Entry.name","Status","Protein.names","Gene.names","Length")
useUniPrCo <- wrMisc::extrColsDeX(deUniProt, useUniPrCol, doExtractCols=FALSE, callFrom=fxNa, silent=silent)
## treat multi-Ensemble entries : need to replicate lines of table for multiple concatenated (eg ENSRNOT00000031808,ENSRNOT00000093745)
splitExtendConcat <- function(mat,useCol=1,sep=",",sep2="[[:digit:]],[[:alpha:]]+"){
## extend matrix or data.frame by additional lines if column 'useCol' contains multiple concatenated terms (content of other columns will be duplicated)
## 'sep' used with strsplit() and grep() to identify lines and split, also used to construct (generic) term for keeping just first
## 'sep2' optional custom pattern used with grep() to identify lines; will be used instead of 'generic' sep to identify entries to split lateron
## main
chMult <- grep(if(length(sep2) >0) sep2 else sep, mat[,useCol], fixed=FALSE, perl=FALSE)
if(length(chMult) >0) {
## split each concatenated ID: the first ID stays on the original row,
## the remaining IDs get appended as duplicated rows at the end
spl1 <- strsplit(mat[chMult,useCol],sep, fixed=FALSE, perl=FALSE)
spl2 <- unlist(lapply(spl1, function(x) x[-1]), use.names=FALSE)
toLine <- rep(chMult, sapply(spl1,length) -1)
mat[,useCol] <- sub(paste(sep,"[[:print:]]*$",sep=""),"",mat[,useCol], fixed=FALSE, perl=FALSE)
mat2 <- cbind(spl2,mat[c(toLine),-1])
colnames(mat2)[1] <- colnames(mat)[1]
mat <- rbind(mat,mat2)
}
mat }
deUniProt <- splitExtendConcat(deUniProt, sep=",", sep2="[[:digit:]],[[:upper:]]+")
if(length(deUcsc) >0) {
chGeneId <- which(colnames(deUcsc) =="gene_id")
if(length(chGeneId) <1) stop("Invalid file-content: The file '",UniProtFileNa,"' does not conatain a column 'gene_id' ! Please check the input file")
## strip version suffix ('xxx.2') from gene IDs before matching
deUcsc[,"gene_id"] <- sub("\\.[[:digit:]]+$","",deUcsc[,"gene_id"])
useUcCol <- wrMisc::naOmit(match(c("gene_id","chr","start","end","strand","frame"),colnames(deUcsc)))
deUcsc <- wrMisc::convMatr2df(deUcsc[,useUcCol], addIniNa=FALSE, callFrom=fxNa,silent=silent)
matchUniprInUcsc <- match(deUniProt[,1], deUcsc[,"gene_id"])
## if direct matching yields almost nothing, retry case-insensitively and
## without version numbers; warn if that still fails
if(sum(!is.na(matchUniprInUcsc)) <4) {
if(!silent) message(fxNa," low yield matching ",wrMisc::pasteC(deUniProt[1:3,1],quoteC="'")," and ",
wrMisc::pasteC(deUcsc[1:3,"gene_id"],quoteC="'"), " convert all to lower case and remove version numbers ('xxx.2') for better matching")
matchUniprInUcsc <- match(sub("\\.[[:digit:]]+$","", tolower(deUniProt[,1])), sub("\\.[[:digit:]]+$","", tolower(deUcsc[,"gene_id"])))
if(sum(!is.na(matchUniprInUcsc)) <4) warning(fxNa," Matching failed : Very few or no matches between UniProtFile and deUcsc !")}
if(!silent) message(fxNa," intergrating genomic information for ",length(matchUniprInUcsc)," entries (",sum(is.na(matchUniprInUcsc))," not found)")
## add chrom Loc to deUniProt => combined DB
combAllChrDB <- cbind(deUniProt[,useUniPrCo], deUcsc[matchUniprInUcsc,]) ## add Ensrnot c(1,3:5,7,10)
if(!silent) message(fxNa," ",nrow(combAllChrDB)," IDs in output")
combAllChrDB <- cbind(combAllChrDB,avPos=if(all(c("start","end") %in% colnames(combAllChrDB))) {
round(rowMeans(combAllChrDB[,c("start","end")])) } else NA) # add mean gene-position for easier sorting
## mark whether genomic positions fall inside the target region
if(!all(c("chr","start") %in% colnames(combAllChrDB))) targRegion <- NULL
## accept 'chr11:1-135,086,622' style input by converting it to list form
if(length(targRegion) >0) if(is.character(targRegion) && length(targRegion) ==1) {
targRegion <- unlist(strsplit(targRegion,":"))
targRegion <- list(targRegion[1],gsub(",","",unlist(strsplit(targRegion[2],"-")))) }
combAllChrDB <- cbind(combAllChrDB,inTarg=if(length(targRegion) >0) {
combAllChrDB[,"chr"]==targRegion[[1]] & as.integer(combAllChrDB[,"start"]) >targRegion[[2]][1] & as.integer(combAllChrDB[,"end"]) <targRegion[[2]][2]} else NA)
} else combAllChrDB <- deUniProt[,useUniPrCo]
## convert factor-columns to character
chFa <- rep(NA,ncol(combAllChrDB))
for(i in 1:ncol(combAllChrDB)) chFa[i] <- is.factor(combAllChrDB[,i])
if(any(chFa)) for(i in which(chFa)) combAllChrDB[,i] <- as.character(combAllChrDB[,i])
## drop the now-redundant 'gene_id' column before returning
chEnsID <- "gene_id" %in% colnames(combAllChrDB)
if(chEnsID) combAllChrDB <- combAllChrDB[,-1*which(colnames(combAllChrDB)=="gene_id")]
combAllChrDB }
|
666ea8bd8204911432d776ba8faad579451c8879 | a9a30dd73cbcea225106545d0d3a06b3d6f0d8f0 | /inst/tests/test-getthetapermute.r | 74447113216f403a73f610b725b5130ddc7093fb | [] | no_license | HopkinsIDD/IDSpatialStats | c5eff56b3b0f861ee1e29ee14aec760b05d1bfa3 | cf8c0602c6af4b471cbd60441237cb1f6acee27d | refs/heads/master | 2022-01-01T07:16:19.928915 | 2021-08-07T19:05:42 | 2021-08-07T19:05:42 | 17,963,841 | 4 | 9 | null | 2023-09-08T14:48:47 | 2014-03-21T00:59:32 | R | UTF-8 | R | false | false | 2,083 | r | test-getthetapermute.r | context("get.theta.permute")
test_that("get.theta.permute returns appropriate values for test case 1 (equilateral triangle)" ,{
x <- rbind(c(1,0,0), c(1,1,0),c(2,.5,sqrt(.75)))
colnames(x) <-c("type","x","y")
test <- function(a,b) {
if (a[1] != 1) return(3)
if (b[1] == 2) return(1)
return(2)
}
#should return 1 for every permutation
res <- get.theta.permute(x, test, 1.5, 0, 500)[,-(1:2)]
res2 <- get.theta.typed.permute(x, 1, 2, 1.5, 0, 500)[,-(1:2)]
expect_that(as.numeric(res), equals(rep(1,500)))
expect_that(as.numeric(res2), equals(rep(1,500)))
})
test_that("get.theta.permute returns appropriate values for test case 2 (points on a line)", {
  # Seven collinear points: one type-1 at the origin, flanked by type-2 and
  # type-3 points at increasing distances along the x-axis.
  x <- rbind(c(1, 0, 0), c(2, 1, 0), c(2, -1, 0), c(3, 2, 0),
             c(2, -2, 0), c(3, 3, 0), c(3, -3, 0))
  colnames(x) <- c("type", "x", "y")
  # Pair classifier: 3 unless `a` is type 1; 1 when `b` is type 2; else 2.
  test <- function(a, b) {
    if (a[1] != 1) return(3)
    if (b[1] == 2) return(1)
    return(2)
  }
  # The median of the null distribution should be 1 (the distribution
  # includes Infs, so the mean does not work).
  # The 95% CI equals (0, Inf) with windowed distance bands.
  res <- get.theta.permute(x, test, c(1.5, 2.5, 3.5), c(0, 1.5, 2.5), 500)[, -(1:2)]
  res2 <- get.theta.typed.permute(x, 1, 2, c(1.5, 2.5, 3.5), c(0, 1.5, 2.5), 500)[, -(1:2)]
  expect_equal(apply(res, 1, median, na.rm = TRUE), rep(1, 3), tolerance = 0.1)
  expect_equal(apply(res2, 1, median, na.rm = TRUE), rep(1, 3), tolerance = 0.1)
  for (i in 1:3) {
    expect_equal(as.numeric(quantile(res[i, ], probs = c(.025, .975))),
                 c(0, Inf))
    expect_equal(as.numeric(quantile(res2[i, ], probs = c(.025, .975))),
                 c(0, Inf))
  }
  # Without windows (one band from 0 to 4) the 95% CI should be 1/3 and 3.
  res <- get.theta.permute(x, test, 4, 0, 500)[, -(1:2)]
  res2 <- get.theta.typed.permute(x, 1, 2, 4, 0, 500)[, -(1:2)]
  expect_equal(as.numeric(quantile(res[1, ], probs = c(.025, .975))),
               c(1/3, 3))
  expect_equal(as.numeric(quantile(res2[1, ], probs = c(.025, .975))),
               c(1/3, 3))
})
cc5f6506d0814e90416485be57015046dc868b0b | ccfa45fedfb7a39cc24864a5356be438c6e3dddb | /R/user_info_out.R | a1ec72d4f043a7a820e7b6aaa373db02a97b19a5 | [] | no_license | wing328/namsor-r-client | b0c66151f620736f8aa69fa8921d2e3be2264e6e | 3f950cf980ded7a5997f2429cbe82468cd2de8a5 | refs/heads/master | 2020-04-22T14:51:04.232759 | 2019-02-15T13:06:16 | 2019-02-15T13:06:16 | 170,458,537 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,721 | r | user_info_out.R | # NamSor API v2
#
# NamSor API v2 : enpoints to process personal names (gender, cultural origin or ethnicity) in all alphabets or languages. Use GET methods for small tests, but prefer POST methods for higher throughput (batch processing of up to 1000 names at a time). Need something you can't find here? We have many more features coming soon. Let us know, we'll do our best to add it!
#
# OpenAPI spec version: 2.0.2-beta
# Contact: contact@namsor.com
# Generated by: https://openapi-generator.tech
#' UserInfoOut Class
#'
#' @field uid
#' @field email
#' @field phoneNumber
#' @field emailVerified
#' @field displayName
#' @field photoUrl
#' @field disabled
#' @field firstKnownIpAddress
#' @field providerId
#' @field timeStamp
#' @field verifyToken
#' @field apiKey
#' @field stripePerishableKey
#' @field stripeCustomerId
#' @field otherInfos
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
# Model class for the NamSor API `UserInfoOut` payload (generated client code).
# Holds one user-account record and converts between R objects and the API's
# JSON representation. `otherInfos` is a list of nested UserInfoOut objects.
UserInfoOut <- R6::R6Class(
  'UserInfoOut',
  public = list(
    `uid` = NULL,
    `email` = NULL,
    `phoneNumber` = NULL,
    `emailVerified` = NULL,
    `displayName` = NULL,
    `photoUrl` = NULL,
    `disabled` = NULL,
    `firstKnownIpAddress` = NULL,
    `providerId` = NULL,
    `timeStamp` = NULL,
    `verifyToken` = NULL,
    `apiKey` = NULL,
    `stripePerishableKey` = NULL,
    `stripeCustomerId` = NULL,
    `otherInfos` = NULL,
    # Validate and store any supplied fields; every argument is optional.
    # Character fields must be length-1 strings, `timeStamp` numeric, and
    # `otherInfos` a non-empty list of R6 objects.
    initialize = function(`uid`, `email`, `phoneNumber`, `emailVerified`, `displayName`, `photoUrl`, `disabled`, `firstKnownIpAddress`, `providerId`, `timeStamp`, `verifyToken`, `apiKey`, `stripePerishableKey`, `stripeCustomerId`, `otherInfos`){
      if (!missing(`uid`)) {
        stopifnot(is.character(`uid`), length(`uid`) == 1)
        self$`uid` <- `uid`
      }
      if (!missing(`email`)) {
        stopifnot(is.character(`email`), length(`email`) == 1)
        self$`email` <- `email`
      }
      if (!missing(`phoneNumber`)) {
        stopifnot(is.character(`phoneNumber`), length(`phoneNumber`) == 1)
        self$`phoneNumber` <- `phoneNumber`
      }
      if (!missing(`emailVerified`)) {
        self$`emailVerified` <- `emailVerified`
      }
      if (!missing(`displayName`)) {
        stopifnot(is.character(`displayName`), length(`displayName`) == 1)
        self$`displayName` <- `displayName`
      }
      if (!missing(`photoUrl`)) {
        stopifnot(is.character(`photoUrl`), length(`photoUrl`) == 1)
        self$`photoUrl` <- `photoUrl`
      }
      if (!missing(`disabled`)) {
        self$`disabled` <- `disabled`
      }
      if (!missing(`firstKnownIpAddress`)) {
        stopifnot(is.character(`firstKnownIpAddress`), length(`firstKnownIpAddress`) == 1)
        self$`firstKnownIpAddress` <- `firstKnownIpAddress`
      }
      if (!missing(`providerId`)) {
        stopifnot(is.character(`providerId`), length(`providerId`) == 1)
        self$`providerId` <- `providerId`
      }
      if (!missing(`timeStamp`)) {
        stopifnot(is.numeric(`timeStamp`), length(`timeStamp`) == 1)
        self$`timeStamp` <- `timeStamp`
      }
      if (!missing(`verifyToken`)) {
        stopifnot(is.character(`verifyToken`), length(`verifyToken`) == 1)
        self$`verifyToken` <- `verifyToken`
      }
      if (!missing(`apiKey`)) {
        stopifnot(is.character(`apiKey`), length(`apiKey`) == 1)
        self$`apiKey` <- `apiKey`
      }
      if (!missing(`stripePerishableKey`)) {
        stopifnot(is.character(`stripePerishableKey`), length(`stripePerishableKey`) == 1)
        self$`stripePerishableKey` <- `stripePerishableKey`
      }
      if (!missing(`stripeCustomerId`)) {
        stopifnot(is.character(`stripeCustomerId`), length(`stripeCustomerId`) == 1)
        self$`stripeCustomerId` <- `stripeCustomerId`
      }
      if (!missing(`otherInfos`)) {
        stopifnot(is.vector(`otherInfos`), length(`otherInfos`) != 0)
        sapply(`otherInfos`, function(x) stopifnot(R6::is.R6(x)))
        self$`otherInfos` <- `otherInfos`
      }
    },
    # Serialize the populated fields into a named list (NULL fields omitted).
    toJSON = function() {
      UserInfoOutObject <- list()
      if (!is.null(self$`uid`)) {
        UserInfoOutObject[['uid']] <-
          self$`uid`
      }
      if (!is.null(self$`email`)) {
        UserInfoOutObject[['email']] <-
          self$`email`
      }
      if (!is.null(self$`phoneNumber`)) {
        UserInfoOutObject[['phoneNumber']] <-
          self$`phoneNumber`
      }
      if (!is.null(self$`emailVerified`)) {
        UserInfoOutObject[['emailVerified']] <-
          self$`emailVerified`
      }
      if (!is.null(self$`displayName`)) {
        UserInfoOutObject[['displayName']] <-
          self$`displayName`
      }
      if (!is.null(self$`photoUrl`)) {
        UserInfoOutObject[['photoUrl']] <-
          self$`photoUrl`
      }
      if (!is.null(self$`disabled`)) {
        UserInfoOutObject[['disabled']] <-
          self$`disabled`
      }
      if (!is.null(self$`firstKnownIpAddress`)) {
        UserInfoOutObject[['firstKnownIpAddress']] <-
          self$`firstKnownIpAddress`
      }
      if (!is.null(self$`providerId`)) {
        UserInfoOutObject[['providerId']] <-
          self$`providerId`
      }
      if (!is.null(self$`timeStamp`)) {
        UserInfoOutObject[['timeStamp']] <-
          self$`timeStamp`
      }
      if (!is.null(self$`verifyToken`)) {
        UserInfoOutObject[['verifyToken']] <-
          self$`verifyToken`
      }
      if (!is.null(self$`apiKey`)) {
        UserInfoOutObject[['apiKey']] <-
          self$`apiKey`
      }
      if (!is.null(self$`stripePerishableKey`)) {
        UserInfoOutObject[['stripePerishableKey']] <-
          self$`stripePerishableKey`
      }
      if (!is.null(self$`stripeCustomerId`)) {
        UserInfoOutObject[['stripeCustomerId']] <-
          self$`stripeCustomerId`
      }
      if (!is.null(self$`otherInfos`)) {
        # Nested objects serialize recursively via their own toJSON().
        UserInfoOutObject[['otherInfos']] <-
          sapply(self$`otherInfos`, function(x) x$toJSON())
      }
      UserInfoOutObject
    },
    # Populate fields from a JSON string (missing keys leave fields NULL).
    fromJSON = function(UserInfoOutJson) {
      UserInfoOutObject <- jsonlite::fromJSON(UserInfoOutJson)
      if (!is.null(UserInfoOutObject$`uid`)) {
        self$`uid` <- UserInfoOutObject$`uid`
      }
      if (!is.null(UserInfoOutObject$`email`)) {
        self$`email` <- UserInfoOutObject$`email`
      }
      if (!is.null(UserInfoOutObject$`phoneNumber`)) {
        self$`phoneNumber` <- UserInfoOutObject$`phoneNumber`
      }
      if (!is.null(UserInfoOutObject$`emailVerified`)) {
        self$`emailVerified` <- UserInfoOutObject$`emailVerified`
      }
      if (!is.null(UserInfoOutObject$`displayName`)) {
        self$`displayName` <- UserInfoOutObject$`displayName`
      }
      if (!is.null(UserInfoOutObject$`photoUrl`)) {
        self$`photoUrl` <- UserInfoOutObject$`photoUrl`
      }
      if (!is.null(UserInfoOutObject$`disabled`)) {
        self$`disabled` <- UserInfoOutObject$`disabled`
      }
      if (!is.null(UserInfoOutObject$`firstKnownIpAddress`)) {
        self$`firstKnownIpAddress` <- UserInfoOutObject$`firstKnownIpAddress`
      }
      if (!is.null(UserInfoOutObject$`providerId`)) {
        self$`providerId` <- UserInfoOutObject$`providerId`
      }
      if (!is.null(UserInfoOutObject$`timeStamp`)) {
        self$`timeStamp` <- UserInfoOutObject$`timeStamp`
      }
      if (!is.null(UserInfoOutObject$`verifyToken`)) {
        self$`verifyToken` <- UserInfoOutObject$`verifyToken`
      }
      if (!is.null(UserInfoOutObject$`apiKey`)) {
        self$`apiKey` <- UserInfoOutObject$`apiKey`
      }
      if (!is.null(UserInfoOutObject$`stripePerishableKey`)) {
        self$`stripePerishableKey` <- UserInfoOutObject$`stripePerishableKey`
      }
      if (!is.null(UserInfoOutObject$`stripeCustomerId`)) {
        self$`stripeCustomerId` <- UserInfoOutObject$`stripeCustomerId`
      }
      if (!is.null(UserInfoOutObject$`otherInfos`)) {
        # Re-wrap each parsed element as a nested UserInfoOut instance.
        self$`otherInfos` <- sapply(UserInfoOutObject$`otherInfos`, function(x) {
          otherInfosObject <- UserInfoOut$new()
          otherInfosObject$fromJSON(jsonlite::toJSON(x, auto_unbox = TRUE))
          otherInfosObject
        })
      }
    },
    # Render the object as a JSON string via sprintf templating.
    # NOTE(review): generated template quotes booleans/numbers as strings and
    # uses %d for the (numeric) timeStamp; kept as-is for wire compatibility.
    toJSONString = function() {
      sprintf(
        '{
          "uid":
            "%s",
          "email":
            "%s",
          "phoneNumber":
            "%s",
          "emailVerified":
            "%s",
          "displayName":
            "%s",
          "photoUrl":
            "%s",
          "disabled":
            "%s",
          "firstKnownIpAddress":
            "%s",
          "providerId":
            "%s",
          "timeStamp":
            %d,
          "verifyToken":
            "%s",
          "apiKey":
            "%s",
          "stripePerishableKey":
            "%s",
          "stripeCustomerId":
            "%s",
          "otherInfos":
            [%s]
        }',
        self$`uid`,
        self$`email`,
        self$`phoneNumber`,
        self$`emailVerified`,
        self$`displayName`,
        self$`photoUrl`,
        self$`disabled`,
        self$`firstKnownIpAddress`,
        self$`providerId`,
        self$`timeStamp`,
        self$`verifyToken`,
        self$`apiKey`,
        self$`stripePerishableKey`,
        self$`stripeCustomerId`,
        paste(unlist(lapply(self$`otherInfos`, function(x) jsonlite::toJSON(x$toJSON(), auto_unbox=TRUE))), collapse=",")
      )
    },
    # Parse a JSON string, overwrite all fields, and return `self` invisibly
    # usable for chaining.
    fromJSONString = function(UserInfoOutJson) {
      UserInfoOutObject <- jsonlite::fromJSON(UserInfoOutJson)
      self$`uid` <- UserInfoOutObject$`uid`
      self$`email` <- UserInfoOutObject$`email`
      self$`phoneNumber` <- UserInfoOutObject$`phoneNumber`
      self$`emailVerified` <- UserInfoOutObject$`emailVerified`
      self$`displayName` <- UserInfoOutObject$`displayName`
      self$`photoUrl` <- UserInfoOutObject$`photoUrl`
      self$`disabled` <- UserInfoOutObject$`disabled`
      self$`firstKnownIpAddress` <- UserInfoOutObject$`firstKnownIpAddress`
      self$`providerId` <- UserInfoOutObject$`providerId`
      self$`timeStamp` <- UserInfoOutObject$`timeStamp`
      self$`verifyToken` <- UserInfoOutObject$`verifyToken`
      self$`apiKey` <- UserInfoOutObject$`apiKey`
      self$`stripePerishableKey` <- UserInfoOutObject$`stripePerishableKey`
      self$`stripeCustomerId` <- UserInfoOutObject$`stripeCustomerId`
      # BUGFIX: the generated code assigned the parsed frame to a variable
      # named `data.frame` (shadowing base::data.frame) and iterated with
      # `1:nrow(...)`, which errors when `otherInfos` is absent
      # (1:NULL -> "argument of length 0") and iterates c(1, 0) for 0 rows.
      infos <- UserInfoOutObject$`otherInfos`
      if (!is.null(infos) && nrow(infos) > 0) {
        self$`otherInfos` <- vector("list", length = nrow(infos))
        for (row in seq_len(nrow(infos))) {
          otherInfos.node <- UserInfoOut$new()
          otherInfos.node$fromJSON(jsonlite::toJSON(infos[row, , drop = TRUE], auto_unbox = TRUE))
          self$`otherInfos`[[row]] <- otherInfos.node
        }
      }
      self
    }
  )
)
|
742e6d4d4d9d40c4af9fce564eee6b86409e7c50 | c13ce1d62b066f4180b0a4b5c4db6a068eae079f | /man/parse_surface.Rd | ed0eb5e7940ff6f43bf2e5c611a67c3e25952358 | [] | no_license | muschellij2/cifti | 9aa5c0ef0edeafd1a2688166dfc99a8b0e9f661e | 84b7947310dd5657dd22b809ca838e876f03673b | refs/heads/master | 2020-12-24T11:53:10.701313 | 2020-08-10T16:06:53 | 2020-08-10T16:06:53 | 73,105,792 | 4 | 7 | null | 2020-07-20T12:35:01 | 2016-11-07T17:57:04 | R | UTF-8 | R | false | true | 685 | rd | parse_surface.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parse_surface.R
\name{parse_surface}
\alias{parse_surface}
\alias{get_surface}
\title{Parse Surface from CIFTI}
\usage{
parse_surface(nodeset)
get_surface(fname, verbose = TRUE)
}
\arguments{
\item{nodeset}{Set of XML nodes corresponding to \code{Surface}}
\item{fname}{filename of CIFTI file}
\item{verbose}{print diagnostic messages}
}
\value{
List of values
}
\description{
Extracts information about Surfaces from CIFTI file
}
\examples{
\dontrun{
doc = cifti_xml(fname)
nodes = xml_find_all(doc, "/CIFTI/Matrix/MatrixIndicesMap")
nodeset = xml_find_all(nodes, "./Surface")
parse_surface(nodeset)
}
}
|
60d34f99d7fa6e3333ad01b9816f0f988b15807a | 4fce29de32f8f7c10321b2de3bc22a214449e3e5 | /tests/testthat/test-estim_tmle_os.R | ed83dfb200c74ddd0baf5ffcf643b4de24569324 | [
"MIT"
] | permissive | ehsanx/txshift | c690b3b2915d5195f94d8f90dffea28c34014e68 | 1622c808291033a73280c42688ee53faa35e5f1c | refs/heads/master | 2023-02-20T12:58:16.095639 | 2021-01-22T22:54:31 | 2021-01-22T22:54:31 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,136 | r | test-estim_tmle_os.R | context("One-step and TML estimators produce similar results")
library(data.table)
library(rlang)
# Fixed seed so the simulated data (and hence the estimator comparisons)
# are reproducible across runs.
set.seed(172943)
# Guard the whole suite on sl3 being available (skips cleanly otherwise).
if (require("sl3")) {
  # Example based on the data-generating mechanism presented in the simulation
  n <- 100
  W <- data.frame(W1 = runif(n), W2 = rbinom(n, 1, 0.7))
  A <- rpois(n, lambda = exp(3 + .3 * log(W$W1) - 0.2 * exp(W$W1) * W$W2))
  Y <- rbinom(
    n, 1,
    plogis(-1 + 0.05 * A - 0.02 * A * W$W2 + 0.2 * A * tan(W$W1^2) -
      0.02 * W$W1 * W$W2 + 0.1 * A * W$W1 * W$W2)
  )
  # Two-phase sampling indicator for the IPCW variants below.
  C <- rbinom(n, 1, plogis(rowSums(W) + Y))
  delta_shift <- 2
  EY <- mean(Y)

  # true functional forms
  fitA.0 <- glm(
    A ~ I(log(W1)) + I(exp(W1)):W2,
    family = poisson,
    data = data.frame(A, W)
  )
  fitY.0 <- glm(
    Y ~ A + A:W2 + A:I(tan(W1^2)) + W1:W2 + A:W1:W2,
    family = binomial, data = data.frame(A, W)
  )
  # True generalized propensity score: Poisson density at A given W.
  gn.0 <- function(A = A, W = W) {
    dpois(A, lambda = predict(fitA.0, newdata = W, type = "response"))
  }
  # True outcome regression E[Y | A, W].
  Qn.0 <- function(A = A, W = W) {
    predict(
      fitY.0,
      newdata = data.frame(A, W, row.names = NULL),
      type = "response"
    )
  }

  # SL learners to be used for most fits (e.g., IPCW, outcome regression)
  mean_learner <- Lrnr_mean$new()
  glm_learner <- Lrnr_glm$new()
  rf_learner <- Lrnr_ranger$new()
  Q_lib <- Stack$new(mean_learner, glm_learner, rf_learner)
  sl <- Lrnr_sl$new(learners = Q_lib, metalearner = Lrnr_nnls$new())

  # SL learners for fitting the generalized propensity score fit
  hse_learner <- make_learner(Lrnr_density_semiparametric,
    mean_learner = glm_learner
  )
  mvd_learner <- make_learner(Lrnr_density_semiparametric,
    mean_learner = rf_learner,
    var_learner = glm_learner
  )
  g_lib <- Stack$new(hse_learner, mvd_learner)
  sl_density <- Lrnr_sl$new(
    learners = g_lib,
    metalearner = Lrnr_solnp_density$new()
  )

  # NOTE: using true density like Ivan does
  gn_ext_fitted <- as.data.table(
    lapply(
      c(-delta_shift, 0, delta_shift, 2 * delta_shift),
      function(shift_value) {
        gn_out <- gn.0(A = A + shift_value, W = W)
      }
    )
  ) %>% set_names(c("downshift", "noshift", "upshift", "upupshift"))
  # NOTE: should also use true Q for good measure (truth includes interactions)
  Qn_ext_fitted <- as.data.table(
    lapply(c(0, delta_shift), function(shift_value) {
      Qn_out <- Qn.0(A = A + shift_value, W = W)
    })
  ) %>% set_names(c("noshift", "upshift"))

  # fit TMLE
  # FIX: the argument was previously abbreviated `Q_fit`, relying on partial
  # matching; spelled out as `Q_fit_args` to match the one-step call below.
  tmle <- txshift(
    Y = Y, A = A, W = W, delta = delta_shift,
    g_exp_fit_args = list(fit_type = "external"),
    gn_exp_fit_ext = gn_ext_fitted,
    Q_fit_args = list(fit_type = "external"),
    Qn_fit_ext = Qn_ext_fitted,
    estimator = "tmle"
  )
  tmle_psi <- as.numeric(tmle$psi)

  # fit one-step
  os <- txshift(
    Y = Y, A = A, W = W, delta = delta_shift,
    g_exp_fit_args = list(fit_type = "external"),
    gn_exp_fit_ext = gn_ext_fitted,
    Q_fit_args = list(fit_type = "external"),
    Qn_fit_ext = Qn_ext_fitted,
    estimator = "onestep"
  )
  os_psi <- as.numeric(os$psi)

  # test for reasonable equality between estimators
  # FIX: `tol` relied on partial matching; spelled out as `tolerance`.
  test_that("TMLE and one-step implementations match closely", {
    expect_equal(tmle_psi, os_psi, tolerance = 1e-3)
  })

  # fit TMLE for delta = 0
  tmle_noshift <- txshift(
    Y = Y, A = A, W = W, delta = 0, estimator = "tmle",
    g_exp_fit_args = list(fit_type = "sl", sl_learners_density = sl_density),
    Q_fit_args = list(fit_type = "sl", sl_learners = sl)
  )
  tmle_psi_noshift <- as.numeric(tmle_noshift$psi)

  # fit one-step for delta = 0
  os_noshift <- txshift(
    Y = Y, A = A, W = W, delta = 0, estimator = "onestep",
    g_exp_fit_args = list(fit_type = "sl", sl_learners_density = sl_density),
    Q_fit_args = list(fit_type = "sl", sl_learners = sl)
  )
  os_psi_noshift <- as.numeric(os_noshift$psi)

  # test for reasonable equality between estimators
  test_that("TMLE and one-step match EY exactly for delta = 0", {
    expect_equal(tmle_psi_noshift, EY, tolerance = 1e-5)
    expect_equal(os_psi_noshift, EY, tolerance = 1e-5)
  })

  # IPCW-based estimators by adding censoring node
  ipcw_tmle <- txshift(
    W = W, A = A, Y = Y, delta = delta_shift,
    C_samp = C, V = c("W", "Y"),
    estimator = "tmle",
    max_iter = 5,
    samp_fit_args = list(fit_type = "glm"),
    g_exp_fit_args = list(fit_type = "external"),
    gn_exp_fit_ext = gn_ext_fitted[C == 1, ],
    Q_fit_args = list(fit_type = "external"),
    Qn_fit_ext = Qn_ext_fitted[C == 1, ],
    eif_reg_type = "glm"
  )
  ipcw_tmle_psi <- as.numeric(ipcw_tmle$psi)

  ipcw_os <- txshift(
    W = W, A = A, Y = Y, delta = delta_shift,
    C_samp = C, V = c("W", "Y"),
    estimator = "onestep",
    samp_fit_args = list(fit_type = "glm"),
    g_exp_fit_args = list(fit_type = "external"),
    gn_exp_fit_ext = gn_ext_fitted[C == 1, ],
    Q_fit_args = list(fit_type = "external"),
    Qn_fit_ext = Qn_ext_fitted[C == 1, ],
    eif_reg_type = "glm"
  )
  ipcw_os_psi <- as.numeric(ipcw_os$psi)

  # test for reasonable equality between estimators
  test_that("IPCW-augmented TMLE and one-step match reasonably closely", {
    expect_equal(ipcw_tmle_psi, ipcw_os_psi, tolerance = 1e-3)
  })
}
|
45af47800ef125143d38e30b0cb1d96008564544 | 6f3bf762c79e6db1434dc67ecbd03abe15ab5752 | /code/ts.R | b9f475f492bd4cb1823a4da24460c04edad3c858 | [] | no_license | Sommerzzz/yelp_star | b570908a7c3c405323808944a976e522e74c8570 | 9b012c89eada1539b3f71d89528784e6d36fa5de | refs/heads/master | 2021-04-03T06:29:49.308103 | 2018-03-12T04:44:28 | 2018-03-12T04:44:28 | 124,795,356 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 5,841 | r | ts.R | library(plyr)
library(dplyr)
library(readr)
library(SenSrivastava)
library(ggplot2)
library(ggmap)
library(hexbin)
library(data.table)
library(choroplethrMaps)
library(choroplethr)
library(mapproj)
library(tidyr)
library(lubridate)
library(nycflights13)
library(stringr)
library(TSA)
library(forecast)
library(ModelMetrics)
library(forecast)
library(gtable)
library(grid)
# Load the review-level data and keep only the columns used for the
# time-series plots (review date, star rating, city).
yelp = read_csv("train_data.csv")
ts_data = yelp %>% transmute(date = date, stars = stars, city = city)
# Leftover from an unrelated (crime) analysis -- kept commented out.
#cri = group_by(cri, date, type) %>% summarise(n = n())
#crime = crime %>% separate(Date,sep=" ",into = c("date","time","daytime")) %>%
#  mutate(date=gsub(date,pattern="(.*)/(.*)/(.*)",replacement = "\\3-\\1-\\2")) %>%
#  mutate(date=as_date(date))
# Madison: daily mean star rating.
madison_ts = filter(ts_data, city == "Madison") %>%
  transmute(stars=stars, date = date) %>% group_by(date) %>%
  summarise(n = mean(stars))
# Madison: monthly mean star rating (year/month kept as grouping columns,
# recombined into a Date for plotting).
madison_month = filter(ts_data, city == "Madison") %>%
  transmute(stars=stars, date = date) %>% separate(date,sep="-",into = c("year","month","day")) %>%
  group_by(year,month) %>% summarise(n = mean(stars, na.rm = T)) %>%
  transmute(date = make_date(year = year, month = month), n = n)
# Madison: average of the January monthly means across years.
madison_jan = filter(ts_data, city == "Madison") %>%
  transmute(stars=stars, date = date) %>% separate(date,sep="-",into = c("year","month","day")) %>%
  group_by(year,month) %>% summarise(n = mean(stars, na.rm = T)) %>% filter(month == "01") %>% summarise(n = mean(n))
# Restrict to 2010 onward (year is character here; the comparison coerces).
madison_2010 = filter(madison_month, year >= 2010)
# Monthly mean stars for Madison since 2010.
# NOTE(review): `theme_set(theme_bw()) +` inside a ggplot chain also changes
# the *global* default theme as a side effect -- confirm this is intended.
ggplot(data = madison_2010)+
  geom_point(mapping = aes(x = date, y = n)) +
  geom_line(mapping = aes(x = date, y = n)) +
  theme_set(theme_bw()) +
  labs(title="Mean stars in Madison") +
  theme(plot.title = element_text(colour = "black", face = "bold", size = 35, vjust = 1)) +
  xlab("Date") + ylab("Stars") +
  theme(plot.title=element_text(hjust=0.5))+
  theme(axis.text.x = element_text(size = 12),
        axis.text.y = element_text(size = 12),
        axis.title.y = element_text(size = 15),
        axis.title.x = element_text(size = 15))
#LV
# Las Vegas: daily mean star rating.
lv_ts = filter(ts_data, city == "Las Vegas") %>%
  transmute(stars=stars, date = date) %>% group_by(date) %>%
  summarise(n = mean(stars))
# Las Vegas: monthly mean star rating.
lv_month = filter(ts_data, city == "Las Vegas") %>%
  transmute(stars=stars, date = date) %>% separate(date,sep="-",into = c("year","month","day")) %>%
  group_by(year,month) %>% summarise(n = mean(stars, na.rm = T)) %>%
  transmute(date = make_date(year = year, month = month), n = n)
# Las Vegas: average of the January monthly means across years.
lv_jan = filter(ts_data, city == "Las Vegas") %>%
  transmute(stars=stars, date = date) %>% separate(date,sep="-",into = c("year","month","day")) %>%
  group_by(year,month) %>% summarise(n = mean(stars, na.rm = T)) %>% filter(month == "01") %>% summarise(n = mean(n))
# Computed but not used below (kept for symmetry with the Madison section).
lv_2007 = filter(lv_month, year >= 2007)
# Daily mean stars for a hard-coded row window.
# NOTE(review): `lv_ts[2000:3978,]` assumes a fixed number of rows in the
# data -- verify this index range against the current data set.
ggplot(data = lv_ts[2000:3978,])+
  #geom_point(mapping = aes(x = date, y = n)) +
  geom_line(mapping = aes(x = date, y = n)) +
  theme_set(theme_bw()) +
  labs(title="Mean stars in LV") +
  theme(plot.title = element_text(colour = "black", face = "bold", size = 35, vjust = 1)) +
  xlab("Date") + ylab("Stars") +
  theme(plot.title=element_text(hjust=0.5))+
  theme(axis.text.x = element_text(size = 12),
        axis.text.y = element_text(size = 12),
        axis.title.y = element_text(size = 15),
        axis.title.x = element_text(size = 15))
### Overall (all cities) time series -------------------------------------
# Daily mean star rating over every review.
# Renamed from `ts` to `ts_daily` to avoid masking stats::ts().
ts_daily <- ts_data %>%
  transmute(stars = stars, date = date) %>% group_by(date) %>%
  summarise(n = mean(stars))
# Monthly mean star rating over every review.
ts_month <- ts_data %>%
  transmute(stars = stars, date = date) %>% separate(date, sep = "-", into = c("year", "month", "day")) %>%
  group_by(year, month) %>% summarise(stars = mean(stars, na.rm = TRUE)) %>%
  transmute(date = make_date(year = year, month = month), stars = stars)
# Left panel: monthly mean stars.
a1 <- ggplot(data = ts_month) +
  geom_point(mapping = aes(x = date, y = stars)) +
  geom_line(mapping = aes(x = date, y = stars)) +
  theme_set(theme_bw()) +
  labs(title = "Mean Stars") +
  xlab("Date") + ylab("Stars") +
  theme_bw() +
  theme(plot.title = element_text(colour = "black", face = "bold", size = 20, vjust = 1)) +
  theme(text = element_text(family = "Tahoma"),
        axis.text.x = element_text(colour = "black", size = 12),
        axis.text.y = element_text(colour = "black", size = 12),
        axis.title.x = element_text(colour = "black", size = 15),
        axis.title.y = element_text(colour = "black", size = 15),
        title = element_text(colour = "black", size = 32),
        axis.line = element_line(size = 0.3, colour = "black"),
        plot.title = element_text(hjust = 0.5))
#ggsave("meanstars_all.png", width = 30, height = 20, units = "cm")
# Monthly review counts.
ts_month_count <- ts_data %>%
  transmute(date = date) %>% separate(date, sep = "-", into = c("year", "month", "day")) %>%
  group_by(year, month) %>% summarise(count = n()) %>%
  transmute(date = make_date(year = year, month = month), count = count)
# Right panel: number of reviews per month.
a2 <- ggplot(data = ts_month_count) +
  geom_point(mapping = aes(x = date, y = count)) +
  geom_line(mapping = aes(x = date, y = count)) +
  theme_set(theme_bw()) +
  labs(title = "Number of Reviews per Month") +
  xlab("Date") + ylab("Count") +
  theme_bw() +
  theme(plot.title = element_text(colour = "black", face = "bold", size = 20, vjust = 1)) +
  theme(text = element_text(family = "Tahoma"),
        axis.text.x = element_text(colour = "black", size = 12),
        axis.text.y = element_text(colour = "black", size = 12),
        axis.title.x = element_text(colour = "black", size = 15),
        axis.title.y = element_text(colour = "black", size = 15),
        title = element_text(colour = "black", size = 32),
        axis.line = element_line(size = 0.3, colour = "black"),
        plot.title = element_text(hjust = 0.5))
#ggsave("reviewsnumber.png", width = 30, height = 20, units = "cm")
# Arrange the two panels side by side. plot_grid() comes from cowplot, which
# is never attached above, so call it through its namespace explicitly.
combined <- cowplot::plot_grid(a1, a2, labels = NULL, nrow = 1, align = "h")
# Pass the combined plot explicitly: a bare ggsave() would save last_plot()
# (i.e. a2), not the plot_grid() composition.
ggsave("time.png", plot = combined, width = 30, height = 14, units = "cm")
|
bebf61d1b86dcf49be062d79f7a31b5d22dd25e9 | 3d4e617dc530679024c97ad61c4d7445e7eb0932 | /01_Model.R | dee7f62f2389971947e3fce8c41d36d2bb7c31c1 | [] | no_license | rwilkes1/COMPEAT | 1e393d9fb0bf4d24e93d013fe6a0a8247faa73dd | 0727110042b46bb6bf6b5eacf76efcabeb9e89be | refs/heads/master | 2021-02-03T22:03:02.758594 | 2020-02-26T14:28:07 | 2020-02-26T14:28:07 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,832 | r | 01_Model.R | # Install and load R packages ---------------------------------------------
#
# Check to see if packages are installed. Install them if they are not, then load them into the R session.
# Install any packages in `pkg` that are not yet installed, then attach
# them all. Returns a named logical vector: TRUE where attaching succeeded.
ipak <- function(pkg) {
  installed <- installed.packages()[, "Package"]
  missing_pkgs <- pkg[!(pkg %in% installed)]
  if (length(missing_pkgs) > 0) {
    install.packages(missing_pkgs, dependencies = TRUE)
  }
  vapply(pkg, require, logical(1), character.only = TRUE)
}
# Packages required by this analysis; install (if needed) and attach them.
packages <- c("sf", "data.table", "ggplot2", "TTAinterfaceTrendAnalysis")
ipak(packages)
# Station Samples --------------------------------------------------------------
# Extract and Classify stations into OSPAR assessment units from ICES Oceanographic database into OSPAR COMP Assessment database
# Read classified station samples (tab-separated; "NULL" marks missing values)
stationSamples <- fread("Input/StationSamples.txt", sep = "\t", na.strings = "NULL", stringsAsFactors = FALSE, header = TRUE, check.names = TRUE)
# Dissolved inorganic Nitrogen - DIN (Winter) ----------------------------------
# Parameters: [NO3-N] + [NO2-N] + [NH4-N]
# Depth: <= 10
# Period: December - February
# Aggregation Method: Arithmetic mean of mean by station per year
# Copy data
wk <- stationSamples
# Count unique stations (printed for a quick sanity check)
wk[,.(count = uniqueN(StationID))]
# Create grouping variable: December samples belong to the FOLLOWING year's
# winter period (Dec-Feb window), so shift their year by +1.
wk$Period <- with(wk, ifelse(Month == 12, Year + 1, Year))
# Create indicator
# Combine nutrient components into a single value: NA when the input is empty
# or its first component is missing (presumably nitrate must be present --
# TODO confirm with the data owners); otherwise the sum of the non-missing
# components.
# FIX: the scalar condition used the vectorized `|`; `||` short-circuits and
# makes the scalar intent explicit. `all(is.na(x))` was redundant given the
# `is.na(x[1])` test, and the empty case is now handled explicitly.
coalesce <- function(x) {
  if (length(x) == 0 || is.na(x[1])) {
    NA
  } else {
    sum(x, na.rm = TRUE)
  }
}
# DIN = NO3-N + NO2-N + NH4-N, combined row-wise via coalesce().
wk$DIN..umol.l. <- apply(wk[, list(Nitrate..umol.l., Nitrite..umol.l., Ammonium..umol.l.)], 1, coalesce)
# Filter stations rows and columns --> AssessmentUnitID, Period, Depth, Temperature, Salinity, DIN
# (surface samples <= 10 m, Dec-Feb, winters 2006-2014, DIN present)
wk0 <- wk[Depth..m.db. <= 10 & (Month >= 12 | Month <= 2) & (Period >= 2006 & Period <= 2014) & !is.na(DIN..umol.l.), list(AssessmentUnitID = AssessmentUnitID.METAVAR.INDEXED_TEXT, Period, StationID, Depth = Depth..m.db., Temperature = Temperature..degC., Salinity = Salinity..., ES = DIN..umol.l.)]
# Get linear regression coefficients on Indicator~Salinity + mean Salinity.
# FIX: the model was previously refitted four times per group (twice via
# coef(), twice via summary()); fit once per assessment unit instead.
wk00 <- wk0[,
            {
              fit <- lm(ES ~ Salinity)
              fit_summary <- summary(fit)
              list(
                N = .N,
                MeanSalinity = mean(Salinity, na.rm = TRUE),
                B = coef(fit)[1],        # intercept
                A = coef(fit)[2],        # slope w.r.t. salinity
                P = fit_summary$coef[2, 4],   # p-value of the slope
                R2 = fit_summary$adj.r.squared)
            },
            by = AssessmentUnitID]
# Merge data tables (join the per-unit regression results back onto samples)
setkey(wk0, "AssessmentUnitID")
setkey(wk00, "AssessmentUnitID")
wk000 <- wk0[wk00]
# Normalise indicator concentration if the indicator has a significant relation to salinity e.g. above the 95% confidence level (p<0.05)
# ES_normalised = ES_observed + A * (S_reference - S_observed)
# https://www.ospar.org/site/assets/files/37302/national_common_procedure_report_2016_sweden.pdf
wk000$ESS <- with(wk000, ifelse(P < 0.05, ES + A * (MeanSalinity - Salinity), ES))
# Calculate indicator station mean --> AssessmentUnitID, Period, StationID, ES
wk1 <- wk000[, list(ES = mean(ES)), list(AssessmentUnitID, Period, StationID)]
# Calculate indicator annual mean --> AssessmentUnitID, Period, ES, SD, N
wk2 <- wk1[, list(ES = mean(ES), SD = sd(ES), N = .N), list(AssessmentUnitID, Period)]
# Calculate indicator assessment unit mean --> AssessmentUnitID, ES, SD, N
wk4 <- wk2[, list(ES = mean(ES), SD = sd(ES), N = sum(N)), list(AssessmentUnitID)]
# Salinity Mixing diagram per assessment unit
ggplot(wk0, aes(Salinity, ES)) +
  geom_point() +
  geom_smooth(method = lm, se = FALSE, formula = y ~ x) +
  facet_wrap(~AssessmentUnitID)
# Trend diagram per assessment unit
ggplot(wk000, aes(Period, ESS)) +
  geom_point() +
  facet_wrap(~AssessmentUnitID)
# Dissolved inorganic Phosphorus - DIP (Winter) --------------------------------
# Chlorophyll a (Summer) -------------------------------------------------------
# Dissolved Oxygen () ----------------------------------------------------------
d6327616cb55ba0c74668c392139f0741d65355d | aba0008e63f77b56c8d437faa3d77677e5c8aa69 | /0-easy/split_the_number.R | 78f709f5c3e99943b20030f3a7c9cddbd21d62cc | [] | no_license | OehlR/codeeval | d690a11a5a1c2a5bbfaebd607452c953f78706a8 | b269b99d1d639ec17688dd9f7e66e23e6711fc67 | refs/heads/master | 2021-05-02T08:58:46.005968 | 2017-11-03T22:25:12 | 2017-11-03T22:25:12 | 28,037,547 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 561 | r | split_the_number.R | args <- commandArgs(trailingOnly=TRUE)
## FOR LOCAL TEST
if(F) {
cat("3413289830 a-bcdefghij\n776 a+bc\n12345 a+bcde\n1232 ab+cd\n90602 a+bcde\n",file=x<-tempfile())
args<-list(x)
#unlink(x)
}
## BEGIN
con <- file(args[[1]], "r")
while(length(l <- readLines(con, n = 1, warn = FALSE)) > 0) {
x <- strsplit(strsplit(l,split=" ")[[1]],split="")
pos <- which(x[[2]]=="+" | x[[2]]=="-")
cat(eval(parse(text=paste0(paste(x[[1]][1:(pos-1)],collapse=""),x[[2]][pos],paste(x[[1]][pos:(length(x[[1]]))],collapse=""),collapse=""))),sep="\n")
}
close(con)
|
84c7aa0e284dae776c15be96f6bacb303bb30827 | 3c01db1a4b2f29873dab7ff00919ac529645b848 | /man/vis_som_TopoView.Rd | 422013f258a00be959fa3ac04b83993fb804bdd1 | [] | no_license | somdisco/SOMDisco | 8d48740702069f43260d2ec50595f0854bd9416a | 2e653ab4aa5af6aa2b9c960ea2c5ae4df60a59cc | refs/heads/master | 2022-12-29T03:08:06.403448 | 2020-10-16T16:16:13 | 2020-10-16T16:16:13 | 288,159,407 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,553 | rd | vis_som_TopoView.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vis_som.R
\name{vis_som_TopoView}
\alias{vis_som_TopoView}
\title{TopoView Visualization}
\usage{
vis_som_TopoView(
SOM,
ADJ,
add = F,
nu.pch = 16,
nu.cex = 1,
nu.col = "black",
edge.col = "darkorange",
edge.lwd_range = c(1, 5),
active = T,
subset = NULL
)
}
\arguments{
\item{SOM}{a SOM object}
\item{ADJ}{an adjacency matrix defining edges connecting SOM neurons (or prototypes)}
\item{add}{whether to create a new plotting device (=FALSE, default), or add to an existing one (=TRUE)}
\item{nu.pch}{see \code{vis_som_neurons}}
\item{nu.cex}{see \code{vis_som_neurons}}
\item{nu.col}{see \code{vis_som_neurons}}
\item{edge.lwd_range}{the min/max range of the plotted CONN edges, Default = c(1, 5).}
\item{active}{Optional, if the SOM object has been recalled,
restricts plotting (of vertices and edges) only to active neurons (those whose RF_size > 0).
Default = TRUE.}
\item{subset}{Optional, a vector of neuron indices to restrict the plotting to.
Default = NULL imposes no restriction (plots whole lattice)}
\item{edge.col}{line color of plotted edges, default = "darkorange".}
}
\description{
TopoView Visualization
}
\details{
The TopoView visualization is documented in \code{TopoRNet::vis_TopoView}. If the input \code{ADJ} is weighted and
\code{edge.lwd_range} spans a non-empty set, the visualized edge widths will represent the edge weights (larger weights = thicker edges).
}
\references{
\insertRef{Merenyietal2009}{SOMDisco}
}
|
9b13386ef963405600769b0a62f608064d388bf7 | e8db9e50f36055b44668f27fe075df51e647b6f6 | /src/day2-03-vector-plot-shapefiles-custom-legend.R | f9595d2bb04b2dbcae91340c4f18f14740780dcb | [] | no_license | huan7515/BCB503_advanced_geospatial_workshop | cff3346c907cb27e658fee200e95a3b79e8c7dd5 | 4435ce6a85a00b0345432da7f17a62b5a141c707 | refs/heads/main | 2023-04-19T17:47:50.797645 | 2021-04-29T20:49:23 | 2021-04-29T20:49:23 | 354,941,322 | 0 | 0 | null | 2021-04-29T20:49:23 | 2021-04-05T19:00:55 | R | UTF-8 | R | false | false | 7,311 | r | day2-03-vector-plot-shapefiles-custom-legend.R | #Title: day2-03-vector-plot-shapefiles-custom-legend.R
#BCB503 Geospatial Workshop, April 20th, 22nd, 27th, and 29th, 2021
#University of Idaho
#Data Carpentry Advanced Geospatial Analysis
#Instructors: Erich Seamon, University of Idaho - Li Huang, University of Idaho
library(raster)
library(rgdal)
library(ggplot2)
library(dplyr)
library(sf)
# learners will have this data loaded from an earlier episode
# Vector layers for the NEON Harvard Forest (HARV) site:
# area-of-interest boundary polygon, road lines, and the tower location point.
aoi_boundary_HARV <- st_read("data/NEON-DS-Site-Layout-Files/HARV/HarClip_UTMZ18.shp")
lines_HARV <- st_read("data/NEON-DS-Site-Layout-Files/HARV/HARV_roads.shp")
point_HARV <- st_read("data/NEON-DS-Site-Layout-Files/HARV/HARVtower_UTM18N.shp")
# Canopy height model raster, converted to a data frame (with x/y columns)
# so it can be plotted with ggplot2.
CHM_HARV <- raster("data/NEON-DS-Airborne-Remote-Sensing/HARV/CHM/HARV_chmCrop.tif")
CHM_HARV_df <- as.data.frame(CHM_HARV, xy = TRUE)
# One color per road TYPE level, used by scale_color_manual() below.
road_colors <- c("blue", "green", "navy", "purple")
## Load the Data
#To work with vector data in R, we can use the `sf` library. The `raster`
#package also allows us to explore metadata using similar commands for both
#raster and vector files. Make sure that you have these packages loaded.
#We will continue to work with the three shapefiles that we loaded in the
#Open and Plot Shapefiles in R episode.
## Plotting Multiple Shapefiles
#In the previous episode,
#we learned how to plot information from a single shapefile and do
#some plot customization including adding a custom legend. However,
#what if we want to create a more complex plot with many shapefiles
#and unique symbols that need to be represented clearly in a legend?
#Now, let's create a plot that combines our tower location (`point_HARV`),
#site boundary (`aoi_boundary_HARV`) and roads (`lines_HARV`) spatial objects.
#We will need to build a custom legend as well.
#To begin, we will create a plot with the site boundary as the first layer.
#Then layer the tower location and road data on top using `+`.
# Layer order controls draw order: AOI polygon first, then roads colored by
# TYPE, then the tower point on top. labs(title = ...) is equivalent to
# ggtitle(), and coord_sf() may appear anywhere in the chain.
ggplot() +
  geom_sf(color = "grey", fill = "grey", data = aoi_boundary_HARV) +
  geom_sf(mapping = aes(color = TYPE), size = 1, data = lines_HARV) +
  geom_sf(data = point_HARV) +
  coord_sf() +
  labs(title = "NEON Harvard Forest Field Site")
#Next, let's build a custom legend using the symbology (the colors and symbols)
#that we used to create the plot above. For example, it might be good if the
#lines were symbolized as lines. In the previous episode, you may have
#noticed that the default legend behavior for `geom_sf` is to draw a
#'patch' for each legend entry. If you want the legend to draw lines
#or points, you need to add an instruction to the `geom_sf` call -
#in this case, `show.legend = 'line'`.
# Same map, but with line-shaped legend keys: geom_sf defaults to drawing a
# 'patch' swatch for every legend entry, so show.legend = "line" is needed to
# make the road legend draw lines instead.
ggplot() +
  geom_sf(data = aoi_boundary_HARV, fill = "grey", color = "grey") +
  geom_sf(data = lines_HARV, aes(color = TYPE),
          show.legend = "line", size = 1) +
  # tower point mapped to fill so it gets its own legend entry
  geom_sf(data = point_HARV, aes(fill = Sub_Type), color = "black") +
  scale_color_manual(values = road_colors) +
  scale_fill_manual(values = "black") +
  ggtitle("NEON Harvard Forest Field Site") +
  coord_sf()
#Now lets adjust the legend titles by passing a `name` to the respective
#`color` and `fill` palettes.
# Give each legend a readable title via the `name` argument of its scale:
# the color scale becomes "Line Type", the fill scale "Tower Location".
ggplot() +
  geom_sf(data = aoi_boundary_HARV, fill = "grey", color = "grey") +
  geom_sf(data = point_HARV, aes(fill = Sub_Type)) +
  geom_sf(data = lines_HARV, aes(color = TYPE), show.legend = "line",
          size = 1) +
  scale_color_manual(values = road_colors, name = "Line Type") +
  scale_fill_manual(values = "black", name = "Tower Location") +
  ggtitle("NEON Harvard Forest Field Site") +
  coord_sf()
#Finally, it might be better if the points were symbolized as a symbol.
#We can customize this using `shape` parameters in our call to `geom_sf`: 16
#is a point symbol, 15 is a box.
## Data Tip
#To view a short list of `shape` symbols,
#type `?pch` into the R console.
# Same map again, but symbolize the tower as a square: shape 15 is a filled
# box, 16 a filled circle (see ?pch for the full symbol table).
ggplot() +
  geom_sf(data = aoi_boundary_HARV, fill = "grey", color = "grey") +
  geom_sf(data = point_HARV, aes(fill = Sub_Type), shape = 15) +
  geom_sf(data = lines_HARV, aes(color = TYPE),
          show.legend = "line", size = 1) +
  scale_color_manual(values = road_colors, name = "Line Type") +
  scale_fill_manual(values = "black", name = "Tower Location") +
  ggtitle("NEON Harvard Forest Field Site") +
  coord_sf()
## Challenge: Plot Polygon by Attribute
#1. Using the `NEON-DS-Site-Layout-Files/HARV/PlotLocations_HARV.shp`
#shapefile, create a map of study plot locations, with each point colored
#by the soil type (`soilTypeOr`). How many different soil types are
#there at this particular field site? Overlay this layer on top of the
#`lines_HARV` layer (the roads). Create a custom legend that applies line
#symbols to lines and point symbols to the points.
#2. Modify the plot above. Tell R to plot each point, using a different
#symbol of `shape` value.
## Answers
#First we need to read in the data and see how many
#unique soils are represented in the `soilTypeOr` attribute.
# Read the study plot locations so we can count the soil types present.
plot_locations <- st_read("data/NEON-DS-Site-Layout-Files/HARV/PlotLocations_HARV.shp")
# Since R 4.0 character columns are no longer converted to factors on read,
# so levels() on the raw soilTypeOr column returns NULL. Convert it to a
# factor explicitly; levels() then lists the distinct soil types.
plot_locations$soilTypeOr <- factor(plot_locations$soilTypeOr)
levels(plot_locations$soilTypeOr)
#Next we can create a new color palette with one color for
#each soil type.
blue_green <- c("blue", "darkgreen")
#Finally, we will create our plot.
# Roads drawn as lines, plot locations as filled circles (shape 21). The two
# legend guides are built up front so each scale call stays on one line: the
# line legend draws solid lines with no point symbol, and the soil legend
# draws filled points with no line.
line_guide <- guide_legend(override.aes = list(linetype = "solid", shape = NA))
fill_guide <- guide_legend(override.aes = list(linetype = "blank", shape = 21, colour = NA))
ggplot() +
  geom_sf(data = lines_HARV, aes(color = TYPE), show.legend = "line") +
  geom_sf(data = plot_locations, aes(fill = soilTypeOr),
          shape = 21, color = "black", show.legend = "point") +
  scale_color_manual(name = "Line Type", values = road_colors, guide = line_guide) +
  scale_fill_manual(name = "Soil Type", values = blue_green, guide = fill_guide) +
  ggtitle("NEON Harvard Forest Field Site") +
  coord_sf()
#If we want each soil to be shown with a different symbol, we can
#give multiple values to the `scale_shape_manual()` argument.
# Challenge answer 2: map soil type to both fill AND shape. Giving the shape
# and fill scales the same legend name ("Soil Type") merges them into a single
# legend; override.aes makes each legend key render with the right symbol.
ggplot() +
  geom_sf(data = lines_HARV, aes(color = TYPE), show.legend = "line", size = 1) +
  geom_sf(data = plot_locations, aes(fill = soilTypeOr, shape = soilTypeOr),
          show.legend = 'point', colour = "black", size = 3) +
  # shapes 21 (circle) and 22 (square) are the fillable point symbols
  scale_shape_manual(name = "Soil Type", values = c(21, 22)) +
  scale_color_manual(name = "Line Type", values = road_colors,
                     guide = guide_legend(override.aes = list(linetype = "solid", shape = NA))) +
  scale_fill_manual(name = "Soil Type", values = blue_green,
                    guide = guide_legend(override.aes = list(linetype = "blank", shape = c(21, 22),
                                                             color = blue_green))) +
  ggtitle("NEON Harvard Forest Field Site") +
  coord_sf()
## Challenge: Plot Raster & Vector Data Together
#You can plot vector data layered on top of raster data using the
#`+` to add a layer in `ggplot`. Create a plot that uses the NEON AOI
#Canopy Height Model `NEON_RemoteSensing/HARV/CHM/HARV_chmCrop.tif`
#as a base layer. On top of the CHM, please add:
#* The study site AOI.
#* Roads.
#* The tower location.
#Be sure to give your plot a meaningful title.
## Answers
# Challenge answer: the CHM raster forms the base layer; roads, the AOI
# outline, and the tower (star symbol, shape 8) are drawn on top of it.
ggplot() +
  geom_raster(aes(x = x, y = y, fill = HARV_chmCrop), data = CHM_HARV_df) +
  geom_sf(color = "black", data = lines_HARV) +
  geom_sf(color = "grey20", size = 1, data = aoi_boundary_HARV) +
  geom_sf(shape = 8, data = point_HARV) +
  coord_sf() +
  labs(title = "NEON Harvard Forest Field Site w/ Canopy Height Model")
|
6ab64e1ccc6c28bf0db7d3a03243f114acb43da3 | addb430906b1bb5f7ce034da99f0f39d7366309a | /plot4.R | 3b2083b737af257a1919353274bca5ca7f0cb1ec | [] | no_license | srenevic/ExData_Plotting1 | 52ec72accafab3a6052a518b2282d038165a182b | 6802e8d885d466c9e1ebde5f0ebc06e9abbc4d7b | refs/heads/master | 2021-01-16T20:33:18.516446 | 2015-03-08T16:48:25 | 2015-03-08T16:48:25 | 31,668,462 | 0 | 0 | null | 2015-03-04T16:57:24 | 2015-03-04T16:57:24 | null | UTF-8 | R | false | false | 1,310 | r | plot4.R | plot4 <- function(data_file) {
  # Read only the observations for 1 and 2 Feb 2007 by grepping the raw file
  # before parsing; na.strings="?" turns the file's '?' markers into NA.
  # NOTE(review): pipe(grep ...) requires a Unix-like shell with grep on the
  # PATH -- confirm this script is not expected to run on plain Windows R.
  data <- read.table(pipe(paste('grep "^[1-2]/2/2007"'," ",data_file)),sep=";",na.strings="?")
  # The grepped rows carry no header, so recover the column names from the
  # first row of the original file.
  names(data) <- unname(unlist(read.table(data_file,nrows=1,sep=";")[1,]))
  # Build a single timestamp column used as the x axis of every panel.
  data$dateTime <- strptime(paste(as.Date(data$Date,"%d/%m/%Y"),data$Time), "%Y-%m-%d %H:%M:%S")
  # Open the PNG device; everything below draws into plot4.png.
  png(file= "plot4.png",width=480,height=480)
  # 2x2 grid of panels, filled row by row.
  par(mfrow=c(2,2))
  # Panel 1: global active power over time.
  plot(data$dateTime,data$Global_active_power,type="l",ylab="Global Active Power",xlab="")
  # Panel 2: voltage over time.
  plot(data$dateTime,data$Voltage,type="l",ylab="Voltage",xlab="datetime")
  # Panel 3: the three sub-metering series overlaid in one panel. Every call
  # uses the same ylim and suppresses the y axis (yaxt="n") so the overlays
  # line up; par(new=TRUE) draws the next series on top of the current panel
  # instead of advancing to a new one.
  plot(data$dateTime,data$Sub_metering_1,type="l",col="black",ylim=c(0,40),ylab="Energy sub metering",xlab="", yaxt="n")
  axis(2,at=c(0,10,20,30),labels=c(0,10,20,30))
  par(new=TRUE)
  plot(data$dateTime,data$Sub_metering_2,type="l",col="red",ylim=c(0,40),ylab="Energy sub metering",xlab="", yaxt="n")
  par(new=TRUE)
  plot(data$dateTime,data$Sub_metering_3,type="l",col="blue",ylim=c(0,40),ylab="Energy sub metering",xlab="", yaxt="n")
  legend("topright",c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lty=c(1,1),col=c("black","red","blue"),bty="n")
  # Panel 4: global reactive power with a custom y axis from 0 to 0.5.
  plot(data$dateTime,data$Global_reactive_power,type="l",ylab="Global_reactive_power",xlab="datetime",yaxt="n",ylim=c(0,0.5))
  axis(2,cex.axis=0.9,at=c(0.0,0.1,0.2,0.3,0.4,0.5),label=c(0.0,0.1,0.2,0.3,0.4,0.5))
  # Close the device so the PNG is flushed to disk.
  dev.off()
}
|
8d892bb44e9f53252ec7c520a2e530255a72b183 | 549315bab20fa1ae03b4f21d72dbd171dce30f18 | /R_in_action/5_advanced_data_management/1.R | 687e847ed29be992796a87ee37df65d6f33109dc | [
"MIT"
] | permissive | HuangStomach/machine-learning | ff94a4aa97f82a61fdbcd854170562a71ad16ffa | 47689469c431e2a833437b38832d3fdceda7b2b2 | refs/heads/main | 2021-08-16T05:37:01.343676 | 2021-08-09T13:09:24 | 2021-08-09T13:09:50 | 178,584,824 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 148 | r | 1.R | x <- c(1, 2, 3, 4, 5, 6, 7, 8)
# Mean and standard deviation using the built-in functions.
mean(x)
sd(x)
# Below: the same computation written out step by step (deliberately verbose)
n <- length(x)
meanx <- sum(x) / n
# css = corrected sum of squares, sum of squared deviations from the mean
css <- sum((x - meanx)^2)
# sample standard deviation: divide by n - 1, matching sd(x)
sdx <- sqrt(css / (n - 1))
1182dff064023a2c5e556c65e8cc77e299fbace8 | 3bba52743cc37cd587df45d1e3cfb25fde65ce2b | /src/datacleansing/combine.R | 364b94eecf25a779a2011f378dedb42c2eda9b5a | [
"MIT"
] | permissive | kim3-sudo/march_madness_analysis | 5b4229453ccba27e6ce543c19691b2c06d0ab469 | 8d9c0b72f50e3fb1c0a7d8241a82afd4b63639ce | refs/heads/main | 2023-04-08T09:51:18.644655 | 2021-04-21T18:25:31 | 2021-04-21T18:25:31 | 342,004,796 | 0 | 0 | MIT | 2021-03-16T04:01:57 | 2021-02-24T19:01:00 | Python | UTF-8 | R | false | false | 4,397 | r | combine.R | ### March Madness Analysis
### Steven Lucas & Sejin Kim
### STAT 306 S21 @ Kenyon College
# Purpose ----
### This file will grab all of the PBP data from the remote repo
### and create a single R dataset as an export to the directory
### of your choice
# Set your output directory here! ----
# Absolute path where the combined players.rds / events.rds files will be
# written; the directory is created further below if it does not exist.
outputDir <- "/home/kim3/march_madness_data"
# Get all of the player and event data from the remote repo ----
# The per-year files live at a predictable URL
# (PlayByPlay_<year>/<Kind>_<year>.rds?raw=true), so download them in a loop
# instead of repeating twenty near-identical readRDS(url(...)) calls.
years <- 2010:2019
# Build the raw-download URL for one year of one kind ("Players" or "Events").
pbp_url <- function(kind, year) {
  sprintf(
    "https://github.com/kim3-sudo/march_madness_data/blob/main/PlayByPlay_%d/%s_%d.rds?raw=true",
    year, kind, year
  )
}
player_list <- lapply(years, function(yr) readRDS(url(pbp_url("Players", yr))))
events_list <- lapply(years, function(yr) readRDS(url(pbp_url("Events", yr))))

# Bind all of the player and event data ----
# do.call(rbind, ...) binds every year in a single call, avoiding the
# quadratic copy cost of growing a data frame with repeated rbind() calls.
players <- do.call(rbind, player_list)
events <- do.call(rbind, events_list)
# Proof the data ----
print("Showing the full dataframes")
print("Also printing the heads in the console")
View(players)
View(events)
head(players)
head(events)
# Write the outputs ----
# dir.exists() (not file.exists(), which is also TRUE for plain files) checks
# for the target directory; create it when missing, then save once -- the
# original duplicated the save logic in both branches of an if/else.
if (!dir.exists(outputDir)) {
  dir.create(outputDir, recursive = TRUE)
}
saveRDS(players, file.path(outputDir, "players.rds"))
saveRDS(events, file.path(outputDir, "events.rds"))
print("All done!")
|
4e8ff87d88c536cd04e0fe0e6af0103d0e05950d | 815581b69b43ff3caac4f614b2345a044b186e25 | /GettingAndCleaningData/run_analysis.R | 9c64cdd06d28db38987324112fb80f07451e0905 | [] | no_license | gosuchoi/datasciencecoursera | fda1bcbf0efddba86694f1e7d0c0a8c395a8c751 | c2d9815d985c20adcb89d5fa5ed3358728c3f82d | refs/heads/master | 2021-01-23T13:37:10.799280 | 2015-04-26T02:49:03 | 2015-04-26T02:49:03 | 30,798,994 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,162 | r | run_analysis.R | # function : run_analysis
# Author : Wooseog Choi
# Date : 2015. 4. 24
#
# This program is to generate tidy dataset from raw data given by
# the Getting and Cleaning Data class in Coursera.
#
run_analysis<-function()
{
    library(dplyr)
    # Feature table: column 1 = feature index, column 2 = feature name
    feature<-read.table("features.txt")
    # Get only mean and standard deviation features: grep returns 1 for a
    # matching name and integer(0) otherwise, so featureflag marks the
    # mean()/std() columns.
    # NOTE(review): the pattern mixes glob-style '*' into a regex; it still
    # matches because grep needs only a partial match -- confirm the intent
    # was ".*mean\\(\\).*|.*std\\(\\).*".
    featureflag<-lapply(data.frame(t(feature)), function(elt) grep("*mean\\(\\)*|*std\\(\\)*", elt[2]))
    selfeature<-subset(feature, featureflag == 1)
    cvector<-as.vector(selfeature[,1]) # column indices
    cnamevector<-as.vector(selfeature[,2]) # column names
    # Read test data
    testdata<-read.table("test/X_test.txt")
    # Select only mean and standard deviation columns
    testmeanstd<-select(testdata, cvector)
    # Add column names
    colnames(testmeanstd)<-cnamevector
    # Tag each row with its source set, then move the tag to column 1
    testmeanstd<-mutate(testmeanstd, DataType="test")
    testmeanstd<-select(testmeanstd,c(ncol(testmeanstd), 1:(ncol(testmeanstd) -1)))
    # Read activity file (one activity code per observation)
    testact<-read.table("test/y_test.txt")
    # Assign column name
    colnames(testact)<-c("Activity")
    # merge the activity table to data table
    testmeanstd<-cbind(testact, testmeanstd)
    # Read volunteers file (one subject id per observation)
    testsub<-read.table("test/subject_test.txt")
    # Assign column name
    colnames(testsub)<-c("Subject")
    # merge the subject table to data table
    testmeanstd<-cbind(testsub, testmeanstd)
    # Read training data and repeat the same preparation steps
    traindata<-read.table("train/X_train.txt")
    trainmeanstd<-select(traindata, cvector)
    colnames(trainmeanstd)<-cnamevector
    trainmeanstd<-mutate(trainmeanstd, DataType="train")
    trainmeanstd<-select(trainmeanstd,c(ncol(trainmeanstd), 1:(ncol(trainmeanstd) -1)))
    # Read activity file
    trainact<-read.table("train/y_train.txt")
    # Assign column name
    colnames(trainact)<-c("Activity")
    # merge the activity table to data table
    trainmeanstd<-cbind(trainact, trainmeanstd)
    # Read volunteers file
    trainsub<-read.table("train/subject_train.txt")
    # Assign column name
    colnames(trainsub)<-c("Subject")
    # merge the subject table to data table
    trainmeanstd<-cbind(trainsub, trainmeanstd)
    # Finally merge the two data sets into one table
    onedata<-merge(trainmeanstd, testmeanstd, all=TRUE)
    # replace the activity code with descriptive words from
    # activity_labels.txt (code in column 1, label in column 2)
    actlabels<-read.table("activity_labels.txt")
    onedata[,2]<-sapply(onedata[,2], function(etl) as.character(actlabels[etl, 2]))
    # sort the table by subject and activity, then write the merged data
    arrange(onedata, Subject, Activity) %>%
    write.table(file="merged_data.txt", row.names=FALSE)
    # generate a second tidy table with the mean of every measurement column
    # per (Subject, Activity, DataType) group
    aggregate(onedata[, 4:69], by=list(Subject=onedata$Subject, Activity=onedata$Activity, DataType=onedata$DataType), mean) %>%
    arrange( Subject, Activity) %>%
    write.table(file="result_mean.txt", row.names=FALSE)
}
396e6510668d1709a72e8c8278be8f193f5fa43b | 5d335b0908b199e03f88421ec2f72fb0eb5b1e6b | /plot4.R | 7aec7a1e52f0ed1a1c1787cf684cea7814aa1efc | [] | no_license | arun843p/ExData_Plotting1 | f19b12040c5258ff6f29ac5e7546fa9c6d790d68 | cb5e63e0de411f6784fcece48c39e35586b8e7dc | refs/heads/master | 2021-01-18T00:30:03.187068 | 2015-04-12T21:47:04 | 2015-04-12T21:47:04 | 33,519,963 | 0 | 0 | null | 2015-04-07T03:31:12 | 2015-04-07T03:31:10 | null | UTF-8 | R | false | false | 2,469 | r | plot4.R | plot4 <- function() {
##Get Required data for charting
df <- read_req_data()
##Convert Required Chart elements to Numeric class
df$Global_active_power <- as.numeric(df$Global_active_power)
df$Voltage <- as.numeric(df$Voltage)
df$Global_reactive_power <- as.numeric(df$Global_reactive_power)
df$Sub_metering_1 <- as.numeric(df$Sub_metering_1)
df$Sub_metering_2 <- as.numeric(df$Sub_metering_2)
df$Sub_metering_3 <- as.numeric(df$Sub_metering_3)
##Open & Create Blank png file
png(file= "plot4.png")
##Setting Plotting area for 4 plots
par(mfrow=c(2,2),mar=c(4,4,2,1),oma=c(2,0,0,1))
##1st Plot
with(df, plot(y= Global_active_power,x=Time, type = "l", ylab = "Gloabl Active Power",xlab=""))
##2nd Plot
with(a1, plot(y= Voltage,x=Time, type = "l", ylab = "Voltage",xlab="datetime"))
##3rd Plot
with(df, plot(y= Sub_metering_1,x=Time, type = "n", ylab = "",xlab=""))
with(df, lines(y= Sub_metering_1,x=Time, lty=1,lwd=1, ylab = "",xlab=""))
with(df, lines(y= Sub_metering_2,x=Time, lty=1, lwd=1, ylab = "",xlab="",col = "red"))
with(df, lines(y= Sub_metering_3,x=Time, lty=1, lwd=1, ylab = "",xlab="",col = "blue"))
##Legends
legend("topright",bty="n", pch=c(NA,NA,NA), col = c("black","red","blue"),legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), lwd=1, cex=0.8)
##Label
title(ylab="Energy Sub Metering")
##4th Plot
with(df, plot(y= Global_reactive_power,x=Time, type = "l", xlab = "datetime"))
##Close PNG file
dev.off()
}
## Read the household power consumption file and return the observations for
## 1-2 Feb 2007, with a parsed Time (POSIXlt) and abbreviated Weekday column.
## `path` defaults to the original hard-coded location, so existing callers
## using read_req_data() are unaffected, but the data can now live anywhere.
read_req_data <- function(path = "C:/Users/arpurohit/Desktop/R/Working Directory/Data/exdata-data-household_power_consumption/household_power_consumption.txt"){
        ## Read the semicolon-separated source file, keeping strings as characters
        df_main <- read.csv2(path, header = TRUE, stringsAsFactors = FALSE)
        ## Parse the Date column (stored as day/month/year)
        df_main$Date <- as.Date(df_main$Date, format = "%d/%m/%Y")
        ## Keep only the two target days (file is in chronological order, so
        ## this yields the same rows/order as binding the two day subsets)
        df_req <- subset(df_main, Date %in% as.Date(c("2007-02-01", "2007-02-02")))
        ## Combine Date and Time into a single timestamp for plotting
        df_req$Time <- strptime(paste(df_req$Date, df_req$Time), "%Y-%m-%d %H:%M:%S")
        ## Abbreviated weekday name (e.g. "Thu", "Fri")
        df_req$Weekday <- weekdays(df_req$Date, abbreviate = TRUE)
        df_req
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.