blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
aedbc65ff55d7b428d5a6dad2abd1195419eda90 | 9f7baaad4b6028ab5ed2fd20d8d1ca18045ffff0 | /man/Decumulate.Rd | e9b32beff1b481881e42bec69782d4998293f822 | [] | no_license | ArnaudBu/ReservingLad | d294adde36af6d498d02be023ed46a87715c4ff2 | 59e9854d90825e2acf0e7770e7e3ca9ce19c6519 | refs/heads/master | 2021-09-26T08:54:56.473788 | 2021-09-19T15:19:04 | 2021-09-19T15:19:04 | 102,781,474 | 4 | 1 | null | null | null | null | UTF-8 | R | false | true | 421 | rd | Decumulate.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Decumulate.R
\name{Decumulate}
\alias{Decumulate}
\title{Decumulates a triangle}
\usage{
Decumulate(triangle)
}
\arguments{
\item{triangle}{Cumulated triangle as a matrix}
}
\value{
The decumulated triangle as a matrix
}
\description{
\code{Decumulate} decumulates a triangle
}
\examples{
decTriangle <- Decumulate(triangleExampleEngland)
}
|
758e3300105bc8efa5b6f638731e7c28ec80925b | 109734b597c2d760725a1a050174a5d11b3c1a9b | /man/clickbox.Rd | 9f24f0fea23aaa2865903ba2bc84645000470746 | [] | no_license | rubak/spatstat | c293e16b17cfeba3e1a24cd971b313c47ad89906 | 93e54a8fd8276c9a17123466638c271a8690d12c | refs/heads/master | 2020-12-07T00:54:32.178710 | 2020-11-06T22:51:20 | 2020-11-06T22:51:20 | 44,497,738 | 2 | 0 | null | 2020-11-06T22:51:21 | 2015-10-18T21:40:26 | R | UTF-8 | R | false | false | 1,329 | rd | clickbox.Rd | \name{clickbox}
\alias{clickbox}
\title{Interactively Define a Rectangle}
\description{
Allows the user to specify a rectangle by
point-and-click in the display.
}
\usage{
clickbox(add=TRUE, \dots)
}
\arguments{
\item{add}{
Logical value indicating whether to create a new plot
(\code{add=FALSE}) or draw over the existing plot (\code{add=TRUE}).
}
\item{\dots}{
Graphics arguments passed to \code{\link[graphics]{polygon}} to plot the
box.
}
}
\value{
A window (object of class \code{"owin"}) representing the
selected rectangle.
}
\details{
This function allows the user to create a rectangular window
by interactively clicking on the screen display.
The user is prompted to point the mouse at any desired locations
for two corners of the rectangle,
and click the left mouse button to add each point.
The return value is a window (object of class \code{"owin"})
representing the rectangle.
This function uses the \R command \code{\link[graphics]{locator}} to
input the mouse clicks. It only works on screen devices such as
\sQuote{X11}, \sQuote{windows} and \sQuote{quartz}.
}
\seealso{
\code{\link{clickpoly}},
\code{\link{clickppp}},
\code{\link{clickdist}},
\code{\link[graphics]{locator}}
}
\author{
\spatstatAuthors.
}
\keyword{spatial}
\keyword{iplot}
|
fa9bb6a3ab0af4cd4a6f466d930b0fbc5c023b00 | 1357fa0b4f0fbf49d09eb7a0b772c5742640701e | /myrpkg/tests/testthat/test__init.R | 6bb21d3b1928888caf7182c4909a9edcc08ba7bc | [] | no_license | wenweiP4/rpkg_test | 321478588bf3e0b15acf80247a825be95883d994 | 73608d82b738a41c327423bebdf322fde96266a1 | refs/heads/master | 2023-03-18T23:46:52.634464 | 2021-03-02T19:27:07 | 2021-03-02T19:27:07 | 283,297,978 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 351 | r | test__init.R | context("Test initialization")
# Set the option `name` to the value produced by `func()`, but only when the
# option is not already set (or when `overwrite = TRUE`).
#
# Bug fix: the original condition used is.null(options(name)), which is
# always FALSE because options() returns a one-element *list* (possibly
# containing NULL) rather than NULL itself -- so the option was only ever
# set when overwrite = TRUE.  getOption() returns the option's value (or
# NULL when unset), which is what the check needs.
try_set_option <- function(name, func, overwrite = FALSE) {
  if (is.null(getOption(name)) || overwrite) {
    cat(glue::glue("\nSet the option '{name}'\n"))
    args <- as.list(setNames(func(), name))
    do.call(options, args)
  }
}
try_set_option('ans', function() 42)
|
f8d66d98bafc59a4ed6c18fa654e76670d855985 | ff33fdc95e43f00f3bfeab883a3e3dc6d4e9f50d | /vacation/R/Basic R Programing/ch04/ex2.R | a999ad9541a221b651b80e582ae66bc10e71369b | [
"Apache-2.0"
] | permissive | define16/Class | e2da95b744e2be8fc40342d127f0f401787b9a6f | 8b0771a348b2bcb19ba338ebff94326828a293ea | refs/heads/master | 2021-06-27T20:08:05.578408 | 2019-09-08T15:49:57 | 2019-09-08T15:49:57 | 148,975,197 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 267 | r | ex2.R | kor <- c(80, 60, 90, 70)
# English and math score vectors (the Korean scores `kor` are defined just
# above this block); note `math` has only 3 elements while the others have 4.
eng <- c(70, 50, 100, 80)
math <- c(95, 70, 95)
# Element-wise sum: `math` is recycled to length 4 (R warns, since 4 is not
# a multiple of 3).
totStu <- kor + eng + math
cat("totStu : "); print(totStu)
res <- kor / math # If vector lengths differ, the shorter vector is recycled from its start.
cat("res : "); print(res)
|
6183cab91e45cf07a60ca788927c690b6cde2115 | b435d2302878789d64d3448f8d6535888770f659 | /mwHashtags.R | 21daa07cf942873d97e26811da1c28763d7f5366 | [] | no_license | PeerChristensen/PossessivePronounsTwitter | 28e9c30e7a12e7163e14ff94cfeafd88e825177a | b6fe471bd7cad333c039378bc710f4d96f0459bf | refs/heads/master | 2020-04-11T17:03:29.662218 | 2019-01-02T20:29:23 | 2019-01-02T20:29:23 | 161,946,470 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,344 | r | mwHashtags.R | #Hashtags
# Find gendered hashtags in tweets and draw one word cloud per gender.
# Reads mwTweets.csv from the working directory.
library(tidyverse)
library(tidytext)
library(udpipe)
library(wordcloud)
# Keep only the row id, author, and (lower-cased) tweet text.
df <- read_csv("mwTweets.csv") %>%
  select(X1,screen_name,text) %>%
  mutate(text = tolower(text))
remove_reg <- "&|<|>"
# Drop retweets, strip the characters above, tokenize with the tweet-aware
# tokenizer, remove stop words (with and without apostrophes), and keep only
# hashtags containing "men"/"women" -- excluding the bare forms, plurals,
# and "ment*" false positives (e.g. words derived from "-ment").
df <- df %>%
  filter(!str_detect(text, "^RT")) %>%
  mutate(text = str_remove_all(text, remove_reg)) %>%
  unnest_tokens(hashtag, text, token = "tweets") %>%
  filter(!hashtag %in% stop_words$word,
         !hashtag %in% str_remove_all(stop_words$word, "'")) %>%
  filter(str_detect(hashtag, "^#")) %>%
  mutate(hashtag = str_remove(hashtag,"#")) %>%
  filter(str_detect(hashtag,"men|women")) %>%
  filter(hashtag != "men", hashtag != "women",hashtag != "mens", hashtag != "womens", !str_detect(hashtag,"ment"))
# Count occurrences of each remaining hashtag.
tags <- df %>%
  group_by(hashtag) %>%
  count() %>%
  arrange(desc(n))
# Label hashtags containing "women" as "f"; everything else (i.e. "men"
# hashtags, given the filter above) as "m".
tags <- tags %>%
  mutate(gender = case_when(str_detect(hashtag,"women") ~ "f",
                            !str_detect(hashtag,"women") ~ "m"))
menTags <- tags %>%
  filter(gender == "m")
womenTags <- tags %>%
  filter(gender == "f")
# Word clouds: blues for "men" hashtags, reds for "women" hashtags; only
# hashtags seen at least 3 times are drawn.
wordcloud(words = menTags$hashtag, freq = menTags$n, min.freq = 3, random.order=FALSE, rot.per=0.35,
          colors=brewer.pal(9,"Blues")[4:9])
wordcloud(words = womenTags$hashtag, freq = womenTags$n, min.freq = 3, random.order=FALSE, rot.per=0.35,
          colors=brewer.pal(9,"Reds")[4:9])
|
2387d4af807fb976b85d45988fd2e7b6c37dbc58 | 62eb9ded6a2704d077b62fd7e3b7378044d22c13 | /MySQL_create_tables_scripts/CREATE_patient_pkt.ANTHROPOMETRICS_CDC_NHANES_WHO.R | b796d783b6b45fe04c3b5dae3cfbcaa524146711 | [] | no_license | borumlab/db_mngr | 56acda296d814a0e0e38c9fc8130d35bb52615d0 | fea7332ff17d8b7e6b05396f27c2b74dcd206290 | refs/heads/master | 2020-05-21T13:55:30.542595 | 2017-03-14T20:34:49 | 2017-03-14T20:34:49 | 47,643,621 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,150 | r | CREATE_patient_pkt.ANTHROPOMETRICS_CDC_NHANES_WHO.R | #Script to upload Anthropometrics references to MySQL
#Step 1: set directory to filepath where the most up to date references are stored
setwd('G:/Grad Notebooks_E/e6/growth')
#Step 2: read the tab-separated CDC/NHANES/WHO reference files; empty
# strings and "NA" are treated as missing values.
CDC <- read.csv('ANTHROPOMETRICS_CDC_REFERENCES_SOURCE_08012016.txt', header=TRUE, sep="\t", na.strings=c("","NA"))
NHANES <- read.csv('ANTHROPOMETRICS_NHANES_REFERENCES_SOURCE_11182016.txt', header=TRUE, sep="\t", na.strings=c("","NA"))
WHO <- read.csv('ANTHROPOMETRICS_WHO_REFERENCES_SOURCE_08012016.txt', header=TRUE, sep="\t", na.strings=c("","NA"))
#Step 3: upload references into MySQL (appending to existing tables).
library(RMySQL)
#put in your user info here
# NOTE(review): SECURITY -- database credentials are hardcoded in source.
# Move user/password to environment variables or a config file excluded
# from version control, and rotate the exposed password.
connect <- dbConnect(MySQL(),user='jurate162001',password='Taurage54',dbname='patient_pkt',host='if-srvv-borum')
dbWriteTable(connect,value=CDC,name="ANTHROPOMETRICS_CDC_REFERENCES_SOURCE_08012016",append=TRUE)
dbWriteTable(connect,value=NHANES,name="ANTHROPOMETRICS_NHANES_REFERENCES_SOURCE_11182016",append=TRUE)
dbWriteTable(connect,value=WHO,name="ANTHROPOMETRICS_WHO_REFERENCES_SOURCE_08012016",append=TRUE)
# Collect every open RMySQL connection so the loop below can close them.
all_cons <- dbListConnections(MySQL())
for (con in all_cons) {
dbDisconnect(con)
} |
9247cb91b21ff035ebde9870faba70f44e33af17 | 6221d64a98fa5d0bb559878254a387984a03ac37 | /RF_newyork.R | 08396bd807c76d0ea2c2647e29843ab5aebec4cc | [] | no_license | deepstuff/census_linkage_1900_1910 | 5d21fb1f5735f22bd66bec32cc7dd00069a3b9e0 | 9cba96f285f291c2f8c27441e4616afb72a900dc | refs/heads/master | 2021-01-19T03:52:19.629713 | 2017-04-17T14:54:17 | 2017-04-17T14:54:17 | 87,340,211 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,151 | r | RF_newyork.R | library(randomForest)
library(dplyr)
library(mltools)
dir <- "R:/JoePriceResearch/record_linking/projects/deep_learning/training_sets"
setwd(dir)
df <- read.csv("ny_numeric.csv")
df <- df[complete.cases(df),]
#now lets mix up the test and training sets
# Train a random forest on a random train/test split of the data frame `df`
# (read from the calling/global environment) and evaluate its accuracy on
# the held-out rows.
#
# ratio     -- fraction of rows used for training (0 < ratio < 1).
# trees     -- number of trees passed to randomForest() (ntree).
# threshold -- probability cutoff at/above which a pair is predicted a match.
#
# Returns list(test, rf, accuracy): the scored test set (with rf_pred and
# rf_match columns added), the fitted forest, and the classification
# accuracy at `threshold`.
rfSample <- function(ratio, trees, threshold = .5) {
  N <- length(df$match)
  training_size <- floor(ratio * N)
  perm <- sample(1:N, N)             # random permutation of all row indices
  train <- df[perm[1:training_size], ]
  test <- df[perm[(training_size + 1):N], ]
  # Fit the random forest on the training partition.
  rf <- randomForest(match ~ racematch + yeardif + year +
                       given_match + surname_match +
                       statematch + countymatch + relationshipmatch +
                       female + father_birth_match + mother_birth_match + marital_status_match,
                     data = train, ntree = trees, type = "prob", importance = TRUE)
  test$rf_pred <- predict(rf, newdata = test)
  # Bug fix: the original compared against the undefined variable `i`
  # (silently picked up from the global environment when it existed)
  # instead of the `threshold` parameter, which was otherwise unused.
  test$rf_match <- test$rf_pred >= threshold
  accuracy <- sum(test$rf_match == test$match) / length(test$match)
  return(list(test, rf, accuracy))
}
# Fit one model on an 80/20 split and unpack the returned pieces.
sample <- rfSample(.8, 50,threshold=.5)
test <- sample[[1]]
model <- sample[[2]]
accuracy <- sample[[3]]
# Variable-importance table: drop the second importance column, round, and
# carry the feature names into an explicit column.
importance <- as.data.frame(importance(model))
importance <- importance[-2]
importance$`%IncMSE` <- round(importance$`%IncMSE`,3)
importance$features <- rownames(importance)
# Build the 2x2 confusion matrix (predicted x actual).
matrix_data <- c(nrow(filter(test,match & rf_match)),
                 nrow(filter(test,match & !rf_match)),
                 nrow(filter(test,!match & rf_match)),
                 nrow(filter(test,!match & !rf_match)))
evaluation_matrix <- as.data.frame(matrix(matrix_data,ncol=2))
names(evaluation_matrix) <- c("Actual Match", "Actual Non-match")
rownames(evaluation_matrix) <- c("Predicted Match", "Predicted Non-match")
# False negative / false positive rates relative to the whole test set.
false_neg <- nrow(filter(test,match & !rf_match))/nrow(test)
false_pos <- nrow(filter(test,!match & rf_match))/nrow(test)
# Print the evaluation summary (interactive use).
evaluation_matrix
accuracy
false_neg
false_pos
# NOTE(review): `thresh_test` is not created anywhere in this file, and
# ggplot2/ggthemes (for ggplot/theme_tufte) are never loaded here -- this
# plot presumably depends on an earlier interactive session; confirm.
ggplot(data=thresh_test,aes(x=threshold,y=accuracy)) +
  #geom_smooth(se=FALSE,color="black") +
  geom_point() +
  theme_tufte()
|
cb3626162755dac27fcaa14b6197dfeb3b895f4d | 4617424ce62a896f3b1245a17d5f8f891b796b81 | /man/text_sentiment.Rd | a7b8504facc830f9a531bbc128d2bfd39f1daab5 | [] | no_license | cspenn/cognizer | f43f12a263339aaf8c5870e1a7bd3796810aa2dc | 39c598da051956a0218ab6b76a452ef743515b6d | refs/heads/master | 2020-04-09T03:56:17.063645 | 2018-12-02T01:37:08 | 2018-12-02T01:37:08 | 160,003,499 | 0 | 0 | null | 2018-12-02T01:34:40 | 2018-12-02T01:34:40 | null | UTF-8 | R | false | true | 5,146 | rd | text_sentiment.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/text_cognizers.R
\name{text_sentiment}
\alias{text_concept}
\alias{text_emotion}
\alias{text_entity}
\alias{text_keywords}
\alias{text_language}
\alias{text_relations}
\alias{text_sentiment}
\alias{text_taxonomy}
\title{Process text with IBM Alchemy Language algorithms}
\usage{
text_sentiment(text, api_key, output_mode = "json", show_source = 0,
keep_data = "true", callback = NULL)
text_keywords(text, api_key, output_mode = "json", show_source = 0,
keep_data = "true", callback = NULL, max_retrieve = 50,
knowledge_graph = 0, sentiment = 0)
text_emotion(text, api_key, output_mode = "json", show_source = 0,
keep_data = "true", callback = NULL)
text_language(text, api_key, output_mode = "json", show_source = 0,
keep_data = "true", callback = NULL)
text_entity(text, api_key, output_mode = "json", show_source = 0,
keep_data = "true", callback = NULL, max_retrieve = 50,
knowledge_graph = 0, sentiment = 0, model = "ie-en-news",
coreference = 1, disambiguate = 1, linked_data = 1, quotations = 0,
structured_entity = 1)
text_concept(text, api_key, output_mode = "json", show_source = 0,
keep_data = "true", callback = NULL, max_retrieve = 8,
knowledge_graph = 0, linked_data = 1)
text_relations(text, api_key, output_mode = "json", show_source = 0,
keep_data = "true", callback = NULL, model = "ie-en-news")
text_taxonomy(text, api_key, output_mode = "json", show_source = 0,
keep_data = "true", callback = NULL, max_retrieve = 50,
knowledge_graph = 0, sentiment = 0, model = "ie-en-news",
coreference = 1, disambiguate = 1, linked_data = 1, quotations = 0,
structured_entity = 1)
}
\arguments{
\item{text}{Character vector containing strings to be processed.}
\item{api_key}{Character scalar containing api key obtained from Watson services.}
\item{output_mode}{Character scalar specifying returned data structure.
Alternative is xml.}
\item{show_source}{Intenger scalar specifying whether to send text
string back or not.}
\item{keep_data}{Character scalar specifying whether to share your data with
Watson services for the purpose of training their models.}
\item{callback}{Function that can be applied to responses to examine http status,
headers, and content, to debug or to write a custom parser for content.
The default callback parses content into a data.frame while dropping other
response values to make the output easily passable to tidyverse packages like
dplyr or ggplot2. For further details or debugging one can pass a fail or a
more compicated function.}
\item{max_retrieve}{Integer scalar fixing the number of keywords to extract
from text.}
\item{knowledge_graph}{Integer scalar indicating whether to grab a knowledge
graph associated with keywords. This is an additional transaction.}
\item{sentiment}{Integer scalar indicating whether to infer sentiment of
keywords, expressed as category and number. This is an additional transaction.}
\item{model}{Character scalar specifying one of three models which will extract
entities. Alternatives are 'ie-es-news', 'ie-ar-news' or a custom model.}
\item{coreference}{Integer scalar specifying whether to resolve coreferences into
detected entities.}
\item{disambiguate}{Integer scalar specifying whether to disambiguate
detected entities.}
\item{linked_data}{Integer scalar specifying whether to include links for
related data.}
\item{quotations}{Integer scalar specifying whether to include quotes related
to detected entities.}
\item{structured_entity}{Integer scalar specifying whether to extract structured
entities, such as Quantity, EmailAddress, TwitterHandle, Hashtag, and IPAddress.}
}
\value{
Data.frame containing parsed content in a tidy fashion.
}
\description{
\bold{text_sentiment}: Takes a vector of text and sends to Watson
services for various analyses. Requires basic authentication using api key.
\bold{text_keywords}: Keywords analysis extracts keywords from text, and
can optionally provide their sentiment and/or associated knowledge graph.
\bold{text_emotion}: Emotion analysis of text infers
scores for 7 basic emotions.
\bold{text_language}: Language detection infers
language of the provided text. Works best with at least 100 words.
\bold{text_entity}: Entity analysis extracts names of people,
products, places from the provided text. Additional arguments can provide
sentiment, knowledge graphs and quotations related to inferred entities.
\bold{text_concept}: Concept analysis infers categories based on
the text, but that are not necessarily in the text. Additional arguments can
provide sentiment and/or knowledge graphs related to inferred concepts.
\bold{text_relations}: Relation analysis infers associations among
entities.
\bold{text_taxonomy}: Taxonomy analysis infers hierarchical relations
among entities upto 5 levels deep.
}
\seealso{
Check \url{http://www.ibm.com/watson/developercloud/alchemy-language.html}
for further documentation, and \url{https://alchemy-language-demo.mybluemix.net/?cm_mc_uid=70865809903714586773519&cm_mc_sid_50200000=1468266111}
for a web demo.
}
|
aff54754fef48a785f17eb2fb4e289e0ceb83c56 | 5805523d88d318da0eb9483a43191d84b0439255 | /predict.R | 3ef8cc145a43177350fef594361e61da0e11db0e | [] | no_license | nattsp/ShinyTextPrediction | 99479bb6d93a9c4944be4a772b135d6138158351 | 79716c70ad88663c4b88e71c518deb367476c6fd | refs/heads/master | 2021-08-23T01:37:45.413988 | 2017-12-02T06:26:32 | 2017-12-02T06:26:32 | 111,287,331 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,668 | r | predict.R | ## Predict
#library(tm)
#library(readr)
library(quanteda)
library(tidytext)
library(stringr)
#library(dplyr)
library(data.table)
## Predict
# Tokenize a character vector with quanteda: build a corpus, strip numbers,
# punctuation, separators, Twitter markup and URLs, lower-case the tokens,
# and remove any token listed in `profanity`.
#
# NOTE(review): `profanity` is read from the calling/global environment --
# it must be defined before this function is called.
tokensFun <- function(xt){
  txtCorp <- corpus(xt)
  txtTok <- tokens(txtCorp
                   #, tolower = TRUE
                   , remove_numbers = TRUE
                   , remove_punct = TRUE
                   , remove_separators = TRUE
                   , remove_twitter = TRUE
                   , remove_url = TRUE
                   , verbose = TRUE)
  # Lower-casing is done as a separate step (the tolower argument above is
  # commented out).
  txtTok <- tokens_tolower(txtTok)
  txtTok <- tokens_select(txtTok
                          , profanity
                          , selection = "remove"
                          , verbose = quanteda_options("verbose"))
  return(txtTok)
}
# Return the final n-gram of order `num` (joined with spaces, e.g.
# "w1 w2 w3") from the first document of the quanteda tokens object `xt`.
preceedingWords <- function(xt, num) {
  ngrams <- tokens_ngrams(xt, n = num, concatenator = " ")
  doc_ngrams <- ngrams[[1]]
  doc_ngrams[length(doc_ngrams)]
}
# Look up candidates for the context string `xt` in the n-gram data.table
# `ngramDT` (columns used here: phrase, prob, ngram, docfreq), ordered by
# descending probability, and drop the bookkeeping columns before returning.
ngramPredict <- function(xt, ngramDT){
  # Subsetting creates a new data.table, so the := deletion below does not
  # modify the caller's table by reference.
  ngramPredict <- ngramDT[phrase == xt][order(-prob)]
  ngramPredict[, c("ngram", "phrase", "docfreq") := NULL]
  return(ngramPredict)
}
# Merge candidate tables from the 5-, 4-, 3- and 2-gram lookups and keep
# the best-scoring distinct words.  For each candidate word (`predict`) the
# score is the maximum `prob` seen across the stacked tables.
#
# NOTE(review): with fewer than `noGuess` distinct candidates the
# data.table row index 1:noGuess pads the result with NA rows -- confirm
# that downstream code tolerates this.
predictWordFromNgrams <- function(quin, quad, tri, bi){
  l = list(quin, quad, tri, bi)
  noGuess <- 10  # number of suggestions returned
  wordsTemp = as.data.table(rbindlist(l))
  words = wordsTemp[, .(score = max(prob)), by = .(predict)][1:noGuess]
  words
}
# Tokenize the input text and extract its trailing 4-, 3-, 2- and 1-grams
# (as space-joined strings) for use as lookup keys in the n-gram tables.
# When the text is too short for a given order, a harmless placeholder
# ("a a a a" / "a a a") is substituted so downstream lookups simply miss.
#
# Returns list(txtQuad, txtTri, txtBi, txtUni).
ngramGenerator <- function(xt){
  txtCorpus <- tokensFun(xt)
  # Bug fix: the original called ntoken(txtTokens), silently reading the
  # global `txtTokens` left over from the interactive script below instead
  # of the locally tokenized input.
  nWords <- ntoken(txtCorpus)[[1]]
  if (nWords > 3){
    txtQuad <- preceedingWords(txtCorpus, 4)
  } else { txtQuad <- "a a a a"}
  if (nWords >2){
    txtTri <- preceedingWords(txtCorpus, 3)
  } else {txtTri <- "a a a"}
  txtBi <- preceedingWords(txtCorpus, 2)
  txtUni <- preceedingWords(txtCorpus, 1)
  list(txtQuad = txtQuad, txtTri = txtTri, txtBi = txtBi, txtUni = txtUni)
}
# Score next-word candidates with a backoff scheme: look up the trailing
# 4-, 3-, 2- and 1-word contexts in the corresponding n-gram tables and
# discount each shorter context by a factor of 0.4 per backoff level
# (stupid-backoff-style weighting) before merging.
#
# `ngrams` is the list produced by ngramGenerator() with elements
# txtQuad/txtTri/txtBi/txtUni.
ngramsPredict <- function(ngrams, quingramDT, quadgramDT, trigramDT, bigramDT){
  quinPredict <- ngramPredict(ngrams$txtQuad, quingramDT)
  quadPredict <- ngramPredict(ngrams$txtTri, quadgramDT)
  triPredict <- ngramPredict(ngrams$txtBi, trigramDT)
  biPredict <- ngramPredict(ngrams$txtUni, bigramDT)
  #print(quinPredict)
  #print(quadPredict)
  #print(triPredict)
  #print(biPredict)
  # Apply the backoff discount in place; these tables are fresh copies
  # returned by ngramPredict(), so := does not touch the caller's data.
  quadPredict[, prob := 0.4 * prob]
  triPredict[, prob := 0.4 * 0.4 * prob]
  biPredict[, prob := 0.4 * 0.4 * 0.4 * prob]
  predictWord <- predictWordFromNgrams(quinPredict, quadPredict, triPredict, biPredict)
  return(predictWord)
}
# End-to-end next-word prediction: tokenize `txt`, extract its trailing
# n-grams, and score candidate words against the supplied n-gram tables.
mainPrediction <- function(txt, quingramDT, quadgramDT, trigramDT, bigramDT) {
  ngramsPredict(ngramGenerator(txt),
                quingramDT, quadgramDT, trigramDT, bigramDT)
}
# Test input text
txt = "The guy in front of me just bought a pound of bacon, a bouquet, and a case of"
txt = "#greatday this is a tweet fuck"
txt = "You're the reason why I smile everyday. Can you follow me please? It would mean the"
txt = "Hey sunshine, can you follow me and make me the"
txt = "Very early observations on the Bills game: Offense still struggling but the"
txt = "Go on a romantic date at the"
txt = "Well I'm pretty sure my granny has some old bagpipes in her garage I'll dust them off and be on my"
txt = "Ohhhhh #PointBreak is on tomorrow. Love that film and haven't seen it in quite some"
txt = "After the ice bucket challenge Louis will push his long wet hair out of his eyes with his little"
txt = "Be grateful for the good times and keep the faith during the"
txt = "If this isn't the cutest thing you've ever seen, then you must be"
txt = "When you breathe, I want to be the air for you. I'll be there for you, I'd live and I'd"
## Convert to ngrams using the last words in the sentence
txtCorpus <- corpus(txt)
txtTokens <- tokens(txtCorpus
#, tolower = TRUE
, remove_numbers = TRUE
, remove_punct = TRUE
, remove_separators = TRUE
, remove_twitter = TRUE
, remove_url = TRUE
, verbose = TRUE)
head(txtTokens)
txtTokens <- tokens_select(txtTokens
, profanity
, selection = "remove"
, verbose = quanteda_options("verbose"))
txtTokens
wordCount <- ntoken(txtTokens)
# Prepare the text
# Need to match the ngram data.tables
txtQuad <- tokens_ngrams(txtTokens, n = 4, concatenator = " ")
txtTri <- tokens_ngrams(txtTokens, n = 3, concatenator = " ")
txtBi <- tokens_ngrams(txtTokens, n = 2, concatenator = " ")
txtQuad <- tail(txtQuad[[1]], 1)
txtTri <- tail(txtTri[[1]], 1)
txtBi <- tail(txtBi[[1]], 1)
txtUni <- tail(txtTokens[[1]], 1)
txtQuad
txtTri
txtBi
txtUni
quinPredict <- quingramDT[phrase == txtQuad][order(-prob)]
quadPredict <- quadgramDT[phrase == txtTri][order(-prob)]
triPredict <- trigramDT[phrase == txtBi][order(-prob)]
biPredict <- bigramDT[phrase == txtUni][order(-docfreq)]
quinPredict[, c("ngram", "phrase", "docfreq") := NULL]
quadPredict[, c("ngram", "phrase", "docfreq") := NULL]
triPredict[, c("ngram", "phrase", "docfreq") := NULL]
biPredict[, c("ngram", "phrase", "docfreq") := NULL]
quadPredict[, prob := 0.4 * prob]
triPredict[, prob := 0.4 * 0.4 * prob]
biPredict[, prob := 0.4 * 0.4 * 0.4 * prob]
quinPredict
quadPredict
triPredict
biPredict
l = list(quinPredict, quadPredict, triPredict, biPredict)
predictWordTemp = as.data.table(rbindlist(l))
predictWord = predictWordTemp[, .(score = max(prob)), by = .(predict)][1:5]
predictWord
class(predictWord)
key(predictWord)
key(quinPredict)
key(quingramDT)
### Steps using functions
txtCorpus <- tokensFun(txt)
wordCount <- ntoken(txtTokens)[[1]]
if (wordCount > 3){
txtQuad <- preceedingWords(txtCorpus, 4)
}
if (wordCount >2){
txtTri <- preceedingWords(txtCorpus, 3)
}
txtBi <- preceedingWords(txtCorpus, 2)
txtUni <- preceedingWords(txtCorpus, 1)
if (wordCount > 3){
quinPredict <- ngramPredict(txtQuad, quingramDT)
}
if (wordCount > 2){
quadPredict <- ngramPredict(txtTri, quadgramDT)
}
triPredict <- ngramPredict(txtBi, trigramDT)
biPredict <- ngramPredict(txtUni, bigramDT)
predictWord <- predictWordFromNgrams(quinPredict, quadPredict, triPredict, biPredict)
l <- ngramGenerator(txt)
predictWord <- ngramsPredict(l, quingramDT, quadgramDT, trigramDT, bigramDT)
### Even more steps in functions
predictWord <- mainPrediction(txt, quingramDT, quadgramDT, trigramDT, bigramDT)
predictWord
predictWord[,predict]
##as it will appear in the shiny app
class(mainPrediction(txt, quingramDT, quadgramDT, trigramDT, bigramDT)[, .(predict)])
|
4196c333fcb5d018d0744e2c2468c2c73675ee9e | f9d4c59898b827e6548cd108c4ca2190fabadb8e | /man/as.list.tracks.Rd | 5883f03b89c32bf3c377ff216ebb585ee1346176 | [] | no_license | ingewortel/celltrackR | d272a6328c748cfe57a86b91f9e0b803d9d6cb4b | e8cc8b301c7026c3a5f68802fe2f0076fba83577 | refs/heads/master | 2022-07-21T07:09:07.687728 | 2022-07-18T12:44:51 | 2022-07-18T12:44:51 | 188,240,556 | 12 | 3 | null | 2021-04-12T18:47:01 | 2019-05-23T13:33:47 | R | UTF-8 | R | false | true | 623 | rd | as.list.tracks.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-structures.R
\name{as.list.tracks}
\alias{as.list.tracks}
\title{Convert from Tracks to List}
\usage{
\method{as.list}{tracks}(x, ...)
}
\arguments{
\item{x}{the \code{tracks} object to be coerced to a list.}
\item{...}{further arguments to be passed from or to other methods.}
}
\value{
A generic list of single tracks, where each track is a matrix with
\code{t/delta.t} rows and 4 columns. This looks a lot like a tracks object,
except that its class is not "tracks" anymore.
}
\description{
Coerces a \code{tracks} object to a list.
}
|
ecbdebf9d2c905164a59af3364100fd4de5b2d0e | 9c56ebf6d04be0f0666675e11b0856431c0bd375 | /man/patch_tag.Rd | f41d435585a55d37b6c530f881323b58e94bbad0 | [] | no_license | AndyZHGai/ggpatch | b307b1e715201258793bfd9b2469a60b70b5616b | edb488242d35978a873d0b2d498cacfcc642c8cb | refs/heads/main | 2023-08-22T04:58:53.935853 | 2021-10-31T01:07:48 | 2021-10-31T01:07:48 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 484 | rd | patch_tag.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/patch.R
\name{patch_tag}
\alias{patch_tag}
\title{Format the tag text of figures, usually used with patchwork}
\usage{
patch_tag(vjust = -1)
}
\arguments{
\item{vjust}{adjust the y_position of the tags}
}
\value{
}
\description{
Format the tag text of figures, usually used with patchwork
}
\examples{
library(ggplot2)
ggplot(mtcars) + geom_point(aes(mpg, disp)) + patch_tag()
}
\author{
Zhonghui Gai
}
|
f803f89b6737e36c8fda4010251d66a8dfa11cd8 | 8186662824b6f675f92358b1e37dbb3d0ca3636d | /src/plot_code/make_figures.r | aca6b6cca281ada1bdc579adb4d624718fb0bdaf | [
"MIT"
] | permissive | davidanthoff/paper-2017-sccprioritarianism | 812e43fa055e15e8fc49e3bea7f70839cd22db95 | a998d35b79a27d4a08661000ae47c9ccf64a80ff | refs/heads/master | 2021-01-20T01:59:40.058575 | 2017-03-23T19:06:34 | 2017-03-23T19:06:34 | 82,596,759 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,011 | r | make_figures.r | #################################################
#
# make_figures.r 16 FEB 2017
#
# Generates figures 2-5 in "Priority for the
# worse off and the Social Cost of Carbon".
#
#################################################
# Directories (relative paths -- assumes the working directory is the
# directory containing this script)
scriptdir <- "."
datadir <- "../../results"
plotdir <- "../../plots"
# Libraries and shared plotting helpers
library(RColorBrewer)
library(lattice)
library(fields)
source(paste(scriptdir, "/put_fig_letter.r", sep=""))
source(paste(scriptdir, "/oat_plot_functions.r", sep=""))
source(paste(scriptdir, "/contour_plot_functions.r", sep=""))
# Load the data
infile <- paste(datadir, "/output-scc.csv", sep="")
my.data <- read.csv(infile, header=T) # NOTE(review): prefer TRUE over T (T is reassignable)
# List of the normalization regions of interest
norm.regions <- c("Africa", "US", "Global", "World-Fair")
# Generate the figures -- each sourced script presumably reads my.data and
# norm.regions from this session; confirm before reordering.
source(paste(scriptdir, "/make_figure_2.r", sep=""))
source(paste(scriptdir, "/make_figure_3.r", sep=""))
source(paste(scriptdir, "/make_figure_4.r", sep=""))
source(paste(scriptdir, "/make_figure_5.r", sep=""))
# Done! |
a8f3fb1afddcfa3b169876bd5697680b6b06849c | 14c80ea0be9edea6bda1114a5bcf55cec002309c | /Scripts/Matching/Helper/GenerateMatches.R | 801934211054effdbf112dd1396a2d008e6943b2 | [] | no_license | fahmidah/bridge.collapses | 3c0b7bae9a854096d2848c4e690ac746480a5ca3 | 3710a254a67575f77e96a4e447fd50eebe14c187 | refs/heads/master | 2022-02-26T17:24:53.110648 | 2019-10-28T14:35:07 | 2019-10-28T14:35:07 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,833 | r | GenerateMatches.R | # Note: relies on MatchTarget source data existing in global environment
GenerateMatches <- function(MatchEntry, # 1-row data.frame for which to find matches
MatchType, # type of match to be performed (bin, stream, road, route, gage, dam)
MatchSource = "fail", # source of MatchEntry data
MatchTarget = "nbi", # source of MatchTarget data (potential matches)
maxStringDist= 3, # max string distance passed to PerformMatch
SAVE = FALSE, # save PossibleMatchRows to file if TRUE
OutPath = getwd(),# path to save output files of potential matches
LoadFromFile = FALSE, # loads potential matches from existing file if TRUE
LoadType = "r", # will look for ID#-"LoadType", so e.g., this will find "ID#-route" and "ID#-road"
LoadPath = getwd(),# path to loading potential matches from existing file
capCandPct = 0.5, # pass to PerformMatch, if nPossMatch > capPossPct*nMatchRows, return "-"
capCandN = 200, # pass to PerformMatch, if nPossMatch > capN, return "-"
VERBOSE = FALSE){ # print progress to screen
require(stringdist)
# check input data ----------------
if(class(MatchEntry)!="data.frame") MatchEntry <- as.data.frame(MatchEntry)
if(nrow(MatchEntry)!=1){
warning("Only one entry may be matched at a time.")
stop()
}
MatchTypes <- c("bin", "stream", "road", "route", "gage", "dam")
if(all(!(grepl(MatchType,MatchTypes)))){
warning("Match type not supported.")
stop()
}
MatchSources <- c("fail", "nbi", "gage", "dam")
if(all(!(grepl(MatchSource,MatchSources)))){
warning("Match source not supported.")
stop()
}
MatchTargets <- c("fail", "nbi", "gage", "dam")
if(all(!(grepl(MatchTarget,MatchTargets)))){
warning("Match target not supported.")
stop()
}
# setup matching particulars depending on data source and match type -------
MatchColOrigName <- c(bin = "BIN",
stream = "FEAT_UND",
road = "LOCATION",
route = "LOCATION",
gage = "FEAT_UND",
dam = "FEAT_UND",
nbiGage = "STANAME")[MatchType]
MatchIDENT <- c(fail = "ID",
gage = "STAID",
dam = "ID",
nbi = "OBJECTID_new")[MatchSource]
MatchEntry$IDENT <- MatchEntry[1,MatchIDENT[MatchSource]]
CountyCols <- list(fail = c("FIPS_1","FIPS_2", "FIPS_FROM_CITY_1", "FIPS_FROM_CITY_2"),#, "FIPS_FROM_CITY_3"),
gage = NA,
dam = NA,
nbi = "COUNTY_CODE_003")[[MatchSource]]
CountyCols <- CountyCols[MatchEntry[,CountyCols]!="" & !is.na(MatchEntry[,CountyCols]) & !is.null(MatchEntry[,CountyCols])]
if (VERBOSE) print(paste0("ID of MatchEntry is: ",MatchEntry$IDENT, ", and MatchType is: ", MatchType))
# set up match targets dataframe ---------
MatchTargetData <- c(fail = "df.Fail",
gage = "df.USgages",
dam = "df.GRanD",
nbi = "df.NBI")[MatchTarget]
if(!(MatchTargetData %in% ls(globalenv()))){
warning(paste0('Data frame of match target,',MatchTargetData,', not present in environment'))
stop()
}
assign("MatchTargetData",get(MatchTargetData))
PossibleMatches <- ""
# load possible match rows from file if specified --------
if(LoadFromFile){
if(VERBOSE) print(" Loading from file")
pattern <- paste0("IDENT",MatchEntry$IDENT,"-",LoadType)
matchFile <- list.files(path=LoadPath, pattern = pattern)[1]
load(file.path(LoadPath,matchFile)) # loads PossibleMatchRows
if(!(PossibleMatchRows %in% ls())){
warning('Unable to find file from which to load.')
stop()
}
SubsetPossibleMatches <- PossibleMatchRows[!grepl("IDENT",PossibleMatchRows) & !grepl("-",PossibleMatches)]
if(length(SubsetPossibleMatches==0)) {
PossibleMatchRows <- paste0("IDENT",MatchEntry$IDENT)
}
else{ #PossibleMatchRows remains as-is
MatchTargetData <- MatchTargetData[gsub("\\<[[:alnum:]]{2}[.]","",SubsetPossibleMatches),]
if(VERBOSE) print(" Successfully used subset from file.")
}
}
else PossibleMatchRows <- paste0("IDENT",MatchEntry$IDENT)
# check that data is present before proceeding -------------
if(is.na(MatchEntry[1,MatchColOrigName]) | MatchEntry[1,MatchColOrigName] == "" | is.null(MatchEntry[1,MatchColOrigName])){
if(VERBOSE) print(" No data in original field")
if(!LoadFromFile){
PossibleMatchRows <- c(PossibleMatchRows, "-")
}
if(SAVE) save(PossibleMatchRows, file=file.path(OutPath,paste0(MatchTarget,"-","IDENT",MatchEntry$ID,"-",MatchType,".RData")))
if (VERBOSE) print("*****")
return(PossibleMatchRows)
}
# limit possible match rows to state-only unless gage or dam (should not affect load from file)---------
TargetStates <- ifelse(MatchType %in% c("bin","road","route","stream"),
MatchEntry$STFIPS,
c(MatchEntry$STFIPS,unlist(ls.Adj.STFIPS[as.character(MatchEntry$STFIPS)])))
MatchTargetData <- MatchTargetData[MatchTargetData$STFIPS %in% TargetStates,]
PossibleMatches <- ""
# start with county matches (will skip if no counties present)
for (j in CountyCols){
if(VERBOSE) print(paste("Checking county with FIPS", MatchEntry[1,j]))
MatchTargetsCounty <- MatchTargetData[MatchTargetData$COUNTY_CODE_003 == MatchEntry[,j],]
if(nrow(MatchTargetsCounty)==0){
if(VERBOSE) print(" No bridges in county")
next
}
PossibleMatchRows<- c(PossibleMatchRows, paste0("IDENT", MatchEntry$IDENT,"-FIPS",MatchEntry[,j]))
PossibleMatches <- PerformMatch(MatchEntry, MatchTargetsCounty, MatchType,
maxStringDist = maxStringDist, capCandPct = capCandPct, capCandN = capCandN,
VERBOSE = VERBOSE)
if(grepl("-",PossibleMatches[1]) & VERBOSE) print(" No county matches")
# record matches
PossibleMatchRows <- c(PossibleMatchRows, PossibleMatches)
}
# if no matches from county (or no county), try state(s)
if (all(grepl("IDENT",PossibleMatchRows) | grepl("^-",PossibleMatchRows))){
if(PossibleMatches[1]=="" | grepl("-",PossibleMatches[1])){
if(VERBOSE) print(" No county-matches, checking state")
PossibleMatchRows<- c(PossibleMatchRows, paste0("IDENT", MatchEntry$IDENT,"-STFIPS"))
PossibleMatches <- PerformMatch(MatchEntry, MatchTargetData, MatchType,
maxStringDist = maxStringDist, capCandPct = capCandPct, capCandN = capCandN,
VERBOSE = VERBOSE)
}
if((PossibleMatches[1]=="" | grepl("-",PossibleMatches[1])) & VERBOSE) print(paste(" No matches in state", MatchEntry[1,"STATE_CODE"]))
# record matches
PossibleMatchRows <- c(PossibleMatchRows, PossibleMatches)
}
if (VERBOSE) print("*****")
if (all(grepl("IDENT",PossibleMatchRows) | grepl("^-",PossibleMatchRows)) & LoadFromFile == TRUE){
# if had matches, but then the second match run was unsuccessful, return the original matches
if (length(SubsetPossibleMatchRows>=1)) load(file.path(LoadPath,matchFile))
}
if(SAVE) save(PossibleMatchRows, file=file.path(OutPath,paste0(MatchTarget,"-","IDENT",MatchEntry$ID,"-",MatchType,".RData")))
return(PossibleMatchRows)
}
|
30066df9506afa834470ca73464d54921747a993 | 1d9d3356e820606e5dd5a465dd80428cc6388f68 | /script/mycophyloplot.R | a4c29ee85c11e380cf7aca1a08e3aa8ff7265f10 | [
"MIT"
] | permissive | hettling/phylo-primates-sim | 1506d821a54ac3acdd1c89c4f8fb3983b51ca98f | 62ddbc8ee8e9eb549989cddfe4b0c9f96c49c016 | refs/heads/master | 2021-01-10T10:43:12.398387 | 2016-04-21T13:11:16 | 2016-04-21T13:11:16 | 45,845,003 | 0 | 1 | null | 2016-04-15T11:35:12 | 2015-11-09T14:57:40 | R | UTF-8 | R | false | false | 9,860 | r | mycophyloplot.R | ## This script defines a modified version of ape's 'cophyloplot', named
## 'mycolphyloplot'. The modified function can take an additional set of taxa,
## branches and taxon names for these taxa are then colored in red.
# Modified version of ape's cophyloplot (see file header): draws two
# phylogenies face to face, forwarding an additional `exemplars` taxon set
# to myplotCophylo2() for highlighting.
#
# NOTE(review): `exemplars` is not a formal argument of this function --
# the two calls below pick it up from the calling/global environment.
# Consider adding `exemplars = vector()` to the signature, mirroring
# myplotCophylo2().
mycophyloplot <- function (x, y, assoc = NULL, use.edge.length = FALSE, space = 0,
    length.line = 1, gap = 2, type = "phylogram", rotate = FALSE,
    col = par("fg"), lwd = par("lwd"), lty = par("lty"), show.tip.label = TRUE,
    font = 3, ...)
{
    if (is.null(assoc)) {
        # No associations supplied: use an empty matrix so no links drawn.
        assoc <- matrix(ncol = 2)
        print("No association matrix specified. Links will be omitted.")
    }
    if (rotate == TRUE) {
        # Interactive mode: redraw after each node click, rotating the
        # clicked node of the corresponding tree, until the user exits.
        cat("\n Click on a node to rotate (right click to exit)\n\n")
        repeat {
            res <- myplotCophylo2(x, y, assoc = assoc, use.edge.length = use.edge.length,
                space = space, length.line = length.line, gap = gap,
                type = type, return = TRUE, col = col, lwd = lwd,
                lty = lty, show.tip.label = show.tip.label, font = font, exemplars=exemplars)
            click <- identify(res$c[, 1], res$c[, 2], n = 1)
            # Clicks on internal nodes of the left tree rotate x; clicks on
            # internal nodes of the right tree rotate y.
            if (click < length(res$a[, 1]) + 1) {
                if (click > res$N.tip.x)
                  x <- rotate(x, click)
            }
            else if (click < length(res$c[, 1]) + 1) {
                if (click > length(res$a[, 1]) + res$N.tip.y)
                  y <- rotate(y, click - length(res$a[, 1]))
            }
        }
        on.exit(cat("done\n"))
    }
    else myplotCophylo2(x, y, assoc = assoc, use.edge.length = use.edge.length,
        space = space, length.line = length.line, gap = gap,
        type = type, return = FALSE, col = col, lwd = lwd, lty = lty,
        show.tip.label = show.tip.label, font = font, exemplars=exemplars)
}
## Collect the labels of all tips descending from `node` in an ape-style
## tree (a list with $tip.label and a two-column $edge matrix).  Internal
## nodes (index > number of tips) are expanded breadth-first via
## get_children(); tip indices are resolved to their labels.  Returns the
## labels in the order the tips are encountered.
get_terminals <- function(tree, node) {
    n.tips <- length(tree$tip.label)
    tips.found <- vector()
    frontier <- c(node)
    while (length(frontier) > 0) {
        ## dequeue the first pending node
        nd <- frontier[1]
        frontier <- frontier[-1]
        if (nd > n.tips) {
            ## internal node: enqueue its children for later expansion
            frontier <- c(frontier, get_children(tree, nd))
        } else {
            ## tip: record its label
            tips.found <- c(tips.found, tree$tip.label[nd])
        }
    }
    return(tips.found)
}
## Return the child node indices of `node`: the second column of the tree's
## edge matrix for every edge whose first column (the parent) equals `node`.
## Yields a zero-length vector when `node` has no outgoing edges.
get_children <- function(tree, node) {
    edges <- tree$edge
    is.child.edge <- edges[, 1] == node
    return(edges[is.child.edge, 2])
}
## Workhorse for mycophyloplot(): computes node coordinates for both trees,
## draws them facing each other, and connects associated tips.  Adapted from
## ape's internal plotCophylo2; the modification colors every branch and tip
## label of `y` whose descendant tips intersect `exemplars` in red.
##
## NOTE(review): relies on ape (plotPhyloCoor, Ntip) and on the sibling
## helper get_terminals() defined in this file.  All arguments except
## `exemplars` default to themselves (e.g. `assoc = assoc`), so they are
## effectively required when the function is called directly.  Returns the
## list of computed coordinates (`res`) only when `return == TRUE`.
myplotCophylo2 <- function (x, y, assoc = assoc, use.edge.length = use.edge.length,
    space = space, length.line = length.line, gap = gap, type = type,
    return = return, col = col, lwd = lwd, lty = lty, show.tip.label = show.tip.label,
    font = font, exemplars=vector(), ...)
{
    res <- list()
    ## Horizontal room needed for the tip labels of each tree.
    left <- max(nchar(x$tip.label, type = "width")) + length.line
    right <- max(nchar(y$tip.label, type = "width")) + length.line
    space.min <- left + right + gap * 2
    if ((space <= 0) || (space < space.min))
        space <- space.min
    N.tip.x <- Ntip(x)
    N.tip.y <- Ntip(y)
    res$N.tip.x <- N.tip.x
    res$N.tip.y <- N.tip.y
    ## Node coordinates: `a` for the left tree, `b` for the right tree
    ## (drawn leftwards so the two trees face each other).
    a <- plotPhyloCoor(x, use.edge.length = use.edge.length,
        type = type)
    res$a <- a
    b <- plotPhyloCoor(y, use.edge.length = use.edge.length,
        direction = "leftwards", type = type)
    ## Normalize both y-axes to start at 0.
    a[, 2] <- a[, 2] - min(a[, 2])
    b[, 2] <- b[, 2] - min(b[, 2])
    res$b <- b
    ## `b2` rescales the right tree onto the left tree's scale and shifts it
    ## to the right of the left tree by `space`.
    b2 <- b
    b2[, 1] <- b[1:nrow(b), 1] * (max(a[, 1])/max(b[, 1])) +
        space + max(a[, 1])
    b2[, 2] <- b[1:nrow(b), 2] * (max(a[, 2])/max(b[, 2]))
    res$b2 <- b2
    ## `c` stacks the coordinates of both trees (shadows base::c locally;
    ## kept as in the original ape code).
    c <- matrix(ncol = 2, nrow = nrow(a) + nrow(b))
    c[1:nrow(a), ] <- a[1:nrow(a), ]
    c[nrow(a) + 1:nrow(b), 1] <- b2[, 1]
    c[nrow(a) + 1:nrow(b), 2] <- b2[, 2]
    res$c <- c
    ## Empty canvas sized to hold both trees; everything below is drawn with
    ## low-level graphics calls.
    plot(c, type = "n", xlim = NULL, ylim = NULL, log = "", main = NULL,
        sub = NULL, xlab = NULL, ylab = NULL, ann = FALSE, axes = FALSE,
        frame.plot = FALSE)
    if (type == "cladogram") {
        ## Left tree: plain edges.
        for (i in 1:(nrow(a) - 1)){
            segments(a[x$edge[i, 1], 1],
                a[x$edge[i, 1], 2], a[x$edge[i, 2], 1], a[x$edge[i,
                2], 2] , lwd=1)
        }
        ## Right tree: an edge is red when any tip below it is an exemplar.
        for (i in 1:(nrow(b) - 1)) {
            mycolor="black"
            child = y$edge[i, 2]
            terminals = get_terminals(y, child)
            if (any (terminals %in% exemplars)) {
                mycolor="red"
            }
            segments(b2[y$edge[i, 1],
                1], b2[y$edge[i, 1], 2], b2[y$edge[i, 2], 1], b2[y$edge[i,
                2], 2], col=mycolor, lwd=lwd)
        }
    }
    if (type == "phylogram") {
        ## Left tree: for every internal node draw the vertical connector
        ## and one horizontal branch per child.
        for (i in (N.tip.x + 1):nrow(a)) {
            l <- length(x$edge[x$edge[, 1] == i, ][, 1])
            for (j in 1:l) {
                segments(a[x$edge[x$edge[, 1] == i, ][1, 1],
                  1], a[x$edge[x$edge[, 1] == i, 2], 2][1], a[x$edge[x$edge[,
                  1] == i, ][1, 1], 1], a[x$edge[x$edge[, 1] ==
                  i, 2], 2][j], lwd=lwd)
                segments(a[x$edge[x$edge[, 1] == i, ][1, 1],
                  1], a[x$edge[x$edge[, 1] == i, 2], 2][j], a[x$edge[x$edge[,
                  1] == i, 2], 1][j], a[x$edge[x$edge[, 1] ==
                  i, 2], 2][j], lwd=lwd)
            }
        }
        ## Right tree: same drawing, but each child branch is colored red
        ## when the subtree below it contains an exemplar tip.
        for (i in (N.tip.y + 1):nrow(b)) {
            ## l is the number of children
            l <- length(y$edge[y$edge[, 1] == i, ][, 1])
            mycolor="black"
            terminals = get_terminals(y, i)
            parent <- unique(y$edge[y$edge[, 1] == i, ][, 1])
            children <- y$edge[y$edge[, 1] == i, ][, 2]
            for (j in 1:l) {
                child <- children[j]
                current.terminals <- get_terminals(y, child)
                if (any (current.terminals %in% exemplars)) {
                    mycolor="red"
                }
                else {
                    mycolor="black"
                }
                ## vertical lines
                x0 <- b2[y$edge[y$edge[, 1] == i, ][1, 1],1]
                y0 <- b2[y$edge[y$edge[, 1] == i, 2], 2][1]
                x1 <- b2[y$edge[y$edge[, 1] == i, ][1, 1], 1]
                y1 <- b2[y$edge[y$edge[,1] == i, 2], 2][j]
                y.from <- b2[y$edge[y$edge[,1] == i, 2], 2][j]
                y.to <- mean(b2[y$edge[y$edge[,1] == i, 2], 2])
                segments(x0,y.from,x1,y.to, col=mycolor, lwd=lwd)
                ## horizontal lines
                xx0 <- b2[y$edge[y$edge[, 1] == i, ][1, 1],1]
                yy0 <- b2[y$edge[y$edge[, 1] == i, 2], 2][j]
                xx1 <- b2[y$edge[y$edge[, 1] == i, 2], 1][j]
                yy1 <- b2[y$edge[y$edge[,1] == i, 2], 2][j]
                segments(xx0, yy0, xx1, yy1, col=mycolor, lwd=lwd)
            }
        }
    }
    if (show.tip.label) {
        ## NOTE(review): cex = 0 renders the labels at zero size;
        ## presumably intentional (links below are anchored on the label
        ## positions) — confirm before changing.
        text(a[1:N.tip.x, ], cex = 0, font = font, pos = 4, labels = x$tip.label)
        text(b2[1:N.tip.y, ], cex = 0, font = font, pos = 2,
            labels = y$tip.label, col=ifelse(y$tip.label %in% exemplars, "red", "black"))
    }
    ## Recycle col/lwd/lty so there is one value per association link.
    lsa <- 1:N.tip.x
    lsb <- 1:N.tip.y
    decx <- array(nrow(assoc))
    decy <- array(nrow(assoc))
    if (length(col) == 1)
        colors <- c(rep(col, nrow(assoc)))
    else if (length(col) >= nrow(assoc))
        colors <- col
    else colors <- c(rep(col, as.integer(nrow(assoc)/length(col)) +
        1))
    if (length(lwd) == 1)
        lwidths <- c(rep(lwd, nrow(assoc)))
    else if (length(lwd) >= nrow(assoc))
        lwidths <- lwd
    else lwidths <- c(rep(lwd, as.integer(nrow(assoc)/length(lwd)) +
        1))
    if (length(lty) == 1)
        ltype <- c(rep(lty, nrow(assoc)))
    else if (length(lty) >= nrow(assoc))
        ltype <- lty
    else ltype <- c(rep(lty, as.integer(nrow(assoc)/length(lty)) +
        1))
    ## Draw the association links between matched tips of the two trees;
    ## tips are matched by label via the `assoc` matrix (col 1 = x, col 2 = y).
    for (i in 1:nrow(assoc)) {
        if (show.tip.label) {
            decx[i] <- strwidth(x$tip.label[lsa[x$tip.label ==
                assoc[i, 1]]])
            decy[i] <- strwidth(y$tip.label[lsb[y$tip.label ==
                assoc[i, 2]]])
        }
        else {
            decx[i] <- decy[i] <- 0
        }
        if (length.line) {
            ## Short leader lines from each tip label toward the gap.
            segments(a[lsa[x$tip.label == assoc[i, 1]], 1] +
                decx[i] + gap, a[lsa[x$tip.label == assoc[i,
                1]], 2], a[lsa[x$tip.label == assoc[i, 1]], 1] +
                gap + left, a[lsa[x$tip.label == assoc[i, 1]],
                2], col = colors[i], lwd = lwidths[i], lty = ltype[i])
            segments(b2[lsb[y$tip.label == assoc[i, 2]], 1] -
                (decy[i] + gap), b2[lsb[y$tip.label == assoc[i,
                2]], 2], b2[lsb[y$tip.label == assoc[i, 2]],
                1] - (gap + right), b2[lsb[y$tip.label == assoc[i,
                2]], 2], col = colors[i], lwd = lwidths[i], lty = ltype[i])
        }
        ## The main connector spanning the gap between the two trees.
        segments(a[lsa[x$tip.label == assoc[i, 1]], 1] + gap +
            left, a[lsa[x$tip.label == assoc[i, 1]], 2], b2[lsb[y$tip.label ==
            assoc[i, 2]], 1] - (gap + right), b2[lsb[y$tip.label ==
            assoc[i, 2]], 2], col = colors[i], lwd = lwidths[i],
            lty = ltype[i])
    }
    if (return == TRUE)
        return(res)
}
|
765b06d5c4fc1bb8ffe267d5a22a072c64700e73 | 2655fcbde895737e36a1f2283e0cd51765e98168 | /Taxonomy/R/remove_attributes.R | 2c12b2a6cfb14754aec23d14c36303179a3d719b | [] | no_license | DDTD-IS/DDTD-IS | 5b7128df844289fa804bc9a3750c73898001bfb4 | eb21f343a7224793af823cd580f206d2fb48b604 | refs/heads/master | 2020-09-21T19:21:24.316497 | 2019-11-29T17:38:21 | 2019-11-29T17:38:21 | 224,897,542 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 818 | r | remove_attributes.R | #' @title Simple function that removes all attributes associated with a feature
#' @description This function simply removes any attributes from the features of a data frame (comments, class, ...)
#' @param dataset \code{data.frame}\cr
#' The dataframe of which the attributes shall be removed
#' @return \code{dataset}\cr
#' A dataset without attributes
#' @family Helper
#' @export
## Strip per-column attributes ("comment", "T", "F") from a data frame and,
## for columns carrying a class vector of length >= 2, keep only the second
## class.  Returns the cleaned data frame (the caller's copy is unchanged).
##
## Fix: the original indexed columns with `deparse(as.name(i))`, an identity
## round trip for syntactic names that wraps non-syntactic column names
## (e.g. "my col") in backticks and makes the lookup fail with "undefined
## columns selected".  Plain character indexing is equivalent and works for
## all column names.
remove_attributes <-
  function(dataset) {
    for (col.name in colnames(dataset)) {
      attr(dataset[, col.name], "comment") <- NULL
      attr(dataset[, col.name], "T") <- NULL
      attr(dataset[, col.name], "F") <- NULL
      if (length(class(dataset[, col.name])) >= 2) {
        ## Drop the first (most specific) class, retaining the second.
        attr(dataset[, col.name], "class") <-
          class(dataset[, col.name])[2]
      }
    }
    return(dataset)
  }
|
cafaf1345f04c6afeeec9d8688c741e3aad76f59 | 10c97b033b7d93d500a4dd563234eef128dc43ab | /tests/testthat/www.fleaflicker.com/api/FetchLeagueTransactions-925711.R | ca34eef56936ad676891668984105c7b983be2de | [
"MIT"
] | permissive | tonyelhabr/ffscrapr | f38e7c87bb65ddbf6e1c9736c16e56944760af46 | 4e0944da56d8890c441c4abe9c25bc2477a1e388 | refs/heads/main | 2023-03-10T08:48:01.840281 | 2020-12-16T06:19:07 | 2020-12-16T06:19:07 | 328,791,006 | 0 | 0 | NOASSERTION | 2021-01-11T23:59:24 | 2021-01-11T21:03:44 | null | UTF-8 | R | false | false | 99,936 | r | FetchLeagueTransactions-925711.R | structure(list(
url = "https://www.fleaflicker.com/api/FetchLeagueTransactions?sport=NFL&league_id=206154&team_id=1373475&result_offset=330",
status_code = 200L, headers = structure(list(
date = "Tue, 24 Nov 2020 01:19:58 GMT",
`content-type` = "application/json;charset=utf-8", vary = "accept-encoding",
`content-encoding` = "gzip"
), class = c(
"insensitive",
"list"
)), all_headers = list(list(
status = 200L, version = "HTTP/2",
headers = structure(list(
date = "Tue, 24 Nov 2020 01:19:58 GMT",
`content-type` = "application/json;charset=utf-8",
vary = "accept-encoding", `content-encoding` = "gzip"
), class = c(
"insensitive",
"list"
))
)), cookies = structure(list(
domain = logical(0),
flag = logical(0), path = logical(0), secure = logical(0),
expiration = structure(numeric(0), class = c(
"POSIXct",
"POSIXt"
)), name = logical(0), value = logical(0)
), row.names = integer(0), class = "data.frame"),
content = charToRaw("{\"items\":[{\"timeEpochMilli\":\"1539165600000\",\"transaction\":{\"type\":\"TRANSACTION_DROP\",\"player\":{\"proPlayer\":{\"id\":8475,\"nameFull\":\"Nick Perry\",\"nameShort\":\"N. Perry\",\"proTeamAbbreviation\":\"FA\",\"position\":\"LB\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/8475.png\",\"nameFirst\":\"Nick\",\"nameLast\":\"Perry\",\"proTeam\":{\"abbreviation\":\"FA\",\"location\":\"Free\",\"name\":\"Agent\",\"isFreeAgent\":true},\"positionEligibility\":[\"LB\",\"LB\"]},\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingActualStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"DEFENDER\",\"lastX\":[{\"duration\":1},{\"duration\":3},{\"duration\":5}]},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1538827200000\",\"transaction\":{\"type\":\"TRANSACTION_CLAIM\",\"player\":{\"proPlayer\":{\"id\":13010,\"nameFull\":\"Montae Nicholson\",\"nameShort\":\"M. 
Nicholson\",\"proTeamAbbreviation\":\"FA\",\"position\":\"S\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/13010.png\",\"nameFirst\":\"Montae\",\"nameLast\":\"Nicholson\",\"proTeam\":{\"abbreviation\":\"FA\",\"location\":\"Free\",\"name\":\"Agent\",\"isFreeAgent\":true},\"positionEligibility\":[\"S\"]},\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingActualStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"DEFENDER\",\"lastX\":[{\"duration\":1},{\"duration\":3},{\"duration\":5}]},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1538827200000\",\"transaction\":{\"type\":\"TRANSACTION_DROP\",\"player\":{\"proPlayer\":{\"id\":3501,\"nameFull\":\"Darren Sproles\",\"nameShort\":\"D. 
Sproles\",\"proTeamAbbreviation\":\"FA\",\"position\":\"RB\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/3501.png\",\"nameFirst\":\"Darren\",\"nameLast\":\"Sproles\",\"proTeam\":{\"abbreviation\":\"FA\",\"location\":\"Free\",\"name\":\"Agent\",\"isFreeAgent\":true},\"positionEligibility\":[\"RB\"]},\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingActualStats\":[{\"category\":{\"id\":22,\"abbreviation\":\"Yd\",\"nameSingular\":\"Rushing Yard\",\"namePlural\":\"Rushing Yards\"}},{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"RUSHER\",\"lastX\":[{\"duration\":1},{\"duration\":3},{\"duration\":5}]},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1538560800000\",\"transaction\":{\"type\":\"TRANSACTION_CLAIM\",\"player\":{\"proPlayer\":{\"id\":8596,\"nameFull\":\"Robert Turbin\",\"nameShort\":\"R. 
Turbin\",\"proTeamAbbreviation\":\"FA\",\"position\":\"RB\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/8596.png\",\"nameFirst\":\"Robert\",\"nameLast\":\"Turbin\",\"proTeam\":{\"abbreviation\":\"FA\",\"location\":\"Free\",\"name\":\"Agent\",\"isFreeAgent\":true},\"positionEligibility\":[\"RB\"]},\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingActualStats\":[{\"category\":{\"id\":22,\"abbreviation\":\"Yd\",\"nameSingular\":\"Rushing Yard\",\"namePlural\":\"Rushing Yards\"}},{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"RUSHER\",\"lastX\":[{\"duration\":1},{\"duration\":3},{\"duration\":5}]},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1538560800000\",\"transaction\":{\"type\":\"TRANSACTION_DROP\",\"player\":{\"proPlayer\":{\"id\":9419,\"nameFull\":\"Kenjon Barner\",\"nameShort\":\"K. Barner\",\"proTeamAbbreviation\":\"TB\",\"position\":\"RB\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/9419.png\",\"nflByeWeek\":13,\"news\":[{\"timeEpochMilli\":\"1606175610000\",\"contents\":\"Barner is active for Monday's game versus the Rams.\",\"analysis\":\"Barner was elevated from the practice squad for Monday's game. His presence will force Ke'Shawn Vaughn to serve as a healthy scratch for a third straight game. 
Barner is expected to add depth as the No. 4 running back in the rotation, and he could chip in as a kick or punt returner as well.\",\"title\":\"Suiting up Monday\"}],\"nameFirst\":\"Kenjon\",\"nameLast\":\"Barner\",\"proTeam\":{\"abbreviation\":\"TB\",\"location\":\"Tampa Bay\",\"name\":\"Buccaneers\"},\"positionEligibility\":[\"RB\"]},\"requestedGames\":[{\"game\":{\"id\":6311,\"away\":{\"abbreviation\":\"LAR\",\"location\":\"Los Angeles\",\"name\":\"Rams\"},\"home\":{\"abbreviation\":\"TB\",\"location\":\"Tampa Bay\",\"name\":\"Buccaneers\"},\"startTimeEpochMilli\":\"1606180500000\",\"status\":\"IN_PROGRESS\",\"segment\":1,\"segmentSecondsRemaining\":790,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"stateFootball\":{\"down\":2,\"distance\":8,\"fieldLine\":40,\"fieldLineAbsolute\":40,\"description\":\"2nd & 8 at TB 40\"}},\"stats\":[{\"category\":{\"id\":22,\"abbreviation\":\"Yd\",\"nameSingular\":\"Rushing Yard\",\"namePlural\":\"Rushing Yards\"}},{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"statsProjected\":[{\"category\":{\"id\":22,\"abbreviation\":\"Yd\",\"nameSingular\":\"Rushing Yard\",\"namePlural\":\"Rushing Yards\"},\"value\":{\"value\":1.0,\"formatted\":\"1\"}},{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"formatted\":\"0/0\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"},\"value\":{\"value\":0.2,\"formatted\":\"0.2\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + 
Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"},\"value\":{\"formatted\":\"0\"}}],\"participant\":\"HOME\",\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"hasPossession\":true}],\"viewingProjectedPoints\":{\"formatted\":\"0\"},\"viewingActualStats\":[{\"category\":{\"id\":22,\"abbreviation\":\"Yd\",\"nameSingular\":\"Rushing Yard\",\"namePlural\":\"Rushing Yards\"}},{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"viewingProjectedStats\":[{\"category\":{\"id\":22,\"abbreviation\":\"Yd\",\"nameSingular\":\"Rushing Yard\",\"namePlural\":\"Rushing Yards\"},\"value\":{\"value\":1.0,\"formatted\":\"1\"}},{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"formatted\":\"0/0\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"},\"value\":{\"value\":0.2,\"formatted\":\"0.2\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams 
TDs\"},\"value\":{\"formatted\":\"0\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"RUSHER\",\"rankFantasy\":{\"ordinal\":887,\"positions\":[{\"position\":{\"label\":\"RB\",\"group\":\"START\",\"eligibility\":[\"RB\"],\"colors\":[\"DRAFT_BOARD_GREEN\"]},\"ordinal\":152,\"formatted\":\"152\",\"rating\":\"RATING_VERY_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":1.65,\"formatted\":\"1.65\"},\"duration\":1},{\"value\":{\"value\":1.65,\"formatted\":\"1.65\"},\"duration\":3},{\"value\":{\"value\":1.65,\"formatted\":\"1.65\"},\"duration\":5}],\"seasonTotal\":{\"value\":1.65,\"formatted\":\"1.65\"},\"seasonAverage\":{\"value\":1.65,\"formatted\":\"1.65\"}},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1538325170000\",\"transaction\":{\"player\":{\"proPlayer\":{\"id\":7883,\"nameFull\":\"Ron Parker\",\"nameShort\":\"R. 
Parker\",\"proTeamAbbreviation\":\"FA\",\"position\":\"S\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/7883.png\",\"nameFirst\":\"Ron\",\"nameLast\":\"Parker\",\"proTeam\":{\"abbreviation\":\"FA\",\"location\":\"Free\",\"name\":\"Agent\",\"isFreeAgent\":true},\"positionEligibility\":[\"S\"]},\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingActualStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"DEFENDER\",\"lastX\":[{\"duration\":1},{\"duration\":3},{\"duration\":5}]},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1538325170000\",\"transaction\":{\"type\":\"TRANSACTION_DROP\",\"player\":{\"proPlayer\":{\"id\":11317,\"nameFull\":\"Neal Sterling\",\"nameShort\":\"N. 
Sterling\",\"proTeamAbbreviation\":\"FA\",\"position\":\"TE\",\"nameFirst\":\"Neal\",\"nameLast\":\"Sterling\",\"proTeam\":{\"abbreviation\":\"FA\",\"location\":\"Free\",\"name\":\"Agent\",\"isFreeAgent\":true},\"positionEligibility\":[\"TE\"]},\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingActualStats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"RECEIVER\",\"lastX\":[{\"duration\":1},{\"duration\":3},{\"duration\":5}]},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1538161750000\",\"transaction\":{\"player\":{\"proPlayer\":{\"id\":13859,\"nameFull\":\"Troy Fumagalli\",\"nameShort\":\"T. Fumagalli\",\"proTeamAbbreviation\":\"DEN\",\"position\":\"TE\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/13859.png\",\"news\":[{\"timeEpochMilli\":\"1606015946000\",\"contents\":\"Fumagalli was promoted to Denver's active roster from the practice squad Saturday, Nick Kosmider of The Athletic reports.\",\"analysis\":\"Fumagalli will join the active roster as an extra player for the second straight contest. 
The 25-year-old had two catches for 12 yards on 12 offensive snaps last week and could play a similar role Sunday with Noah Fant (ribs) still less than 100 percent.\",\"title\":\"Elevated for Week 11\"}],\"nameFirst\":\"Troy\",\"nameLast\":\"Fumagalli\",\"proTeam\":{\"abbreviation\":\"DEN\",\"location\":\"Denver\",\"name\":\"Broncos\"},\"positionEligibility\":[\"TE\"]},\"requestedGames\":[{\"game\":{\"id\":6413,\"away\":{\"abbreviation\":\"MIA\",\"location\":\"Miami\",\"name\":\"Dolphins\"},\"home\":{\"abbreviation\":\"DEN\",\"location\":\"Denver\",\"name\":\"Broncos\"},\"startTimeEpochMilli\":\"1606079100000\",\"status\":\"FINAL_SCORE\",\"awayScore\":13,\"homeScore\":20,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"WIN\",\"awayResult\":\"LOSE\"},\"stats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"formatted\":\"0/1\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"},\"value\":{\"formatted\":\"0\"}}],\"statsProjected\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"value\":100.0,\"formatted\":\"1/1\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving 
Yards\"},\"value\":{\"value\":12.08,\"formatted\":\"12.1\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true},\"value\":{\"value\":0.01,\"formatted\":\"0\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"},\"value\":{\"value\":0.05,\"formatted\":\"0.1\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"participant\":\"HOME\",\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingProjectedPoints\":{\"formatted\":\"0\"},\"viewingActualStats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"viewingProjectedStats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"value\":100.0,\"formatted\":\"1/1\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"},\"value\":{\"value\":12.08,\"formatted\":\"12.1\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true},\"value\":{\"value\":0.01,\"formatted\":\"0\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams 
TDs\"},\"value\":{\"value\":0.05,\"formatted\":\"0.1\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"RECEIVER\",\"rankFantasy\":{\"ordinal\":827,\"positions\":[{\"position\":{\"label\":\"TE\",\"group\":\"START\",\"eligibility\":[\"TE\"],\"colors\":[\"DRAFT_BOARD_YELLOW\"]},\"ordinal\":94,\"formatted\":\"94\",\"rating\":\"RATING_VERY_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":3.7,\"formatted\":\"3.7\"},\"duration\":1},{\"value\":{\"value\":3.7,\"formatted\":\"3.7\"},\"duration\":3},{\"value\":{\"value\":3.7,\"formatted\":\"3.7\"},\"duration\":5}],\"seasonTotal\":{\"value\":3.7,\"formatted\":\"3.7\"},\"seasonAverage\":{\"value\":3.7,\"formatted\":\"3.7\"}},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1538161721000\",\"transaction\":{\"player\":{\"proPlayer\":{\"id\":12013,\"nameFull\":\"Vernon Hargreaves\",\"nameShort\":\"V. 
Hargreaves\",\"proTeamAbbreviation\":\"HOU\",\"position\":\"CB\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/12013.png\",\"nflByeWeek\":8,\"nameFirst\":\"Vernon\",\"nameLast\":\"Hargreaves\",\"proTeam\":{\"abbreviation\":\"HOU\",\"location\":\"Houston\",\"name\":\"Texans\"},\"positionEligibility\":[\"CB\"]},\"requestedGames\":[{\"game\":{\"id\":6300,\"away\":{\"abbreviation\":\"NE\",\"location\":\"New England\",\"name\":\"Patriots\"},\"home\":{\"abbreviation\":\"HOU\",\"location\":\"Houston\",\"name\":\"Texans\"},\"startTimeEpochMilli\":\"1606068000000\",\"status\":\"FINAL_SCORE\",\"awayScore\":20,\"homeScore\":27,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"WIN\",\"awayResult\":\"LOSE\"},\"stats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"},\"value\":{\"value\":3.0,\"formatted\":\"3\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"formatted\":\"0\"}}],\"statsProjected\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"value\":0.71,\"formatted\":\"0.7\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo 
Tackles\"},\"value\":{\"value\":2.94,\"formatted\":\"2.9\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"value\":0.07,\"formatted\":\"0.1\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"formatted\":\"0\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"participant\":\"HOME\",\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingProjectedPoints\":{\"formatted\":\"0\"},\"viewingActualStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"viewingProjectedStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"value\":0.71,\"formatted\":\"0.7\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo 
Tackles\"},\"value\":{\"value\":2.94,\"formatted\":\"2.9\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"value\":0.07,\"formatted\":\"0.1\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"formatted\":\"0\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"DEFENDER\",\"rankFantasy\":{\"ordinal\":243,\"positions\":[{\"position\":{\"label\":\"CB\",\"group\":\"START\",\"eligibility\":[\"CB\"],\"colors\":[\"DRAFT_BOARD_PURPLE\"]},\"ordinal\":44,\"formatted\":\"44\",\"rating\":\"RATING_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":11.0,\"formatted\":\"11\"},\"duration\":1,\"overPerforming\":true},{\"value\":{\"value\":9.67,\"formatted\":\"9.67\"},\"duration\":3},{\"value\":{\"value\":9.6,\"formatted\":\"9.6\"},\"duration\":5}],\"seasonTotal\":{\"value\":78.0,\"formatted\":\"78\"},\"seasonAverage\":{\"value\":8.666667,\"formatted\":\"8.67\"},\"seasonsStandartDeviation\":{\"value\":3.3665009,\"formatted\":\"3.37\"},\"seasonConsistency\":\"RATING_GOOD\"},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1538000064000\",\"transaction\":{\"player\":{\"proPlayer\":{\"id\":9419,\"nameFull\":\"Kenjon Barner\",\"nameShort\":\"K. Barner\",\"proTeamAbbreviation\":\"TB\",\"position\":\"RB\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/9419.png\",\"nflByeWeek\":13,\"news\":[{\"timeEpochMilli\":\"1606175610000\",\"contents\":\"Barner is active for Monday's game versus the Rams.\",\"analysis\":\"Barner was elevated from the practice squad for Monday's game. 
His presence will force Ke'Shawn Vaughn to serve as a healthy scratch for a third straight game. Barner is expected to add depth as the No. 4 running back in the rotation, and he could chip in as a kick or punt returner as well.\",\"title\":\"Suiting up Monday\"}],\"nameFirst\":\"Kenjon\",\"nameLast\":\"Barner\",\"proTeam\":{\"abbreviation\":\"TB\",\"location\":\"Tampa Bay\",\"name\":\"Buccaneers\"},\"positionEligibility\":[\"RB\"]},\"requestedGames\":[{\"game\":{\"id\":6311,\"away\":{\"abbreviation\":\"LAR\",\"location\":\"Los Angeles\",\"name\":\"Rams\"},\"home\":{\"abbreviation\":\"TB\",\"location\":\"Tampa Bay\",\"name\":\"Buccaneers\"},\"startTimeEpochMilli\":\"1606180500000\",\"status\":\"IN_PROGRESS\",\"segment\":1,\"segmentSecondsRemaining\":790,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"stateFootball\":{\"down\":2,\"distance\":8,\"fieldLine\":40,\"fieldLineAbsolute\":40,\"description\":\"2nd & 8 at TB 40\"}},\"stats\":[{\"category\":{\"id\":22,\"abbreviation\":\"Yd\",\"nameSingular\":\"Rushing Yard\",\"namePlural\":\"Rushing Yards\"}},{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"statsProjected\":[{\"category\":{\"id\":22,\"abbreviation\":\"Yd\",\"nameSingular\":\"Rushing Yard\",\"namePlural\":\"Rushing Yards\"},\"value\":{\"value\":1.0,\"formatted\":\"1\"}},{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"formatted\":\"0/0\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving 
Yards\"},\"value\":{\"value\":0.2,\"formatted\":\"0.2\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"},\"value\":{\"formatted\":\"0\"}}],\"participant\":\"HOME\",\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"hasPossession\":true}],\"viewingProjectedPoints\":{\"formatted\":\"0\"},\"viewingActualStats\":[{\"category\":{\"id\":22,\"abbreviation\":\"Yd\",\"nameSingular\":\"Rushing Yard\",\"namePlural\":\"Rushing Yards\"}},{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"viewingProjectedStats\":[{\"category\":{\"id\":22,\"abbreviation\":\"Yd\",\"nameSingular\":\"Rushing Yard\",\"namePlural\":\"Rushing Yards\"},\"value\":{\"value\":1.0,\"formatted\":\"1\"}},{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"formatted\":\"0/0\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"},\"value\":{\"value\":0.2,\"formatted\":\"0.2\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams 
TDs\"},\"value\":{\"formatted\":\"0\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"RUSHER\",\"rankFantasy\":{\"ordinal\":887,\"positions\":[{\"position\":{\"label\":\"RB\",\"group\":\"START\",\"eligibility\":[\"RB\"],\"colors\":[\"DRAFT_BOARD_GREEN\"]},\"ordinal\":152,\"formatted\":\"152\",\"rating\":\"RATING_VERY_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":1.65,\"formatted\":\"1.65\"},\"duration\":1},{\"value\":{\"value\":1.65,\"formatted\":\"1.65\"},\"duration\":3},{\"value\":{\"value\":1.65,\"formatted\":\"1.65\"},\"duration\":5}],\"seasonTotal\":{\"value\":1.65,\"formatted\":\"1.65\"},\"seasonAverage\":{\"value\":1.65,\"formatted\":\"1.65\"}},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1537956000000\",\"transaction\":{\"type\":\"TRANSACTION_CLAIM\",\"player\":{\"proPlayer\":{\"id\":13055,\"nameFull\":\"Xavier Woods\",\"nameShort\":\"X. 
Woods\",\"proTeamAbbreviation\":\"DAL\",\"position\":\"S\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/13055.png\",\"nflByeWeek\":10,\"nameFirst\":\"Xavier\",\"nameLast\":\"Woods\",\"proTeam\":{\"abbreviation\":\"DAL\",\"location\":\"Dallas\",\"name\":\"Cowboys\"},\"positionEligibility\":[\"S\"]},\"requestedGames\":[{\"game\":{\"id\":6309,\"away\":{\"abbreviation\":\"DAL\",\"location\":\"Dallas\",\"name\":\"Cowboys\"},\"home\":{\"abbreviation\":\"MIN\",\"location\":\"Minnesota\",\"name\":\"Vikings\"},\"startTimeEpochMilli\":\"1606080300000\",\"status\":\"FINAL_SCORE\",\"awayScore\":31,\"homeScore\":28,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"LOSE\",\"awayResult\":\"WIN\"},\"stats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"},\"value\":{\"value\":1.0,\"formatted\":\"1\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"formatted\":\"0\"}}],\"statsProjected\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"value\":1.51,\"formatted\":\"1.5\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo 
Tackles\"},\"value\":{\"value\":1.43,\"formatted\":\"1.4\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"value\":0.04,\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"formatted\":\"0\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingProjectedPoints\":{\"formatted\":\"0\"},\"viewingActualStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"viewingProjectedStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"value\":1.51,\"formatted\":\"1.5\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo 
Tackles\"},\"value\":{\"value\":1.43,\"formatted\":\"1.4\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"value\":0.04,\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"formatted\":\"0\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"DEFENDER\",\"rankFantasy\":{\"ordinal\":355,\"positions\":[{\"position\":{\"label\":\"S\",\"group\":\"START\",\"eligibility\":[\"S\"],\"colors\":[\"DRAFT_BOARD_PURPLE\"]},\"ordinal\":61,\"formatted\":\"61\",\"rating\":\"RATING_VERY_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":7.0,\"formatted\":\"7\"},\"duration\":1},{\"value\":{\"value\":3.83,\"formatted\":\"3.83\"},\"duration\":3},{\"value\":{\"value\":4.7,\"formatted\":\"4.7\"},\"duration\":5}],\"seasonTotal\":{\"value\":57.0,\"formatted\":\"57\"},\"seasonAverage\":{\"value\":6.3333335,\"formatted\":\"6.33\"},\"seasonsStandartDeviation\":{\"value\":2.95334,\"formatted\":\"2.95\"},\"seasonConsistency\":\"RATING_GOOD\"},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1537932608000\",\"transaction\":{\"type\":\"TRANSACTION_DROP\",\"player\":{\"proPlayer\":{\"id\":12186,\"nameFull\":\"Kavon Frazier\",\"nameShort\":\"K. 
Frazier\",\"proTeamAbbreviation\":\"MIA\",\"position\":\"S\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/12186.png\",\"nflByeWeek\":7,\"nameFirst\":\"Kavon\",\"nameLast\":\"Frazier\",\"proTeam\":{\"abbreviation\":\"MIA\",\"location\":\"Miami\",\"name\":\"Dolphins\"},\"positionEligibility\":[\"S\"]},\"requestedGames\":[{\"game\":{\"id\":6413,\"away\":{\"abbreviation\":\"MIA\",\"location\":\"Miami\",\"name\":\"Dolphins\"},\"home\":{\"abbreviation\":\"DEN\",\"location\":\"Denver\",\"name\":\"Broncos\"},\"startTimeEpochMilli\":\"1606079100000\",\"status\":\"FINAL_SCORE\",\"awayScore\":13,\"homeScore\":20,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"WIN\",\"awayResult\":\"LOSE\"},\"stats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"statsProjected\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo 
Tackles\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"formatted\":\"0\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingProjectedPoints\":{\"formatted\":\"0\"},\"viewingActualStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"viewingProjectedStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo 
Tackles\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"formatted\":\"0\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"DEFENDER\",\"rankFantasy\":{\"ordinal\":766,\"positions\":[{\"position\":{\"label\":\"S\",\"group\":\"START\",\"eligibility\":[\"S\"],\"colors\":[\"DRAFT_BOARD_PURPLE\"]},\"ordinal\":128,\"formatted\":\"128\",\"rating\":\"RATING_VERY_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":1.5,\"formatted\":\"1.5\"},\"duration\":1,\"underPerforming\":true},{\"value\":{\"value\":2.17,\"formatted\":\"2.17\"},\"duration\":3},{\"value\":{\"value\":2.17,\"formatted\":\"2.17\"},\"duration\":5}],\"seasonTotal\":{\"value\":6.5,\"formatted\":\"6.5\"},\"seasonAverage\":{\"value\":2.1666667,\"formatted\":\"2.17\"},\"seasonsStandartDeviation\":{\"value\":0.942809,\"formatted\":\"0.94\"},\"seasonConsistency\":\"RATING_GOOD\"},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1537719179000\",\"transaction\":{\"player\":{\"proPlayer\":{\"id\":13036,\"nameFull\":\"Damontae Kazee\",\"nameShort\":\"D. 
Kazee\",\"proTeamAbbreviation\":\"ATL\",\"position\":\"S\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/13036.png\",\"nflByeWeek\":10,\"injury\":{\"typeAbbreviaition\":\"IR\",\"description\":\"Achilles\",\"severity\":\"OUT\",\"typeFull\":\"Injured Reserve\"},\"nameFirst\":\"Damontae\",\"nameLast\":\"Kazee\",\"proTeam\":{\"abbreviation\":\"ATL\",\"location\":\"Atlanta\",\"name\":\"Falcons\"},\"positionEligibility\":[\"S\"]},\"requestedGames\":[{\"game\":{\"id\":6304,\"away\":{\"abbreviation\":\"ATL\",\"location\":\"Atlanta\",\"name\":\"Falcons\"},\"home\":{\"abbreviation\":\"NO\",\"location\":\"New Orleans\",\"name\":\"Saints\"},\"startTimeEpochMilli\":\"1606068000000\",\"status\":\"FINAL_SCORE\",\"awayScore\":9,\"homeScore\":24,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"WIN\",\"awayResult\":\"LOSE\"},\"stats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingActualStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo 
Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"DEFENDER\",\"rankFantasy\":{\"ordinal\":536,\"positions\":[{\"position\":{\"label\":\"S\",\"group\":\"START\",\"eligibility\":[\"S\"],\"colors\":[\"DRAFT_BOARD_PURPLE\"]},\"ordinal\":86,\"formatted\":\"86\",\"rating\":\"RATING_VERY_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":5.5,\"formatted\":\"5.5\"},\"duration\":1},{\"value\":{\"value\":7.67,\"formatted\":\"7.67\"},\"duration\":3},{\"value\":{\"value\":7.25,\"formatted\":\"7.25\"},\"duration\":5}],\"seasonTotal\":{\"value\":29.0,\"formatted\":\"29\"},\"seasonAverage\":{\"value\":7.25,\"formatted\":\"7.25\"},\"seasonsStandartDeviation\":{\"value\":2.193741,\"formatted\":\"2.19\"},\"seasonConsistency\":\"RATING_VERY_GOOD\"},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1537719179000\",\"transaction\":{\"type\":\"TRANSACTION_DROP\",\"player\":{\"proPlayer\":{\"id\":14009,\"nameFull\":\"Richie James\",\"nameShort\":\"R. 
James\",\"proTeamAbbreviation\":\"SF\",\"position\":\"WR\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/14009.png\",\"nflByeWeek\":11,\"nameFirst\":\"Richie\",\"nameLast\":\"James\",\"proTeam\":{\"abbreviation\":\"SF\",\"location\":\"San Francisco\",\"name\":\"49ers\"},\"positionEligibility\":[\"WR\"]},\"requestedGames\":[{\"stats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"isBye\":true,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualStats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"owner\":{\"id\":1373970,\"name\":\"Bamenda 
Herd\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373970_0_150x150.jpg\",\"initials\":\"BH\"},\"displayGroup\":\"RECEIVER\",\"rankFantasy\":{\"ordinal\":466,\"positions\":[{\"position\":{\"label\":\"WR\",\"group\":\"START\",\"eligibility\":[\"WR\"],\"colors\":[\"DRAFT_BOARD_BLUE\"]},\"ordinal\":111,\"formatted\":\"111\",\"rating\":\"RATING_VERY_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":4.1,\"formatted\":\"4.1\"},\"duration\":1,\"underPerforming\":true},{\"value\":{\"value\":12.48,\"formatted\":\"12.48\"},\"duration\":3},{\"value\":{\"value\":9.88,\"formatted\":\"9.88\"},\"duration\":5}],\"seasonTotal\":{\"value\":39.5,\"formatted\":\"39.5\"},\"seasonAverage\":{\"value\":9.875,\"formatted\":\"9.88\"},\"seasonsStandartDeviation\":{\"value\":12.457051,\"formatted\":\"12.46\"},\"seasonConsistency\":\"RATING_VERY_BAD\"},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1537678532000\",\"transaction\":{\"player\":{\"proPlayer\":{\"id\":12186,\"nameFull\":\"Kavon Frazier\",\"nameShort\":\"K. 
Frazier\",\"proTeamAbbreviation\":\"MIA\",\"position\":\"S\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/12186.png\",\"nflByeWeek\":7,\"nameFirst\":\"Kavon\",\"nameLast\":\"Frazier\",\"proTeam\":{\"abbreviation\":\"MIA\",\"location\":\"Miami\",\"name\":\"Dolphins\"},\"positionEligibility\":[\"S\"]},\"requestedGames\":[{\"game\":{\"id\":6413,\"away\":{\"abbreviation\":\"MIA\",\"location\":\"Miami\",\"name\":\"Dolphins\"},\"home\":{\"abbreviation\":\"DEN\",\"location\":\"Denver\",\"name\":\"Broncos\"},\"startTimeEpochMilli\":\"1606079100000\",\"status\":\"FINAL_SCORE\",\"awayScore\":13,\"homeScore\":20,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"WIN\",\"awayResult\":\"LOSE\"},\"stats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"statsProjected\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo 
Tackles\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"formatted\":\"0\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingProjectedPoints\":{\"formatted\":\"0\"},\"viewingActualStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"viewingProjectedStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo 
Tackles\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"formatted\":\"0\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"DEFENDER\",\"rankFantasy\":{\"ordinal\":766,\"positions\":[{\"position\":{\"label\":\"S\",\"group\":\"START\",\"eligibility\":[\"S\"],\"colors\":[\"DRAFT_BOARD_PURPLE\"]},\"ordinal\":128,\"formatted\":\"128\",\"rating\":\"RATING_VERY_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":1.5,\"formatted\":\"1.5\"},\"duration\":1,\"underPerforming\":true},{\"value\":{\"value\":2.17,\"formatted\":\"2.17\"},\"duration\":3},{\"value\":{\"value\":2.17,\"formatted\":\"2.17\"},\"duration\":5}],\"seasonTotal\":{\"value\":6.5,\"formatted\":\"6.5\"},\"seasonAverage\":{\"value\":2.1666667,\"formatted\":\"2.17\"},\"seasonsStandartDeviation\":{\"value\":0.942809,\"formatted\":\"0.94\"},\"seasonConsistency\":\"RATING_GOOD\"},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1537678532000\",\"transaction\":{\"type\":\"TRANSACTION_DROP\",\"player\":{\"proPlayer\":{\"id\":8532,\"nameFull\":\"Tavon Wilson\",\"nameShort\":\"T. 
Wilson\",\"proTeamAbbreviation\":\"IND\",\"position\":\"S\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/8532.png\",\"nflByeWeek\":7,\"nameFirst\":\"Tavon\",\"nameLast\":\"Wilson\",\"proTeam\":{\"abbreviation\":\"IND\",\"location\":\"Indianapolis\",\"name\":\"Colts\"},\"positionEligibility\":[\"S\"]},\"requestedGames\":[{\"game\":{\"id\":6305,\"away\":{\"abbreviation\":\"GB\",\"location\":\"Green Bay\",\"name\":\"Packers\"},\"home\":{\"abbreviation\":\"IND\",\"location\":\"Indianapolis\",\"name\":\"Colts\"},\"startTimeEpochMilli\":\"1606080300000\",\"status\":\"FINAL_SCORE\",\"awayScore\":31,\"homeScore\":34,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"WIN\",\"awayResult\":\"LOSE\"},\"stats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"},\"value\":{\"value\":1.0,\"formatted\":\"1\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"formatted\":\"0\"}}],\"statsProjected\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"value\":0.49,\"formatted\":\"0.5\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo 
Tackles\"},\"value\":{\"value\":1.26,\"formatted\":\"1.3\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"value\":0.02,\"formatted\":\"0\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"participant\":\"HOME\",\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingProjectedPoints\":{\"formatted\":\"0\"},\"viewingActualStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"viewingProjectedStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"value\":0.49,\"formatted\":\"0.5\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo 
Tackles\"},\"value\":{\"value\":1.26,\"formatted\":\"1.3\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"value\":0.02,\"formatted\":\"0\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"DEFENDER\",\"rankFantasy\":{\"ordinal\":571,\"positions\":[{\"position\":{\"label\":\"S\",\"group\":\"START\",\"eligibility\":[\"S\"],\"colors\":[\"DRAFT_BOARD_PURPLE\"]},\"ordinal\":91,\"formatted\":\"91\",\"rating\":\"RATING_VERY_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":1.5,\"formatted\":\"1.5\"},\"duration\":1,\"underPerforming\":true},{\"value\":{\"value\":1.5,\"formatted\":\"1.5\"},\"duration\":3,\"underPerforming\":true},{\"value\":{\"value\":4.8,\"formatted\":\"4.8\"},\"duration\":5}],\"seasonTotal\":{\"value\":24.0,\"formatted\":\"24\"},\"seasonAverage\":{\"value\":4.8,\"formatted\":\"4.8\"},\"seasonsStandartDeviation\":{\"value\":6.6,\"formatted\":\"6.6\"},\"seasonConsistency\":\"RATING_VERY_BAD\"},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1537444800000\",\"transaction\":{\"type\":\"TRANSACTION_CLAIM\",\"player\":{\"proPlayer\":{\"id\":12930,\"nameFull\":\"Curtis Samuel\",\"nameShort\":\"C. 
Samuel\",\"proTeamAbbreviation\":\"CAR\",\"position\":\"WR\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/12930.png\",\"nflByeWeek\":13,\"news\":[{\"timeEpochMilli\":\"1606082146000\",\"contents\":\"Samuel secured eight of 10 targets for 70 yards and a touchdown and rushed once for four yards in the Panthers' 20-0 win over the Lions on Sunday.\",\"analysis\":\"The versatile wideout finished with the team lead in receptions while checking in second in both receiving yards and targets to DJ Moore. Samuel hauled in spot starter P.J. Walker's first career NFL touchdown on a nicely thrown touch pass in the right corner of the end zone early in the third quarter. The productive performance was a welcome resurgence for Samuel, who'd slumped to 3-8-0 line in Week 10 versus the Buccaneers after producing a season-best nine receptions and 125 yards the contest prior against the Chiefs. He'll look to make it back-to-back fantasy-friendly outings in a Week 12 road matchup against the Vikings.\",\"title\":\"Tallies scoring grab in win\"},{\"timeEpochMilli\":\"1606086710000\",\"contents\":\"Carolina Panthers wide receiver Curtis Samuel ended Week 11 with eight receptions for 70 yards and a score on 10 targets along with one carry for four yards. Samuel once again had a good game after managing only eight receiving yards last week. Nonetheless, the WR is very inconsistent and he is the WR3 on the team, so big games cannot be expected each week. 
Still, his modest role in this offense along with the potential for a big game any given week puts Samuel in the WR3/flex tier going ...\",\"url\":\"https://www.rotoballer.com/player-news/curtis-samuel-has-70-yards/806850\",\"title\":\"Curtis Samuel Has 70 Yards\"}],\"nameFirst\":\"Curtis\",\"nameLast\":\"Samuel\",\"proTeam\":{\"abbreviation\":\"CAR\",\"location\":\"Carolina\",\"name\":\"Panthers\"},\"positionEligibility\":[\"WR\"]},\"requestedGames\":[{\"game\":{\"id\":6302,\"away\":{\"abbreviation\":\"DET\",\"location\":\"Detroit\",\"name\":\"Lions\"},\"home\":{\"abbreviation\":\"CAR\",\"location\":\"Carolina\",\"name\":\"Panthers\"},\"startTimeEpochMilli\":\"1606068000000\",\"status\":\"FINAL_SCORE\",\"homeScore\":20,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"WIN\",\"awayResult\":\"LOSE\"},\"stats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"value\":80.0,\"formatted\":\"8/10\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"},\"value\":{\"value\":70.0,\"formatted\":\"70\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"},\"value\":{\"value\":1.0,\"formatted\":\"1\"}}],\"statsProjected\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"value\":100.0,\"formatted\":\"4/4\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving 
Yards\"},\"value\":{\"value\":45.22,\"formatted\":\"45.2\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true},\"value\":{\"value\":0.04,\"formatted\":\"0\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"},\"value\":{\"value\":0.34,\"formatted\":\"0.3\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"participant\":\"HOME\",\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingProjectedPoints\":{\"formatted\":\"0\"},\"viewingActualStats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"viewingProjectedStats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"value\":100.0,\"formatted\":\"4/4\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"},\"value\":{\"value\":45.22,\"formatted\":\"45.2\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true},\"value\":{\"value\":0.04,\"formatted\":\"0\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams 
TDs\"},\"value\":{\"value\":0.34,\"formatted\":\"0.3\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"owner\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"},\"displayGroup\":\"RECEIVER\",\"rankFantasy\":{\"ordinal\":120,\"positions\":[{\"position\":{\"label\":\"WR\",\"group\":\"START\",\"eligibility\":[\"WR\"],\"colors\":[\"DRAFT_BOARD_BLUE\"]},\"ordinal\":32,\"formatted\":\"32\",\"rating\":\"RATING_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":3.2,\"formatted\":\"3.2\"},\"duration\":1,\"underPerforming\":true},{\"value\":{\"value\":16.3,\"formatted\":\"16.3\"},\"duration\":3},{\"value\":{\"value\":15.22,\"formatted\":\"15.22\"},\"duration\":5}],\"isKeeper\":true,\"seasonTotal\":{\"value\":105.79999,\"formatted\":\"105.8\"},\"seasonAverage\":{\"value\":11.755554,\"formatted\":\"11.76\"},\"seasonsStandartDeviation\":{\"value\":6.8700843,\"formatted\":\"6.87\"}},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"},\"bidAmount\":6}},{\"timeEpochMilli\":\"1537444800000\",\"transaction\":{\"type\":\"TRANSACTION_DROP\",\"player\":{\"proPlayer\":{\"id\":11344,\"nameFull\":\"Marcus Murphy\",\"nameShort\":\"M. 
Murphy\",\"proTeamAbbreviation\":\"FA\",\"position\":\"RB\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/11344.png\",\"nameFirst\":\"Marcus\",\"nameLast\":\"Murphy\",\"proTeam\":{\"abbreviation\":\"FA\",\"location\":\"Free\",\"name\":\"Agent\",\"isFreeAgent\":true},\"positionEligibility\":[\"RB\"]},\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingActualStats\":[{\"category\":{\"id\":22,\"abbreviation\":\"Yd\",\"nameSingular\":\"Rushing Yard\",\"namePlural\":\"Rushing Yards\"}},{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"RUSHER\",\"lastX\":[{\"duration\":1},{\"duration\":3},{\"duration\":5}]},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1536746400000\",\"transaction\":{\"type\":\"TRANSACTION_CLAIM\",\"player\":{\"proPlayer\":{\"id\":11344,\"nameFull\":\"Marcus Murphy\",\"nameShort\":\"M. 
Murphy\",\"proTeamAbbreviation\":\"FA\",\"position\":\"RB\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/11344.png\",\"nameFirst\":\"Marcus\",\"nameLast\":\"Murphy\",\"proTeam\":{\"abbreviation\":\"FA\",\"location\":\"Free\",\"name\":\"Agent\",\"isFreeAgent\":true},\"positionEligibility\":[\"RB\"]},\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingActualStats\":[{\"category\":{\"id\":22,\"abbreviation\":\"Yd\",\"nameSingular\":\"Rushing Yard\",\"namePlural\":\"Rushing Yards\"}},{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"RUSHER\",\"lastX\":[{\"duration\":1},{\"duration\":3},{\"duration\":5}]},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"},\"bidAmount\":4}},{\"timeEpochMilli\":\"1536746400000\",\"transaction\":{\"type\":\"TRANSACTION_DROP\",\"player\":{\"proPlayer\":{\"id\":14185,\"nameFull\":\"Mike Boone\",\"nameShort\":\"M. 
Boone\",\"proTeamAbbreviation\":\"MIN\",\"position\":\"RB\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/14185.png\",\"nflByeWeek\":7,\"nameFirst\":\"Mike\",\"nameLast\":\"Boone\",\"proTeam\":{\"abbreviation\":\"MIN\",\"location\":\"Minnesota\",\"name\":\"Vikings\"},\"positionEligibility\":[\"RB\"]},\"requestedGames\":[{\"game\":{\"id\":6309,\"away\":{\"abbreviation\":\"DAL\",\"location\":\"Dallas\",\"name\":\"Cowboys\"},\"home\":{\"abbreviation\":\"MIN\",\"location\":\"Minnesota\",\"name\":\"Vikings\"},\"startTimeEpochMilli\":\"1606080300000\",\"status\":\"FINAL_SCORE\",\"awayScore\":31,\"homeScore\":28,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"LOSE\",\"awayResult\":\"WIN\"},\"stats\":[{\"category\":{\"id\":22,\"abbreviation\":\"Yd\",\"nameSingular\":\"Rushing Yard\",\"namePlural\":\"Rushing Yards\"}},{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"statsProjected\":[{\"category\":{\"id\":22,\"abbreviation\":\"Yd\",\"nameSingular\":\"Rushing Yard\",\"namePlural\":\"Rushing Yards\"},\"value\":{\"value\":8.05,\"formatted\":\"8.1\"}},{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"formatted\":\"0/0\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"},\"value\":{\"value\":0.83,\"formatted\":\"0.8\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams 
TDs\"},\"value\":{\"value\":0.06,\"formatted\":\"0.1\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"participant\":\"HOME\",\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingProjectedPoints\":{\"formatted\":\"0\"},\"viewingActualStats\":[{\"category\":{\"id\":22,\"abbreviation\":\"Yd\",\"nameSingular\":\"Rushing Yard\",\"namePlural\":\"Rushing Yards\"}},{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"viewingProjectedStats\":[{\"category\":{\"id\":22,\"abbreviation\":\"Yd\",\"nameSingular\":\"Rushing Yard\",\"namePlural\":\"Rushing Yards\"},\"value\":{\"value\":8.05,\"formatted\":\"8.1\"}},{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"formatted\":\"0/0\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"},\"value\":{\"value\":0.83,\"formatted\":\"0.8\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams 
TDs\"},\"value\":{\"value\":0.06,\"formatted\":\"0.1\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"RUSHER\",\"rankFantasy\":{\"ordinal\":669,\"positions\":[{\"position\":{\"label\":\"RB\",\"group\":\"START\",\"eligibility\":[\"RB\"],\"colors\":[\"DRAFT_BOARD_GREEN\"]},\"ordinal\":106,\"formatted\":\"106\",\"rating\":\"RATING_VERY_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":0.5,\"formatted\":\"0.5\"},\"duration\":1,\"underPerforming\":true},{\"value\":{\"value\":0.78,\"formatted\":\"0.78\"},\"duration\":3,\"underPerforming\":true},{\"value\":{\"value\":2.05,\"formatted\":\"2.05\"},\"duration\":5}],\"seasonTotal\":{\"value\":13.25,\"formatted\":\"13.25\"},\"seasonAverage\":{\"value\":1.8928572,\"formatted\":\"1.89\"},\"seasonsStandartDeviation\":{\"value\":1.3733246,\"formatted\":\"1.37\"},\"seasonConsistency\":\"RATING_BAD\"},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1536198132000\",\"transaction\":{\"player\":{\"proPlayer\":{\"id\":11317,\"nameFull\":\"Neal Sterling\",\"nameShort\":\"N. 
Sterling\",\"proTeamAbbreviation\":\"FA\",\"position\":\"TE\",\"nameFirst\":\"Neal\",\"nameLast\":\"Sterling\",\"proTeam\":{\"abbreviation\":\"FA\",\"location\":\"Free\",\"name\":\"Agent\",\"isFreeAgent\":true},\"positionEligibility\":[\"TE\"]},\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingActualStats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"RECEIVER\",\"lastX\":[{\"duration\":1},{\"duration\":3},{\"duration\":5}]},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1536198132000\",\"transaction\":{\"type\":\"TRANSACTION_DROP\",\"player\":{\"proPlayer\":{\"id\":3092,\"nameFull\":\"Derrick Johnson\",\"nameShort\":\"D. 
Johnson\",\"proTeamAbbreviation\":\"FA\",\"position\":\"LB\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/3092.png\",\"nameFirst\":\"Derrick\",\"nameLast\":\"Johnson\",\"proTeam\":{\"abbreviation\":\"FA\",\"location\":\"Free\",\"name\":\"Agent\",\"isFreeAgent\":true},\"positionEligibility\":[\"LB\",\"LB\"]},\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingActualStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"DEFENDER\",\"lastX\":[{\"duration\":1},{\"duration\":3},{\"duration\":5}]},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1536141600000\",\"transaction\":{\"type\":\"TRANSACTION_CLAIM\",\"player\":{\"proPlayer\":{\"id\":14185,\"nameFull\":\"Mike Boone\",\"nameShort\":\"M. 
Boone\",\"proTeamAbbreviation\":\"MIN\",\"position\":\"RB\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/14185.png\",\"nflByeWeek\":7,\"nameFirst\":\"Mike\",\"nameLast\":\"Boone\",\"proTeam\":{\"abbreviation\":\"MIN\",\"location\":\"Minnesota\",\"name\":\"Vikings\"},\"positionEligibility\":[\"RB\"]},\"requestedGames\":[{\"game\":{\"id\":6309,\"away\":{\"abbreviation\":\"DAL\",\"location\":\"Dallas\",\"name\":\"Cowboys\"},\"home\":{\"abbreviation\":\"MIN\",\"location\":\"Minnesota\",\"name\":\"Vikings\"},\"startTimeEpochMilli\":\"1606080300000\",\"status\":\"FINAL_SCORE\",\"awayScore\":31,\"homeScore\":28,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"LOSE\",\"awayResult\":\"WIN\"},\"stats\":[{\"category\":{\"id\":22,\"abbreviation\":\"Yd\",\"nameSingular\":\"Rushing Yard\",\"namePlural\":\"Rushing Yards\"}},{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"statsProjected\":[{\"category\":{\"id\":22,\"abbreviation\":\"Yd\",\"nameSingular\":\"Rushing Yard\",\"namePlural\":\"Rushing Yards\"},\"value\":{\"value\":8.05,\"formatted\":\"8.1\"}},{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"formatted\":\"0/0\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"},\"value\":{\"value\":0.83,\"formatted\":\"0.8\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams 
TDs\"},\"value\":{\"value\":0.06,\"formatted\":\"0.1\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"participant\":\"HOME\",\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingProjectedPoints\":{\"formatted\":\"0\"},\"viewingActualStats\":[{\"category\":{\"id\":22,\"abbreviation\":\"Yd\",\"nameSingular\":\"Rushing Yard\",\"namePlural\":\"Rushing Yards\"}},{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"viewingProjectedStats\":[{\"category\":{\"id\":22,\"abbreviation\":\"Yd\",\"nameSingular\":\"Rushing Yard\",\"namePlural\":\"Rushing Yards\"},\"value\":{\"value\":8.05,\"formatted\":\"8.1\"}},{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"formatted\":\"0/0\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"},\"value\":{\"value\":0.83,\"formatted\":\"0.8\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams 
TDs\"},\"value\":{\"value\":0.06,\"formatted\":\"0.1\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"RUSHER\",\"rankFantasy\":{\"ordinal\":669,\"positions\":[{\"position\":{\"label\":\"RB\",\"group\":\"START\",\"eligibility\":[\"RB\"],\"colors\":[\"DRAFT_BOARD_GREEN\"]},\"ordinal\":106,\"formatted\":\"106\",\"rating\":\"RATING_VERY_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":0.5,\"formatted\":\"0.5\"},\"duration\":1,\"underPerforming\":true},{\"value\":{\"value\":0.78,\"formatted\":\"0.78\"},\"duration\":3,\"underPerforming\":true},{\"value\":{\"value\":2.05,\"formatted\":\"2.05\"},\"duration\":5}],\"seasonTotal\":{\"value\":13.25,\"formatted\":\"13.25\"},\"seasonAverage\":{\"value\":1.8928572,\"formatted\":\"1.89\"},\"seasonsStandartDeviation\":{\"value\":1.3733246,\"formatted\":\"1.37\"},\"seasonConsistency\":\"RATING_BAD\"},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1536141600000\",\"transaction\":{\"type\":\"TRANSACTION_DROP\",\"player\":{\"proPlayer\":{\"id\":13899,\"nameFull\":\"Daurice Fountain\",\"nameShort\":\"D. 
Fountain\",\"proTeamAbbreviation\":\"IND\",\"position\":\"WR\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/13899.png\",\"nflByeWeek\":7,\"nameFirst\":\"Daurice\",\"nameLast\":\"Fountain\",\"proTeam\":{\"abbreviation\":\"IND\",\"location\":\"Indianapolis\",\"name\":\"Colts\"},\"positionEligibility\":[\"WR\"]},\"requestedGames\":[{\"game\":{\"id\":6305,\"away\":{\"abbreviation\":\"GB\",\"location\":\"Green Bay\",\"name\":\"Packers\"},\"home\":{\"abbreviation\":\"IND\",\"location\":\"Indianapolis\",\"name\":\"Colts\"},\"startTimeEpochMilli\":\"1606080300000\",\"status\":\"FINAL_SCORE\",\"awayScore\":31,\"homeScore\":34,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"WIN\",\"awayResult\":\"LOSE\"},\"stats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"statsProjected\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"formatted\":\"0/0\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"},\"value\":{\"value\":3.33,\"formatted\":\"3.3\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams 
TDs\"},\"value\":{\"formatted\":\"0\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"participant\":\"HOME\",\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingProjectedPoints\":{\"formatted\":\"0\"},\"viewingActualStats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"viewingProjectedStats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"formatted\":\"0/0\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"},\"value\":{\"value\":3.33,\"formatted\":\"3.3\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"},\"value\":{\"formatted\":\"0\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"owner\":{\"id\":1374315,\"name\":\"Shanghai 
Communists\",\"initials\":\"SC\"},\"displayGroup\":\"RECEIVER\",\"rankFantasy\":{\"ordinal\":814,\"positions\":[{\"position\":{\"label\":\"WR\",\"group\":\"START\",\"eligibility\":[\"WR\"],\"colors\":[\"DRAFT_BOARD_BLUE\"]},\"ordinal\":186,\"formatted\":\"186\",\"rating\":\"RATING_VERY_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"formatted\":\"0\"},\"duration\":1,\"underPerforming\":true},{\"value\":{\"value\":2.15,\"formatted\":\"2.15\"},\"duration\":3},{\"value\":{\"value\":2.15,\"formatted\":\"2.15\"},\"duration\":5}],\"isKeeper\":true,\"seasonTotal\":{\"value\":4.3,\"formatted\":\"4.3\"},\"seasonAverage\":{\"value\":2.15,\"formatted\":\"2.15\"},\"seasonsStandartDeviation\":{\"value\":2.15,\"formatted\":\"2.15\"},\"seasonConsistency\":\"RATING_VERY_BAD\"},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1535536800000\",\"transaction\":{\"type\":\"TRANSACTION_CLAIM\",\"player\":{\"proPlayer\":{\"id\":13894,\"nameFull\":\"Jaleel Scott\",\"nameShort\":\"J. 
Scott\",\"proTeamAbbreviation\":\"NYJ\",\"position\":\"WR\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/13894.png\",\"nflByeWeek\":10,\"nameFirst\":\"Jaleel\",\"nameLast\":\"Scott\",\"proTeam\":{\"abbreviation\":\"NYJ\",\"location\":\"New York\",\"name\":\"Jets\"},\"positionEligibility\":[\"WR\"]},\"requestedGames\":[{\"game\":{\"id\":6414,\"away\":{\"abbreviation\":\"NYJ\",\"location\":\"New York\",\"name\":\"Jets\"},\"home\":{\"abbreviation\":\"LAC\",\"location\":\"Los Angeles\",\"name\":\"Chargers\"},\"startTimeEpochMilli\":\"1606079100000\",\"status\":\"FINAL_SCORE\",\"awayScore\":28,\"homeScore\":34,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"WIN\",\"awayResult\":\"LOSE\"},\"stats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingActualStats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + 
Special Teams TDs\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"RECEIVER\",\"lastX\":[{\"duration\":1},{\"duration\":3},{\"duration\":5}]},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1534950239000\",\"transaction\":{\"type\":\"TRANSACTION_TRADE\",\"draftPick\":{\"season\":2019,\"round\":4},\"team\":{\"id\":1371776,\"name\":\"Winter Hill Black Shamrocks\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1371776_0_150x150.jpg\",\"initials\":\"WH\"},\"tradeId\":4171803}},{\"timeEpochMilli\":\"1534950239000\",\"transaction\":{\"type\":\"TRANSACTION_TRADE\",\"player\":{\"proPlayer\":{\"id\":13900,\"nameFull\":\"Nyheim Hines\",\"nameShort\":\"N. Hines\",\"proTeamAbbreviation\":\"IND\",\"position\":\"RB\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/13900.png\",\"nflByeWeek\":7,\"news\":[{\"timeEpochMilli\":\"1606104439000\",\"contents\":\"Hines recorded six rushes for two yards in Week 11 against the Packers. He added three receptions for 31 yards.\",\"analysis\":\"Hines recorded 115 total yards in Week 10 and worked as the lead back early on against the Packers as a result. However, he could not carry over his efficiency and was regularly stuffed behind the line on his rushing attempts. Hines did remain involved as a pass-catcher and had a six-yard receiving score nullified by a holding penalty. 
Though his strength as a receiver out of the backfield will keep him involved in the offense, it's unlikely that Hines will see a lead-back workload regularly as the season comes to a close.\",\"title\":\"Can't follow up big performance\"},{\"timeEpochMilli\":\"1606095151000\",\"contents\":\"Indianapolis Colts running back Nyheim Hines reverted back to his usual self in a Week 11 win against the Packers. He had just six carries for two yards and caught three passes for 31 yards on four targets. Hines had an encouraging performance last week against the Titans, but the Colts offense went back to rookie Jonathan Taylor for the bulk of the workload. Hines was close to having a decent fantasy day with a touchdown catch in the second half, but it was called back due to a holding ...\",\"url\":\"https://www.rotoballer.com/player-news/nyheim-hines-catches-three-passes/806933\",\"title\":\"Nyheim Hines Catches Three Passes\"}],\"nameFirst\":\"Nyheim\",\"nameLast\":\"Hines\",\"hasLockedPremiumContent\":true,\"proTeam\":{\"abbreviation\":\"IND\",\"location\":\"Indianapolis\",\"name\":\"Colts\"},\"positionEligibility\":[\"RB\"]},\"requestedGames\":[{\"game\":{\"id\":6305,\"away\":{\"abbreviation\":\"GB\",\"location\":\"Green Bay\",\"name\":\"Packers\"},\"home\":{\"abbreviation\":\"IND\",\"location\":\"Indianapolis\",\"name\":\"Colts\"},\"startTimeEpochMilli\":\"1606080300000\",\"status\":\"FINAL_SCORE\",\"awayScore\":31,\"homeScore\":34,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"WIN\",\"awayResult\":\"LOSE\"},\"stats\":[{\"category\":{\"id\":22,\"abbreviation\":\"Yd\",\"nameSingular\":\"Rushing Yard\",\"namePlural\":\"Rushing Yards\"},\"value\":{\"value\":2.0,\"formatted\":\"2\"}},{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % 
Caught\"},\"value\":{\"value\":75.0,\"formatted\":\"3/4\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"},\"value\":{\"value\":31.0,\"formatted\":\"31\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"},\"value\":{\"formatted\":\"0\"}}],\"statsProjected\":[{\"category\":{\"id\":22,\"abbreviation\":\"Yd\",\"nameSingular\":\"Rushing Yard\",\"namePlural\":\"Rushing Yards\"},\"value\":{\"value\":23.06,\"formatted\":\"23.1\"}},{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"value\":100.0,\"formatted\":\"3/3\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"},\"value\":{\"value\":27.87,\"formatted\":\"27.9\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"},\"value\":{\"value\":0.3,\"formatted\":\"0.3\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"participant\":\"HOME\",\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingProjectedPoints\":{\"formatted\":\"0\"},\"viewingActualStats\":[{\"category\":{\"id\":22,\"abbreviation\":\"Yd\",\"nameSingular\":\"Rushing Yard\",\"namePlural\":\"Rushing Yards\"}},{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams 
TDs\"}}],\"viewingProjectedStats\":[{\"category\":{\"id\":22,\"abbreviation\":\"Yd\",\"nameSingular\":\"Rushing Yard\",\"namePlural\":\"Rushing Yards\"},\"value\":{\"value\":23.06,\"formatted\":\"23.1\"}},{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"value\":100.0,\"formatted\":\"3/3\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"},\"value\":{\"value\":27.87,\"formatted\":\"27.9\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"},\"value\":{\"value\":0.3,\"formatted\":\"0.3\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"owner\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"},\"displayGroup\":\"RUSHER\",\"rankFantasy\":{\"ordinal\":85,\"positions\":[{\"position\":{\"label\":\"RB\",\"group\":\"START\",\"eligibility\":[\"RB\"],\"colors\":[\"DRAFT_BOARD_GREEN\"]},\"ordinal\":17,\"formatted\":\"17\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":30.5,\"formatted\":\"30.5\"},\"duration\":1,\"overPerforming\":true},{\"value\":{\"value\":20.17,\"formatted\":\"20.17\"},\"duration\":3},{\"value\":{\"value\":14.38,\"formatted\":\"14.38\"},\"duration\":5}],\"isKeeper\":true,\"seasonTotal\":{\"value\":119.799995,\"formatted\":\"119.8\"},\"seasonAverage\":{\"value\":13.3111105,\"formatted\":\"13.31\"},\"seasonsStandartDeviation\":{\"value\":9.93109,\"formatted\":\"9.93\"},\"seasonConsistency\":\"RATING_BAD\"},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire 
Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"},\"tradeId\":4171803}},{\"timeEpochMilli\":\"1534950239000\",\"transaction\":{\"type\":\"TRANSACTION_DROP\",\"player\":{\"proPlayer\":{\"id\":8603,\"nameFull\":\"Trumaine Johnson\",\"nameShort\":\"T. Johnson\",\"proTeamAbbreviation\":\"FA\",\"position\":\"CB\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/8603.png\",\"nameFirst\":\"Trumaine\",\"nameLast\":\"Johnson\",\"proTeam\":{\"abbreviation\":\"FA\",\"location\":\"Free\",\"name\":\"Agent\",\"isFreeAgent\":true},\"positionEligibility\":[\"CB\"]},\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingActualStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"DEFENDER\",\"lastX\":[{\"duration\":1},{\"duration\":3},{\"duration\":5}]},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1534932000000\",\"transaction\":{\"type\":\"TRANSACTION_CLAIM\",\"player\":{\"proPlayer\":{\"id\":14009,\"nameFull\":\"Richie James\",\"nameShort\":\"R. 
James\",\"proTeamAbbreviation\":\"SF\",\"position\":\"WR\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/14009.png\",\"nflByeWeek\":11,\"nameFirst\":\"Richie\",\"nameLast\":\"James\",\"proTeam\":{\"abbreviation\":\"SF\",\"location\":\"San Francisco\",\"name\":\"49ers\"},\"positionEligibility\":[\"WR\"]},\"requestedGames\":[{\"stats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"isBye\":true,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualStats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"owner\":{\"id\":1373970,\"name\":\"Bamenda 
Herd\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373970_0_150x150.jpg\",\"initials\":\"BH\"},\"displayGroup\":\"RECEIVER\",\"rankFantasy\":{\"ordinal\":466,\"positions\":[{\"position\":{\"label\":\"WR\",\"group\":\"START\",\"eligibility\":[\"WR\"],\"colors\":[\"DRAFT_BOARD_BLUE\"]},\"ordinal\":111,\"formatted\":\"111\",\"rating\":\"RATING_VERY_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":4.1,\"formatted\":\"4.1\"},\"duration\":1,\"underPerforming\":true},{\"value\":{\"value\":12.48,\"formatted\":\"12.48\"},\"duration\":3},{\"value\":{\"value\":9.88,\"formatted\":\"9.88\"},\"duration\":5}],\"seasonTotal\":{\"value\":39.5,\"formatted\":\"39.5\"},\"seasonAverage\":{\"value\":9.875,\"formatted\":\"9.88\"},\"seasonsStandartDeviation\":{\"value\":12.457051,\"formatted\":\"12.46\"},\"seasonConsistency\":\"RATING_VERY_BAD\"},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1534932000000\",\"transaction\":{\"type\":\"TRANSACTION_DROP\",\"player\":{\"proPlayer\":{\"id\":13894,\"nameFull\":\"Jaleel Scott\",\"nameShort\":\"J. 
Scott\",\"proTeamAbbreviation\":\"NYJ\",\"position\":\"WR\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/13894.png\",\"nflByeWeek\":10,\"nameFirst\":\"Jaleel\",\"nameLast\":\"Scott\",\"proTeam\":{\"abbreviation\":\"NYJ\",\"location\":\"New York\",\"name\":\"Jets\"},\"positionEligibility\":[\"WR\"]},\"requestedGames\":[{\"game\":{\"id\":6414,\"away\":{\"abbreviation\":\"NYJ\",\"location\":\"New York\",\"name\":\"Jets\"},\"home\":{\"abbreviation\":\"LAC\",\"location\":\"Los Angeles\",\"name\":\"Chargers\"},\"startTimeEpochMilli\":\"1606079100000\",\"status\":\"FINAL_SCORE\",\"awayScore\":28,\"homeScore\":34,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"WIN\",\"awayResult\":\"LOSE\"},\"stats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingActualStats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + 
Special Teams TDs\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"RECEIVER\",\"lastX\":[{\"duration\":1},{\"duration\":3},{\"duration\":5}]},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}}],\"resultOffsetNext\":360}"),
date = structure(1606180798, class = c("POSIXct", "POSIXt"), tzone = "GMT"), times = c(
redirect = 0, namelookup = 3.2e-05,
connect = 3.5e-05, pretransfer = 0.000123, starttransfer = 0.045123,
total = 0.045464
)
), class = "response")
|
733be40bee369d6aca845a39a57d175797ff49cb | 9ee9a94e65e11b67d8d5dc22e9cfcffa82ce9a59 | /分开导入.R | 67afdb4c7b9503769d01e5924ac9df9bce5af52a | [] | no_license | Jasoncbh/scRNA | b501c9ea605ef3ecbddfeb6b11976a4c1469f9d8 | 31c54616dcf3bf327cfa3246eb301b8a34a8a752 | refs/heads/main | 2023-08-15T15:41:25.376608 | 2021-10-20T02:56:28 | 2021-10-20T02:56:28 | 419,165,182 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 30,418 | r | 分开导入.R | rm(list = ls())
library(monocle3)
library(tidyverse)
library(imputeTS)
library(Seurat)
library(RColorBrewer)
library(Matrix)
library(cowplot)
library(future)
# NOTE(review): require() returns FALSE instead of erroring when the package
# is missing; library() is preferred for hard dependencies.
require(scales)
library(ggthemes)
library(sctransform)
library(patchwork)
library(harmony)
# Discrete color palette: 9 "Set1" colors followed by 8 "Dark2" colors (17 total).
values <- c(brewer.pal(9,"Set1"),
brewer.pal(8,"Dark2"))
# First 14 palette colors; used as `cols` in most DimPlot calls below.
abc <- values[1:14]
# Raw Drop-seq DGE files (gzipped tab-separated tables), one per sample.
tem_raw <- list.files(path = "../sc_RNA/Single-cell RNA-seq Rawdata/",pattern="*_gene_exon_tagged.dge.txt.gz")
tem_raw
temp <- tem_raw
temp
# Sample names, filled in by the import loop below.
name <- character()
# Minimum total UMIs for a cell barcode to be kept at import time.
filter_umi <- 450
######### Batch data import #########
# For each "*_out_gene_exon_tagged.dge.txt.gz" file: read the DGE table,
# prefix every cell barcode with the sample name, keep only cells with at
# least `filter_umi` total UMIs, and assign the resulting matrix to a
# variable named after the sample (e.g. `1st_AML1_ETO`).
name <- character(length(temp))  # pre-allocate instead of growing inside the loop
for (i in seq_along(temp)) {     # seq_along() is safe even if temp is empty
  name[i] <- unlist(strsplit(temp[i], "_out_gene_exon_tagged.dge.txt.gz"))[1]
  message(paste(name[i], "is loading"))
  tmpvalue <- read.table(paste0("../sc_RNA/Single-cell RNA-seq Rawdata/", temp[i]),
                         sep = "\t", quote = "", row.names = 1, header = TRUE)
  message(paste(name[i], "is loaded, now is adding name"))
  colnames(tmpvalue) <- paste0(name[i], "-", colnames(tmpvalue))
  message(paste0(name[i], "'s name added, now filtering ", filter_umi))
  # Keep only cell barcodes whose total UMI count reaches the threshold.
  tmpvalue <- tmpvalue[, colSums(tmpvalue) >= filter_umi]
  message(paste(name[i], "cells above", filter_umi, "filtered"))
  assign(name[i], tmpvalue)
  rm(tmpvalue)
}
message("data loading done, and strat merge counts file")
########################################### Per-sample cell metadata
# Build the per-cell metadata for one sample's DGE matrix.
# Column names are expected to look like "<library>-<barcode>" (set during
# import), so splitting on "-" yields the library id (`tech`) and the cell
# barcode (`UMI`).
# NOTE: in the original script the column order of `batch`/`orig.ident`
# varied between samples; it is now uniform (celltype, type, orig.ident,
# batch), which does not affect how Seurat consumes `meta.data`.
build_sample_metadata <- function(counts, type, orig_ident, batch) {
  meta <- data.frame(
    matrix(unlist(strsplit(colnames(counts), "-")), ncol = 2, byrow = TRUE),
    row.names = colnames(counts)
  )
  colnames(meta) <- c("tech", "UMI")
  meta$celltype <- "undefined"   # cell type, annotated later in the analysis
  meta$type <- type              # condition label: "AE" or "MigR1"
  meta$orig.ident <- orig_ident  # sample identifier (condition + batch number)
  meta$batch <- batch            # experimental batch ("1st".."4th")
  meta
}
metadata_AE_1  <- build_sample_metadata(`1st_AML1_ETO`, "AE",    "AE_1",    "1st")
message("metadata_AE_1 done")
metadata_Mig_1 <- build_sample_metadata(`1st_MigR1`,    "MigR1", "MigR1_1", "1st")
message("Metadata_Mig_1 done")
metadata_AE_2  <- build_sample_metadata(`2nd_AML1_ETO`, "AE",    "AE_2",    "2nd")
message("Metadata_AE_2 done")
metadata_Mig_2 <- build_sample_metadata(`2nd_MigR1`,    "MigR1", "MigR1_2", "2nd")
message("Metadata_Mig_2 done")
metadata_AE_3  <- build_sample_metadata(`3rd_AML1_ETO`, "AE",    "AE_3",    "3rd")
message("Metadata_AE_3 done")
metadata_Mig_3 <- build_sample_metadata(`3rd_MigR1`,    "MigR1", "MigR1_3", "3rd")
message("Metadata_Mig_3 done")
metadata_AE_4  <- build_sample_metadata(`4th_AML1_ETO`, "AE",    "AE_4",    "4th")
message("Metadata_AE_4 done")
metadata_Mig_4 <- build_sample_metadata(`4th_MigR1`,    "MigR1", "MigR1_4", "4th")
message("Metadata_Mig_4 done")
#######################################
#######################################
########## Seurat object construction ###############
filter_gene <- 350  # minimum detected genes per cell (min.features)
filter_cell <- 5    # a gene must be detected in at least this many cells (min.cells)
# Wrap CreateSeuratObject so the eight samples share identical settings.
make_seurat <- function(counts, meta, project) {
  CreateSeuratObject(counts = counts,
                     project = project,
                     min.features = filter_gene,
                     min.cells = filter_cell,
                     meta.data = meta)
}
pbmc_AE_1  <- make_seurat(`1st_AML1_ETO`, metadata_AE_1,  "AE")
pbmc_Mig_1 <- make_seurat(`1st_MigR1`,    metadata_Mig_1, "Mig")
#############################################################
pbmc_AE_2  <- make_seurat(`2nd_AML1_ETO`, metadata_AE_2,  "AE")
pbmc_Mig_2 <- make_seurat(`2nd_MigR1`,    metadata_Mig_2, "Mig")
#############################################################
pbmc_AE_3  <- make_seurat(`3rd_AML1_ETO`, metadata_AE_3,  "AE")
pbmc_Mig_3 <- make_seurat(`3rd_MigR1`,    metadata_Mig_3, "Mig")
#############################################################
pbmc_AE_4  <- make_seurat(`4th_AML1_ETO`, metadata_AE_4,  "AE")
pbmc_Mig_4 <- make_seurat(`4th_MigR1`,    metadata_Mig_4, "Mig")
##############################################################
# QC: add the mitochondrial-read percentage per cell, then drop
# low-quality cells / likely doublets per sample.
# The [[ operator can add columns to object metadata. This is a great place to stash QC stats
# Per-batch mito cutoffs: batch 1 < 7%, batch 2 < 6%, batch 3 < 10%, batch 4 < 7%.
# NOTE(review): batch 4 (AE_4 / Mig_4) has no upper nFeature_RNA bound,
# unlike batches 1-3 (< 2000) — confirm this asymmetry is intentional.
pbmc_AE_1[["percent.mt"]] <- PercentageFeatureSet(pbmc_AE_1, pattern = "^MT-")
pbmc_AE_1 <- subset(pbmc_AE_1, subset = nFeature_RNA > 350 & nFeature_RNA < 2000 & percent.mt < 7)
########################
pbmc_AE_2[["percent.mt"]] <- PercentageFeatureSet(pbmc_AE_2, pattern = "^MT-")
pbmc_AE_2 <- subset(pbmc_AE_2, subset = nFeature_RNA > 350 & nFeature_RNA < 2000 & percent.mt < 6)
########################
pbmc_AE_3[["percent.mt"]] <- PercentageFeatureSet(pbmc_AE_3, pattern = "^MT-")
pbmc_AE_3 <- subset(pbmc_AE_3, subset = nFeature_RNA > 350 & nFeature_RNA < 2000 & percent.mt < 10)
########################
pbmc_AE_4[["percent.mt"]] <- PercentageFeatureSet(pbmc_AE_4, pattern = "^MT-")
pbmc_AE_4 <- subset(pbmc_AE_4, subset = nFeature_RNA > 350 & percent.mt < 7)
########################
pbmc_Mig_1[["percent.mt"]] <- PercentageFeatureSet(pbmc_Mig_1, pattern = "^MT-")
pbmc_Mig_1 <- subset(pbmc_Mig_1, subset = nFeature_RNA > 350 & nFeature_RNA < 2000 & percent.mt < 7)
########################
pbmc_Mig_2[["percent.mt"]] <- PercentageFeatureSet(pbmc_Mig_2, pattern = "^MT-")
pbmc_Mig_2 <- subset(pbmc_Mig_2, subset = nFeature_RNA > 350 & nFeature_RNA < 2000 & percent.mt < 6)
########################
pbmc_Mig_3[["percent.mt"]] <- PercentageFeatureSet(pbmc_Mig_3, pattern = "^MT-")
pbmc_Mig_3 <- subset(pbmc_Mig_3, subset = nFeature_RNA > 350 & nFeature_RNA < 2000 & percent.mt < 10)
########################
pbmc_Mig_4[["percent.mt"]] <- PercentageFeatureSet(pbmc_Mig_4, pattern = "^MT-")
pbmc_Mig_4 <- subset(pbmc_Mig_4, subset = nFeature_RNA > 350 & percent.mt < 7)
#########################################################################################################
## Harmony integration of all eight samples.
object.list <- as.list(c(pbmc_AE_1,pbmc_Mig_1,pbmc_AE_2,pbmc_Mig_2,pbmc_AE_3,pbmc_Mig_3,pbmc_AE_4,pbmc_Mig_4))
sample_name <- c("AE1","Mig1","AE2","Mig2","AE3","Mig3","AE4","Mig4")
names(object.list) <- sample_name
# saveRDS(object.list,file = "object_list.rds")
# NOTE(review): the next line replaces the object.list just built with a
# previously saved copy; if "object_list.rds" is absent this errors, and if
# present it silently discards the objects created above — confirm intent.
object.list <- readRDS("object_list.rds")
## PCA dimensionality reduction
scRNA_harmony <- merge(object.list[[1]], y=c(object.list[[2]], object.list[[3]], object.list[[4]], object.list[[5]],
object.list[[6]], object.list[[7]], object.list[[8]]))
table(scRNA_harmony$batch)
scRNA_harmony <- NormalizeData(scRNA_harmony) %>% FindVariableFeatures() %>% ScaleData() %>% RunPCA(verbose=FALSE)
# scRNA_harmony <- JackStraw(scRNA_harmony, num.replicate = 100)
## Integration (batch correction by sample identity)
# system.time({scRNA_harmony <- RunHarmony(scRNA_harmony, group.by.vars = "orig.ident")})
# NOTE(review): RunHarmony is called twice in a row; the second call (with
# explicit kmeans parameters) overwrites the first — one of them is redundant.
scRNA_harmony <- RunHarmony(scRNA_harmony, group.by.vars = "orig.ident")
scRNA_harmony <- RunHarmony(scRNA_harmony, group.by.vars = "orig.ident", kmeans_init_nstart=1, kmeans_init_iter_max=30)
# Dimensionality reduction and clustering
scRNA_harmony <- RunTSNE(scRNA_harmony,reduction = "harmony",dims = 1:50,tsne.method = "Rtsne",reduction.name = "tsne")
# scRNA_harmony <- RunUMAP(scRNA_harmony, reduction = "harmony", dims = 1:30)
scRNA_harmony <- FindNeighbors(scRNA_harmony, reduction = "harmony", dims = 1:50) %>% FindClusters(resolution = 0.5)
## Plotting
#group_by_cluster
DimPlot(scRNA_harmony, reduction = "tsne", label=T)
#group_by_sample
DimPlot(scRNA_harmony, reduction = "tsne", group.by='orig.ident',cols = values)
DimPlot(scRNA_harmony, reduction = "tsne", group.by='type')
DimPlot(scRNA_harmony, reduction = "tsne", split.by ='orig.ident',label = T,ncol = 4,cols = values)
DimPlot(scRNA_harmony, reduction = "tsne", split.by ='type',label = T,pt.size = 1,cols = values,ncol = 1)
# NOTE(review): par() does not affect ggplot2-based Seurat plots; this call has no effect here.
par(mfrow=c(2,4))
FeaturePlot(scRNA_harmony, features = c("TM4SF1"),split.by = "type",reduction = "tsne",label = T,ncol = 1,pt.size = 1)
# Cluster-3 markers (ROC test, upregulated only) and condition-conserved markers.
cluster3.markers <- FindMarkers(scRNA_harmony, ident.1 = 3, logfc.threshold = 0.25, test.use = "roc", only.pos = TRUE)
marker_gene<- FindConservedMarkers(scRNA_harmony, ident.1 = 3, grouping.var = "type", verbose = FALSE)
write.csv(cluster3.markers,file = "marker.csv")
#combinate
# NOTE(review): plot1/plot2 are never defined in this script — the next line
# fails as written; presumably they refer to plots from an earlier revision.
plotc <- plot1+plot2
ggsave("scRNA_harmony_batch.png", plot = plotc, width = 10, height = 5)
saveRDS(scRNA_harmony, 'scRNA_harmony_batch.rds')
# plot3 = DimPlot(scRNA_harmony, reduction = "umap", group.by=)
#combinate
## Harmony correction grouped by condition ("type") instead of by sample.
scRNA_harmony_type <- RunHarmony(scRNA_harmony, group.by.vars = "type")
scRNA_harmony_type <- RunUMAP(scRNA_harmony_type, reduction = "harmony", dims = 1:30)
scRNA_harmony_type <- FindNeighbors(scRNA_harmony_type, reduction = "harmony", dims = 1:30) %>% FindClusters()
# NOTE(review): the next two plots use scRNA_harmony (not scRNA_harmony_type)
# with reduction "umap", but RunUMAP on scRNA_harmony is commented out above —
# as written these calls will fail; scRNA_harmony_type was likely intended.
plot3 = DimPlot(scRNA_harmony, reduction = "umap", group.by="type")
plot =DimPlot(scRNA_harmony, reduction = "umap", label=T)
markergene_harmony <- FindConservedMarkers(scRNA_harmony_type, ident.1 = 3, grouping.var = "type", verbose = FALSE)
head(markergene_harmony)
###################################################################
#........................................................................ exclude the 1st experiment (batch 1)
object.list_none1 <- as.list(c(pbmc_AE_2,pbmc_Mig_2,pbmc_AE_3,pbmc_Mig_3,pbmc_AE_4,pbmc_Mig_4))
scRNA_harmony_none1 <- merge(object.list_none1[[1]], y=c(object.list_none1[[2]], object.list_none1[[3]], object.list_none1[[4]], object.list_none1[[5]],
object.list_none1[[6]]))
table(scRNA_harmony_none1$batch)
scRNA_harmony_none1 <- NormalizeData(scRNA_harmony_none1) %>% FindVariableFeatures() %>% ScaleData() %>% RunPCA(verbose=FALSE)
## Integration (corrected by condition)
# system.time({scRNA_harmony <- RunHarmony(scRNA_harmony, group.by.vars = "orig.ident")})
scRNA_harmony_none1 <- RunHarmony(scRNA_harmony_none1, group.by.vars = "type")
# Dimensionality reduction and clustering
scRNA_harmony_none1 <- RunUMAP(scRNA_harmony_none1, reduction = "harmony", dims = 1:30)
scRNA_harmony_none1 <- FindNeighbors(scRNA_harmony_none1, reduction = "harmony", dims = 1:30) %>% FindClusters()
## Plotting
#group_by_cluster
plot4 = DimPlot(scRNA_harmony_none1, reduction = "umap", label=T)
#group_by_sample
plot5 = DimPlot(scRNA_harmony_none1, reduction = "umap", group.by='type')
#combinate
plotc_batch <- plot4+plot5
# NOTE(review): batch_1.combined is defined only much later in the script and
# markers_1 is never defined — these two lines look like a copy-paste remnant;
# scRNA_harmony_none1 / markergene_harmony were probably intended.
markergene_harmony <- FindConservedMarkers(batch_1.combined, ident.1 = 3, grouping.var = "orig.ident", verbose = FALSE)
head(markers_1)
###################################################################
####### Seurat CCA integration of all eight samples #####
# Normalize each sample and select its variable features before anchoring.
for (i in seq_along(object.list)) {
  object.list[[i]] <- NormalizeData(object.list[[i]])
  object.list[[i]] <- FindVariableFeatures(object.list[[i]])
}
scRNA.anchors <- FindIntegrationAnchors(object.list = object.list)
scRNA_seurat <- IntegrateData(anchorset = scRNA.anchors)
scRNA_seurat <- ScaleData(scRNA_seurat) %>% RunPCA(verbose=FALSE)
scRNA_seurat <- RunUMAP(scRNA_seurat, dims = 1:30)
scRNA_seurat <- FindNeighbors(scRNA_seurat, dims = 1:30) %>% FindClusters()
#group_by_cluster
# Fix: the original requested reduction "tSNE", which is both miscapitalized
# and never computed for this object; UMAP is what RunUMAP produced above.
plot4 = DimPlot(scRNA_seurat, reduction = "umap", label=T)
#group_by_sample
plot5 = DimPlot(scRNA_seurat, reduction = "umap", group.by='orig.ident')
#combinate
plotcs <- plot4+plot5
# Conserved markers for cluster 3 of the CCA-integrated object.
# Fix: the original queried `batch_1.combined` (defined only later in the
# script) and printed the undefined `markers_1`.
marker_gene <- FindConservedMarkers(scRNA_seurat, ident.1 = 3, grouping.var = "orig.ident", verbose = FALSE)
head(marker_gene)
#######################################################################
#.......................................................................................... Harmony: between-batch comparisons
######################################################################
# NOTE(review): object.list_1..4 defined here are not used by the merges
# below (which index object.list directly), and object.list_1/object.list_2
# are reassigned later in the script — these four lines appear to be dead code.
object.list_1 <- as.list(c(pbmc_AE_1,pbmc_Mig_1))
object.list_2 <- as.list(c(pbmc_AE_2,pbmc_Mig_2))
object.list_3 <- as.list(c(pbmc_AE_3,pbmc_Mig_3))
object.list_4 <- as.list(c(pbmc_AE_4,pbmc_Mig_4))
#######################################################################
# One merged object per batch: object.list elements 1:2 = batch 1,
# 3:4 = batch 2, 5:6 = batch 3, 7:8 = batch 4.
scRNA_harmony_batch1 <- merge(object.list[[1]], object.list[[2]])
scRNA_harmony_batch2 <- merge(object.list[[3]], object.list[[4]])
scRNA_harmony_batch3 <- merge(object.list[[5]], object.list[[6]])
scRNA_harmony_batch4 <- merge(object.list[[7]], object.list[[8]])
#######################################################################
## Batch 1: Harmony correction across its two samples, then t-SNE + clustering.
table(scRNA_harmony_batch1$orig.ident)
scRNA_harmony_batch1 <- NormalizeData(scRNA_harmony_batch1) %>% FindVariableFeatures() %>% ScaleData() %>% RunPCA(verbose=FALSE)
## Integration
scRNA_harmony_batch1 <- RunHarmony(scRNA_harmony_batch1, group.by.vars = "orig.ident")
# Dimensionality reduction and clustering
scRNA_harmony_batch1 <- RunUMAP(scRNA_harmony_batch1, reduction = "harmony", dims = 1:30)
# scRNA_harmony_batch1 <- RunTSNE(scRNA_harmony_batch1, reduction = "harmony", dims = 1:30)
scRNA_harmony_batch1<- RunTSNE(scRNA_harmony_batch1,reduction = "harmony",dims = 1:30,seed.use = 1,tsne.method = "Rtsne",reduction.name = "tsne")
scRNA_harmony_batch1 <- FindNeighbors(scRNA_harmony_batch1, reduction = "harmony", dims = 1:30) %>% FindClusters(resolution = 1)
#group_by_cluster
plot_batch1_c= DimPlot(scRNA_harmony_batch1, reduction = "tsne", label=T)
DimPlot(object = scRNA_harmony_batch1, pt.size = 1, reduction = "tsne", group.by = "orig.ident", cols =abc)
DimPlot(object = scRNA_harmony_batch1, pt.size = 1, reduction = "tsne", cols = "Set1")
FeaturePlot(scRNA_harmony_batch1, features = c("TM4SF1"), reduction = "tsne",min.cutoff = "q9")
DimPlot(scRNA_harmony_batch1, reduction = "tsne",pt.size = 1,cols = abc, split.by = "orig.ident",label=T)
#group_by_sample
plot_batch1_s = DimPlot(scRNA_harmony_batch1, reduction = "tsne", group.by='orig.ident')
##.......................................................................................... Batch 2: same Harmony workflow.
scRNA_harmony_batch2 <- NormalizeData(scRNA_harmony_batch2) %>% FindVariableFeatures() %>% ScaleData() %>% RunPCA(verbose=FALSE)
scRNA_harmony_batch2 <- RunHarmony(scRNA_harmony_batch2, group.by.vars = "orig.ident")
# scRNA_harmony_batch2 <- RunUMAP(scRNA_harmony_batch2, reduction = "harmony", dims = 1:30)
scRNA_harmony_batch2<- RunTSNE(scRNA_harmony_batch2,reduction = "harmony",dims = 1:30,seed.use = 1,tsne.method = "Rtsne",reduction.name = "tsne")
scRNA_harmony_batch2 <- FindNeighbors(scRNA_harmony_batch2, reduction = "harmony", dims = 1:30) %>% FindClusters(resolution = 1)
# NOTE(review): unlike the other batches, plot_batch2_c is grouped by
# orig.ident rather than by cluster — confirm this is intentional.
plot_batch2_c= DimPlot(scRNA_harmony_batch2, pt.size = 1, reduction = "tsne", group.by = "orig.ident", cols =abc, label=T)
DimPlot(object = scRNA_harmony_batch2, pt.size = 1, reduction = "tsne", group.by = "orig.ident", cols =abc)
DimPlot(object = scRNA_harmony_batch2, pt.size = 1, reduction = "tsne", cols =abc)
DimPlot(scRNA_harmony_batch2, reduction = "tsne",pt.size = 1,cols = abc, split.by = "orig.ident",label=T)
# plot_batch2_s = DimPlot(scRNA_harmony_batch2, reduction = "umap", group.by='orig.ident')
FeaturePlot(scRNA_harmony_batch2, features = c("TM4SF1"), reduction = "tsne",min.cutoff = "q9")
###########################################################################################
## Batch 3: same Harmony workflow (this one also computes UMAP).
scRNA_harmony_batch3 <- NormalizeData(scRNA_harmony_batch3) %>% FindVariableFeatures() %>% ScaleData() %>% RunPCA(verbose=FALSE)
scRNA_harmony_batch3 <- RunHarmony(scRNA_harmony_batch3, group.by.vars = "orig.ident")
scRNA_harmony_batch3 <- RunUMAP(scRNA_harmony_batch3, reduction = "harmony", dims = 1:30)
scRNA_harmony_batch3<- RunTSNE(scRNA_harmony_batch3,reduction = "harmony",dims = 1:30,seed.use = 1,tsne.method = "Rtsne",reduction.name = "tsne")
scRNA_harmony_batch3 <- FindNeighbors(scRNA_harmony_batch3, reduction = "harmony", dims = 1:30) %>% FindClusters(resolution = 1)
plot_batch3_c= DimPlot(scRNA_harmony_batch3, reduction = "tsne", label=T)
plot_batch3_s = DimPlot(scRNA_harmony_batch3, pt.size = 1, reduction = "tsne", group.by = "orig.ident", cols =abc)
FeaturePlot(scRNA_harmony_batch3, features = c("TM4SF1"), reduction = "tsne")
DimPlot(scRNA_harmony_batch3, reduction = "tsne",pt.size = 1,cols = abc, split.by = "orig.ident",label=T)
###########################################################################################
## Batch 4: same Harmony workflow.
scRNA_harmony_batch4 <- NormalizeData(scRNA_harmony_batch4) %>% FindVariableFeatures() %>% ScaleData() %>% RunPCA(verbose=FALSE)
scRNA_harmony_batch4 <- RunHarmony(scRNA_harmony_batch4, group.by.vars = "orig.ident")
# scRNA_harmony_batch4 <- RunUMAP(scRNA_harmony_batch4, reduction = "harmony", dims = 1:30)
scRNA_harmony_batch4<- RunTSNE(scRNA_harmony_batch4,reduction = "harmony",dims = 1:30,seed.use = 1,tsne.method = "Rtsne",reduction.name = "tsne")
scRNA_harmony_batch4 <- FindNeighbors(scRNA_harmony_batch4, reduction = "harmony", dims = 1:30) %>% FindClusters(resolution = 1)
plot_batch4_c= DimPlot(scRNA_harmony_batch4, reduction = "tsne", label=T,cols =abc)
plot_batch4_s = DimPlot(scRNA_harmony_batch4,reduction = "tsne", group.by='orig.ident',cols =abc)
FeaturePlot(scRNA_harmony_batch4, features = c("TM4SF1"), reduction = "tsne")
DimPlot(scRNA_harmony_batch4, reduction = "tsne",pt.size = 1,cols = abc, split.by = "orig.ident",label=T)
############################################################################################
############################################################################################
#seurat
# --- Batch 1: Seurat CCA anchor integration of samples 1 and 2 ---
object.list_1 <- c(object.list[[1]],object.list[[2]])
# Log-normalise each sample and keep its 2,000 most variable genes
object.list_1 <- lapply(X = object.list_1, FUN = function(x) {
x <- NormalizeData(x)
x <- FindVariableFeatures(x, selection.method = "vst", nfeatures = 2000)
})
# select features that are repeatedly variable across datasets for integration
features <- SelectIntegrationFeatures(object.list = object.list_1)
batch_1.anchors <- FindIntegrationAnchors(object.list = object.list_1, anchor.features = features)
# this command creates an 'integrated' data assay
batch_1.combined <- IntegrateData(anchorset = batch_1.anchors)
DefaultAssay(batch_1.combined) <- "integrated"
# Standard downstream workflow on the integrated assay
batch_1.combined <- ScaleData(batch_1.combined, verbose = FALSE)
batch_1.combined <- RunPCA(batch_1.combined, npcs = 30, verbose = FALSE)
batch_1.combined<- RunTSNE(batch_1.combined,reduction = "pca",dims = 1:30,cells = NULL,features = NULL,tsne.method = "Rtsne",reduction.name = "tsne")
# batch_1.combined <- RunUMAP(batch_1.combined, reduction = "pca", dims = 1:30)
batch_1.combined <- FindNeighbors(batch_1.combined, reduction = "pca", dims = 1:30)
batch_1.combined <- FindClusters(batch_1.combined, resolution = 0.5)
# Sample-vs-cluster views on the t-SNE layout
p1 <- DimPlot(batch_1.combined, reduction = "tsne", group.by = "orig.ident")
p2 <- DimPlot(batch_1.combined, reduction = "tsne", label = TRUE, repel = TRUE)
p1 + p2
DimPlot(batch_1.combined,reduction = "tsne", group.by='orig.ident',cols =abc)
DimPlot(batch_1.combined,reduction = "tsne", split.by ='orig.ident',cols =abc,label=T)
FeaturePlot(batch_1.combined, features = c("TM4SF1"), reduction = "tsne",label=T)
#..........................................................
# --- Batch 2: Seurat CCA anchor integration of samples 3 and 4 ---
object.list_2 <- c(object.list[[3]],object.list[[4]])
object.list_2 <- lapply(X = object.list_2, FUN = function(x) {
x <- NormalizeData(x)
x <- FindVariableFeatures(x, selection.method = "vst", nfeatures = 2000)
})
# select features that are repeatedly variable across datasets for integration
features <- SelectIntegrationFeatures(object.list = object.list_2)
batch_2.anchors <- FindIntegrationAnchors(object.list = object.list_2, anchor.features = features)
# this command creates an 'integrated' data assay
batch_2.combined <- IntegrateData(anchorset = batch_2.anchors)
DefaultAssay(batch_2.combined) <- "integrated"
batch_2.combined <- ScaleData(batch_2.combined, verbose = FALSE)
# NOTE(review): 50 PCs are computed here but only dims 1:30 are used downstream
batch_2.combined <- RunPCA(batch_2.combined, npcs = 50, verbose = FALSE)
# batch_2.combined <- RunUMAP(batch_2.combined, reduction = "pca", dims = 1:30)
batch_2.combined<- RunTSNE(batch_2.combined,reduction = "pca",dims = 1:30,cells = NULL,features = NULL,tsne.method = "Rtsne",reduction.name = "tsne")
batch_2.combined <- FindNeighbors(batch_2.combined, reduction = "pca", dims = 1:30)
batch_2.combined <- FindClusters(batch_2.combined, resolution = 1)
# Sample-vs-cluster views on the t-SNE layout
p3 <- DimPlot(batch_2.combined, reduction = "tsne", group.by = "orig.ident")
p4 <- DimPlot(batch_2.combined, reduction = "tsne", label = TRUE, repel = TRUE)
p3 + p4
DimPlot(batch_2.combined,pt.size = 1,reduction = "tsne", split.by ='orig.ident',cols =abc,label=T)
FeaturePlot(batch_2.combined, features = c("TM4SF1"), reduction = "tsne")
# --- Batch 2 (second pass): SCT-normalised reciprocal-PCA integration ---
# SCTransform each sample, integrate via rPCA anchors, then recluster.
object.list_2 <- lapply(X = object.list_2, FUN = SCTransform, method = "glmGamPoi")
features <- SelectIntegrationFeatures(object.list = object.list_2, nfeatures = 3000)
object.list_2 <- PrepSCTIntegration(object.list = object.list_2, anchor.features = features)
object.list_2 <- lapply(X = object.list_2, FUN = RunPCA, features = features)
batch_2.anchors <- FindIntegrationAnchors(object.list = object.list_2, normalization.method = "SCT",
                                          anchor.features = features, dims = 1:30, reduction = "rpca", k.anchor = 20)
batch_2.combined <- IntegrateData(anchorset = batch_2.anchors, normalization.method = "SCT", dims = 1:30)
batch_2.combined <- RunPCA(batch_2.combined, verbose = FALSE)
batch_2.combined <- RunTSNE(batch_2.combined, reduction = "pca", dims = 1:30)
batch_2.combined <- FindNeighbors(batch_2.combined, dims = 1:30, verbose = FALSE)
batch_2.combined <- FindClusters(batch_2.combined, resolution = 0.3,verbose = FALSE)
# BUG FIX: the FIt-SNE embedding must be computed BEFORE any plot that asks
# for reduction = "FItSNE".  In the original, this RunTSNE call came after the
# DimPlot/FeaturePlot calls below, which therefore requested a reduction that
# did not yet exist on the freshly integrated object.
# (Requires the FIt-SNE binary at the given path.)
batch_2.combined <- RunTSNE(object = batch_2.combined,
                            reduction.name = "FItSNE",
                            reduction.key = "FItSNE_",
                            tsne.method = "FIt-SNE",
                            fast_tsne_path = "../sc_RNA/Flt-tSNE/FItSNE.exe")
# Cluster and per-sample views on the FIt-SNE layout
DimPlot(batch_2.combined, label = TRUE) + NoLegend()
DimPlot(batch_2.combined,pt.size = 1, reduction = "FItSNE",label = T)
DimPlot(batch_2.combined, reduction = "FItSNE",pt.size = 1, split.by = "orig.ident", label = TRUE)
FeaturePlot(batch_2.combined, features = c("TM4SF1"), reduction = "FItSNE",label = T)
#..........................................................
# --- Batch 3: CCA integration (log-normalised), then SCT/rPCA re-integration ---
# BUG FIX: object.list_3 was only assembled part-way down this section, after
# the lapply below had already referenced it; unless it was defined earlier in
# the script, that lapply failed with "object 'object.list_3' not found".
# Assemble the sample list first.
object.list_3 <- c(object.list[[5]],object.list[[6]])
object.list_3 <- lapply(X = object.list_3, FUN = function(x) {
x <- NormalizeData(x)
x <- FindVariableFeatures(x, selection.method = "vst", nfeatures = 2000)
})
# select features that are repeatedly variable across datasets for integration
features <- SelectIntegrationFeatures(object.list = object.list_3)
batch_3.anchors <- FindIntegrationAnchors(object.list = object.list_3, anchor.features = features)
# this command creates an 'integrated' data assay
batch_3.combined <- IntegrateData(anchorset = batch_3.anchors)
DefaultAssay(batch_3.combined) <- "integrated"
batch_3.combined <- ScaleData(batch_3.combined, verbose = FALSE)
batch_3.combined <- RunPCA(batch_3.combined, npcs = 30, verbose = FALSE)
batch_3.combined <- RunUMAP(batch_3.combined, reduction = "pca", dims = 1:30)
batch_3.combined <- FindNeighbors(batch_3.combined, reduction = "pca", dims = 1:30)
batch_3.combined <- FindClusters(batch_3.combined, resolution = 0.5)
p5 <- DimPlot(batch_3.combined, reduction = "umap", group.by = "orig.ident")
p6 <- DimPlot(batch_3.combined, reduction = "umap", label = TRUE, repel = TRUE)
p5 + p6
# Second pass: rebuild the list from the raw subsets (as in the original) so
# SCTransform runs on unprocessed objects, then integrate via rPCA anchors.
object.list_3 <- c(object.list[[5]],object.list[[6]])
object.list_3 <- lapply(X = object.list_3, FUN = SCTransform, method = "glmGamPoi")
features <- SelectIntegrationFeatures(object.list = object.list_3, nfeatures = 3000)
object.list_3 <- PrepSCTIntegration(object.list = object.list_3, anchor.features = features)
object.list_3 <- lapply(X = object.list_3, FUN = RunPCA, features = features)
batch_3.anchors <- FindIntegrationAnchors(object.list = object.list_3, normalization.method = "SCT",
                                          anchor.features = features, dims = 1:30, reduction = "rpca", k.anchor = 20)
batch_3.combined <- IntegrateData(anchorset = batch_3.anchors, normalization.method = "SCT", dims = 1:30)
batch_3.combined <- RunPCA(batch_3.combined, verbose = FALSE)
batch_3.combined <- RunTSNE(batch_3.combined, reduction = "pca", dims = 1:30)
batch_3.combined <- FindNeighbors(batch_3.combined, dims = 1:30, verbose = FALSE)
batch_3.combined <- FindClusters(batch_3.combined, resolution = 0.5,verbose = FALSE)
DimPlot(batch_3.combined, label = TRUE) + NoLegend()
DimPlot(batch_3.combined,pt.size = 1, reduction = "tsne",label = T)
DimPlot(batch_3.combined, reduction = "tsne", split.by = "orig.ident", label = TRUE)
FeaturePlot(batch_3.combined, features = c("TM4SF1"), reduction = "tsne",label = T)
#..........................................................
# --- Batch 4: CCA integration (log-normalised), then SCT/rPCA re-integration ---
# BUG FIX: object.list_4 was first assembled only further down (just before the
# SCT pass), so the lapply below previously ran on an undefined object unless
# it was created earlier in the script.  Assemble the sample list first.
object.list_4 <- c(object.list[[7]],object.list[[8]])
object.list_4 <- lapply(X = object.list_4, FUN = function(x) {
x <- NormalizeData(x)
x <- FindVariableFeatures(x, selection.method = "vst", nfeatures = 2000)
})
# select features that are repeatedly variable across datasets for integration
features <- SelectIntegrationFeatures(object.list = object.list_4)
batch_4.anchors <- FindIntegrationAnchors(object.list = object.list_4, anchor.features = features)
# this command creates an 'integrated' data assay
batch_4.combined <- IntegrateData(anchorset = batch_4.anchors)
DefaultAssay(batch_4.combined) <- "integrated"
batch_4.combined <- ScaleData(batch_4.combined, verbose = FALSE)
batch_4.combined <- RunPCA(batch_4.combined, npcs = 30, verbose = FALSE)
batch_4.combined <- RunUMAP(batch_4.combined, reduction = "pca", dims = 1:30)
batch_4.combined <- FindNeighbors(batch_4.combined, reduction = "pca", dims = 1:30)
batch_4.combined <- FindClusters(batch_4.combined, resolution = 0.5)
p7 <- DimPlot(batch_4.combined, reduction = "umap", group.by = "orig.ident")
p8 <- DimPlot(batch_4.combined, reduction = "umap", label = TRUE, repel = TRUE)
p7 + p8
# Side-by-side per-sample views of all four integrated batches.
# NOTE(review): RunUMAP was skipped (commented out) for batch_2.combined above,
# so reduction = "umap" may fail for merge_2 -- confirm every object carries a
# "umap" reduction before running these.
merge_1<- DimPlot(batch_1.combined, reduction = "umap", split.by = "orig.ident")
merge_2<- DimPlot(batch_2.combined, reduction = "umap", split.by = "orig.ident")
merge_3<- DimPlot(batch_3.combined, reduction = "umap", split.by = "orig.ident")
merge_4<- DimPlot(batch_4.combined, reduction = "umap", split.by = "orig.ident")
# Markers of cluster 3 conserved across samples in batch 1
markers_1 <- FindConservedMarkers(batch_1.combined, ident.1 = 3, grouping.var = "orig.ident", verbose = FALSE)
head(markers_1)
# Second pass: fresh raw subsets, SCT-normalised rPCA integration
object.list_4 <- c(object.list[[7]],object.list[[8]])
object.list_4 <- lapply(X = object.list_4, FUN = SCTransform, method = "glmGamPoi")
features <- SelectIntegrationFeatures(object.list = object.list_4, nfeatures = 3000)
object.list_4 <- PrepSCTIntegration(object.list = object.list_4, anchor.features = features)
object.list_4 <- lapply(X = object.list_4, FUN = RunPCA, features = features)
batch_4.anchors <- FindIntegrationAnchors(object.list = object.list_4, normalization.method = "SCT",
                                          anchor.features = features, dims = 1:30, reduction = "rpca")
batch_4.combined <- IntegrateData(anchorset = batch_4.anchors, normalization.method = "SCT", dims = 1:30)
batch_4.combined <- RunPCA(batch_4.combined, verbose = FALSE)
batch_4.combined <- RunTSNE(batch_4.combined, reduction = "pca", dims = 1:30)
batch_4.combined <- FindNeighbors(batch_4.combined, dims = 1:30, verbose = FALSE)
batch_4.combined <- FindClusters(batch_4.combined, resolution = 0.5,verbose = FALSE)
DimPlot(batch_4.combined, label = TRUE) + NoLegend()
DimPlot(batch_4.combined,pt.size = 1, reduction = "tsne")
# NOTE(review): `values` is not defined in this chunk (other plots use `abc`);
# confirm the intended palette object exists.
DimPlot(batch_4.combined, reduction = "tsne", split.by = "orig.ident", pt.size = 1,label = TRUE,cols = values)
# BUG FIX: FeaturePlot() requires `features`; the original call omitted it and
# errored.  Plot the same marker used for the other batches.
FeaturePlot(batch_4.combined, features = c("TM4SF1"), reduction = "tsne")
|
4e6b73c0a8b1b9799d92d63c03cb159f8703491a | 8983a1817e77cc0162f5f1b1f9130d7b33fd86e7 | /MriCloudR/man/MriCloudR-class.Rd | 429f091ab8c8ed345440d795a7969cbea71686dd | [] | no_license | kb1ooo/MriCloudR | e31c0e1dfadac18ba8eb10edb89ac60bdf65c9f4 | 23c972b5d82de34e60b78eda6be381197e51c06c | refs/heads/master | 2020-05-27T15:02:56.084480 | 2016-11-19T06:33:15 | 2016-11-19T06:33:15 | 82,562,289 | 1 | 2 | null | null | null | null | UTF-8 | R | false | true | 574 | rd | MriCloudR-class.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MriCloudR.r
\docType{class}
\name{MriCloudR-class}
\alias{MriCloudR}
\alias{MriCloudR-class}
\title{A wrapper around the AnatomyWorks MriCloud Web API}
\description{
This class makes the MriCloud API functionality available in R,
encapsulating the http communications so that it behaves like a standard R
interface.
}
\section{Slots}{
\describe{
\item{\code{baseUrl}}{The root URL of the MRICloud API. Default is \url{https://braingps.mricloud.org}.}
\item{\code{verbose}}{Verbose output}
}}
|
b387eeeabef85c4a1513ca0737420ca83bbd3fe8 | ec6b73ff7f434ee5440ab62d067283c4c1ad8edd | /man/vec_default_cast.Rd | f87838c48bfbe6b3ed867933b97ebf47dfcc101d | [] | no_license | trinker/vctrs | 5d97d0d8674a29da7f6e8b0648d4d424eaf4e391 | fa323896984a37b772dbc6830a7228534bab26d9 | refs/heads/master | 2022-04-28T07:52:32.154094 | 2020-04-20T15:02:57 | 2020-04-20T15:02:57 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,231 | rd | vec_default_cast.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cast.R
\name{vec_default_cast}
\alias{vec_default_cast}
\title{Default cast method}
\usage{
vec_default_cast(x, to, x_arg = "", to_arg = "")
}
\arguments{
\item{x}{Vectors to cast.}
\item{to}{Type to cast to. If \code{NULL}, \code{x} will be returned as is.}
\item{x_arg}{Argument names for \code{x} and \code{to}. These are used
in error messages to inform the user about the locations of
incompatible types (see \code{\link[=stop_incompatible_type]{stop_incompatible_type()}}).}
\item{to_arg}{Argument names for \code{x} and \code{to}. These are used
in error messages to inform the user about the locations of
incompatible types (see \code{\link[=stop_incompatible_type]{stop_incompatible_type()}}).}
}
\description{
This function should typically be called from the default
\code{\link[=vec_cast]{vec_cast()}} method for your class, e.g. \code{vec_cast.myclass.default()}.
It does two things:
\itemize{
\item If \code{x} is an \link{unspecified} vector, it automatically casts it to
\code{to} using \code{\link[=vec_init]{vec_init()}}.
\item Otherwise, an error is thrown with \code{\link[=stop_incompatible_cast]{stop_incompatible_cast()}}.
}
}
|
dccc73f482765acfa628e5df440769fd7805fb9b | daff42d6df4932403b74bf9914e6fd238b77b024 | /mlr.R | d6b6bb72fa1ef200f34a5e486d363afad81274a7 | [] | no_license | Kienka/Occupancy-Detection | 4c6b28b94fdc8ea426f585f316bc400118bb0d2a | 1b6a67aff7480e8b7d3d48fb19b9b4e04e3c0998 | refs/heads/master | 2021-01-11T17:03:04.637881 | 2016-09-28T22:39:56 | 2016-09-28T22:39:56 | 69,508,801 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 45 | r | mlr.R | #Writing R codes for handling many examples
|
e8aee9357a4934a84cfdc71abb210a8da35c4c1e | 727b8af88b6d32bb1f4087537779ea269cf9cd07 | /mcp_estimation.R | 37f8a3c2696a981c88dcee146991d564574e5107 | [] | no_license | Rene-Gutierrez/boom_project | 3b13848465484f52fcafd2ecc740ee14029c26e3 | cd6af9024ed2b504918f6baa82d5a5205f975396 | refs/heads/main | 2023-06-18T04:28:11.370121 | 2021-07-17T17:16:18 | 2021-07-17T17:16:18 | 360,395,925 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,074 | r | mcp_estimation.R | mcp_estimation <- function(y, G, A){
  # Problem Dimensions
  n <- dim(G)[1] # Number of Observations
  V <- dim(G)[2] # Voxel Size
  P <- dim(G)[3] # Number of ROI's
  # Auxiliary Variables
  # Flatten the n x V x P voxel array into an n x (V*P) design matrix
  GX <- G
  dim(GX) <- c(n, V * P)
  # Vectorise each subject's P x P connectivity matrix, keeping only the
  # strictly lower triangle (assumes the matrices are symmetric -- confirm)
  AX <- A
  dim(AX) <- c(n, P * P)
  AX <- AX[, lower.tri(A[1,,])]
  # 10-fold CV split: one fold of validation indices per column of cvI.
  # NOTE(review): this assumes n is divisible by 10, otherwise matrix()
  # recycles indices and the folds overlap.  No set.seed() is taken, so the
  # folds differ between runs.
  cvI <- matrix(data = sample(x = 1:n, size = n), nrow = n / 10, ncol = 10)
  # NOTE(review): ncol = 100 assumes ncvreg's default lambda path length
  # (nlambda = 100) and that every fold produces the same path; ncvreg derives
  # the path from its training data, so fold paths can differ -- verify.
  err <- matrix(data = NA, nrow = 10, ncol = 100)
  for(i in 1:10){
    val <- cvI[, i]
    # Fit the penalised model on the training folds; drop the intercept row
    out <- ncvreg::ncvreg(X = cbind(GX[-val,], AX[-val,]),
                          y = y[-val])$beta[-1, ]
    # Held-out mean squared error for every lambda on the path
    OSe <- colMeans((cbind(GX[val,], AX[val,]) %*% out - y[val])^2)
    err[i, ] <- OSe
  }
  # Average CV error per lambda and take the index of the minimiser
  err <- colMeans(err)
  lam <- which.min(err)
  # Refit on the full data and keep coefficients at the selected lambda index
  coe <- ncvreg::ncvreg(X = cbind(GX, AX),
                        y = y)$beta[-1, lam]
  # Return the flattened designs plus all CV artefacts and the chosen fit
  return(list(GX = GX,
              AX = AX,
              y = y,
              cvI = cvI,
              err = err,
              lam = lam,
              coe = coe))
} |
3d322bd00b9ca9fa580727bfb344bc6d2baa06eb | a7f3f05fa37b0c8e1362ae2d8027acd0e3794422 | /scrapeData.R | 64178d577a1cafd1b94ab889616377c96ae7f048 | [] | no_license | averyrobinson98/ST-series-Text-Mining | 2e57f1f631bc3e813a50cbe0ae40e5f0b16283e2 | 6e068bbf827663b9c0b274e4dbe2044f72cba496 | refs/heads/main | 2023-07-04T11:12:46.417128 | 2021-08-11T14:48:44 | 2021-08-11T14:48:44 | 302,234,821 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,387 | r | scrapeData.R | # Load libraries
library(dplyr)
library(rvest)
library(xml2)
library(stringr)
# Collect the transcript URL for every Stranger Things episode listed on the
# fandom wiki index page.
mainurl <- "https://transcripts.fandom.com/wiki/Stranger_Things"
epNames <- read_html(mainurl) %>% html_nodes("#mw-content-text a") %>% as.character()
epNames<-str_extract(epNames,".*(?=(title))") %>% str_sub(.,10) %>% str_remove_all(.,"\"") %>% trimws()
urls <- paste0("https://transcripts.fandom.com",epNames)
# Simplified episode names: strip the /wiki/ prefix and the URL-encoded '?'
epNamesSimp <- str_remove_all(epNames,"(/wiki/)") %>% str_remove_all(.,"(%3F)")
# Download every episode page and keep only the transcript container node.
# (lapply replaces the original grow-by-append loop.)
mylist1 <- lapply(urls, function(u) {
  read_html(u) %>% html_nodes(.,"#mw-content-text")
})
# General clean up: strip HTML tags and split each transcript on blank lines.
# BUG FIX: the original iterated over a hard-coded 1:25, which breaks whenever
# the wiki lists a different number of episodes; iterate over what was fetched.
scripts <- lapply(mylist1, function(page) {
  txt <- gsub("<.*?>", "", page)
  strsplit(txt, "\n\n") %>% unlist()
})
mylist1<-NULL
# Scrape IMDb ratings for seasons 1-3 of the show
base <- "https://www.imdb.com/title/tt4574334/episodes?season="
urls2 <- paste0(base,c(1:3))
ratings <- unlist(lapply(urls2, function(u) {
  read_html(u) %>% html_nodes(.,".ipl-rating-star.small .ipl-rating-star__rating") %>% html_text %>% as.numeric()
}))
# One row per episode: name + IMDb rating (assumes both vectors align -- the
# original made the same assumption)
ratings <- data.frame("Episode"=epNamesSimp ,"Ratings"=ratings)
|
8c1bc085441ff6a5bf8751837d2a38640bc3915d | ed4ce12c2b3290f19d1d6e4cfeb219ec0562c172 | /plot2.R | f6c4d75ef4fcd0bc29edaf911d7c663d1b929fcb | [] | no_license | burgstbt/ExData_Plotting1 | ad1c305f8833788a11a7a17ae1eb811eb4b26699 | f936f92685ce5b4172e07c30aaa1cdb64839940e | refs/heads/master | 2021-01-11T15:01:26.854558 | 2017-01-29T13:22:36 | 2017-01-29T13:22:36 | 80,281,521 | 0 | 0 | null | 2017-01-28T11:47:52 | 2017-01-28T11:47:52 | null | UTF-8 | R | false | false | 714 | r | plot2.R | # Read from Unzipped file only relevant lines
# Plot 2: line chart of Global Active Power over two consecutive days.
# Rows 66638-69517 of the raw file cover exactly these two days (2880 minutes),
# so skip straight to them instead of loading the whole dataset.
power <- read.table("household_power_consumption.txt",
                    header = FALSE, sep = ";", dec = ".",
                    skip = 66637, nrows = 2880)
# The header row was skipped above; fetch it separately to name the columns.
hdr <- read.table("household_power_consumption.txt",
                  header = TRUE, sep = ";", dec = ".", nrows = 1)
names(power) <- names(hdr)
# Draw the time series with a custom day-of-week x axis.
par(mfrow = c(1, 1))
n_obs <- length(power$Date)
plot(seq_len(n_obs), power$Global_active_power, type = "l",
     ylab = "Global Active Power (kilowatts)", xlab = "", xaxt = "n")
axis(1, at = c(1, n_obs / 2, n_obs), labels = c("Thu", "Fri", "Sat"))
# Copy the screen device to a PNG file (closed by the dev.off() that follows).
dev.copy(png, "plot2.png", width = 640, height = 640)
dev.off() |
978ed16c8cfbd79ffe9a6691279e398b6fdaa02d | 39d05ea98a2700794e5af537a016fadfce2f804d | /R/process_flu_city_log.R | 1e568d796c131b94fdfbb7cd79f6f2a553c6440e | [] | no_license | lwillem/IBMcourseTutorials | 269cbb6ddea0a34b0d3a3ec1d73c690677e0e50a | 18df35832de40311e686786b6619944d4045d384 | refs/heads/master | 2020-03-07T03:58:37.306004 | 2018-10-02T18:02:17 | 2018-10-02T18:02:17 | 127,252,783 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,614 | r | process_flu_city_log.R | ############################################################################
#
# This file is part of the course material for "Individual-based Modelling
# in Epidemiology" by Wim Delva and Lander Willem.
#
############################################################################
process_flu_city_log <- function(sim_data_log)
{
  # Summarise a simulated influenza transmission log in six plot panels
  # (daily incidence, incidence by hour, infection context, secondary-case
  # distribution, secondary cases over time) plus printed infector-by-infectee
  # age tables for all / home / school / work transmissions.
  #
  # sim_data_log: data.frame with one row per infection event; columns used:
  #   day, hour, age, age_infector, context, id, id_infector.
  # Called for its side effects (plots and printed tables).
  # set plot panels
  par(mfrow=c(2,3))
  ## INFECTIONS OVER TIME (days)
  plot(table(sim_data_log$day),type='b',xlab='days',ylab='new infections',main='incidence (per day)')
  ## INFECTIONS OVER TIME (hour of a day)
  plot(table(sim_data_log$hour),type='b',xlab='time of day (hour)',ylab='new infections',main='incidence (time of day)')
  # dashed reference lines at 07:30, 16:30 and 20:30 (presumably daily
  # activity-schedule boundaries -- confirm against the simulator settings)
  abline(v=7.5,lty=2)
  abline(v=16.5,lty=2)
  abline(v=20.5,lty=2)
  ## GET AGE CLASSES
  range(sim_data_log$age)
  age_class_threshold <- c(0,18,65,90)
  sim_data_log$age_class <- cut(sim_data_log$age,age_class_threshold,right = FALSE)
  sim_data_log$age_infector_class <- cut(sim_data_log$age_infector,age_class_threshold,right = FALSE)
  ## CONTEXT
  barplot(table(sim_data_log$context),main='context',xlab='frequency',las=2,horiz = T)
  ## INFECTIONS: TOTAL
  print(table(sim_data_log$age_infector_class,sim_data_log$age_class,dnn=c('age infecter','age infected \t\t -- all infections --')))
  ## INFECTIONS: HOUSEHOLD
  sel <- sim_data_log$context == 'household'
  print(table(sim_data_log$age_infector_class[sel],sim_data_log$age_class[sel],dnn=c('age infector','age infected \t\t -- infections at home --')))
  ## INFECTIONS: SCHOOL
  sel <- sim_data_log$context == 'school'
  print(table(sim_data_log$age_infector_class[sel],sim_data_log$age_class[sel],dnn=c('age infector','age infected \t\t -- infections at school--')))
  ## INFECTIONS: WORKPLACE
  sel <- sim_data_log$context == 'workplace'
  print(table(sim_data_log$age_infector_class[sel],sim_data_log$age_class[sel],dnn=c('age infector','age infected \t\t -- infections at work --')))
  ## SECUNDARY CASES
  barplot(table(table(sim_data_log$id_infector)),main='secundary cases',xlab='secundary cases',ylab='count')
  ## SECUNDARY CASES OVER TIME
  # Count, for every case id, how many log rows name it as the infector.
  # IDIOM FIX: vapply() replaces the original element-by-element for() loop;
  # identical results (including NA propagation if id_infector contains NA).
  sim_data_log$sec_cases <- vapply(sim_data_log$id, function(case_id) {
    sum(sim_data_log$id_infector == case_id)
  }, numeric(1))
  boxplot(sim_data_log$sec_cases ~ sim_data_log$day, outline=F,xlab='days',ylab='secundary cases',main='secundary cases')
  mean_cases <- aggregate(sec_cases ~ day, data=sim_data_log ,mean)
  lines(mean_cases$day+1,mean_cases$sec_cases,type='l',lwd=2,col=2)
  legend('topright','mean',lwd=2,col=2)
  # set plot panels back to default
  par(mfrow=c(1,1))
}
|
0aa1a03f1c869319dffac3d8bbef0cb7874583a5 | 399b71de13e2ef9015671ba83768a7542d8875ad | /R-tools/mergecuffdiff_denovo.R | 4456494dcf047bec9a0167c6517174198b44268a | [] | no_license | JonBarenboim/mugqic_tools | b73b4883c86cc3e47f7f0cb3bea5f06a571bb889 | 20801d48cf10f239799f0009e031a8b20f8bbe6a | refs/heads/master | 2021-01-20T04:43:31.660119 | 2017-04-28T16:26:40 | 2017-04-28T16:26:40 | 89,723,729 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,008 | r | mergecuffdiff_denovo.R | # Merges sample fpkm with cuffdiff denovo output file
# Maxime Caron - Jan 2012
# Arguments; output dir and design file
# Command line: <output_dir> <input_dir> <design_file>
args <- commandArgs(trailingOnly = TRUE)
output_dir<-args[1]
input_dir<-args[2]
designFile<-args[3]
# Design file layout (inferred from the indexing below -- confirm): column 1
# holds sample names, each further column one design; row 1 holds the design
# name and the remaining rows a per-sample state flag (0 = excluded).
designs<-read.table(designFile, header=F, sep="\t", check.names=F)
# Iterate for each design
for(j in 2:ncol(designs)) {
designName=designs[1,j]
print(paste("design: ",designName, sep=""))
resultFile<-paste(output_dir,"/denovo/",designName,"/isoform_exp.diff",sep="")
mergedFile<-paste(output_dir,"/denovo/",designName,"/formated.merged.gtf", sep="")
result<-read.table(resultFile, header=T, sep="\t", stringsAsFactors=F)
merged<-read.table(mergedFile, header=F, sep="\t", quote='"', stringsAsFactors=F)
# Columns 8-12 of the formatted GTF hold space-separated "key value" pairs;
# split each and keep only the value half (second split token).
tconsmerged<-as.data.frame(matrix(unlist(strsplit(as.character(merged[,8]), " ")), nrow=nrow(merged), byrow=T))
oId<-as.data.frame(matrix(unlist(strsplit(as.character(merged[,9]), " ")), nrow=nrow(merged), byrow=T))
ref_id<-as.data.frame(matrix(unlist(strsplit(as.character(merged[,10]), " ")), nrow=nrow(merged), byrow=T))
#gene_Name<-as.data.frame(matrix(unlist(strsplit(as.character(merged[,11]), " ")), nrow=nrow(merged), byrow=T))
classcode<-as.data.frame(matrix(unlist(strsplit(as.character(merged[,12]), "info ")), nrow=nrow(merged), byrow=T))
tconsmerged<-as.data.frame(tconsmerged[,2])
oId<-as.data.frame(oId[,2])
ref_id<-as.data.frame(ref_id[,2])
#gene_Name<-as.data.frame(gene_Name[,2])
classcode<-as.data.frame(classcode[,2])
# Annotation table keyed on transcript_id
mergedFinal<-cbind(tconsmerged, oId, ref_id, classcode)
colnames(mergedFinal)=c("transcript_id", "oId", "nearest_ref", "classcode")
# Merge with result file (isoform_exp.diff)
writeIt<-merge(result, mergedFinal, by.x=1, by.y=1)
# Reorder so the oId column (position 15 after the merge) comes first
writeIt<- writeIt[,c(15,1,2,3,4,5,6,7,8,9,10,11,12,13,14,16,17)]
# Merge with sample FPKM
for(i in 2:nrow(designs)) {
#for(i in 2:2) {
sampleState=designs[i,j]
if(sampleState !=0) {
sampleName=designs[i,1]
transcriptFile=paste(input_dir,"/denovo/",sampleName, "/transcripts.gtf",sep="")
transcripts<-read.table(transcriptFile, header=F, sep="\t", quote='"', stringsAsFactors=F)
# Keep the attribute column (9) of rows whose feature type fuzzy-matches
# "transcript" (agrep allows approximate matches)
transcripts<-as.data.frame(transcripts[agrep("transcript", transcripts[,3]),9])
# Split the attribute string on ';' then extract the transcript id (field 2)
# and the FPKM value (field 3)
transcripts<-as.data.frame(matrix(unlist(strsplit(as.character(transcripts[,1]), ";")), nrow=nrow(transcripts), byrow=T))
tcons<-as.data.frame(matrix(unlist(strsplit(as.character(transcripts[,2]), " ")), nrow=nrow(transcripts), byrow=T))
tcons<-as.data.frame(tcons[,3])
fpkm<-as.data.frame(matrix(unlist(strsplit(as.character(transcripts[,3]), " ")), nrow=nrow(transcripts), byrow=T))
finalSample<-cbind(tcons, fpkm[,3])
colnames(finalSample)=c(paste("id.",sampleName,sep=""),paste("fpkm.",sampleName,sep=""))
writeIt<-merge(writeIt, finalSample, by.x=1, by.y=1)
#print(head(writeIt))
}
}
# Keep only rows with q_value <= 0.05 and write one CSV per design
writeItSign=writeIt[writeIt$q_value <= 0.05,]
write.table(writeItSign, paste(args[1],"/denovo/", designs[1,j], "/isoform_exp.diff.with.fpkm.csv", sep=""), quote=F, row.names=F, sep="\t")
}
|
ddd22fc9e7834f897bf8332260975357255e9eb2 | a43a5f942ebf81cbc2bda2b8b5413efdedb03ed8 | /man/cfbd_metrics_ppa_teams.Rd | 3fdd54648caaf1073508a34333ae14ffc85e448a | [
"MIT"
] | permissive | Engy-22/cfbfastR | 7d6775943c8124c532c36728dec5cc7aee9ad4f5 | 92ebfdd0fb4a70bcb9f3cc2d11f3a61d863d9743 | refs/heads/master | 2023-08-30T11:08:09.332930 | 2021-10-26T17:34:33 | 2021-10-26T17:34:33 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 3,010 | rd | cfbd_metrics_ppa_teams.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cfbd_metrics.R
\name{cfbd_metrics_ppa_teams}
\alias{cfbd_metrics_ppa_teams}
\title{\strong{Get team averages for predicted points added (PPA)}}
\usage{
cfbd_metrics_ppa_teams(
year = NULL,
team = NULL,
conference = NULL,
excl_garbage_time = FALSE
)
}
\arguments{
\item{year}{(\emph{Integer} optional): Year, 4 digit format (\emph{YYYY})}
\item{team}{(\emph{String} optional): D-I Team}
\item{conference}{(\emph{String} optional): Conference name - select a valid FBS conference\cr
Conference names P5: ACC, Big 12, Big Ten, SEC, Pac-12\cr
Conference names G5 and FBS Independents: Conference USA, Mid-American, Mountain West, FBS Independents, American Athletic\cr}
\item{excl_garbage_time}{(\emph{Logical} default FALSE): Select whether to exclude Garbage Time (TRUE or FALSE)}
}
\value{
\code{\link[=cfbd_metrics_ppa_teams]{cfbd_metrics_ppa_teams()}} - A data frame with 21 variables:
\describe{
\item{\code{season}: integer.}{.}
\item{\code{conference}: character.}{.}
\item{\code{team}: character.}{.}
\item{\code{off_overall}: character.}{Offense overall predicted points added (PPA).}
\item{\code{off_passing}: character.}{Offense passing predicted points added (PPA).}
\item{\code{off_rushing}: character.}{Offense rushing predicted points added (PPA).}
\item{\code{off_first_down}: character.}{Offense 1st down predicted points added (PPA).}
\item{\code{off_second_down}: character.}{Offense 2nd down predicted points added (PPA).}
\item{\code{off_third_down}: character.}{Offense 3rd down predicted points added (PPA).}
\item{\code{off_cumulative_total}: character.}{Offense cumulative total predicted points added (PPA).}
\item{\code{off_cumulative_passing}: character.}{Offense cumulative total passing predicted points added (PPA).}
\item{\code{off_cumulative_rushing}: character.}{Offense cumulative total rushing predicted points added (PPA).}
\item{\code{def_overall}: character.}{Defense overall predicted points added (PPA).}
\item{\code{def_passing}: character.}{Defense passing predicted points added (PPA).}
\item{\code{def_rushing}: character.}{Defense rushing predicted points added (PPA).}
\item{\code{def_first_down}: character.}{Defense 1st down predicted points added (PPA).}
\item{\code{def_second_down}: character.}{Defense 2nd down predicted points added (PPA).}
\item{\code{def_third_down}: character.}{Defense 3rd down predicted points added (PPA).}
\item{\code{def_cumulative_total}: character.}{Defense cumulative total predicted points added (PPA).}
\item{\code{def_cumulative_passing}: character.}{Defense cumulative total passing predicted points added (PPA).}
\item{\code{def_cumulative_rushing}: character.}{Defense cumulative total rushing predicted points added (PPA).}
}
}
\description{
\strong{Get team averages for predicted points added (PPA)}
}
\examples{
\donttest{
cfbd_metrics_ppa_teams(year = 2019, team = "TCU")
}
}
\keyword{Points}
\keyword{Predicted}
\keyword{Teams}
|
c1e2880f4b7203ad363d4765c3351c553cfd0fa7 | 52826e06350b265406f831756043d6c1afba6b63 | /Computación en Estadística y Optimización/Práctica Final/Ejercicio1.R | 02d771ed8026c3e07eccacf9871c6142a9d4bf52 | [] | no_license | MGijon/Learning-R | eea7b0ed377a5b2398c3c0acfe41061ba51e0539 | 9fb8778ba895a4d8003d5af7ee0743f95e932b01 | refs/heads/master | 2021-05-05T04:03:20.651493 | 2018-12-27T18:25:16 | 2018-12-27T18:25:16 | 105,207,704 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 322 | r | Ejercicio1.R | ## ============ ##
## EJERCICIO 1: ##
## ============ ##
# (a)
# ---
# Load the RODBC package, installing it only if it is not already available.
# (The original called install.packages() unconditionally, which re-downloads
# the package -- and needs network access -- on every run.)
if (!requireNamespace("RODBC", quietly = TRUE)) {
  install.packages("RODBC")
}
library(RODBC)
# (b)
# ---
# (c)
# ---
# (d)
# ---
# (e)
# ---
# (f)
# ---
# (g)
# ---
# (h)
# ---
# (i)
# ---
# (j)
# ---
# (k)
# ---
# (l)
# ---
# (m)
# ---
# (n)
# ---
|
517d263cd7528c9b3378701b7986232e19662b64 | 6901d5e6f6b9775e305602394acb2ab1ab531a93 | /Data-Mining/DM Assignment5/Q14.R | 88a9cf2ca179a52614f2dc8d02940646e2f808a6 | [] | no_license | viswa9688/DataScience_2019501065 | 1f23f7ebde9407dc6c40d0f970f7094574efb5a4 | e29e4f509c87db02e7cb037d48d0590b9075627c | refs/heads/master | 2023-04-10T13:42:19.283471 | 2021-04-28T10:26:56 | 2021-04-28T10:26:56 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 484 | r | Q14.R |
# Q14: flag Midterm.2 outliers with the 1.5*IQR fence rule.
# NOTE(review): a hard-coded setwd() makes the script machine-specific, but the
# relative read.csv() below depends on it, so it is kept as-is.
setwd("C:\\Users\\HARIKA\\Desktop\\DataScience_2019501065\\Data-Mining\\DM Assignment5")
exams <- read.csv("spring2008exams.csv")
str(exams)
# Quartiles and interquartile range of the second midterm (NA scores ignored)
q1 <- quantile(exams$Midterm.2, .25, na.rm = TRUE)
q3 <- quantile(exams$Midterm.2, .75, na.rm = TRUE)
iqr <- q3 - q1
iqr
# Outliers above the upper fence; which() drops NA scores so they do not
# surface as all-NA rows in the result.
exams[which(exams$Midterm.2 > q3 + 1.5 * iqr), 3]
# BUG FIX: lower-fence outliers are scores BELOW q1 - 1.5*IQR; the original
# used ">", which selected nearly every student instead of the low outliers.
exams[which(exams$Midterm.2 < q1 - 1.5 * iqr), 3]
boxplot(exams$Midterm.1,exams$Midterm.2, col = "lightblue", main = "Exam Scores", names = c("Exam1","Exam 2"), ylab = "Exam Score") |
c0edaf4cb2745f36023603efeee1922cb0ffc816 | b6a8376f95fc89d100bb971800f37897d6ae084a | /Lending Club Prediction/LendingClubLoan.R | 667cd98354c591f9bc16f65cbaf56d958e015dbd | [] | no_license | Bekterra/My_ML_Projects | 393daa032815f982fec6663e1fff9cf19b231b1d | 2632b200c9607ff537f2d2d22351f21c76c5852f | refs/heads/master | 2020-04-02T15:22:44.290500 | 2018-10-24T20:35:00 | 2018-10-24T20:35:00 | 154,565,494 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,344 | r | LendingClubLoan.R | # Import file
loan <- read.csv("LoanStats.csv")
# Set missing data to NA (empty strings mark missing values in the raw file)
is.na(loan) <- loan == ""
# Create date variables that R recognizes as dates
# (a mid-month day '15' is prepended so the values parse with '%d %b-%y')
loan$issue_d <- as.Date(paste('15', loan$issue_d), format='%d %b-%y')
loan$earliest_cr_line <- as.Date(paste('15', loan$earliest_cr_line), format='%d %b-%y')
# Identify loans that have already come to term
# (note the leading space in the raw term strings, e.g. " 36 months")
loan$term.months <- NA
loan$term.months[loan$term==" 36 months"] <- 36
loan$term.months[loan$term==" 60 months"] <- 60
# the "term" variable is redundant with the numerical term.months variable
loan$term <- NULL
library(lubridate)
# Maturity date = issue date + term; mature == 1 once past maturity
loan$maturity.date <- loan$issue_d + months(loan$term.months)
today <- Sys.Date()
loan$mature <- ifelse(loan$maturity.date < today, 1, 0)
loan$maturity.date <- NULL
remove(today)
# subset data to select only mature loans
loan <- subset(loan, mature==1)
# Convert character percentages to numeric variables
loan$int_rate <- as.numeric(gsub("%" , "", loan$int_rate))
loan$revol_util <- as.numeric(gsub("%" , "", loan$revol_util))
# Convert character employment length to numeric variable
# This produces some missing data for values of emp_length that were "n/a"
loan$emp_length <- gsub(" years" , "", loan$emp_length)
loan$emp_length <- gsub(" year" , "", loan$emp_length)
# "10+" is top-coded to 10 years and "< 1" mapped to half a year
loan$emp_length <- ifelse(loan$emp_length == "10+", 10, loan$emp_length)
loan$emp_length <- ifelse(loan$emp_length == "< 1", 0.5, loan$emp_length)
loan$emp_length <- as.numeric(loan$emp_length)
# Convert character to ordinal variable (levels ordered alphabetically)
loan$grade[loan$grade == ""] <- NA
loan$grade <- ordered(loan$grade)
# Remove variables where more than 20% of the observations are missing values
loan <- loan[, colMeans(is.na(loan)) <= .20]
# randomForest can only accommodate factor variables with 32 or fewer levels,
# so drop any factor column exceeding that limit.
too.many.levels <- function(x) {
  # nlevels() is 0 for non-factors; && short-circuits on the scalar check
  # (IDIOM FIX: replaces `is.factor(x) == TRUE &`)
  is.factor(x) && nlevels(x) > 32
}
# IDIOM FIX: vapply() yields a logical vector directly, replacing the original
# lapply() + `delete == FALSE` list comparison; the kept columns are identical.
loan <- loan[, !vapply(loan, too.many.levels, logical(1))]
remove(too.many.levels)
# Calculate the percentage of loan paid back
# This is the outcome variable we will be looking to model in the training data,
# and predict in the test data
loan$paid.back <- (loan$funded_amnt - loan$out_prncp)/loan$funded_amnt
hist(loan$paid.back)
range(loan$paid.back, na.rm = TRUE)
# Remove accounts with missing paid.back status
loan <- subset(loan, ! is.na(loan$paid.back))
# Remove variables that provide additional outcome data about the loan
# (keeping any of these would leak outcome information into the predictors)
loan$last_pymnt_amnt <- NULL # Last total payment amount received
loan$total_pymnt <- NULL # Payments received to date for total amount funded
loan$total_pymnt_inv <- NULL # Payments received to date for portion of total amount funded by investors
loan$total_rec_prncp <- NULL # total recovered principal
loan$out_prncp <- NULL # Remaining outstanding principal for total amount funded
loan$out_prncp_inv <- NULL # Remaining outstanding principal for portion of total amount funded by investors
loan$total_rec_int <- NULL # Interest received to date
loan$total_rec_late_fee <- NULL # Late fees received to date
loan$collection_recovery_fee <- NULL # post charge off collection fee
loan$recoveries <- NULL # amount recovered after loan is charged off
loan$loan_status <- NULL
loan$last_pymnt_d <- NULL # Last month payment was received
loan$next_pymnt_d <- NULL # Next scheduled payment date
loan$last_credit_pull_d <- NULL # most recent month LC pulled credit for this loan
# Remove variables where all values are the same
loan <- loan[sapply(loan, function(x) length(levels(factor(x)))>1)]
# check the amount of missing data in remaining dataset
lapply(loan, function(x) { sum(is.na(x)) })
##################################
# Create train and test datasets #
##################################
library(dplyr)
training.per <- 0.75
training.n <- round((nrow(loan)*training.per), 0)
train <- sample_n(loan, training.n)
remove(training.n, training.per)
train.nums <- unique(train$member_id)
test <- subset(loan, !(member_id %in% train.nums))
remove(train.nums)
# predict() won't work for cases that are missing any of the predictor variables
test <- test[complete.cases(test),]
row.names(test) <- NULL
# Remove ID number so randomForest doesn't try to use it as a predictor
train$member_id <- NULL
test$member_id <- NULL
##############################
# Random Forest - Regression #
##############################
library(randomForest)
rf.model <- randomForest(paid.back ~ .,
data = train,
ntree = 500,
type="regression",
importance=TRUE,
na.action=na.omit)
print(rf.model) # view results
importance <- as.data.frame(importance(rf.model)) # importance of each predictor
names(importance)[names(importance)=="%IncMSE"] <- "IncMSE"
importance <- importance[order(-importance$IncMSE),]
importance
# loan issue date is the most important predictor
library(ggplot2)
library(scales)
issue_d_plot <- ggplot(loan, aes(x=issue_d, y=paid.back)) + geom_point()
issue_d_plot <- issue_d_plot + scale_y_continuous(labels=percent)
issue_d_plot <- issue_d_plot + ylab("Percentage of Loan Paid Back")
issue_d_plot
remove(issue_d_plot)
# Graph error rate as a function of number of decision trees using ggplot
plot.data <- as.data.frame(plot(rf.model))
colnames(plot.data) <- c("Error")
plot.data$trees <- as.numeric(rownames(plot.data))
options(scipen = 999)
library(ggplot2)
library(scales)
rf.plot <- ggplot(plot.data, aes(x=plot.data$trees, y=plot.data$Error)) + geom_line(colour="#000099")
rf.plot <- rf.plot + xlab("Number of Decision Trees")
rf.plot <- rf.plot + ylab("Mean Squared Error")
rf.plot <- rf.plot + ggtitle("Mean Squared Error by Number of Decision Trees")
rf.plot
remove(rf.plot, plot.data)
# Use the model to predict outcomes in new data
rf.model.preds <- predict(object = rf.model, newdata = test) # Predict the test data
results <- data.frame(actual = round(test$paid.back, 2), predicted = round(rf.model.preds, 2))
remove(rf.model.preds)
# Examine mean squared error in test data
results$residual <- results$actual - results$predicted
results$residual2 <- results$residual^2
mean(results$residual2)
# Identify correct predictions
results$correct.prediction <- ifelse(results$actual==results$predicted, 1, 0)
table(results$correct.prediction)
|
0f4a19a5461ab6f0c859f061f5db93a27a9d5709 | 0eb22bf570e6a3f079b4e561568627c4f1fa815b | /man/inverse.Rd | d34fb2e5c6e86db531801a299c94a83ce0451bdd | [
"MIT"
] | permissive | lfpdroubi/appraiseR | 7e6795d3287dfeb8bcc50e38d33ae6c7bafdf5ca | 8ed726beb34c7e16005abda21582ebb5e46c5169 | refs/heads/master | 2022-05-26T20:43:45.722451 | 2022-03-19T16:00:20 | 2022-03-19T16:00:20 | 211,920,843 | 3 | 1 | NOASSERTION | 2020-10-15T11:49:17 | 2019-09-30T17:54:13 | R | UTF-8 | R | false | true | 811 | rd | inverse.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers.R
\name{inverse}
\alias{inverse}
\title{Returns a vector generated with the inverse of the function f}
\usage{
inverse(x, func)
}
\arguments{
\item{x}{A numeric vector or matrix of transformed values to be back-transformed.}
\item{func}{a function of the box-cox family (rsqr(), rec(), rsqrt(), log(),
cubroot(), sqrt(), I() and sqr())}
}
\value{
A vector (or matrix) with the inverse of \code{func} applied to \code{x}.
}
\description{
Returns a vector generated with the inverse of the function f
}
\examples{
inverse(rsqr(10), "rsqr")
inverse(rec(10), "rec")
inverse(rsqrt(10), "rsqrt")
inverse(log(1), "log")
inverse(sqrt(4), "sqrt")
inverse(sqr(4), "sqr")
dados <- st_drop_geometry(centro_2015)
fit <- lm(log(valor) ~ ., data = dados)
aval <- new_data(fit)
Y <- predict(fit, newdata = aval, interval = "confidence")
inverse(Y, "log")
}
|
4d7f429a1fda59c99726332a69cffbccbcf19599 | d5e6a60ae67f953cc7ec2f02acfb08c9fa56f3c3 | /ggplotGraphics.R | 4184799a0f295404b079ab444bf59d922a2b8e51 | [] | no_license | anyasteinhart9898/BIOL-381 | 0e3643621eb451263f4c454779496db76e854583 | 56db1b0e14f0bafd5344111ed45727f2022e84b2 | refs/heads/master | 2021-05-11T19:31:39.397246 | 2018-05-02T18:00:26 | 2018-05-02T18:00:26 | 117,877,935 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,001 | r | ggplotGraphics.R | #Hadly Wickham, ggplot. And Winston Chang- R Graphics cookbook
#ggplot graphics
# 5 april 2018
# NJG
#preliminaries
library(ggplot2)
install.packages("ggthemes")
library(ggthemes)
library(patchwork)
library(TeachingDemos)
char2seed("10th Avenue Freeze-Out")
d<-mpg
str(d)
# qplots for use while coding
#Basic Histogram
qplot(x=d$hwy)
qplot(x=d$hwy, fill=I("khaki"), color=I("black"))
# density plot
qplot(x=d$hwy, geom="density")
# BAsic scatter plot
qplot(x=d$displ, y=d$hwy, geom=c("smooth", "point"))
qplot(x=d$displ, y=d$hwy, geom=c("smooth", "point"), method="lm") #linear regression has been added
#Basic Box plot
qplot(x=d$fl, y=d$cty, geom="boxplot", fill=I("green"))
#basic barplot
qplot(x=d$fl, geom="bar", fill=I("dark salmon"))
#Not what you expect
qplot(x=d$fl, geom="bar", fill="dark salmon") # this will map the variable, not the color we want, its considered an element. its red bc its the first color that comes up. CORRECT VARIABLES
#plotting curves and fucntions
myVec <- seq(1,100, by=0.1)
myFun <- function(x) sin(x) + 0.1*x
#Plor built in functions
qplot(x=myVec, y=sin(myVec), geom="line")
#Plot Density distributios
qplot(x=myVec, y=dgamma(myVec, shape=5, scale=3), geom="line")
#plot user defined distributions for probabilty funcitons
qplot(x=myVec, y=myFun(myVec), geom="line")
#--------------------------------------
p1 <- ggplot(data=d, mapping=aes(x=displ, y=cty)) + geom_point()
print(p1)
p1 + theme_classic() # gets rid of the grid screen in the back. better for publication
p1 + theme_linedraw()
p1 + theme_dark() #good for bright points
p1 + theme_base() #looks like base r
p1 + theme_par() #uses current par settings
p1 + theme_void() # just data points
p1 + theme_solarized() #good colors
p1 + theme_economist() # many specialized themes
p1 + theme_grey()
# use theme paraeters to modify font and font size
p1 + theme_classic(base_size = 30, base_family = "serif") # base_family= font, base_size= size of font
p2<- ggplot(data=d,
mapping=aes(x=fl, fill=fl)) + geom_bar()
print(p2)
#flip the two coordinate axes
p2 + coord_flip() + theme_grey(base_size = 20, base_family = "Courier")
# minor theme modification
p1 <- ggplot(data=d,
mapping=aes(x=displ, y=cty)) + geom_point(size=5, shape=21, color="black", fill="coral")
print(p1)
#See cheat sheet for the shapes and colors
p1 <- ggplot(data=d,
mapping=aes(x=displ, y=cty)) + geom_point(size=5, shape=21, color="black", fill="coral") + ggtitle("Hello")
print(p1)
p1 <- ggplot(data=d,mapping=aes(x=displ, y=cty)) + geom_point(size=5, shape=21, color="black", fill="coral") + ggtitle("Hello") + xlab("My x label") + ylab("My Y label") + xlim(0,4)+ ylim(0,20)
p1
g1 <- ggplot(data=d, mapping =aes(x=displ, y=cty)) +
geom_point() +
geom_smooth()
print(g1)
#second graph
g2 <- ggplot(data=d, mapping= aes(x=fl, fill=I("tomato"), color=I("black"))) +
geom_bar(stat="count") +
theme(legend.position = "none")
print(g2)
|
c92b86ec65d32f3b90ec7c1c7824d572ca392914 | 6a9472f0d627bb859ae0b424f61ddce0f0c06a29 | /problemSet2.R | 6430bf7ca18e8e1d0b93376c694486af8c92bb11 | [] | no_license | viren-velacheri/Statistics-Programs | 944ae53c4e4f979b4fa9e9831db5c92bc78038f2 | ccaaf3deea8741b4d71ca3121fd1aa013698bd1c | refs/heads/master | 2020-04-11T20:00:23.728836 | 2018-12-17T01:21:52 | 2018-12-17T01:21:52 | 162,056,171 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,569 | r | problemSet2.R | mlb <- read.csv(file.choose(), header = TRUE)
attach(mlb)
winpct_HR <- lm(WinPct ~ HR)
summary(winpct_HR)
winpct_Doubles <- lm(WinPct ~ Doubles)
summary(winpct_Doubles)
winpct_hitsallowed <- lm(WinPct ~ HitsAllowed)
summary(winpct_hitsallowed)
winpct_strikeouts <- lm(WinPct ~ StrikeOuts)
summary(winpct_strikeouts)
winpct_obp <- lm(WinPct ~ OBP)
summary(winpct_obp)
winpct_obp_hr <- lm(WinPct ~ OBP + HR)
summary(winpct_obp_hr)
winpct_obp_doubles <- lm(WinPct ~ OBP + Doubles)
summary(winpct_obp_doubles)
winpct_obp_hitsallowed <- lm(WinPct ~ OBP + HitsAllowed)
summary(winpct_obp_hitsallowed)
winpct_obp_strikeouts <- lm(WinPct ~ OBP + StrikeOuts)
summary(winpct_obp_strikeouts)
winpct_obp_hitsallowed_HR <- lm(WinPct ~ OBP + HitsAllowed + HR)
summary(winpct_obp_hitsallowed_HR)
winpct_obp_hitsallowed_doubles <- lm(WinPct ~ OBP + HitsAllowed + Doubles)
summary(winpct_obp_hitsallowed_doubles)
winpct_obp_hitsallowed_strikeouts <- lm(WinPct ~ OBP + HitsAllowed + StrikeOuts)
summary(winpct_obp_hitsallowed_strikeouts)
big_model <- lm(WinPct ~ HR + Doubles + HitsAllowed + StrikeOuts + OBP)
summary(big_model)
model2 <- lm(WinPct ~ HR + HitsAllowed + StrikeOuts + OBP)
summary(model2)
model3 <- lm(WinPct ~ HR + HitsAllowed + OBP)
summary(model3)
model4 <- lm(WinPct ~ HitsAllowed + OBP)
summary(model4)
library(leaps)
predictors <- cbind(HR, Doubles, HitsAllowed, StrikeOuts, OBP)
malocp <- leaps(predictors, WinPct, method = 'Cp')
malocp
cbind(malocp$Cp, malocp$which)
adjustedr <- leaps(predictors, WinPct, method = 'adjr2')
cbind(adjustedr$adjr2, adjustedr$which)
|
18fd09bf5c2faf7b67c7aaca01874cab8899228e | 3ddebe6b6a2db6a06a4458064de681e7c8ffb329 | /randomOrchard.R | 5b07ec1df46d696279ec8afb90eb7f6eb81f4653 | [] | no_license | pnandak/wagesOfSin | 7529a44983927d12d548c42cc615fbe48735d9a7 | 47792e3b6560d05f79b941c73b9ca3d2bbbdff36 | refs/heads/master | 2021-01-16T22:08:59.780683 | 2013-03-26T09:35:56 | 2013-03-26T09:35:56 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,432 | r | randomOrchard.R | #################################
# randomOrchard.R
# written by Jet Goodson
# started on 15 March 2013
# contact at jetgoodson@gmail.com
#
# handling of random forest alg on data
##################################
# Set up and run a (parallel) random forest regression via caret.
#
# Args:
#   trainFrame:   data.frame whose FIRST column is the response and the
#                 remaining columns are predictors.
#   samplingSize: sampsize passed through to randomForest (rows per tree).
#   folds:        number of cross-validation folds.
#   repetitions:  repeats for repeated cross-validation.
#   tuneCount:    tuneLength (number of mtry values caret tries).
#   treeCount:    number of trees grown per forest.
#
# Returns: the fitted caret "train" object.
randomOrchard <- function(trainFrame, samplingSize = 1000, folds = 2, repetitions = 5, tuneCount = 10, treeCount = 500) {
  library("randomForest")
  library("caret")
  library('doMC')
  rfMethod <- "parRF" #parRF == parallel random forest, in line is just rf
  validationMethod <- "repeatedcv" #validation method for model
  trainController <- trainControl(
                         method = validationMethod,
                         number = folds,
                         repeats = repetitions,
                         returnResamp = "final",
                         classProbs = FALSE,
                         returnData = FALSE
                         )
  # NOTE(review): the multicore package is long deprecated (its functionality
  # moved into base "parallel"); this branch is a no-op when it is absent.
  if ( require("multicore", quietly = FALSE, warn.conflicts = TRUE) ) {
    trainController$workers <- multicore:::detectCores()
    trainController$computeFunction <- mclapply
    trainController$computeArgs <- list(mc.preschedule = FALSE, mc.set.seed = FALSE)
    cat(c(multicore:::detectCores(), " available cores\n"))
    registerDoMC(cores = multicore:::detectCores())
  }
  cat("Training the orchard\n")
  # Fixes: (1) use the rfMethod variable -- it was previously defined but the
  # call hard-coded "parRF", leaving it dead; (2) pass "ntree" -- randomForest's
  # argument is lower-case "ntree", so the old "nTree" was silently swallowed
  # by "..." and treeCount had no effect.
  theOrchard <- train(trainFrame[,-1], trainFrame[,1],
                      method = rfMethod,
                      tuneLength = tuneCount,
                      trControl = trainController,
                      scale = FALSE,
                      keep.forest = TRUE,
                      sampsize = samplingSize,
                      ntree = treeCount,
                      na.action = na.omit)
  cat("Orchard trained\n")
  return(theOrchard)
}#end of random orchard
# Run the fitted model over a validation set, report disparity statistics on
# the console, and write three diagnostic PNGs to the working directory.
# validationData is expected to have the true outcome in its FIRST column and
# the predictors in the remaining columns.
validateOrchard <- function(model, validationData) {
  library("randomForest")
  library("caret")
  actual <- validationData[, 1]
  cat("Validating RF:\n")
  preds <- predict(model, validationData[, -1])
  cat("RF Predicted head: \n")
  print(head(preds))
  cat("RF Actual head:\n")
  print(head(actual))
  # Disparity = actual minus predicted, summarised in absolute terms
  err <- actual - preds
  absErr <- abs(err)
  cat(" RF Total Disparity = ", sum(absErr), "\n")
  cat(" RF Average Disparity = ", mean(absErr), "\n")
  cat(" RF SD of Disparity = ", sd(absErr), "\n")
  # Diagnostic plot 1: distribution of raw disparities
  png(file = "hist_salaryDisparity_RF.png")
  hist(err, main = "RF Disparity, Actual - Prediction",
       xlab = "Disparity", ylab = "entries", col = "darkorchid4")
  dev.off()
  # Diagnostic plot 2: disparity relative to the actual value
  png(file = "hist_salaryRelDisparity_RF.png")
  hist(err / actual, main = "RF Relative Disparity, (Actual - Prediction)/Actual",
       xlab = "Relative Disparity", ylab = "entries", col = "darkorange1")
  dev.off()
  # Diagnostic plot 3: predicted vs actual scatter
  png(file = "scatter_predictionVSactual_salary_RF.png")
  plot(actual, preds, main = "RF Predicted Salary versus Actual Salary",
       xlab = "Actual Salary", ylab = "Predicted Salary", col = "cornflowerblue")
  dev.off()
  cat("Finished RF validation.\n")
}#end of validateOrchard
# Run the fitted model over test data to produce a prediction table.
#
# Args:
#   model:     a fitted model accepted by predict() (e.g. the caret fit
#              returned by randomOrchard).
#   testFrame: data.frame of predictors for the test set.
#   jobIds:    vector of job identifiers, aligned row-for-row with testFrame.
#
# Returns: a two-column matrix with columns "Id" and "SalaryNormalized".
orchardPredict <- function(model, testFrame, jobIds){
  testPredict <- predict(model, newdata=testFrame, type="raw")
  cat("RF Prediction finished\n")
  results <- cbind(jobIds, testPredict)
  colnames(results) <- c("Id", "SalaryNormalized")
  # BUG FIX: the function previously ended on the colnames<- assignment, so
  # its value was the column-name vector rather than the results table.
  results
}#end of orchard predict
767ebdf62138d7d07f1c4deff9a704c5e19fae64 | a4879863092102da8bf7185513f9a2d6e07eb336 | /man/darfur.Rd | 15bc054524d5a291c87c8f150f7cc06696247ced | [] | no_license | cran/sensemakr | 47be071b9994d898863d05752280e077ebd6f12a | fd10a6ce3bcc691547c07071f057d12da75640f5 | refs/heads/master | 2021-10-27T22:56:34.746995 | 2021-10-08T04:00:02 | 2021-10-08T04:00:02 | 200,671,548 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 3,116 | rd | darfur.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{darfur}
\alias{darfur}
\title{Data from survey of Darfurian refugees in eastern Chad.}
\format{
A data frame with 1276 rows and 14 columns.
\describe{
\item{wouldvote}{If elections were held in Darfur in the future, would you vote? (0/1)}
\item{peacefactor}{A measure of pro-peace attitudes, from a factor analysis of several questions. Rescaled such that 0 is minimally pro-peace and 1 is maximally pro-peace.}
\item{peace_formerenemies}{Would you be willing to make peace with your former enemies? (0/1)}
\item{peace_jjindiv}{Would you be willing to make peace with Janjaweed individuals who carried out violence? (0/1)}
\item{peace_jjtribes}{Would you be willing to make peace with the tribes that were part of the Janjaweed? (0/1)}
\item{gos_soldier_execute}{Should Government of Sudan soldiers who perpetrated attacks on civilians be executed? (0/1)}
\item{directlyharmed}{A binary variable indicating whether the respondent
was personally physically injured during attacks on villages in Darfur
largely between 2003-2004. 529 respondents report being personally injured,
while 747 do not report being injured.}
\item{age}{Age of respondent in whole integer years. Ages in the data
range from 18 to 100.}
\item{farmer_dar}{The respondent was a farmer in
Darfur (0/1). 1,051 respondents were farmers, 225 were not.}
\item{herder_dar}{The respondent was a herder in
Darfur (0/1). 190 respondents were herders, 1,086 were not.}
\item{pastvoted}{The respondent reported having
voted in a previous election before the conflict (0/1). 821 respondents reported
having voted in a previous election, 455 reported not having voted in a
previous election.}
\item{hhsize_darfur}{Household size while in Darfur.}
\item{village}{Factor variable indicating village of respondent. 486
unique villages are accounted for in the data.}
\item{female}{The respondent identifies as female (0/1). 582 respondents are female-identified, 694 are not.}
}
}
\usage{
darfur
}
\description{
Data on attitudes of Darfurian refugees in eastern Chad. The main "treatment"
variable is \code{directlyharmed}, which indicates that the individual was physically
injured during attacks on villages in Darfur, largely between 2003 and 2004.
The main outcome of interest is \code{peacefactor}, a measure of pro-peace
attitudes.
Key covariates include \code{herder_dar}
(whether they were a herder in Darfur), \code{farmer_dar} (whether they were a
farmer in Darfur), \code{age}, \code{female} (indicator for female), and
\code{past_voted} (whether they report having voted in an earlier election,
prior to the conflict).
}
\references{
Cinelli, C. and Hazlett, C. (2020), "Making Sense of Sensitivity: Extending Omitted Variable Bias." Journal of the Royal Statistical Society, Series B (Statistical Methodology).
Hazlett, Chad. (2019) "Angry or Weary? How Violence Impacts Attitudes toward Peace among Darfurian Refugees." Journal of Conflict Resolution: 0022002719879217.
}
\keyword{datasets}
|
330239223affc48bb92108be554aa3b5b8a34d9a | 74cd0f24030fbdf3c9db290be481560f91b9d62d | /plot1.R | 4d7d36f541d625dab62f47e8c79f5e1a1c77c42e | [] | no_license | dpshipley/ExData_Plotting1 | 6dbfda34c5a18f8cb6445775740820ddefbe9aa5 | e6c5fcecd1e2cf61ba3696a43a19c9fe34c6d8d3 | refs/heads/master | 2020-12-25T04:38:40.579420 | 2015-03-06T07:12:01 | 2015-03-06T07:12:01 | 31,729,802 | 0 | 0 | null | 2015-03-05T18:43:46 | 2015-03-05T18:43:46 | null | UTF-8 | R | false | false | 451 | r | plot1.R | library(datasets)
#Read file
dt <- read.table ("household_power_consumption.txt",header = TRUE, sep = ";",stringsAsFactors = F, na.strings="?")
#Create subset based on date range
dt<- subset(dt, (dt$Date == "1/2/2007" | dt$Date== "2/2/2007"))
#Build plot and create png
png("plot1.png", width=480, height= 480)
hist(dt$Global_active_power, col="red", main = "Global Active Power", xlab= "Global Active Power (kilowatts)", ylab= "Frequency")
dev.off()
|
2e21ab2991961a60535203896c898eb50d4f3501 | 9ac398969b1a6951b9aab5f2742445ada5b12ea3 | /movie_reviews_glove.R | b5931cc141a6e416df33e4aaed2140a9b9378806 | [] | no_license | pcode93/movie_reviews | 693334d01deace7196b3cbc427e4a94086c0ed06 | 883ed0516776c6546d3893ba172988d651a3d96f | refs/heads/master | 2021-01-25T10:50:20.108945 | 2017-06-09T18:01:24 | 2017-06-09T18:01:24 | 93,884,121 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,963 | r | movie_reviews_glove.R | source('movie_reviews_get_data.R')
result <- lapply(
list(
c(5,50),c(5,100),
c(7,50),c(7,100),
c(10,100),c(10,150),c(10,200),
c(12,150),c(12,200),
c(15,150),c(15,200),c(15,250),c(15,300),c(15,350),c(15,400),
c(20,300),c(20,350),c(20,400)
),
function(params) {
print(paste('Skip grams window = ', params[1], ', ', 'Word vector size', params[2]))
wordVectors <- getWordVectors(train$review, params[1], 10L, params[2], 20)
wvLen <- length(wordVectors[1,])
wordVectors <- prcomp(wordVectors, scale. = T)$x
train$review <- lapply(
train$review,
function(string) wordsToVectors(string, wordVectors)
)
test$review <- lapply(
test$review,
function(string) wordsToVectors(string, wordVectors)
)
train <- as.data.frame(train)
train <- train[2:3]
train[2:(wvLen+1)] <- t(apply(train[2],1,function(x) unlist(x)))
test <- as.data.frame(test)
test <- test[2:3]
test[2:(wvLen+1)] <- t(apply(test[2],1,function(x) unlist(x)))
lR <- cv.glmnet(x = as.matrix(train[2:(wvLen+1)]), y = train$sentiment, family = 'binomial', alpha = 0,type.measure = "auc",nfolds = 4,thresh = 1e-3,maxit = 1e3)
ac1 <- accuracy("LR", lR, as.matrix(test[2:(wvLen+1)]), test$sentiment)
linSVM <- svm(as.matrix(train[2:(wvLen+1)]), y=train$sentiment, type = 'C', kernel = 'linear')
ac2 <- accuracy("SVM", linSVM, test[2:(wvLen+1)], test$sentiment)
rF <- randomForest(as.factor(sentiment) ~ ., data = train)
ac3 <- accuracy("RF", rF, test[2:(wvLen+1)], test$sentiment)
nB <- naiveBayes(as.factor(sentiment) ~ ., data = train)
ac4 <- accuracy("NB", nB, test[2:(wvLen+1)], test$sentiment)
xgb <- xgboost(data = as.matrix(train[2:(wvLen+1)]), label = train$sentiment, nthread = 2, max_depth = 2, nrounds = 200, objective = "binary:logistic", verbose = 0)
ac5 <- accuracy("XGB", xgb, as.matrix(test[2:(wvLen+1)]), test$sentiment)
list(ac1,ac2,ac3,ac4,ac5)
})
result <- as.data.frame(matrix(unlist(result), nrow=length(unlist(result[1]))))
colnames(result) <- c(
'(5,50)','(5,100)',
'(7,50)','(7,100)',
'(10,100)','(10,150)','(10,200)',
'(12,150)','(12,200)',
'(15,150)','(15,200)','(15,250)','(15,300)','(15,350)','(15,400)',
'(20,300)','(20,350)','(20,400)'
)
rownames(result) <- c(
'LR', 'SVM', 'RF', 'NB', 'XGB'
)
ngrams <- factor(colnames(result), levels = colnames(result))
par(mar = c(6.5, 6.5, 0.5, 0.5), mgp = c(4, 1, 0))
plot(ngrams, result[1,], axes=F, col="blue", 'l', xlab = 'Skip gram window, vector size', ylab='AUC',ylim=c(0.5,1))
axis(2)
axis(1, at=seq_along(result[1,]),labels=as.character(ngrams), las=2)
lines(ngrams, result[2,], col="red")
lines(ngrams, result[3,], col="green")
lines(ngrams, result[4,], col="black")
lines(ngrams, result[5,], col="purple")
legend('topright', rownames(result), lty=c(1,1), col=c('blue','red','green','black','purple'), ncol=3)
|
85738061e6d4515a7682112108fee9fa419e46ee | c980f79cc06629ba0c5f2e970aeb60ac94edb048 | /forecasting.R | b0fe1085f1fe17a111462bfc5de7a38b1ea9ce25 | [] | no_license | claytonglasser/siuslaw-basin-precipitation | 605cd0dc11335eab240cff4a5e6121170167b30e | 799555de260cc5f236d027ea3b1153f132c950b8 | refs/heads/master | 2020-03-29T12:57:03.699506 | 2019-03-15T18:37:02 | 2019-03-15T18:37:02 | 149,930,031 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,085 | r | forecasting.R | library(tidyverse)
library(ggplot2)
install.packages('forecast')
library(forecast)
install.packages('tseries')
library(tseries)
install.packages('TSA')
library(TSA)
#Wrangling the data
##Read in cleaned up csv as dataframe
clean.csv <- read_csv("siuslaw_basin_climate_clean.csv")
##Convert observations to needed object types
clean.csv$STATION <-as.factor(clean.csv$STATION)
clean.csv$NAME <-as.factor(clean.csv$NAME)
clean.csv$LATITUDE <-as.numeric(clean.csv$LATITUDE)
clean.csv$LONGITUDE <-as.numeric(clean.csv$LONGITUDE)
clean.csv$ELEVATION <-as.numeric(clean.csv$ELEVATION)
clean.csv$PRCP <-as.numeric(clean.csv$PRCP)
clean.csv$water_date <-as.Date(clean.csv$water_date)
##Subset DF down to only those stations with viable PRCP density within the selected timeframe
siuslaw <- clean.csv %>% select(STATION, NAME, LATITUDE, LONGITUDE, ELEVATION, DATE, water_date, PRCP, SNOW) %>% group_by(NAME, water_date) %>% arrange(NAME) %>% filter(STATION == "US1ORLA0076" | STATION == "US1ORLA0003" | STATION == "US1ORLA0031" | STATION == "US1ORLA0091" | STATION == "USC00352973" | STATION == "USC00352972" | STATION == "USC00353995" | STATION == "US1ORLA0171" | STATION == "USC00355204" | STATION == "US1ORLA0132" | STATION == "USC00353995") %>% filter(DATE >= "2007-10-01" & DATE < "2017-10-01")
head(siuslaw)
#creating inidivdual objects for distinct locations
US1ORLA0076 <- siuslaw %>% filter(STATION == "US1ORLA0076")
US1ORLA0003 <- siuslaw %>% filter(STATION == "US1ORLA0003")
US1ORLA0031 <- siuslaw %>% filter(STATION == "US1ORLA0031")
US1ORLA0091 <- siuslaw %>% filter(STATION == "US1ORLA0091")
USC00352973 <- siuslaw %>% filter(STATION == "USC00352973")
USC00352972 <- siuslaw %>% filter(STATION == "USC00352972")
USC00353995 <- siuslaw %>% filter(STATION == "USC00353995")
US1ORLA0171 <- siuslaw %>% filter(STATION == "US1ORLA0171")
USC00355204 <- siuslaw %>% filter(STATION == "USC00355204")
US1ORLA0132 <- siuslaw %>% filter(STATION == "US1ORLA0132")
USC00353995 <- siuslaw %>% filter(STATION == "USC00353995")
#creating PRCP time series for each location (STATION)
US1ORLA0076_TS <- ts(US1ORLA0076$PRCP, start=c(2007, 10), end=c(2017, 9), frequency = 365)
US1ORLA0003_TS <- ts(US1ORLA0003$PRCP, start=c(2007, 10), end=c(2017, 9), frequency = 365)
US1ORLA0031_TS <- ts(US1ORLA0031$PRCP, start=c(2007, 10), end=c(2017, 9), frequency = 365)
US1ORLA0091_TS <- ts(US1ORLA0091$PRCP, start=c(2007, 10), end=c(2017, 9), frequency = 365)
USC00352973_TS <- ts(USC00352973$PRCP, start=c(2007, 10), end=c(2017, 9), frequency = 365)
USC00352972_TS <- ts(USC00352972$PRCP, start=c(2007, 10), end=c(2017, 9), frequency = 365)
US1ORLA0171_TS <- ts(US1ORLA0171$PRCP, start=c(2007, 10), end=c(2017, 9), frequency = 365)
USC00355204_TS <- ts(USC00355204$PRCP, start=c(2007, 10), end=c(2017, 9), frequency = 365)
US1ORLA0132_TS <- ts(US1ORLA0132$PRCP, start=c(2007, 10), end=c(2017, 9), frequency = 365)
USC00353995_TS <- ts(USC00353995$PRCP, start=c(2007, 10), end=c(2017, 9), frequency = 365)
#SNOW time series #not used
US1ORLA0076_TS_SNOW <- ts(US1ORLA0076$SNOW, start=c(2007, 10), end=c(2017, 9), frequency = 365)
US1ORLA0003_TS_SNOW <- ts(US1ORLA0003$SNOW, start=c(2007, 10), end=c(2017, 9), frequency = 365)
US1ORLA0031_TS_SNOW <- ts(US1ORLA0031$SNOW, start=c(2007, 10), end=c(2017, 9), frequency = 365)
US1ORLA0091_TS_SNOW <- ts(US1ORLA0091$SNOW, start=c(2007, 10), end=c(2017, 9), frequency = 365)
USC00352973_TS_SNOW <- ts(USC00352973$SNOW, start=c(2007, 10), end=c(2017, 9), frequency = 365)
USC00352972_TS_SNOW <- ts(USC00352972$SNOW, start=c(2007, 10), end=c(2017, 9), frequency = 365)
US1ORLA0171_TS_SNOW <- ts(US1ORLA0171$SNOW, start=c(2007, 10), end=c(2017, 9), frequency = 365)
USC00355204_TS_SNOW <- ts(USC00355204$SNOW, start=c(2007, 10), end=c(2017, 9), frequency = 365)
US1ORLA0132_TS_SNOW <- ts(US1ORLA0132$SNOW, start=c(2007, 10), end=c(2017, 9), frequency = 365)
USC00353995_TS_SNOW <- ts(USC00353995$SNOW, start=c(2007, 10), end=c(2017, 9), frequency = 365)
# Count missing observations before cleaning
sum(is.na(US1ORLA0076_TS))
#--------------------------------- Forecasting
#plot the series and visually examine it for any outliers, volatility, or irregularities.
autoplot(US1ORLA0076_TS)
#clean up outliers and impute missing values
US1ORLA0076_TS_Clean = tsclean(US1ORLA0076_TS, replace.missing = TRUE)
sum(is.na(US1ORLA0076_TS_Clean))
autoplot(US1ORLA0076_TS_Clean)
#exploratory 30-day moving average (not used as a data set, purely visual)
US1ORLA0076_TS_ma = ma(US1ORLA0076_TS_Clean, order=30)
plot(US1ORLA0076_TS_Clean, type="l", col="black")
lines(US1ORLA0076_TS_ma,col="red",lwd=3)
#decompose and deseasonalize the data
US1ORLA0076_decomp = stl(US1ORLA0076_TS_Clean, s.window="periodic") #decompose the cleaned up data
plot(US1ORLA0076_decomp) #strong seasonal pattern, visible in residuals
US1ORLA0076_deseasonal_cnt <- seasadj(US1ORLA0076_decomp) #deseasonalize the decomposed data
plot(US1ORLA0076_deseasonal_cnt) #this is what is fed into the ARIMA
#test for stationarity (augmented Dickey-Fuller; low p-value => stationary)
adf.test(US1ORLA0076_TS_Clean, alternative = "stationary") # Dickey-Fuller = -9.9361, Lag order = 15, p-value = 0.01 indicates data is stationary. Thus auto.arima does not suggest differencing.
#Autocorrelations and Choosing Model Order
#testing the clean time series
Acf(US1ORLA0076_TS_Clean, main='') #big first lag, sine wave pattern
Pacf(US1ORLA0076_TS_Clean, main='') #cuts off after first few lags
#Add differencing (first difference of the deseasonalized series)
US1ORLA0076_TS_d1 = diff(US1ORLA0076_deseasonal_cnt, differences = 1)
plot(US1ORLA0076_TS_d1) #differenced but did it really chnage anything? Overdifferenced?
adf.test(US1ORLA0076_TS_d1, alternative = "stationary") #Dickey-Fuller = -24.593, Lag order = 15, p-value = 0.01
Acf(US1ORLA0076_TS_d1, main='') #ery large spike at lags 1 and 2 and no other significant spikes, indicating that in the absence of differencing an AR(3) model should be used or an AR(3) with differencing
Pacf(US1ORLA0076_TS_d1, main='') #similar structure but with more spikes; might beneftit from some MA terns
#Fitting an ARIMA model (auto.arima searches p/d/q by AICc)
auto.arima(US1ORLA0076_deseasonal_cnt, seasonal=FALSE)
#------------------------------------------------------
#ARIMA(3,0,1) with non-zero mean
#Coefficients:
# ar1 ar2 ar3 ma1 mean
#1.3409 -0.3271 -0.0288 -0.9277 0.1251
#s.e. 0.0236 0.0276 0.0182 0.0166 0.0121
#sigma^2 estimated as 0.02349: log likelihood=1668.77
#AIC=-3325.54 AICc=-3325.51 BIC=-3288.32
#------------------------------------------------------
#Evaluate and Iterate: inspect residuals of the auto fit and two manual orders
US1ORLA0076_fit<-auto.arima(US1ORLA0076_deseasonal_cnt, seasonal=FALSE)
tsdisplay(residuals(US1ORLA0076_fit), lag.max=750, main='auto.arima 3,0,1') #these residuals don't quite seem lik white noise, and there's alot of spikes, but this is the best one I found
US1ORLA0076_fit2 = arima(US1ORLA0076_deseasonal_cnt, order=c(2,0,1))
tsdisplay(residuals(US1ORLA0076_fit2), lag.max=750, main='ARIMA 2,0,1')
US1ORLA0076_fit3 = arima(US1ORLA0076_deseasonal_cnt, order=c(1,0,1))
tsdisplay(residuals(US1ORLA0076_fit3), lag.max=750, main='1,0,1')
#forecasting one year (365 steps) ahead with the chosen fit
US1ORLA0076_fcast <- forecast(US1ORLA0076_fit, h=365)
plot(US1ORLA0076_fcast) #forecast sucks; very naive
#hold-out set: observations 3001 onward (ts() re-indexes to plain 1..n)
US1ORLA0076_predictthis <- window(ts(US1ORLA0076_deseasonal_cnt), start=3001)
plot(US1ORLA0076_predictthis)
#leave-in (training) set: observations 1..3000, refit with the auto order
US1ORLA0076_fit_leavethis = arima(window(ts(US1ORLA0076_deseasonal_cnt), start=1, end=3000), order=c(3,0,1))
plot(window(ts(US1ORLA0076_deseasonal_cnt), start=1, end=3000))
#forecast the hold-out horizon and overlay the actuals
US1ORLA0076_fcast_predictthis <- forecast(US1ORLA0076_fit_leavethis,h=365)
plot(US1ORLA0076_fcast_predictthis, col="black")
lines(ts(US1ORLA0076_predictthis),col="red") #plots at the front of graph instead of chronologically
#Evaluate and Iterate
##adding seasonality back in
US1ORLA0076_fit_w_seasonality = auto.arima(US1ORLA0076_deseasonal_cnt, seasonal=TRUE)
#forecast
US1ORLA0076_seas_fcast <- forecast(US1ORLA0076_fit_w_seasonality, h=365)
plot(US1ORLA0076_seas_fcast) #forecast still sucks
#using ARIMAX to add SNOW data: not enough snowfall to make this worth it
88b6dae7041f0f29ac7639be5c1fd89a2dadda8a | 9eac9f8e7495d916f7596c4444461521b1a39086 | /scripts/gtf_exons_union_to_gtf.R | 2c1ee502848f15d0af62221341f619700b2b4dbd | [
"Apache-2.0"
] | permissive | uniqueg/scripts | bbb42d455196f8e047df2681661a02d38e4a762f | 9fdcb93f740c0d353b8f9c0fe3ceab6a941af87d | refs/heads/master | 2023-04-08T17:08:00.911197 | 2023-03-16T08:46:40 | 2023-03-16T08:46:40 | 211,389,152 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,321 | r | gtf_exons_union_to_gtf.R | #!/usr/bin/Rscript
#==================#
#   HEADER START   #
#==================#
### Created: Nov 25, 2014
### Author: Alexander Kanitz
### Company: Zavolan Group, Biozentrum, University of Basel
#==================#
#    HEADER END    #
#==================#

#===================#
#   OPTIONS START   #
#===================#

#---> LOAD OPTIONS PARSER <---#
if ( suppressWarnings(suppressPackageStartupMessages(require("optparse"))) == FALSE ) { stop("Package 'optparse' required!\nExecution aborted.") }

#---> GET SCRIPT NAME <---#
# NOTE(review): position 4 of commandArgs() assumes invocation via
# 'Rscript <script>'; verify if other invocation styles must be supported.
script <- sub("--file=", "", basename(commandArgs(trailingOnly=FALSE)[4]))

#---> DESCRIPTION <---#
description <- "From a GTF gene set annotation file, generates a GTF file containing the 'pseudoexons' resulting from the union of all exons of all transcripts of each gene (one row per gene).\n"
author <- "Author: Alexander Kanitz, Biozentrum, University of Basel"
created <- "Created: 25-NOV-2014"
version <- "Version: 1.0 (25-NOV-2014)"
requirements <- "Requires: optparse, rtracklayer"
msg <- paste(description, author, created, version, requirements, sep="\n")

#---> DEFINE COMMAND-LINE OPTIONS <---#
option_list <- list(
	make_option(c("-i", "--gtf-in"), action="store", type="character", default="", help="REQUIRED: GTF input filename", metavar="file"),
	make_option(c("-o", "--gtf-out"), action="store", type="character", default="", help="REQUIRED: GTF output filename", metavar="file"),
	make_option(c("-h", "--help"), action="store_true", default=FALSE, help="Show this information and die"),
	make_option(c("-u", "--usage"), action="store_true", default=FALSE, dest="help", help="Show this information and die"),
	make_option(c("-v", "--verbose"), action="store_true", default=TRUE, help="Print log messages [DEFAULT]"),
	make_option(c("-q", "--quiet"), action="store_false", dest="verbose", help="Shut up!")
)

#---> PARSE COMMAND-LINE OPTIONS <---#
opt_parser <- OptionParser(usage="Usage: %prog (OPTIONS) --gtf-in [FILE] --gtf-out [FILE]\n", option_list = option_list, add_help_option=FALSE, description=msg)
opt <- parse_args(opt_parser)

#---> VALIDATE COMMAND-LINE OPTIONS <---#
## Die if any required arguments are missing...
if ( opt$`gtf-in` == "" || opt$`gtf-out` == "" ) {
	write("[ERROR] Required argument(s) missing!\n\n", stderr())
	stop(print_help(opt_parser))
}

#===================#
#    OPTIONS END    #
#===================#

#================#
#   MAIN START   #
#================#

#---> START MESSAGE <---#
if ( opt$verbose ) cat("Starting '", script, "'...\n\n", sep="")

#---> LOAD LIBRARIES <---#
# Print status message
if ( opt$verbose ) cat("Loading required libraries...\n")
# Load libraries
if ( suppressWarnings(suppressPackageStartupMessages(require("rtracklayer"))) == FALSE ) { stop("Package 'rtracklayer' required!\nExecution aborted.") }

#---> IMPORT GTF <---#
# Print status message
if ( opt$verbose ) cat("Reading input file '", basename(opt$`gtf-in`), "'...\n", sep="")
# Use rtracklayer::import method to import GTF file to GRanges object
# NOTE(review): 'asRangedData' has been removed from recent rtracklayer
# releases; drop the argument when upgrading Bioconductor.
gr <- import(con=opt$`gtf-in`, format="gtf", asRangedData=FALSE)

#---> SUBSET EXONS <---#
# Print status message
if ( opt$verbose ) cat("Subsetting exons...\n")
# Subset EXONS (discards all other categories, e.g. CDS, start_codon etc.)
gr <- gr[values(gr)[["type"]] == "exon"]
# Test if at least one region is returned
if ( length(gr) == 0 ) stop("No entries of type 'exon' in input file! Check file.\nExecution halted.\n")

#---> COMPILE LIST OF GENES <---#
# Print status message
if ( opt$verbose ) cat("Grouping exons by genes...\n")
# Split exons GRanges into GRangesList by 'gene_id'
grl <- split(gr, gr$gene_id)

#---> TAKE UNION OF EXONS <---#
# Print status message
if ( opt$verbose ) cat("Merging exons...\n")
# Make union of exons to generate pseudoexons
grl <- reduce(grl, min.gapwidth=1L)

#---> UNLIST GROUPED PSEUDOEXONS <---#
# Print status message
if ( opt$verbose ) cat("Ungrouping 'pseudoexons'...\n")
# Unlist GRangesList; unlist() stores each element's gene ID in the names
gr <- unlist(grl)

#---> EXPORT GTF <---#
# Print status message
if ( opt$verbose ) cat("Writing output to GTF file '", opt$`gtf-out`, "'...\n", sep="")
# Re-attach the gene ID as a metadata column so it survives GTF export
values(gr)[["gene_id"]] <- names(gr)
# Write output file
# BUG FIX: the original exported the still-grouped GRangesList 'grl',
# which made the unlist() step above dead code and contradicted the log
# messages; export the ungrouped pseudoexon GRanges 'gr' instead.
export(object=gr, con=opt$`gtf-out`, format="gtf")

#---> END MESSAGE <---#
if ( opt$verbose ) cat("Done.\n")

#================#
#    MAIN END    #
#================#
|
320145b30f772f08718b2856f858151360f44304 | 7abce5f5d467b244bbd3f65ed20dfeaf16ff41a9 | /scripts/02-Plot.R | a6344d566233a6a666f8d37616dfe5c18157b5f5 | [] | no_license | majazaloznik/PH13.01.FS | 739ef339ac322db0cbe189143ccb12f958a632fc | 013258ab8929eafc24fc48abda99379e6c9198e4 | refs/heads/master | 2020-06-10T23:05:22.517871 | 2017-04-04T13:30:28 | 2017-04-04T13:30:28 | 75,848,768 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,855 | r | 02-Plot.R | ###############################################################################
## plots
###############################################################################
## preliminaries
###############################################################################
load("data/imported.RData")
library(dplyr)
library(tidyr)
## functions
###############################################################################
# Subset the global survey data frame `working.df` to the x-th distinct
# country, where countries are ordered as returned by unique().
#
# @param x Integer index into unique(working.df$Country..name).
# @return The rows of working.df belonging to that country.
FunSubsetDF <- function(x) {
  target <- unique(working.df$Country..name)[x]
  working.df %>%
    filter(Country..name == target)
}
# Subset the global TFR data frame `working.tfr` to the x-th distinct
# country of `working.df` (same index convention as FunSubsetDF, so both
# subsets refer to the same country).
#
# @param x Integer index into unique(working.df$Country..name).
# @return The rows of working.tfr belonging to that country.
FunSubsetTFR <- function(x) {
  target <- unique(working.df$Country..name)[x]
  working.tfr %>%
    filter(Country.or.area == target)
}
# Draw a single panel for the i-th country in `working.df`.
#   policy = "growth":    population growth rate (PGR) series; background
#                         shading encodes the government's policy on growth.
#   policy = "fertility": total fertility rate (TFR) observations; shading
#                         encodes the policy on fertility level.
# Depends on the globals working.df / working.tfr (via FunSubsetDF /
# FunSubsetTFR) and expects the caller (FunPlots) to have set up the
# two-row layout with par(mfrow = c(2, 1)).
FunPlot2 <- function(i, policy = "growth"){
  # Policy-survey years; consecutive pairs bound the shaded rectangles.
  years <- c(1976, 1986, 1996, 2001, 2003, 2005, 2007, 2009, 2011, 2013, 2015, 2016)
  x <- FunSubsetDF(i)     # policy codes + growth rates for country i
  x.t <- FunSubsetTFR(i)  # TFR observations for country i
  # Fixed y-limits so all countries share comparable axes.
  min.max <- if (policy == "growth") c(-3,7) else c(0,9)
  if (policy == "growth"){
    # Top panel: no x-axis (it is drawn by the fertility panel below).
    par(mar = c(1,4,1,1))
    plot(NA,
         xlim = c(min(years), max(years)),
         ylim = min.max,
         xlab = "",
         ylab = "",
         bty = "n",
         axes = FALSE,xaxs="r", yaxs="r")} else {
    # Bottom panel: draws the shared x-axis.
    par(mar = c(2,4,0,1))
    plot(NA,
         xlim = c(min(years), max(years)),
         ylim = min.max,
         xlab = "",
         ylab = "",
         bty = "n",
         axes = FALSE)
    # Countries 1, 9, 12, 22 get a shortened axis starting at 1996 —
    # presumably because their series begin later; TODO confirm against
    # working.df.
    if (i %in% c(1,9,12,22)) {axis(1, at = years[3:12])}else{
      axis(1, at = years)}
  }
  # One colour per policy code (codes 1-5 index into this vector).
  palette <- c("lightpink",
               "goldenrod2",
               "seashell4",
               "white",
               "darkolivegreen3")
  col <- if (policy == "growth") x$Policy.on.growth[x$year %in% years] else
    x$Policy.on.fertility.level[x$year %in% years]
  # Background: one coloured rectangle per inter-survey period...
  rect(years[1:11], rep(min.max[1],11),
       years[2:12], rep(min.max[2],11),
       border = "white", lty = 3,
       col = palette[col])
  # ...overlaid with white hatching (matches the legend figure).
  rect(years[1:11], rep(min.max[1],11),
       years[2:12], rep(min.max[2],11),
       col = "white", density = 12, angle = 30, lwd = 3)
  if (policy == "growth") {
    # White reference line at zero growth, then the PGR points.
    lines(c(1976, 2016), c(0,0), col = "white", lwd = 3)
    points(x$year, x$growth.rate, pch = 16, cex = 1.3)
  } else {
    # White reference line at replacement fertility (2.1), then TFR points.
    lines(c(1976, 2016), c(2.1,2.1), col = "white", lwd = 3)
    points(x.t$TimeMid, x.t$DataValue, pch = 21, cex = 1.3, bg = "white", lwd = 2)
  }
  # y-axis position and label offset differ for the short-axis countries.
  if (i %in% c(1,9,12,22)) {axis(2, las = 2, pos = 1995)}else{
    axis(2, las = 2, pos = 1975 )}
  if (i %in% c(1,9,12,22)) {poz <- -17} else {poz <- 2}
  if (policy == "growth"){ mtext("PGR", side = 2, line = poz )}else{
    mtext("TFR", side = 2, line = poz )}
}
# Draw the two-panel country chart for the x-th country: population growth
# rate (top) and total fertility rate (bottom).
FunPlots <- function(x) {
  # Stacked layout; xpd = TRUE allows drawing outside the plot region.
  par(mfrow = c(2, 1), xpd = TRUE)
  for (pol in c("growth", "fertility")) {
    FunPlot2(x, policy = pol)
  }
}
## plots
###############################################################################
height <- 5.5
width <- 10
# One entry per country: the index passed to FunPlots() mapped to the output
# file stem. Listed in the original plotting order (Latin America & Caribbean,
# ex-Soviet, Arab & Persian, East Asia, Mediterranean, Africa).
# REFACTOR: the original repeated the FunPlots()/dev.copy2eps() pair 24 times;
# the loop below produces the same figures in the same order.
country.files <- c("2"  = "barbados",     "3"  = "chile",
                   "5"  = "cuba",         "24" = "uruguay",
                   "1"  = "armenia",      "9"  = "georgia",
                   "12" = "kazakhstan",   "22" = "turkmenistan",
                   "10" = "iran",         "13" = "kuwait",
                   "16" = "qatar",        "18" = "saudi.arabia",
                   "21" = "turkey",       "23" = "uae",
                   "4"  = "china",        "7"  = "dprk",
                   "15" = "mongolia",     "17" = "korea",
                   "19" = "singapore",    "20" = "thailand",
                   "6"  = "cyprus",       "11" = "israel",
                   "8"  = "gabon",        "14" = "mauritius")
# Draw each country's two-panel chart on screen and copy it to an EPS file.
for (i in names(country.files)) {
  FunPlots(as.integer(i))
  dev.copy2eps(file = paste0("../figures/", country.files[[i]], ".eps"),
               height = height, width = width)
}
## LEGEND
#################################
# Stand-alone legend figure: four policy-colour swatches (labelled a-d)
# plus the two point styles used in the country panels (A = TFR circles,
# B = PGR dots). Written to ../figures/ledge.eps.
palette <- c( "seashell4",
              "lightpink",
              "goldenrod2",
              "darkolivegreen3")
par(mfrow = c(1,1))
par(mar = c(15,1,1,20))   # large margins so the legend strip sits mid-device
# Empty canvas; every element is drawn manually below.
plot(0,0, ylim = c(0,1), xlim = c(0,4),type = "n", bty = "n", axes = FALSE, xlab = "", ylab = "")
# Colour swatches...
rect(0:3, rep(0, 4), 1:4,rep(1,4),
     col = palette, border = "white")
# ...overlaid with the same white hatching used in the country panels.
rect(0:3, rep(0, 4), 1:4,rep(1,4),
     col = "white", density = 12, angle = 30, lwd = 3)
# Swatch labels (d, c, b, a from left to right), rotated 90 degrees.
text(letters[4:1],
     x = (0:3)+0.5, y = -0.1, srt=90)
# White horizontal reference line, as drawn in the data panels.
lines(c(0,4), c(0.5, 0.5), col = "white", lwd = 3)
# Example markers: filled dots (growth-rate series) and open circles (TFR).
points(seq(0.5,3.5, length = 10),rep(0.75, 10), pch = 16, cex = 1.3)
points(seq(0.5,3.5, length = 10),rep(0.25, 10), pch = 21, cex = 1.3, bg = "white", lwd = 2)
text(LETTERS[1:2],
     x = 4.3, y = c(0.25, 0.75))
dev.copy2eps(file="../figures/ledge.eps", height=height, width=width)
|
a59c4609306dffe58acb39bec25d14e85f5fc6e2 | ae4832aa6e2c608c84fb67c997f10c179ece18df | /source/truncated_normal_distr_mean.R | f132acdb86c7373a336787de96b1cb9d762b1694 | [] | no_license | SpyrosSpiliopoulos/snippets | fe35a2c93ffa8e5244ff6d1e9fb67eeaaadda196 | a93d5d3bb1f7392228d2aa71b67d2f16f85e2b10 | refs/heads/master | 2020-12-15T00:56:52.217438 | 2019-06-27T17:54:18 | 2019-06-27T17:54:18 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,742 | r | truncated_normal_distr_mean.R | # |-----------------------------------------------------------------------------------------------------|
# | Project: Means of truncated normal distributions |
# | Script: Means of truncated normal distributions |
# | Author: Davit Sargsyan |
# | Created: 11/23/2016 |
# | Source: https://assessingpsyche.wordpress.com/2014/06/04/using-the-truncated-normal-distribution/ |
# |-----------------------------------------------------------------------------------------------------|
# Data simulation is based on reported values from:
# "Efficacy and safety of alirocumab in reducing lipids and cardiovascular events",
# Robinson et al, The New England Journal of Medicine, April 2015
# https://www.ncbi.nlm.nih.gov/pubmed/25773378
# Data----
mu <- 48.3
sem <- 0.9
N <- 1530
std <- sem*sqrt(N)
x <- seq(0, 100, 0.1)
y <- dnorm(x = x,
mean = mu,
sd = std)
# Mean of a normal distribution with mean `mu` and standard deviation
# `sigma`, truncated to the interval [a, b]. Uses the standard closed form
#   E[X | a <= X <= b] = mu + sigma * (phi(alpha) - phi(beta)) /
#                                     (Phi(beta) - Phi(alpha))
# with alpha = (a - mu)/sigma, beta = (b - mu)/sigma, where phi/Phi are the
# standard normal density and CDF. The defaults (a = -Inf, b = Inf)
# reproduce the untruncated mean.
MeanNormalTruncated <- function(mu = 0,
                                sigma = 1,
                                a = -Inf,
                                b = Inf){
  alpha <- (a - mu) / sigma
  beta <- (b - mu) / sigma
  shift <- (dnorm(alpha) - dnorm(beta)) / (pnorm(beta) - pnorm(alpha))
  mu + sigma * shift
}
# Calculate and plot expected values for given thresholds----
# th <- 15
th <- 25   # LDL threshold splitting the distribution into two groups
# Expected LDL among subjects below the threshold (truncated to [0, th])...
mu1 <- MeanNormalTruncated(mu = mu,
                           sigma = std,
                           a = 0,
                           b = th)
mu1
# ...and among subjects above it (truncated to [th, 100]).
mu2 <- MeanNormalTruncated(mu = mu,
                           sigma = std,
                           a = th,
                           b = 100)
mu2
# Density curve with the two truncated regions hatched and their means marked.
plot(y ~ x,
     type = "l",
     ylim = c(0, 0.015),
     xlab = "LDL",
     ylab = "Probability",
     main = paste(" Simulation of Alirocumab Effect on LDL",
                  "\n Mean = 48.3, SEM = 0.9, N = 1530, Threshold =",
                  th))
# Hatch the below-threshold region [0, th].
polygon(x = c(0,
              seq(0, th, 0.1),
              th),
        y = c(0,
              dnorm(x = seq(0, th, 0.1),
                    mean = mu,
                    sd = std),
              0),
        angle = 45,
        density = 10)
# Hatch the above-threshold region [th, 100] with the opposite slant.
polygon(x = c(th,
              seq(th, 100, 0.1),
              100),
        y = c(0,
              dnorm(x = seq(th, 100, 0.1),
                    mean = mu,
                    sd = std),
              0),
        angle = -45,
        density = 10)
# Dashed verticals at the two truncated means, labelled with rounded values.
abline(v = c(mu1, mu2),
       lty = 2)
text(x = c(mu1, mu2),
     y = c(0.008, 0.012),
     labels = round(c(mu1, mu2)))
42d0f5b38728c276ab8ecea563f88e4428e842d7 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/wpp2017/examples/migration.Rd.R | 2617301c4d92fb83e421a1a554bae05772f06e65 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 169 | r | migration.Rd.R | library(wpp2017)
### Name: migration
### Title: Dataset on Migration
### Aliases: migration
### Keywords: datasets
### ** Examples
data(migration)
str(migration)
|
9f19723fcfe4c8e2138fb41d7990d1e3e5cf9ea2 | 898b56496da3b29740a17f3a91654e512eaa7a18 | /R_trivial_simulation/zipf.R | 8e0ff0f1dc56822963cf584bbd60d6c35004f141 | [] | no_license | JunpengGao233/R_simulation | e6516ed1ebb3f813740792be10b71cf1b286d543 | 4fc837fc3b49c2ed6dd0a63e2e5defe8b94baad3 | refs/heads/master | 2020-04-12T15:45:19.766105 | 2018-12-21T10:04:28 | 2018-12-21T10:04:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 685 | r | zipf.R | zipf<-function(a)
{trials=10^6
simlist<- numeric(trials)
simlist[1]<-2
for(i in 2:trials){
if(simlist[i-1]==1){
first=sample(c(-1,1),1)
if(first==1){
simlist[i]=1}
else{
p<- (1/2)^(a)
new<-sample(c(1,2),1,prob=c(1-p,p))
simlist[i]<-new
}}
else{leftright<- sample(c(-1,1),1)
if(leftright==-1){
simlist[i]<- simlist[i-1]-1}else{
p<- (simlist[i-1]/(simlist[i-1]+1))^(a)
simlist[i]<-sample(c(simlist[i-1],
1+simlist[i-1]),1,prob=c(1-p,p))
return(simlist)
}}}}
# Estimate the stationary (power-law) distribution from the empirical state
# frequencies of one long chain, then plot the first eight states.
# BUG FIX: `trials` was previously defined only inside zipf(), so the
# normalisation below failed with an "object not found" error; define the
# chain length at top level before using it.
trials <- 10^6
tab <- table(zipf(1)) / trials
print(tab[1:8])
barplot(tab[1:8], main = 'the powerlaw limiting distribution',
        xlab = 'state', ylab = 'probability')
51731bf3e1630b8dfabdcb7bb797afaadd872a5f | abe6814c80f5839131e726069762708ec9ed0ce6 | /scripts/013 Time Series Predictors from Scratch.R | 83209eabb0ff3ab67792f5a0330f21f6abca18d7 | [] | no_license | guariglia/HDA | 3345d260cf6dac4cac89cee5b90b9ce124974263 | 53bcf3dcb24fd220b78df790676c141c8a84f213 | refs/heads/master | 2023-07-14T13:22:42.597632 | 2021-08-24T11:00:10 | 2021-08-24T11:00:10 | 336,018,665 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,489 | r | 013 Time Series Predictors from Scratch.R | # This is an extra script in which I am trialling:
# Adding date predictors into the time series model for the full dataset:
# Time of day
# Day of week
# BST vs GMT
# Removal of 5am peak
# Linear modelling and plotting (nb. linear model not appropriate due to heteroscedasticity)
setwd("~/summerproj/data/")
library(dplyr)
library(lubridate)
library(chron)
# Full dataset import
chunkdf <- read.csv('000_shout_texts_10minchunk.csv') %>% mutate(chunk = as.POSIXct(chunk, format="%Y-%m-%d %H:%M:%S",tz="GMT"),texts = n) %>% dplyr::select(chunk,texts)
str(chunkdf)
head(chunkdf)
tail(chunkdf)
class(datetime$chunk)
class(chunkdf$chunk)
# Making a full set of timestamps from start to finish
dates <- seq(from = ymd('2018-05-13'), to = ymd('2021-05-13'), by='days')
hm <- merge(0:23, seq(0, 50, by = 10))
datetime <- merge(dates, chron(time = paste(hm$x, ':', hm$y, ':', 0)))
colnames(datetime) <- c('date', 'time')
datetime$dt <- as.POSIXct(paste(datetime$date, datetime$time),tz="GMT")
datetime <- as.data.frame(datetime[order(datetime$dt),'dt'])
row.names(datetime) <- NULL
colnames(datetime) <- c("chunk")
df <- merge(datetime, chunkdf, by=c("chunk"),all.x=TRUE)
df[is.na(df)] <- 0
#plot(df$chunk,df$texts,type='l')
#summary(df)
#head(df)
#class(df$chunk)
# Adding predictors
df <- df %>% mutate(localtime = strftime(chunk, format="%H:%M:%S"),GMTtime = strftime(chunk, format="%H:%M:%S",tz="GMT"), weekday = weekdays(chunk), month = month(chunk))
saved <- df
# Checking where 5am peak is on GMT time and Local time
#df[1:10,]
#df[90000:90010,]
#aggrGMT <- df %>% group_by(GMTtime) %>% summarise(texts = mean(texts)) %>% mutate(GMTtime = as.POSIXct(GMTtime,format="%H:%M:%S"))
#head(aggrGMT)
#aggrlocal <- df %>% group_by(localtime) %>% summarise(texts = mean(texts)) %>% mutate(localtime = as.POSIXct(localtime,format="%H:%M:%S"))
#head(aggrlocal)
#plot(aggrGMT$GMTtime,aggrGMT$texts, type='l') # One Peak at GMT
#plot(aggrlocal$localtime,aggrlocal$texts, type='l') # Two Peaks at Local Time
# aggrGMT %>% filter(GMTtime >= as.POSIXct("05:00:00",format="%H:%M:%S") & GMTtime < as.POSIXct("06:00:00",format="%H:%M:%S"))
# Peak is just at 5am GMT
# Removing 5am GMT peak by averaging text values with those either side of it
str(df)
df[df$GMTtime == "05:00:00",] # This selects the rows I want
# Smooth out the artificial 05:00 GMT spike by replacing each 05:00 count
# with the average of its neighbouring 10-minute chunks.
# BUG FIX: the original called mean(df$texts[i+1], df$texts[i-1]), which
# passes the second value to mean()'s `trim` argument and therefore just
# returns df$texts[i+1]; the two neighbours must be combined with c().
for (i in seq_len(nrow(df))) {
  if (df$GMTtime[i] == "05:00:00") {
    lo <- max(i - 1, 1)         # guard the first row
    hi <- min(i + 1, nrow(df))  # guard the last row
    df$texts[i] <- mean(c(df$texts[hi], df$texts[lo]))
  }
}
# Checking this has worked
#aggrGMT <- df %>% group_by(GMTtime) %>% summarise(texts = mean(texts)) %>% mutate(GMTtime = as.POSIXct(GMTtime,format="%H:%M:%S"))
#head(aggrGMT)
#plot(aggrGMT$GMTtime,aggrGMT$texts, type='l') # There is no peak
#aggrGMT %>% filter(GMTtime >= as.POSIXct("05:00:00",format="%H:%M:%S") & GMTtime < as.POSIXct("06:00:00",format="%H:%M:%S"))
# There is no peak

# Persist the cleaned dataset, then plot the full series.
write.csv(df,'013 Time Series Full Prediction.csv')
saveRDS(df,'013 Time Series Full Prediction.rds')
plot(df$chunk,df$texts, type='l',col="green")

# Linear modelling
df <- readRDS('013 Time Series Full Prediction.rds')
str(df)
# Chronological split: train before 2021-03-13, test from that date on.
train <- df %>% filter(chunk < "2021-03-13")
test <- df %>% filter(chunk >= "2021-03-13")
colnames(train)
# Linear trend plus categorical time-of-day, day-of-week and month effects.
# NOTE(review): assigning to 'lm' shadows stats::lm() for the session.
lm <- lm(texts ~ chunk + factor(localtime) + factor(weekday) + factor(month), data=train)
summary(lm)

# Predict on Test Set
preds <- predict(lm,test)
test.preds <- cbind(test,preds)
head(test.preds)

# Performance/Error Metrics: Training and Test
# MAE
# MAPE
# RMSE
# MFE (gives a sense of bias)
# NOTE(review): MSE()/MAE() are not base R — presumably from MLmetrics, and
# RMSE()/R2() from caret per the comment below; confirm those packages are
# attached before running this section.
mse = MSE(test.preds$preds, test.preds$texts)
mae = MAE(test.preds$preds, test.preds$texts)
# caret package functions
rmse = RMSE(test.preds$preds, test.preds$texts)
r2 = R2(test.preds$preds, test.preds$texts, form = "traditional")
# My functions
d = test.preds$texts - test.preds$preds
mfe = mean((d))  # mean forecast error: positive => under-prediction on average
# NOTE(review): MAPE divides by observed counts, which can be zero in quiet
# 10-minute chunks and then yields Inf; consider excluding zeros or sMAPE.
mape = mean(abs((d)/test.preds$texts))*100
summary(test.preds$texts)
cat(" MAE:", mae, "\n", "MSE:", mse, "\n",
    "RMSE:", rmse, "\n", "R-squared:", r2, "\n",
    "MFE:", mfe, "\n", "MAPE:", mape)

#Performance plot
# Predicted vs observed on the test set (perfect fit would lie on y = x).
ggplot(test.preds,aes(preds,texts)) + geom_point(alpha=0.5) + theme_bw()

# An example day
# Observed (green) vs predicted (blue) counts over two sample days.
eg_plot <- test.preds %>% filter(chunk >= "2021-03-20" & chunk < "2021-03-21")
ggplot(eg_plot) + geom_line(aes(chunk,texts),col="green") + geom_line(aes(chunk,preds),col="blue") + theme_bw()
eg_plot <- test.preds %>% filter(chunk >= "2021-05-11" & chunk < "2021-05-12")
ggplot(eg_plot,aes(chunk,texts)) + geom_line(col="green") + geom_line(aes(chunk,preds),col="blue") + theme_bw()
165d63530f29f4e4fd18cf784e9a52e0241a130b | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/JOUSBoost/examples/circle_data.Rd.R | 4c16bdd61451f143b114158e35353a7834a2e324 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 634 | r | circle_data.Rd.R | library(JOUSBoost)
### Name: circle_data
### Title: Simulate data from the circle model.
### Aliases: circle_data
### ** Examples
# Generate data from the circle model
set.seed(111)
dat = circle_data(n = 500, inner_r = 1, outer_r = 5)
## Not run:
##D # Visualization of conditional probability p(y=1|x)
##D inner_r = 0.5
##D outer_r = 1.5
##D x = seq(-outer_r, outer_r, by=0.02)
##D radius = sqrt(outer(x^2, x^2, "+"))
##D prob = ifelse(radius >= outer_r, 0, ifelse(radius <= inner_r, 1,
##D (outer_r-radius)/(outer_r-inner_r)))
##D image(x, x, prob, main='Probability Density: Circle Example')
## End(Not run)
|
d5908a5ab016aa11d12fc2e7349f4d778b2a6861 | 8c4a74b0a344440a15a2edee5bb761bcd2dfcad9 | /man/FuzzySet.Rd | d1a1de686ef09a916fdd302d62ddae41309f3c4a | [
"MIT"
] | permissive | xoopR/set6 | 341950b7649629dc9594b9230710df5140679bf7 | e65ffeea48d30d687482f6706d0cb43b16ba3919 | refs/heads/main | 2023-05-22T22:46:30.493943 | 2022-08-27T17:20:08 | 2022-08-27T17:20:08 | 197,164,551 | 9 | 0 | NOASSERTION | 2021-11-16T15:02:05 | 2019-07-16T09:36:22 | R | UTF-8 | R | false | true | 15,618 | rd | FuzzySet.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Set_FuzzySet.R
\name{FuzzySet}
\alias{FuzzySet}
\title{Mathematical Fuzzy Set}
\description{
A general FuzzySet object for mathematical fuzzy sets, inheriting from \code{Set}.
}
\details{
Fuzzy sets generalise standard mathematical sets to allow for fuzzy relationships. Whereas a
standard, or crisp, set assumes that an element is either in a set or not, a fuzzy set allows
an element to be in a set to a particular degree, known as the membership function, which
quantifies the inclusion of an element by a number in [0, 1]. Thus a (crisp) set is a
fuzzy set where all elements have a membership equal to \eqn{1}. Similarly to \link{Set}s, elements
must be unique and the ordering does not matter, to establish order and non-unique elements,
\link{FuzzyTuple}s can be used.
}
\examples{
# Different constructors
FuzzySet$new(1, 0.5, 2, 1, 3, 0)
FuzzySet$new(elements = 1:3, membership = c(0.5, 1, 0))
# Crisp sets are a special case FuzzySet
# Note membership defaults to full membership
FuzzySet$new(elements = 1:5) == Set$new(1:5)
f <- FuzzySet$new(1, 0.2, 2, 1, 3, 0)
f$membership()
f$alphaCut(0.3)
f$core()
f$inclusion(0)
f$membership(0)
f$membership(1)
## ------------------------------------------------
## Method `FuzzySet$membership`
## ------------------------------------------------
f = FuzzySet$new(1, 0.1, 2, 0.5, 3, 1)
f$membership()
f$membership(2)
f$membership(list(1, 2))
## ------------------------------------------------
## Method `FuzzySet$alphaCut`
## ------------------------------------------------
f = FuzzySet$new(1, 0.1, 2, 0.5, 3, 1)
# Alpha-cut
f$alphaCut(0.5)
# Strong alpha-cut
f$alphaCut(0.5, strong = TRUE)
# Create a set from the alpha-cut
f$alphaCut(0.5, create = TRUE)
## ------------------------------------------------
## Method `FuzzySet$support`
## ------------------------------------------------
f = FuzzySet$new(0.1, 0, 1, 0.1, 2, 0.5, 3, 1)
f$support()
f$support(TRUE)
## ------------------------------------------------
## Method `FuzzySet$core`
## ------------------------------------------------
f = FuzzySet$new(0.1, 0, 1, 0.1, 2, 0.5, 3, 1)
f$core()
f$core(TRUE)
## ------------------------------------------------
## Method `FuzzySet$inclusion`
## ------------------------------------------------
f = FuzzySet$new(0.1, 0, 1, 0.1, 2, 0.5, 3, 1)
f$inclusion(0.1)
f$inclusion(1)
f$inclusion(3)
}
\seealso{
Other sets:
\code{\link{ConditionalSet}},
\code{\link{FuzzyMultiset}},
\code{\link{FuzzyTuple}},
\code{\link{Interval}},
\code{\link{Multiset}},
\code{\link{Set}},
\code{\link{Tuple}}
}
\concept{sets}
\section{Super class}{
\code{\link[set6:Set]{set6::Set}} -> \code{FuzzySet}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-new}{\code{FuzzySet$new()}}
\item \href{#method-strprint}{\code{FuzzySet$strprint()}}
\item \href{#method-membership}{\code{FuzzySet$membership()}}
\item \href{#method-alphaCut}{\code{FuzzySet$alphaCut()}}
\item \href{#method-support}{\code{FuzzySet$support()}}
\item \href{#method-core}{\code{FuzzySet$core()}}
\item \href{#method-inclusion}{\code{FuzzySet$inclusion()}}
\item \href{#method-equals}{\code{FuzzySet$equals()}}
\item \href{#method-isSubset}{\code{FuzzySet$isSubset()}}
\item \href{#method-clone}{\code{FuzzySet$clone()}}
}
}
\if{html}{
\out{<details ><summary>Inherited methods</summary>}
\itemize{
\item \out{<span class="pkg-link" data-pkg="set6" data-topic="Set" data-id="add">}\href{../../set6/html/Set.html#method-add}{\code{set6::Set$add()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="set6" data-topic="Set" data-id="contains">}\href{../../set6/html/Set.html#method-contains}{\code{set6::Set$contains()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="set6" data-topic="Set" data-id="multiplicity">}\href{../../set6/html/Set.html#method-multiplicity}{\code{set6::Set$multiplicity()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="set6" data-topic="Set" data-id="print">}\href{../../set6/html/Set.html#method-print}{\code{set6::Set$print()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="set6" data-topic="Set" data-id="remove">}\href{../../set6/html/Set.html#method-remove}{\code{set6::Set$remove()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="set6" data-topic="Set" data-id="summary">}\href{../../set6/html/Set.html#method-summary}{\code{set6::Set$summary()}}\out{</span>}
}
\out{</details>}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-new"></a>}}
\if{latex}{\out{\hypertarget{method-new}{}}}
\subsection{Method \code{new()}}{
Create a new \code{FuzzySet} object.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{FuzzySet$new(
...,
elements = NULL,
membership = rep(1, length(elements)),
class = NULL
)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{...}}{Alternating elements and membership, see details.}
\item{\code{elements}}{Elements in the set, see details.}
\item{\code{membership}}{Corresponding membership of the elements, see details.}
\item{\code{class}}{Optional string naming a class that if supplied gives the set the \code{typed} property.}
}
\if{html}{\out{</div>}}
}
\subsection{Details}{
\code{FuzzySet}s can be constructed in one of two ways, either by supplying the elements and their
membership in alternate order, or by providing a list of elements to \code{elements} and a list of
respective memberships to \code{membership}, see examples. If the \code{class} argument is non-\code{NULL},
then all elements will be coerced to the given class in construction, and if elements of a
different class are added these will either be rejected or coerced.
}
\subsection{Returns}{
A new \code{FuzzySet} object.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-strprint"></a>}}
\if{latex}{\out{\hypertarget{method-strprint}{}}}
\subsection{Method \code{strprint()}}{
Creates a printable representation of the object.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{FuzzySet$strprint(n = 2)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{n}}{numeric. Number of elements to display on either side of ellipsis when printing.}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
A character string representing the object.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-membership"></a>}}
\if{latex}{\out{\hypertarget{method-membership}{}}}
\subsection{Method \code{membership()}}{
Returns the membership, i.e. value in [0, 1], of either the given element(s)
or all elements in the fuzzy set.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{FuzzySet$membership(element = NULL)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{element}}{element or list of element in the \code{set}, if \code{NULL} returns membership of all elements}
}
\if{html}{\out{</div>}}
}
\subsection{Details}{
For \code{FuzzySet}s this is straightforward and returns the membership of the given element(s),
however in \code{FuzzyTuple}s and \code{FuzzyMultiset}s when an element may be duplicated, the function returns the membership of
all instances of the element.
}
\subsection{Returns}{
Value, or list of values, in [0, 1].
}
\subsection{Examples}{
\if{html}{\out{<div class="r example copy">}}
\preformatted{f = FuzzySet$new(1, 0.1, 2, 0.5, 3, 1)
f$membership()
f$membership(2)
f$membership(list(1, 2))
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-alphaCut"></a>}}
\if{latex}{\out{\hypertarget{method-alphaCut}{}}}
\subsection{Method \code{alphaCut()}}{
The alpha-cut of a fuzzy set is defined as the set
\deqn{A_\alpha = \{x \epsilon F | m \ge \alpha\}}{A_\alpha = {x \epsilon F | m \ge \alpha}}
where \eqn{x} is an element in the fuzzy set, \eqn{F}, and \eqn{m} is the corresponding membership.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{FuzzySet$alphaCut(alpha, strong = FALSE, create = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{alpha}}{numeric in [0, 1] to determine which elements to return}
\item{\code{strong}}{logical, if \code{FALSE} (default) then includes elements greater than or equal to alpha, otherwise only strictly greater than}
\item{\code{create}}{logical, if \code{FALSE} (default) returns the elements in the alpha cut, otherwise returns a crisp set of the elements}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
Elements in \link{FuzzySet} or a \link{Set} of the elements.
}
\subsection{Examples}{
\if{html}{\out{<div class="r example copy">}}
\preformatted{f = FuzzySet$new(1, 0.1, 2, 0.5, 3, 1)
# Alpha-cut
f$alphaCut(0.5)
# Strong alpha-cut
f$alphaCut(0.5, strong = TRUE)
# Create a set from the alpha-cut
f$alphaCut(0.5, create = TRUE)
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-support"></a>}}
\if{latex}{\out{\hypertarget{method-support}{}}}
\subsection{Method \code{support()}}{
The support of a fuzzy set is defined as the set of elements whose membership
is greater than zero, or the strong alpha-cut with \eqn{\alpha = 0},
\deqn{A_\alpha = \{x \epsilon F | m > 0\}}{A_\alpha = {x \epsilon F | m > 0}}
where \eqn{x} is an element in the fuzzy set, \eqn{F}, and \eqn{m} is the corresponding
membership.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{FuzzySet$support(create = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{create}}{logical, if \code{FALSE} (default) returns the support elements, otherwise returns a \link{Set} of the support elements}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
Support elements in fuzzy set or a \link{Set} of the support elements.
}
\subsection{Examples}{
\if{html}{\out{<div class="r example copy">}}
\preformatted{f = FuzzySet$new(0.1, 0, 1, 0.1, 2, 0.5, 3, 1)
f$support()
f$support(TRUE)
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-core"></a>}}
\if{latex}{\out{\hypertarget{method-core}{}}}
\subsection{Method \code{core()}}{
The core of a fuzzy set is defined as the set of elements whose membership is equal to one,
or the alpha-cut with \eqn{\alpha = 1},
\deqn{A_\alpha = \{x \epsilon F \ : \ m \ge 1\}}{A_\alpha = {x \epsilon F : m \ge 1}}
where \eqn{x} is an element in the fuzzy set, \eqn{F}, and \eqn{m} is the corresponding membership.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{FuzzySet$core(create = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{create}}{logical, if \code{FALSE} (default) returns the core elements, otherwise returns a \link{Set} of the core elements}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
Core elements in \link{FuzzySet} or a \link{Set} of the core elements.
}
\subsection{Examples}{
\if{html}{\out{<div class="r example copy">}}
\preformatted{f = FuzzySet$new(0.1, 0, 1, 0.1, 2, 0.5, 3, 1)
f$core()
f$core(TRUE)
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-inclusion"></a>}}
\if{latex}{\out{\hypertarget{method-inclusion}{}}}
\subsection{Method \code{inclusion()}}{
An element in a fuzzy set, with corresponding membership \eqn{m}, is:
\itemize{
\item Included - If \eqn{m = 1}
\item Partially Included - If \eqn{0 < m < 1}
\item Not Included - If \eqn{m = 0}
}
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{FuzzySet$inclusion(element)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{element}}{element or list of elements in fuzzy set for which to get the inclusion level}
}
\if{html}{\out{</div>}}
}
\subsection{Details}{
For \link{FuzzySet}s this is straightforward and returns the inclusion level of the given element(s),
however in \link{FuzzyTuple}s and \link{FuzzyMultiset}s when an element may be duplicated, the function returns the inclusion level of
all instances of the element.
}
\subsection{Returns}{
One of: "Included", "Partially Included", "Not Included"
}
\subsection{Examples}{
\if{html}{\out{<div class="r example copy">}}
\preformatted{f = FuzzySet$new(0.1, 0, 1, 0.1, 2, 0.5, 3, 1)
f$inclusion(0.1)
f$inclusion(1)
f$inclusion(3)
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-equals"></a>}}
\if{latex}{\out{\hypertarget{method-equals}{}}}
\subsection{Method \code{equals()}}{
Tests if two sets are equal.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{FuzzySet$equals(x, all = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{x}}{\link{Set} or vector of \link{Set}s.}
\item{\code{all}}{logical. If \code{FALSE} tests each \code{x} separately. Otherwise returns \code{TRUE} only if all \code{x} pass test.}
}
\if{html}{\out{</div>}}
}
\subsection{Details}{
Two fuzzy sets are equal if they contain the same elements with the same memberships.
Infix operators can be used for:
\tabular{ll}{
Equal \tab \code{==} \cr
Not equal \tab \code{!=} \cr
}
}
\subsection{Returns}{
If \code{all} is \code{TRUE} then returns \code{TRUE} if all \code{x} are equal to the Set, otherwise
\code{FALSE}. If \code{all} is \code{FALSE} then returns a vector of logicals corresponding to each individual
element of \code{x}.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-isSubset"></a>}}
\if{latex}{\out{\hypertarget{method-isSubset}{}}}
\subsection{Method \code{isSubset()}}{
Test if one set is a (proper) subset of another
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{FuzzySet$isSubset(x, proper = FALSE, all = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{x}}{any. Object or vector of objects to test.}
\item{\code{proper}}{logical. If \code{TRUE} tests for proper subsets.}
\item{\code{all}}{logical. If \code{FALSE} tests each \code{x} separately. Otherwise returns \code{TRUE} only if all \code{x} pass test.}
}
\if{html}{\out{</div>}}
}
\subsection{Details}{
If using the method directly, and not via one of the operators then the additional boolean
argument \code{proper} can be used to specify testing of subsets or proper subsets. A Set is a proper
subset of another if it is fully contained by the other Set (i.e. not equal to) whereas a Set is a
(non-proper) subset if it is fully contained by, or equal to, the other Set.
Infix operators can be used for:
\tabular{ll}{
Subset \tab \code{<} \cr
Proper Subset \tab \code{<=} \cr
Superset \tab \code{>} \cr
Proper Superset \tab \code{>=}
}
}
\subsection{Returns}{
If \code{all} is \code{TRUE} then returns \code{TRUE} if all \code{x} are subsets of the Set, otherwise
\code{FALSE}. If \code{all} is \code{FALSE} then returns a vector of logicals corresponding to each individual
element of \code{x}.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-clone"></a>}}
\if{latex}{\out{\hypertarget{method-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{FuzzySet$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
bb6a7b67ffb5a0b2e8ba4b9262c6a5e54c3d64f7 | a49135012e357594bcec2b6bc0fd63f4814f588d | /starup_find.R | 722bdc602839d6f52a54de26a4dfcf87c651cbbd | [] | no_license | DevGra/scrap_startup_sp | 8109605780802e302da54b8bf69b7dea2a50befe | 2a3035f47165694dd31912eb69c443277611fa1f | refs/heads/master | 2020-08-18T20:02:33.385730 | 2019-10-17T16:21:41 | 2019-10-17T16:21:41 | 215,828,444 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 825 | r | starup_find.R | getwd()
# Match startup company names against PIPE-funded company names.
# NOTE(review): read.xlsx() is not provided by any of the libraries attached
# below (sqldf / stringr / DescTools); presumably library(openxlsx) (or xlsx)
# was loaded in the session -- confirm before running.
# NOTE(review): as.tibble() is the deprecated spelling of tibble::as_tibble(),
# and no tibble library() call appears here either.
setwd("C:\\Users\\cgt\\Desktop\\startup")
library(sqldf)
library(stringr)
library(DescTools)
# raw spreadsheets: one row per startup / per PIPE-funded company
startup <- read.xlsx("Dados_startups.xlsx")
pipe <- read.xlsx("CGEE empresas_PIPE - resposta.xlsx")
# keep only the company-name column from each table
startup_nome <- subset(startup, select = c(nome))
pipe_nome <- subset(pipe, select = c(Nome.da.Empresa))
names(pipe_nome) <- c("Nome_empresa")
startup_nome <- as.tibble(startup_nome)
pipe_nome <- as.tibble(pipe_nome)
# normalize case so the %in% matching below is case-insensitive
startup_nome$nome <- str_to_lower(startup_nome$nome)
pipe_nome$Nome_empresa <- str_to_lower(pipe_nome$Nome_empresa)
# first word of each PIPE company name
pipe_nm <- word(pipe_nome$Nome_empresa, sep = fixed(" "))
# PIPE companies whose first word equals a startup name
df_result <- pipe_nome[pipe_nm %in% c(startup_nome$nome),]
#esta <- sqldf("select * from pipe_nome where Nome_empresa LIKE '%startup_nome$nome%'")
# NOTE(review): this overwrites df_result from two lines above, so only the
# exact full-name matches survive -- confirm which result was intended.
df_result <- startup_nome[startup_nome$nome %in% c(pipe_nome$Nome_empresa),]
|
c6d7c779dff6b4fd19acdc8da4c74261e74be702 | 2a58920a4906cff62c0ed80a10d23e3aacc485cb | /finance/tp1/TP1.R | 2db29f1f5bfd5584a7854afd9960993fabe78eb0 | [] | no_license | kensekense/unige-spring-2020 | 38c21962d11442697166955b56ac8f110827e29c | 3ca6b52133803f88f77e825cc6eb026db13878a6 | refs/heads/master | 2022-09-06T08:33:59.202914 | 2020-05-23T19:46:07 | 2020-05-23T19:46:07 | 238,730,992 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,154 | r | TP1.R | #define a function to calculate returns on a vector
# Compute simple per-period returns from a price series.
# The first return is defined as 0 (no prior observation); element i (i > 1)
# is (x[i] - x[i-1]) / x[i-1]. Returns a numeric vector the same length as x.
# (The original index loop relied on x[0] yielding numeric(0) to make its
# first iteration a no-op, and returned NULL for empty input; this version
# is vectorized and returns numeric(0) for empty input.)
mret <- function(x) {
  n <- length(x)
  if (n == 0) {
    return(numeric(0))
  }
  # vectorized: diff(x) gives x[i] - x[i-1]; x[-n] supplies the lagged
  # denominators x[i-1]
  c(0, diff(x) / x[-n])
}
#define a function to annualize a vector of monthly returns
# Compound a vector of per-period returns into a single total return:
# prod(1 + x) - 1. Empty input yields 0 (nothing to compound), matching the
# original accumulator-loop implementation.
aret <- function(x) {
  prod(1 + x) - 1
}
#calculate and print the monthly returns of a 100..112 price sequence
x <- seq(100,112)
x <- mret(x)
print(x)
#annualize the monthly return and print
y <- x
y <- aret(y)
print(y)
#compare to the sum (simple sum of monthly returns vs the compounded return)
print(sum(x))
#compare the average monthly return vs average of the monthly returns
z <- seq(1,12)
print(z/12)
print(mean(x))
#expected value of x (hand-computed from the exercise's branch probabilities)
print(2*0.4*0.2*0.6*0.2+3*0.2*0.6*0.4*0.2*0.4*0.4+2*0.6*0.2*0.2*0.4+1*0.6*0.4)
#var(x|y=0): sample variance of the conditional outcomes
x <- c(0.2,0.1,0.0,0.3)
print(var(x))
|
103812c731b7a597a76c558e9ffd295647f3f5c9 | a06da6adbad285ae75e75666323e33bdd0f02de4 | /aux_output_scripts/OLD/global_vs_local_model_eval.r | f163c63b1f7d43184c1dda3e76fc640176b51395 | [] | no_license | lkaiser7/IS_V2 | 5c3ab012576a935444b854300703ba382e80f942 | ef6d498f644db615dee11af580ea62a96b54bf0b | refs/heads/master | 2023-06-16T23:10:38.880561 | 2023-05-30T16:41:35 | 2023-05-30T16:41:35 | 37,231,310 | 0 | 1 | null | 2022-10-27T18:02:47 | 2015-06-11T01:04:33 | R | UTF-8 | R | false | false | 8,595 | r | global_vs_local_model_eval.r | #wd="D:/projects/Invasives_modeling/results/xfirst round of results/main_results/"
# rootDir and eval_stats are expected to be defined by the calling script.
setwd(rootDir)
dir.create("combined_results/model_eval_metric/", showWarnings = F, recursive = T)
#eval_metrics=c("TSS", "ROC", "KAPPA") #
# convenience default for interactive runs; immediately overwritten by the loop
eval_stat = eval_stats[1]
for (eval_stat in eval_stats){ #global_notHI regional_HI nested_HI
  # per-metric evaluation matrices for the global and regional model runs
  global_notHI_eval_df=read.csv(paste0("global_notHI_models/outputs/all_eval_mat_",eval_stat,".csv"))
  regional_HI_eval_df=read.csv(paste0("regional_HI_models/outputs/all_eval_mat_",eval_stat,".csv"))
# Mean per-species evaluation score for one algorithm.
# metric_df: data.frame whose 1st column is the species name, whose
#   "rownames.Spp_eval." column (column 2) names the algorithm, and whose
#   remaining columns are per-run scores.
# model_eval_metric: algorithm to keep (e.g. "MAXENT.Phillips", "GBM").
# Returns a data.frame with columns species and eval_stat (row-wise mean of
# the per-run scores, NAs removed).
calculate_eval_mean_eval_metric <- function(metric_df, model_eval_metric="MAXENT.Phillips"){
  algo_rows <- metric_df[metric_df$rownames.Spp_eval. == model_eval_metric, ]
  algo_rows <- algo_rows[, -2]  # drop the algorithm-name column
  # rowMeans() instead of apply(..., 1, mean): no matrix coercion, and
  # na.rm spelled TRUE rather than T
  score_means <- rowMeans(algo_rows[, -1], na.rm = TRUE)
  return(data.frame(species = algo_rows[, 1], eval_stat = score_means))
}
# per-species mean skill for each algorithm x region combination
mean_global_notHI_maxent_eval=calculate_eval_mean_eval_metric(global_notHI_eval_df)
mean_regional_HI_maxent_eval=calculate_eval_mean_eval_metric(regional_HI_eval_df)
mean_global_notHI_GBM_eval=calculate_eval_mean_eval_metric(global_notHI_eval_df, model_eval_metric="GBM")
mean_regional_HI_GBM_eval=calculate_eval_mean_eval_metric(regional_HI_eval_df, model_eval_metric="GBM")
# side-by-side global vs regional skill per species
mean_maxent_eval_df=merge(mean_global_notHI_maxent_eval, mean_regional_HI_maxent_eval, by="species")
mean_GBM_eval_df=merge(mean_global_notHI_GBM_eval, mean_regional_HI_GBM_eval, by="species")
names(mean_maxent_eval_df)=c("Species", "Global", "Regional")
names(mean_GBM_eval_df)=c("Species", "Global", "Regional")
# algorithm-averaged ("allModels") skill table
mean_allmodels_eval_df=merge(mean_maxent_eval_df, mean_GBM_eval_df, by="Species")
mean_allmodels_eval_df$Global=apply(mean_allmodels_eval_df[,c("Global.x", "Global.y")], 1, FUN=mean, na.rm=T)
mean_allmodels_eval_df$Regional=apply(mean_allmodels_eval_df[,c("Regional.x", "Regional.y")], 1, FUN=mean, na.rm=T)
mean_allmodels_eval_df=mean_allmodels_eval_df[,c("Species", "Global", "Regional")]
#View(mean_maxent_eval_df)
# write one csv per algorithm (plus the averaged table) for this metric
file_name=paste0("combined_results/model_eval_metric/eval_metric_comparison_maxent_", eval_stat, ".csv")
write.csv(mean_maxent_eval_df, file_name, row.names = F)
file_name=paste0("combined_results/model_eval_metric/eval_metric_comparison_GBM_", eval_stat, ".csv")
write.csv(mean_GBM_eval_df, file_name, row.names = F)
file_name=paste0("combined_results/model_eval_metric/eval_metric_comparison_allModels_", eval_stat, ".csv")
write.csv(mean_allmodels_eval_df, file_name, row.names = F)
# quick interactive scatterplots (base graphics)
plot(mean_maxent_eval_df$Global, mean_maxent_eval_df$Regional)
plot(mean_GBM_eval_df$Global, mean_GBM_eval_df$Regional)
geom.text.size = 2
theme.size = (14/5) * geom.text.size
library(ggplot2)
# global vs regional skill scatter, maxent
a=ggplot(mean_maxent_eval_df, aes(x=Global, y=Regional)) +
  geom_point(aes(size=1.25)) +
  geom_text(label=mean_maxent_eval_df$Species, nudge_x = 0.0, nudge_y = 0.015, size=geom.text.size)+
  theme(legend.position="none")+geom_smooth(method = "lm", se = TRUE)+xlab("Global model skill")+ylab("Regional model skill")
a
tiff_name=paste0("combined_results/model_eval_metric/eval_metric_comparison_maxent_", eval_stat, ".tiff")
ggsave(filename = tiff_name, plot = a, width = 6, height = 4, units = "in", compress="lzw")
library(ggplot2)
# global vs regional skill scatter, GBM
a=ggplot(mean_GBM_eval_df, aes(x=Global, y=Regional)) +
  geom_point(aes(size=1.25)) +
  geom_text(label=mean_GBM_eval_df$Species, nudge_x = 0.0, nudge_y = 0.015, size=geom.text.size)+
  theme(legend.position="none")+geom_smooth(method = "lm", se = TRUE)+xlab("Global model skill")+ylab("Regional model skill")
a
tiff_name=paste0("combined_results/model_eval_metric/eval_metric_comparison_GBM_", eval_stat, ".tiff")
ggsave(filename = tiff_name, plot = a, width = 6, height = 4, units = "in", compress="lzw")
# global vs regional skill scatter, algorithm-averaged
# NOTE(review): labels come from mean_maxent_eval_df$Species while the data
# are mean_allmodels_eval_df; row order should match after the merges above,
# but confirm.
a=ggplot(mean_allmodels_eval_df, aes(x=Global, y=Regional)) +
  geom_point(aes(size=1.25)) +
  geom_text(label=mean_maxent_eval_df$Species, nudge_x = 0.0, nudge_y = 0.015, size=geom.text.size)+
  theme(legend.position="none")+geom_smooth(method = "lm", se = TRUE)+xlab("Global model skill")+ylab("Regional model skill")
a
tiff_name=paste0("combined_results/model_eval_metric/eval_metric_comparison_allModels_", eval_stat, ".tiff")
ggsave(filename = tiff_name, plot = a, width = 6, height = 4, units = "in", compress="lzw")
######################
#now compare match in model skill and variable importance
#mean_allmodels_eval_df$skill_deviation=abs(mean_allmodels_eval_df$Global-mean_allmodels_eval_df$Regional)
mean_allmodels_eval_df$skill_deviation=(mean_allmodels_eval_df$Global-mean_allmodels_eval_df$Regional)^2
species_var_imp_deviations_df=read.csv("combined_results/mean_VariImp_plots/mean_deviation_in_global_vs_regional_variable_importance.csv")
#View(species_var_imp_deviations_df)
skill_vs_varImp= merge(mean_allmodels_eval_df, species_var_imp_deviations_df, by.x="Species", by.y="species")
#View(skill_vs_varImp)
#names(skill_vs_varImp)
cor(skill_vs_varImp$skill_deviation, skill_vs_varImp$varImp_deviation)
# skill deviation vs variable-importance deviation scatter
a=ggplot(skill_vs_varImp, aes(x=varImp_deviation, y=skill_deviation)) +
  geom_point(aes(size=1.25)) +
  geom_text(label=skill_vs_varImp$Species, nudge_x = 0.0, nudge_y = 0.015, size=geom.text.size)+
  theme(legend.position="none")+xlab("Deviation between global and regional model variable importance")+
  ylab("Deviation between global and regional model skill")+geom_smooth(method = "lm", se = TRUE)
a
tiff_name=paste0("combined_results/model_eval_metric/skill_vs_varImp_SSdeviation_", eval_stat, ".tiff")
ggsave(filename = tiff_name, plot = a, width = 6, height = 4, units = "in", compress="lzw")
}
#now compare metrics across TSS / ROC / KAPPA (only when all three were run)
if (length(eval_stats)==3){
  # reload the per-metric csvs written above and tag each with its metric name
  for (eval_stat in eval_stats){
    file_name=paste0("combined_results/model_eval_metric/eval_metric_comparison_maxent_", eval_stat, ".csv")
    maxent_file=read.csv(file_name)
    maxent_file$metric=eval_stat
    assign(paste0("maxent_", eval_stat), maxent_file)
    file_name=paste0("combined_results/model_eval_metric/eval_metric_comparison_GBM_", eval_stat, ".csv")
    gbm_file=read.csv(file_name)
    gbm_file$metric=eval_stat
    assign(paste0("gbm_", eval_stat), gbm_file)
  }
  # stack the three metrics for grouped bar charts; prettify species names
  all_maxent_metrics=rbind(maxent_TSS, maxent_ROC, maxent_KAPPA)
  all_gbm_metrics=rbind(gbm_TSS, gbm_ROC, gbm_KAPPA)
  all_maxent_metrics$Species=gsub(pattern="_", replacement=" ", all_maxent_metrics$Species)
  all_gbm_metrics$Species=gsub(pattern="_", replacement=" ", all_gbm_metrics$Species)
  a=ggplot(data=all_maxent_metrics, aes(x=Species, y=Regional, fill=metric)) +
    geom_bar(stat="identity", position=position_dodge()) +
    theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1)) +
    theme(legend.title=element_blank()) + ylab("Regional maxent eval. metrics") +xlab("")
  a
  tiff_name=paste0("combined_results/model_eval_metric/regional_HI_maxent_eval_metric_comparison.tiff")
  ggsave(filename = tiff_name, plot = a, width = 6, height = 4, units = "in", compress="lzw")
  a=ggplot(data=all_maxent_metrics, aes(x=Species, y=Global, fill=metric)) +
    geom_bar(stat="identity", position=position_dodge()) +
    theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1)) +
    theme(legend.title=element_blank()) + ylab("Global maxent eval. metrics") +xlab("")
  a
  tiff_name=paste0("combined_results/model_eval_metric/global_notHI_maxent_eval_metric_comparison.tiff")
  ggsave(filename = tiff_name, plot = a, width = 6, height = 4, units = "in", compress="lzw")
  a=ggplot(data=all_gbm_metrics, aes(x=Species, y=Regional, fill=metric)) +
    geom_bar(stat="identity", position=position_dodge()) +
    theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1)) +
    theme(legend.title=element_blank()) + ylab("Regional gbm eval. metrics") +xlab("")
  a
  tiff_name=paste0("combined_results/model_eval_metric/regional_HI_gbm_eval_metric_comparison.tiff")
  ggsave(filename = tiff_name, plot = a, width = 6, height = 4, units = "in", compress="lzw")
  a=ggplot(data=all_gbm_metrics, aes(x=Species, y=Global, fill=metric)) +
    geom_bar(stat="identity", position=position_dodge()) +
    theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1)) +
    theme(legend.title=element_blank()) + ylab("Global gbm eval. metrics") +xlab("")
  a
  tiff_name=paste0("combined_results/model_eval_metric/global_notHI_gbm_eval_metric_comparison.tiff")
  ggsave(filename = tiff_name, plot = a, width = 6, height = 4, units = "in", compress="lzw")
}
|
b51df35c228e4ed7a2729bea3689923c02da9779 | e4be7f8bf65953c73b3b699a81a7d8db749e3e60 | /R/glmtools.R | b7d3788d89223b819e315fd8de5413e64957666d | [
"MIT"
] | permissive | FLARE-forecast/FLAREr | 0833470f9b8e744b4a0782c1a1b50a6a403534d1 | 0c1215688ea80eb7886f3ffe2d847b766b3fb6d0 | refs/heads/master | 2023-08-04T23:43:47.057220 | 2023-08-04T13:34:43 | 2023-08-04T13:34:43 | 292,376,842 | 4 | 8 | MIT | 2023-04-25T13:32:56 | 2020-09-02T19:32:21 | R | UTF-8 | R | false | false | 16,577 | r | glmtools.R | #All functions are from GLMtools package
#We moved them here because GLMtools package
#was not keeping up with R updates
#'@title read in a GLM simulation *.nml file
#'@description
#'read in a GLM simulation *.nml file and create a list. \cr
#'
#'
#'@param nml_file a string with the path to the GLM glm2.nml file, or
#'\code{'template'} for loading the GLM template nml file with GLMr (default)
#'@return glm_nml a nml (a list) for GLM config
#'@keywords methods
#'@author
#'Jordan S. Read
#'@seealso \link{get_nml_value}
#'@examples
#'glm_nml <- read_nml()
#'print(glm_nml)
#'@noRd
read_nml <- function(nml_file = 'template'){
  # NOTE(review): the 'template' shortcut appears dead here -- the
  # nml_path_norm() call that resolved it is commented out, so nml_file
  # must be a real path; confirm whether that was intentional.
  #nml_file <- nml_path_norm(nml_file)
  if (!ascii_only(nml_file)){
    stop('non-ASCII characters found in nml file on line ', what_ascii(nml_file))
  }
  # skip all commented lines, return all variables and associated values
  # requires NO return line variables (all variables must be completely defined on a single line)
  c <- file(nml_file,"r")
  fileLines <- readLines(c)
  close(c)
  lineStart <- substr(fileLines,1,1)
  # ignore comment lines or empty lines
  ignoreLn <- lineStart=='!' | fileLines==""
  lineStart <- lineStart[!ignoreLn]
  fileLines <- fileLines[!ignoreLn]
  # find all lines which start with "&" * requires FIRST char to be value
  lineIdx <- seq(1,length(lineStart))
  blckOpen <- lineIdx[lineStart=="&"]
  blckClse <- lineIdx[lineStart=="/"]
  nml <- list()
  # one list element per "&block ... /" section, named after the block
  for (i in seq_len(length(blckOpen))){
    blckName <- substr(fileLines[blckOpen[i]],
                       2, nchar(fileLines[blckOpen[i]]))
    blckName <- gsub("\\s", "", blckName)
    oldNms <- names(nml)
    nml[[i]] <- list()
    names(nml) <- c(oldNms,blckName)
    carryover <- ''
    for (j in (blckOpen[i]+1):(blckClse[i]-1)){
      # strip tabs/spaces; prepend any carryover from a previous hanging comma
      textLine <- paste(carryover,
                       gsub("\t", "", gsub(" ", "", fileLines[j])), sep = '')
      if(substr(textLine, 1, 1) != '!'){
        # Add a check here, sometimes, if there is a hanging comma,
        # and only sometimes that means the next row continues this value
        if(substr(textLine, nchar(textLine), nchar(textLine)) == ',' &&
           j+1 <= length(fileLines) &&
           !any(grep("=", fileLines[j + 1])) &&
           !any(grep("/", fileLines[j + 1]))){
          carryover = textLine
          next
        }else{
          carryover = ''
        }
        # else, line is commented out
        lineVal <- buildVal(textLine, lineNum = j, blckName)
        # re-insert the blank stripped out of "YYYY-MM-DD HH:MM" date values
        if(names(lineVal) %in% c("start","stop")){
          lineVal[1] <- paste0(substring(lineVal[1],1,10)," ",substring(lineVal[1],11,15))
        }
        nml[[i]] <- c(nml[[i]], lineVal)
      }
    }
  }
  nml <- .nml(nml)
  return(nml)
}
# private function
# Parse one "name=value" nml line into a single-element named list.
# textLine: the whitespace-stripped line; lineNum and blckName are used
# only for error reporting. The value is typed by inspection: quoted
# string, .true./.false. boolean(s), comma-separated numbers, or number.
buildVal <- function(textLine, lineNum, blckName){
  #-----function appends nml list with new values-----
  # remove all text after comment string
  textLine <- strsplit(textLine,'!')[[1]][1]
  if (!any(grep("=", textLine))){
    stop(c("no hanging lines allowed in .nml, used ",textLine,'.\nSee line number:',lineNum,' in "&',blckName,'" section.'))
  }
  params <- strsplit(textLine,"=") # break text at "="
  parNm <- params[[1]][1]
  parVl <- params[[1]][2]
  # figure out what parval is...if string, remove quotes and keep as string
  # ***for boolean text, use "identical" so that 0 != FALSE
  # can be: string, number, comma-sep-numbers, or boolean
  # special case for date: re-insert the blank between date and time
  if (is.na(parVl)){
    stop('Empty values after "', textLine, '" on line ', lineNum,
         '. \nPerhaps the values are on the next line?', call. = FALSE)
  }
  # BUG FIX: was nchar(parVl>17), i.e. nchar() of a logical (always
  # non-zero); the intended guard is on the string length of the value
  if (nchar(parVl) > 17 & substr(parVl,14,14)==':' & substr(parVl,17,17)==':'){
    parVl<-paste(c(substr(parVl,1,11),' ',substr(parVl,12,nchar(parVl))),collapse='')
  }
  if (any(grep("'",parVl))){
    parVl <- gsub("'","",parVl)
  }else if (any(grep("\"",parVl))){
    parVl <- gsub("\"","",parVl)
  }else if (isTRUE(grepl(".true.",parVl) || grepl(".false.",parVl))){
    logicals <- unlist(strsplit(parVl,","))
    parVl <- from.glm_boolean(logicals)
  }else if (any(grep(",",parVl))){ # comma-sep-nums
    parVl <- c(as.numeric(unlist(strsplit(parVl,","))))
  }else { # test for number
    parVl <- as.numeric(parVl)
  }
  lineVal <- list(parVl)
  names(lineVal) <- parNm
  return(lineVal)
}
#' go from glm2.nml logical vectors to R logicals
#'
#' @param values a vector of strings containing either .false. or .true.
#' @return a logical vector
#' @keywords internal
#' @noRd
# Convert namelist boolean strings (".true."/".false.") to R logicals.
# Any element containing neither token is an error. Uses vapply() instead of
# sapply() so the return type is stable (logical(0) for empty input rather
# than an empty list), and drops the redundant ifelse(isTRUE(...)) wrapper.
from.glm_boolean <- function(values){
  logicals <- vapply(values, FUN.VALUE = logical(1), FUN = function(x){
    if (!isTRUE(grepl(".true.", x) || grepl(".false.", x))){
      stop(x, ' is not a .true. or .false.; conversion to TRUE or FALSE failed.',
           call. = FALSE)
    }
    isTRUE(grepl(".true.", x))
  })
  # as.logical() also strips the names vapply() attaches
  as.logical(logicals)
}
to.glm_boolean <- function(values){
  # Render R logicals as namelist booleans (".true."/".false.");
  # NAs pass through and empty input stays empty.
  ifelse(values, '.true.', '.false.')
}
# private function
# Return the indices of every block in `nml` that defines `argName`.
# On no match, error and list every parameter name that *was* seen.
# The original accumulated the unmatched names into a fixed 1000-slot
# scratch vector, which crashed on blocks with zero parameters
# (one.i:(one.i-1) counts backwards) and silently assumed <= 1000 names;
# a growing character vector removes both limits.
findBlck <- function(nml,argName){
  # test for argName being a string
  if (!is.character(argName)){stop(c("parameter name must be a string"))}
  blckI <- c()
  missed <- character(0)  # parameter names from blocks that did NOT match
  for (i in seq_along(nml)){
    if (any(argName %in% names(nml[[i]]))){
      blckI <- c(blckI,i)
    } else {
      missed <- c(missed, names(nml[[i]]))
    }
  }
  # test to see if a block match was made
  if (is.null(blckI)){stop(c("parameter name ",argName," not found in nml. Possible names:",paste(missed,collapse=', ')))}
  return(blckI)
}
# private function
setnmlList <- function(glm_nml,arg_list){
  # Fold a named list of parameter values into glm_nml via repeated
  # set_nml() calls, returning the updated nml object.
  if (!is.list(arg_list)){stop("arg_list must be a list")}
  if (any(nchar(names(arg_list)) == 0) | length(names(arg_list)) == 0){
    stop('arg_list must be a named list')
  }
  arg_names <- names(arg_list)
  for (k in seq_along(arg_names)){
    glm_nml <- set_nml(glm_nml, arg_name = arg_names[k], arg_val = arg_list[[k]])
  }
  return(glm_nml)
}
# private function
#' @importFrom utils tail
is_nml_file <- function(nml_file){
  # TRUE when the final '.'-separated token of the path is "nml".
  tokens <- strsplit(nml_file, "\\.")[[1]]
  ext <- tokens[length(tokens)]
  if (ext == 'nml'){
    return(TRUE)
  }
  return(FALSE)
}
#' @importFrom utils capture.output
what_ascii <- function(file){
  # Capture the lines tools::showNonASCIIfile() prints for `file`;
  # a zero-length result means the file is pure ASCII.
  capture.output(tools::showNonASCIIfile(file))
}
# TRUE when the file contains no non-ASCII characters, i.e. the
# showNonASCIIfile report captured by what_ascii() is empty.
# (Collapses the original four-line if/else into the boolean it computes.)
ascii_only <- function(file){
  length(what_ascii(file)) == 0
}
# Resolve the block that holds `arg_name`. Accepts either a bare parameter
# name (searched via findBlck()) or an explicit "block::param" form.
# NOTE(review): in the "block::param" branch the *block name string* is
# returned, while the search branch returns a numeric index -- callers
# appear to tolerate both, but confirm before relying on the return type.
get_block <- function(glm_nml, arg_name, warn=TRUE){
  arg_split = strsplit(arg_name,'::')[[1]]
  if (length(arg_split) > 1){
    # explicit "block::param": trust the caller-supplied block name
    blck = arg_split[1]
    arg_name = get_arg_name(arg_name)
  } else{
    blck <- findBlck(glm_nml,arg_name)
  }
  # ambiguous parameter (found in several blocks): keep the first,
  # optionally warning with the "block::param" syntax that would
  # make the choice explicit
  if (length(blck) > 1){
    if (warn)
      warning(arg_name, " found in ", paste(names(glm_nml[blck]), collapse=' & '), ", returning the first. Try ",names(glm_nml[blck])[1],"::",arg_name, " for explicit match")
    blck = blck[1]
  }
  return(blck)
}
# Strip an optional "block::" prefix, returning the bare parameter name.
# (Drops the original's dead `blck <- arg_split[1]` assignment.)
get_arg_name <- function(arg_name){
  arg_split = strsplit(arg_name,'::')[[1]]
  if (length(arg_split) > 1){
    arg_name = arg_split[2]
  }
  return(arg_name)
}
.nml <- function(list_obj){
  # Stamp a plain list with S3 class "nml"; returned invisibly so
  # constructors don't auto-print.
  invisible(structure(list_obj, class = "nml"))
}
# Serialize an nml object to `file` via its print() method.
# on.exit() guarantees the sink is released even if printing fails, so an
# error can no longer leave all subsequent console output silently captured.
write_nml <- function(glm_nml,file){
  sink(file)
  on.exit(sink(), add = TRUE)
  print(glm_nml)
  invisible(NULL)
}
nml_path_norm <- function(nml_file){
  # Expand the special value "template" to the packaged template path,
  # then require a *.nml extension on whatever path we ended up with.
  if (nml_file == "template"){
    nml_file <- nml_template_path()
  }
  if (is_nml_file(nml_file)){
    return(nml_file)
  }
  stop(nml_file, ' is not of file type *.nml')
}
# Locate the packaged GLM template nml file.
# NOTE(review): system.file() is called without a `package` argument, so it
# searches R's "base" package and will return "" on most installs -- confirm
# whether `package = "FLAREr"` (or similar) was intended here.
nml_template_path <- function(){
  return(system.file('sim_files/glm3.nml'))
}
# S3 print method: render an nml object back into namelist syntax
# ("&block", then " name = value" lines, terminated by "/"), writing via
# cat() so the output can be sink()-ed to a file by write_nml().
print.nml <- function(x, ...){
  glm_nml <- x
  for (i in seq_len(length(names(glm_nml)))){ # these are the blocks
    blckNm <- names(glm_nml)[i]
    cat("&")
    cat(blckNm)
    cat('\n')
    blckList <- glm_nml[[i]]
    for (j in seq_len(length(names(blckList)))){ # the block's parameters
      cat(' ')
      cat(names(blckList)[j])
      cat(' = ')
      if (length(blckList[[j]])>1){
        # vectors: comma-join; logical vectors become .true./.false.
        if (is.logical(blckList[[j]])){
          charText <- to.glm_boolean(blckList[[j]])
        } else {
          charText <- c(blckList[[j]])
        }
        writer <- paste(charText,collapse=', ')
      } else if (is.character(blckList[[j]])) {
        # scalar strings holding comma-separated values are re-quoted per element
        charText <- strsplit(blckList[[j]],',')
        writer <- paste(c("'",paste(c(charText[[1]]),collapse="','"),"'"),collapse='')
      } else if (is.logical(blckList[[j]])){
        writer <- to.glm_boolean(blckList[[j]])
      } else {
        writer <- blckList[[j]]
      }
      cat(writer)
      cat('\n')
    }
    cat('/\n')
  }
}
#'@noRd
# S3 summary method: an nml object has no statistics to summarize, so
# summarizing is defined as printing it.
summary.nml <- function(object,...){
  print(object,...)
}
#'@title get surface height from GLM simulation
#'@description
#'Creates a data.frame with DateTime and surface_height. \cr
#'
#'
#'@param file a string with the path to the netcdf output from GLM
#'@param ice.rm a boolean for including ice thickness in surface height
#'@param snow.rm a boolean for including snow depth thickness in surface height
#'@param ... additional arguments passed to \code{\link{resample_sim}}
#'@return a data.frame with DateTime and surface_height (in meters)
#'@keywords methods
#'@author
#'Jordan S. Read, Luke A. Winslow
#'@examples
#'sim_folder <- run_example_sim(verbose = FALSE)
#'nc_file <- file.path(sim_folder, 'output.nc')
#'surface <- get_surface_height(file = nc_file)
#'surface_w_ice <- get_surface_height(file = nc_file, ice.rm = FALSE, snow.rm = FALSE)
#'@importFrom ncdf4 ncvar_get
#'@noRd
get_surface_height <- function(file = 'output.nc', ice.rm = TRUE, snow.rm = TRUE, ...){
  # (roxygen header above documents the interface)
  glm_nc <- get_glm_nc(file)
  NS <- ncvar_get(glm_nc, "NS")   # number of active layers at each timestep
  elev <- ncvar_get(glm_nc, "z")  # elevations indexed [layer, timestep]
  time <- get_time(glm_nc)
  close_glm_nc(glm_nc)
  # surface height at each timestep = elevation of the topmost active layer
  surface_height <- vector(mode = "numeric",length = length(NS))
  for (j in seq_len(length(NS))){
    surface_height[j] <- elev[NS[j],j]
  }
  if (!ice.rm){
    surface_height <- surface_height + get_ice(file, snow.rm = TRUE)[, 2]
  }
  if (!snow.rm){
    # NOTE(review): this subtracts an expression from itself, so `snow` is
    # always zero and snow.rm = FALSE has no effect; one of the two calls
    # was presumably meant to use snow.rm = FALSE -- confirm against get_ice().
    snow <- get_ice(file, snow.rm = TRUE)[, 2] - get_ice(file, snow.rm = TRUE)[, 2]
    surface_height <- surface_height + snow
  }
  glm_surface <- data.frame('DateTime'=time, 'surface_height'=surface_height)
  glm_surface <- resample_sim(df = glm_surface, ...)
  return(glm_surface)
}
#' @importFrom ncdf4 nc_open
# Open a GLM NetCDF output file for reading; returns the ncdf4 handle.
# Exactly one non-NA file path must be supplied. The original guard
# `length(file) < 1 || is.na(file)` crashed on multi-element input
# (vectorized is.na() inside `||`) instead of raising the intended error.
get_glm_nc <- function(file){
  if(length(file) != 1 || is.na(file)){
    stop('glm_nc file must be supplied string or proper file handle')
  }
  glm_nc <- nc_open(file, readunlim=TRUE)
  return(glm_nc)
}
#' @importFrom ncdf4 nc_close
# Release an open ncdf4 handle obtained from get_glm_nc().
close_glm_nc <- function(glm_nc){
  nc_close(glm_nc)
}
# Summary: Returns the converted time vector in R format
#' @importFrom ncdf4 ncvar_get
get_time <- function(glm_nc){
  # Convert the netcdf "time" variable (hours since an epoch) to POSIXct:
  # epoch + hours * time_unit (fraction of a day per hour) * 86400 s/day.
  hours_since <- ncvar_get(glm_nc, "time")
  time_info <- get_time_info(glm_nc)
  time <- time_info$startDate + time_info$time_unit * hours_since * 60*60*24
  return(time)
}
#' @importFrom ncdf4 ncatt_get ncvar_get
# Parse the simulation's time-axis metadata. Returns a one-row data.frame
# with columns time_unit (fraction of a day per netcdf "hours" step),
# startDate and stopDate (POSIXct). Either an open handle (glm_nc) or a
# path (file) may be supplied; a handle opened here is also closed here.
get_time_info <- function(glm_nc, file = NULL){
  day_secs = 86400
  time_unit <- 3600/day_secs
  close_nc <- FALSE #flag if we should close nc in this function
  #The units attribute on the time variable has basically the info we need
  if (missing(glm_nc)){
    glm_nc <- get_glm_nc(file)
    close_nc <- TRUE
  }
  time_units <- ncatt_get(glm_nc,'time','units')$value
  #It is written in prose instead of machine-readable format. Check to makes sure
  # it says "hours since ", then we know the timestep is hours. As far as I know,
  # this never changes
  tiCheck <- regexpr('(hours since) (.*)' ,time_units, perl=TRUE)
  #make sure the unit string is as expected. I think
  # the timestep is always in hours
  if(attr(tiCheck,'capture.start')[1] < 0 || attr(tiCheck,'capture.start')[2] < 0){
    stop('Unexpected time unit in NetCDF file')
  }
  # Get the epoch from the unit string (second capture group)
  epoch <- substr(time_units, attr(tiCheck,'capture.start')[2], attr(tiCheck,'capture.start')[2] + attr(tiCheck,'capture.length')[2])
  #get the length of the time data, will use this later
  tLen <- glm_nc$dim[["time"]][["len"]]
  time_info <- data.frame("time_unit"=time_unit)
  start_date <- coerce_date(epoch)
  time_info <- cbind(time_info,"startDate"=start_date)
  #End date/time: epoch + (last time value, hours) converted to seconds
  endT <- time_info$startDate + ncvar_get(glm_nc, 'time', start=tLen, count=1) * time_unit * day_secs
  time_info <- cbind(time_info,"stopDate"=endT[1])
  if (close_nc){
    close_glm_nc(glm_nc)
  }
  return(time_info)
}
coerce_date <- function(dates){
  # Coerce input to POSIXct in an explicit time zone. POSIXct input that
  # already carries a concrete tzone attribute is returned untouched; any
  # other input (characters, Dates, or POSIXct with a missing/empty tzone)
  # is re-parsed in the local UTC-offset zone from get_UTM_offset().
  tzone <- attr(dates, 'tzone')
  # Bug fix: when 'dates' was already POSIXct but carried no tzone attribute
  # (e.g. the result of Sys.time()), attr() returned NULL and the old test
  # attr(dates, 'tzone') == "" produced a zero-length logical, which made
  # '||' misbehave/error. Treat a NULL tzone the same as an empty one.
  if (!inherits(dates, "POSIXct") || is.null(tzone) || tzone == ""){
    # strip off POSIXct zone and replace w/ GMT offset
    dates <- as.POSIXct(as.character(dates), tz = get_UTM_offset())
  }
  return(dates)
}
get_UTM_offset <- function(){
  # Determine the local UTC offset as an "Etc/GMT{+|-}n" zone name by
  # comparing local wall-clock noon against the same wall-clock instant in
  # GMT, using whichever hemisphere's date is outside daylight-saving time.
  summer <- data.frame(NH = as.POSIXct("2011-06-01 12:00:00"), SH = as.POSIXct("2011-12-01 12:00:00"))
  dst <- c(NA, FALSE, TRUE)[as.POSIXlt(c(summer[,1], summer[,2]))$isdst + 2]
  use_i <- which(!dst)[1]
  UTM <- data.frame(NH = as.POSIXct("2011-06-01 12:00:00",tz = "GMT"), SH = as.POSIXct("2011-12-01 12:00:00", tz = "GMT"))
  # which(...)[1] is always length 1 (possibly NA), so is.na() alone is the
  # right guard; fall back to the system default zone ("") when DST status
  # could not be determined for either hemisphere.
  if (is.na(use_i)){ return("")}
  # Offset in hours (difftime units made explicit). Note the Etc/GMT zone
  # names use the inverted POSIX sign convention, so a positive
  # local-minus-GMT difference correctly maps to "Etc/GMT+n" (i.e. UTC-n).
  # Fractional offsets (e.g. +5.5) would produce an invalid zone name here;
  # that limitation is inherited from the original implementation.
  UTM_dif <- as.numeric(summer[,use_i] - UTM[,use_i], units = "hours")
  sym <- ifelse(UTM_dif < 0, '-','+')
  tz <- paste0("Etc/GMT",sym, as.character(UTM_dif))
  return(tz)
}
#'@title get subset of time from a generic timeseries data.frame
#'@description
#'resamples the input data.frame to only have rows corresponding to matches between
#'df$DateTime and t_out. Both df$DateTime and t_out are of type POSIXct, and the
#'precision of the match is passed in through the \code{precision} argument.
#'\emph{The order of t_out}, not df$DateTime is retained.
#'
#'@param df a data.frame with DateTime and potentially other columns
#'@param t_out a vector of POSIXct dates (or character array that can be coerced into POSIXct)
#'for matching to df$DateTime
#'@param method 'match' for exact match or 'interp' for temporal interpolation
#'@param precision matching precision (must be 'secs', 'mins','hours', 'days', or 'exact').
#'@return a data.frame with DateTime other original columns, resampled according to t_out
#'@keywords methods
#'@seealso \link{get_temp}, \link{get_wind}, \link{get_surface_height}, \link{get_evaporation}, \link{get_ice}
#'@author
#'Jordan S. Read
#'@examples
#'sim_folder <- run_example_sim(verbose = FALSE)
#'nc_file <- file.path(sim_folder, 'output.nc')
#'temp_surf <- get_temp(nc_file, reference = 'surface', z_out = c(0,1,2))
#'t_out <- as.POSIXct(c("2011-04-01", "2011-06-14", "2011-04-05", "2011-07-28"))
#'temp_out <- resample_sim(df = temp_surf, t_out = t_out)
#'
#'t_out <- c("2011-04-01 10:00", "2011-04-05 08:15",
#' "2011-06-14 10:30", "2011-04-05 10:21",
#' "2011-07-28 10:00")
#'temp_out <- resample_sim(df = temp_surf, t_out = t_out, precision = 'days')
#'
#'temp_out <- resample_sim(df = temp_surf, t_out = t_out, method = 'interp', precision = 'hours')
#'@noRd
resample_sim <- function(df, t_out, method = 'match', precision = 'days'){
  # Subset (or interpolate) rows of df whose DateTime matches t_out, keeping
  # the order of t_out rather than the order of df. With method = 'match',
  # rows are selected by equality at the requested precision; with
  # method = 'interp', df is first interpolated to the requested times.
  # time_precision() and df_interp() are helpers defined elsewhere in the
  # package; presumably time_precision() rounds timestamps down to the
  # requested granularity -- confirm against its definition.
  if (missing(t_out)){
    t_out = NULL
  }
  # No target times means no resampling: hand back df untouched.
  if (is.null(t_out)){
    return(df)
  }
  if (length(unique(t_out)) != length(t_out)){stop('t_out values must be unique')}
  t_out <- coerce_date(t_out)
  if (!(method %in% c("match", "interp"))){
    stop(paste0('method ', method, ' not currently supported'))
  }
  # wish this could be vectorized, but we need to retain the order of *t_out*, not df
  if (precision != 'exact'){
    time <- time_precision(t_out, precision)
  } else {
    time <- t_out
  }
  if (method == 'interp'){
    df <- df_interp(df, time)
    time_compr <- df$DateTime
  } else {
    time_compr <- time_precision(df$DateTime, precision)
  }
  idx_out <- vector(length = length(time))
  for (j in seq_len(length(time))){
    m_i <- which(time[j] - time_compr == 0) #funny, match doesn't work (lt vs ct types)
    # ifelse() with a scalar test returns a single value, so when several df
    # rows match the same target time only the first match is kept.
    idx_out[j] = ifelse(length(m_i)==0,NA,m_i)
  }
  # Drop target times that found no matching row.
  idx_out <- idx_out[!is.na(idx_out)]
  df_out <- df[idx_out, ]
  if (nrow(df_out) == 0){
    add_msg = ''
    if (method == 'match'){
      add_msg = ". Try method = 'interp'"
    }
    warning(paste0("no matches found using method = '",method,"' at ",precision,' precision',add_msg))
  }
  return(df_out)
}
|
9694cdf16c1800b95bc60c348181fe1c5ec8dc2a | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/ALSM/examples/CastleBakery.Rd.R | 0af554a5de3919735186aadb5ec802c506487d07 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 205 | r | CastleBakery.Rd.R | library(ALSM)
### Name: CastleBakery
### Title: Castle Bakery
### Aliases: CastleBakery
### Keywords: datasets
### ** Examples
data(CastleBakery)
## maybe str(CastleBakery) ; plot(CastleBakery) ...
|
0b56af0e75f50aaeec78dc9a55e4cdb11b652dad | 8a7035200119a192e32a1839d55bbe6a1c117d5b | /man/dixon.outliers.Rd | 60219273fd58af22ed36d57e417afd1d75097ad2 | [] | no_license | cran/referenceIntervals | 24a87fd2fa204c0c5583939ee947c8c264a23a3b | 71bb25c6e9c860bd02a73e7802ae436fe9cff7b7 | refs/heads/master | 2022-04-30T13:38:34.513179 | 2022-04-28T21:00:02 | 2022-04-28T21:00:02 | 17,699,107 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 1,268 | rd | dixon.outliers.Rd | \name{dixon.outliers}
\alias{dixon.outliers}
\title{
Determines outliers using Dixon's Q Test method
}
\description{
This determines outliers of the dataset by calculating Dixon's Q statistic and comparing
it to a standardized table of statistics. This method can only determine outliers for
datasets of size 3 <= n <= 30. This function requires the outliers package.
}
\usage{
dixon.outliers(data)
}
\arguments{
\item{data}{A vector of data points.
}
}
\value{
Returns a list containing a vector of outliers and a vector of the cleaned data (subset).
\item{outliers}{ A vector of outliers from the data set
}
\item{subset}{ A vector containing the remaining data, cleaned of outliers
}
}
\references{
Statistical treatment for rejection of deviant values: critical values of Dixon's "Q"
parameter and related subrange ratios at the 95\% confidence level. Anal. Chem., 1991, 63
(2), pp 139-146
DOI: 10.1021/ac00002a010. Publication Date: January 1991
One-sided and Two-sided Critical Values for Dixon's Outlier Test for Sample Sizes up to
n = 30. Economic Quality Control, Vol 23(2008), No. 1, 5-13.
}
\author{
Daniel Finnegan
}
\examples{
dixon.outliers(set20)
summary(dixon.outliers(set20)$subset)
}
\keyword{ ~Dixon }
\keyword{ ~outlier }
|
5539a4b65e6bde89abee052d2cb664754d2f1fb4 | 35bada16c07a9c00921ca07fa3557c35cf851ddf | /MONTECARLO.R | 3f4eeae0db5d50c8389c97fe41e41f6d912ebfee | [] | no_license | aguilarfabiola/estadistica-computacional | 0bb54e11543710fc3fa067aa84ff6a341f51532b | c244ce0eb1794c81d84d2efe8ee3cb312f5f3ff7 | refs/heads/main | 2023-07-22T12:03:22.194848 | 2021-09-06T23:30:46 | 2021-09-06T23:30:46 | 403,782,250 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 726 | r | MONTECARLO.R | # FUNCIÓN DE DENSIDAD DE DISTRIBUCIÓN ERLANG
erlang <- function(x){
  # Density of the Erlang distribution with shape k = 3 and rate 1.5:
  # f(x) = rate^k * x^(k-1) * exp(-rate * x) / (k-1)!   (here (k-1)! = 2).
  (1.5*((1.5*x)^2)*exp(-1.5*x))/2
}
# Generate pseudo-random uniform(0, 1) numbers.
u <- runif(100000000,0,1)
# Compute the first moment by Monte Carlo integration.
numbs <- c()
m1 <- function(u){
  # Integrand after the change of variable x = 1/u - 1, which maps
  # (0, 1) onto (0, infinity); -u^-2 is the Jacobian and the leading minus
  # sign compensates for the reversed orientation of the interval.
  h <- -((u^(-1))-1)*erlang((u^(-1))-1)*(-u^-2)
  # NOTE(review): 'numbs' is a fresh local copy inside this function, so this
  # line just returns h; nothing accumulates in the global 'numbs'.
  numbs <- c(numbs, h)
}
primer <- sum(m1(u))/length(u)
# Compute the second moment the same way.
numbs1 <- c()
m2 <- function(u){
  h1 <- -(((u^(-1))-1)^(2))*erlang((u^(-1))-1)*(-u^-2)
  numbs1 <- c(numbs1, h1)
}
segundo <- sum(m2(u))/length(u)
# Approximate (Monte Carlo) variance: E[X^2] - E[X]^2.
var_aprox <- segundo - (primer)^2
var_aprox
# Exact variance of Erlang(k = 3, rate = 1.5): k / rate^2.
var_exacta <- 3/((1.5)^2)
var_exacta
# Difference between exact and approximated variance.
var_exacta - var_aprox
|
1da7dfc46c5a12ecae6b84d64bc91d39a4602bd7 | 9600ab5e6ba7b8666596bbacddef1577d184fa9b | /R/mda_helpers.R | 7c260d948e13ea9c593ab2db8b37a5860ee7a4cf | [
"CC0-1.0",
"LicenseRef-scancode-public-domain-disclaimer",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | ldecicco-USGS/mda.streams | c22635a9d202c10098b4db3cd3e5fd8f5cc70fac | 9ec6bdba32369a12e2fb03844d67256bf03e6785 | refs/heads/master | 2021-01-21T23:20:40.311104 | 2015-03-05T20:13:10 | 2015-03-05T20:13:10 | 27,842,879 | 0 | 0 | null | 2015-03-05T18:42:22 | 2014-12-10T22:29:48 | R | UTF-8 | R | false | false | 612 | r | mda_helpers.R | #'@title create timeseries pointer
#'@param variable a timeseries name (e.g., \code{wtr})
#'@return the timeseries string (prefix + variable name)
#'@export
make_ts_variable <- function(variable){
  # Prepend the shared timeseries prefix to the variable name.
  paste0(get_ts_prefix(), variable)
}
# Prefix used to mark timeseries variable names.
get_ts_prefix <- function(){
  'ts_'
}
# File extension used for timeseries files.
get_ts_extension <- function(){
  'tsv'
}
# Column delimiter used inside timeseries files (tab).
get_ts_delim <- function(){
  '\t'
}
get_title <- function(id, session = NULL){
  # Fetch the item record via item_get() and return its 'title' field.
  item_get(id, session = session)[['title']]
}
3d0516b475178daf0990ee374e4c3bf87db59bab | c7004852f749639bca087ce00a8020c6dc357349 | /DataMiningHW4/hierarchial_clustering.R | 16aeb8ab9fa7fa28e05b4bc3b55d539d3185dc73 | [] | no_license | kevinaloys/DataMining | ead69ea8e00de78c5809ccce95114511a417886b | 5207a34dc0294f0b9cfca6b93fcfb2771e81d243 | refs/heads/master | 2021-01-25T08:29:56.334447 | 2014-12-09T21:00:01 | 2014-12-09T21:00:01 | 26,798,446 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 848 | r | hierarchial_clustering.R | # Hierarchial Clustering
# Kevin Aloysius
set.seed(123)
# Load the data: column 1 is the letter label, the rest are numeric features.
character <- read.table("az-5000.txt", header = TRUE)
# Remove the label column so only the features are clustered.
char <- character[,-1]
# Apply kmeans for k = 2..26; only the final fit (k = 26) is retained.
for (i in 2:26)
{
  output <- kmeans(char, centers = i, iter.max = 26)
}
# Hierarchical clustering of the 26 kmeans centers
# (euclidean distance, average linkage).
fit <- hclust(dist(output$centers, method = "euclidean"), method = "average")
plot(fit)
# 26x26 contingency matrix: letters (rows) vs cluster numbers (columns).
# Bug fix: the label vector was assigned to 'letter_matrixrix' but read as
# 'letter_matrix' inside the loop, so the original script errored out.
# Also renamed 'matrix' -> 'counts' to stop shadowing base::matrix, and
# replaced the hard-coded 1:5000 with the actual row count.
letter_matrix <- character[,1]
num_cluster <- output$cluster
counts <- matrix(0, 26, 26)
rownames(counts) <- LETTERS
for (k in seq_len(nrow(character)))
{
  counts[letter_matrix[k], num_cluster[k]] <- counts[letter_matrix[k], num_cluster[k]] + 1
}
# Relabel the dendrogram leaves with the most frequent letter in each cluster.
common <- c()
for (i in 1:26)
{
  common[i] <- which.max(counts[,i])
}
plot(fit, labels = LETTERS[common])
dc540c028968d8b552ee2c4595d4ef953251be46 | 28e03d9d35bbc437b720ebe0bd3e45b86760ec19 | /r5.R | 6288d68ac7762c65d2ad3f99861f2776bcfe9346 | [] | no_license | VinayKumar552-coder/assignment- | 0975229393571813b836ee2c66ba225457bb02ac | 1d0b4e0e568f15cf5fc78007826ec55af5e65df5 | refs/heads/master | 2020-06-18T23:25:56.363142 | 2019-07-14T22:02:07 | 2019-07-14T22:02:07 | 196,491,654 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 170 | r | r5.R | MyData <- read.csv(file="/Users/vinaykumarkobbanna/Desktop/yearly_sales.csv",header= TRUE, sep=",")
# 'View' (capital V) is the base data viewer; lowercase 'view' is undefined
# here because no package providing it is loaded.
View(MyData)
# Reference columns through the data frame; bare 'cust_id'/'sales_total'
# are not in scope because the data frame is never attached.
boxplot(MyData$cust_id, MyData$sales_total)
boxplot(MyData$sales_total, MyData$cust_id)
9822f9ee7e2f6f6f18a18724ba47a81442ebf962 | 476a565b1974d9cb63112d763d76b16f1ec69142 | /Previous scripts/Foxrabies/A1_Reproject_rabies_data.r | e8b75b9f35a146727076c9cfdb7b58683d700425 | [] | no_license | ce4-peru/covid19 | ee42fc2c375eb398d394ac34820c5a0023864209 | 07e3d6cad692f1ddcb59d5b55fa02c9ec40f0a65 | refs/heads/master | 2021-05-20T09:10:00.719490 | 2020-05-10T04:38:44 | 2020-05-10T04:38:44 | 252,215,739 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,444 | r | A1_Reproject_rabies_data.r | ##'**MRES PROJECT**
##'**FOX RABIES RETREAT IN WESTERN EUROPE**
##'**MICAELA DE LA PUENTE L.**
##'**This script shows reprojection of the data**
### TRANSFORM IT TO A DATA FRAME ###
######################################################
# FIRST SET WORKING DIRECTORY IN RSTUDIO
# i.e. Session > Set Working Directory > To Source File Location
rm(list=ls())
# NOTE(review): rm(list = ls()) wipes the caller's entire workspace; fine for
# a standalone script, but hazardous if this file is ever sourced interactively.
######################################################
library(foreign)
library(maptools)
library(sp)
library(rgdal)
library(rworldmap)
require(rgeos)
######################################################
## Transform it in R
rabies.data.single <-readOGR("data/Incidence/singlepart/Rabigramm_positive_single.shp",
                             "Rabigramm_positive_single") # file created in QGIS by Katie.
rabies.data.single.df <- as.data.frame(rabies.data.single)
str(rabies.data.single.df)
# 'data.frame': 248066 obs. of 11 variables:
# coords.x1 and coords.x2 and the individual coords per rabies case.
# x and y and the groupal coords for the reports.
### REPROJECT THE DATA ####
## Look for possible projections
EPSG <- make_EPSG() # object with all possible projections
EPSG[grep("Europe", EPSG$note), ] # search for projections assiciated with a word.
# we keep "+proj=lcc +lat_1=35 +lat_2=65 +lat_0=52 +lon_0=10 +x_0=4000000 +y_0=2800000 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
# CODE EPSG=5639, ISN2004 / LCC Europe
## Set longitude and latitude
longitude <- rabies.data.single.df$coords.x1
max(longitude); min(longitude)
#[1] 7221107
#[1] 2508469
latitude <- rabies.data.single.df$coords.x2
max(latitude); min(latitude)
#[1] 5076308
#[1] 1088140
## Define the coordinate systems
latlon_CRS <- CRS("+proj=longlat +ellps=WGS84")
## Call latlon_CRS
latlon_CRS
# Output: CRS arguments: +proj=longlat +ellps=WGS84
## Create spatial points
d.rabies <- SpatialPoints(cbind(x=rabies.data.single.df$coords.x1,
                                y=rabies.data.single.df$coords.x2))
d.rabies # features : 248066
## Plot the spatial points
plot(d.rabies)
## Create an object with the new projection for the spatial points
rabies_CRS <- CRS("+proj=lcc +lat_1=35 +lat_2=65 +lat_0=52 +lon_0=10 +x_0=4000000 +y_0=2800000 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs")
## Convert the spatial points to a new object with the new projection
# proj4string<- only *declares* the CRS of the existing coordinates;
# the actual reprojection to lon/lat is done by spTransform() below.
proj4string(d.rabies) <- rabies_CRS
d.rabies.new <- spTransform(d.rabies, latlon_CRS)
d.rabies.new
### Plot the new spatial points object over a low resolution map to confirm location
newmap <- getMap(resolution = "low")
plot(newmap)
points(d.rabies.new, col="red", cex=0.5)
# The location looks correct.
### CREATE A SPATIAL POINTS DATA FRAME OBJECT ###
# Attach the original attribute table to the reprojected points.
rabies.data.spdf <- SpatialPointsDataFrame(d.rabies.new, rabies.data.single.df)
rabies.data <- as.data.frame(rabies.data.spdf)
str(rabies.data)
# 248066 obs. of 13 variables, including x.1 and y.1 (the new coordinates).
######################################################
# WHEN EXPORTING CREATE AND SAVE TO OUTPUT DIRECTORY - WILL HELP LATER!:
######################################################
### EXPORT DATA FRAMES (csv and shp) OBJECTS ###
#write.csv(rabies.data, file = "output/rabies.data.projected.csv")
#writeOGR(rabies.data.spdf, ".", "output/rabies.data.projected", driver="ESRI Shapefile")
## Confirm they work
#rabies <-readOGR("rabies.data.projected_.shp","rabies.data.projected_")
#rabies.data.single.df <- as.data.frame(rabies)
|
da7f248339a1297b4c4969a6a5c63cc4d419af8d | 1bbd922a9e81341c9f81cfba4aa48664aeaa9a95 | /R/selwik.R | 466ce9f9e0c1b9e56a8cce233fe3c169016febc8 | [] | no_license | mlesnoff/rnirs | b2519dee12788132107542c4c097611a73c1b995 | 1398d746df67f0f6d80063366db969998522dc04 | refs/heads/master | 2023-04-15T22:15:33.045477 | 2023-04-07T13:59:18 | 2023-04-07T13:59:18 | 208,553,347 | 18 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,501 | r | selwik.R | selwik <- function(
X, Y, ncomp,
algo = NULL, weights = NULL,
nperm = 50, seed = NULL,
print = TRUE,
...
) {
X <- .matrix(X)
zdim <- dim(X)
n <- zdim[1]
zp <- zdim[2]
Y <- .matrix(Y, row = FALSE, prefix.colnam = "y")
q <- dim(Y)[2]
if(is.null(algo))
algo <- pls_kernel
if(is.null(weights))
weights <- rep(1 / n, n)
else
weights <- weights / sum(weights)
xmeans <- .xmean(X, weights = weights)
X <- .center(X, xmeans)
ymeans <- .xmean(Y, weights = weights)
Y <- .center(Y, ymeans)
pval <- stat <- numeric()
set.seed(seed = seed)
for(a in seq_len(ncomp)) {
if(print)
cat(a, " ")
fm <- algo(X, Y, ncomp = 1, ...)
## Observed covariance
## stat0 <- c(cov(Y, fm$T))
## For PLSR2
stat0 <- sum(cov(Y, fm$T))
for(i in seq_len(nperm)) {
zY <- Y[sample(1:n), ]
zfm <- algo(X, zY, ncomp = 1, ...)
## H0 covariance
## stat[i] <- c(cov(zY, zfm$T))
## For PLSR2
stat[i] <- sum(cov(zY, zfm$T))
}
pval[a] <- sum(stat0 < stat) / nperm
X <- X - tcrossprod(fm$T, fm$P)
Y <- Y - tcrossprod(fm$T, fm$C)
}
set.seed(seed = NULL)
if(print)
cat("\n\n")
list(ncomp = seq_len(ncomp), pval = pval)
}
|
e6699f9f43e2c918cd46b4a8527c252c993398cc | aaedcedaf17ee3f3c5ec159044342c3112233048 | /GettingData.R | 386e7a156b52b30fc1685947b1e4f2421f819ba2 | [] | no_license | marciorbe/Testes | cbe92405d9e0515e92ae5734072ff631898809a0 | e24cf685dcae17784d5580d90318ccaadc73300a | refs/heads/master | 2021-01-01T06:44:38.927823 | 2014-10-21T20:59:42 | 2014-10-21T20:59:42 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 15,637 | r | GettingData.R | ## Create directorie
if (!file.exists("data")){
dir.create("data")
}
destfile <- "./data/cameras.csv"
## Downloading file
fileUrl <- "https://data.baltimorecity.gov/api/views/dz54-2aru/rows.csv?accessType=DOWNLOAD"
download.file( fileUrl, destfile = destfile, method = "curl" )
list.files("./data")
## Reading local flat files
dateDownloaded <- date()
cameraData <- read.table(destfile) ## Error
head(cameraData) ## Error
##
cameraData <- read.table(destfile,sep=",",header=TRUE)
head(cameraData)
##
cameraData <- read.csv(destfile)
head(cameraData)
## Reading Excel files
fileUrl <- "https://data.baltimorecity.gov/api/views/dz54-2aru/rows.xlsx?accessType=DOWNLOAD"
download.file(fileUrl,destfile="./data/cameras.xlsx",method="curl")
dateDownloaded <- date()
library(xlsx)
cameraData <- read.xlsx("./data/cameras.xlsx",sheetIndex=1,header=TRUE)
head(cameraData)
colIndex <- 2:3
rowIndex <- 1:4
cameraDataSubset <- read.xlsx("./data/cameras.xlsx",sheetIndex=1,
colIndex=colIndex,rowIndex=rowIndex)
cameraDataSubset
## Reading XML files
##Read the file into R
library(XML)
fileUrl <- "http://www.w3schools.com/xml/simple.xml"
doc <- xmlTreeParse(fileUrl,useInternal=TRUE)
rootNode <- xmlRoot(doc)
xmlName(rootNode)
names(rootNode)
## Directly access parts of the XML document
rootNode[[1]]
rootNode[[1]][[1]]
## Programatically extract parts of the file
xmlSApply(rootNode,xmlValue)
## Programatically extract parts of the file
xmlSApply(rootNode,xmlValue)
## Get the items on the menu and prices
xpathSApply(rootNode,"//name",xmlValue)
xpathSApply(rootNode,"//price",xmlValue)
## Extract content by attributes
fileUrl <- "http://espn.go.com/nfl/team/_/name/bal/baltimore-ravens"
doc <- htmlTreeParse(fileUrl,useInternal=TRUE)
scores <- xpathSApply(doc,"//li[@class='score']",xmlValue)
teams <- xpathSApply(doc,"//li[@class='team-name']",xmlValue)
scores
teams
## Reading JSON
### Reading data from JSON {jsonlite package}
library(jsonlite)
jsonData <- fromJSON("https://api.github.com/users/jtleek/repos")
names(jsonData)
### Nested objects in JSON
names(jsonData$owner)
jsonData$owner$login
### Writing data frames to JSON
myjson <- toJSON(iris, pretty=TRUE)
cat(myjson)
### Convert back to JSON
iris2 <- fromJSON(myjson)
head(iris2)
## Using data.table
### Create data tables just like data frames
library(data.table)
DF = data.frame(x=rnorm(9),y=rep(c("a","b","c"),each=3),z=rnorm(9))
head(DF,3)
DT = data.table(x=rnorm(9),y=rep(c("a","b","c"),each=3),z=rnorm(9))
head(DT,3)
### See all the data tables in memory
tables()
### Subsetting rows
DT[2,]
DT[DT$y=="a",]
### Subsetting rows
DT[c(2,3)]
### Subsetting columns!?
DT[,c(2,3)] ## will not work!
### Column subsetting in data.table
#### The subsetting function is modified for data.table
#### The argument you pass after the comma is called an "expression"
#### In R an expression is a collection of statements enclosed in curley brackets
{
x = 1
y = 2
}
k = {print(10); 5}
## [1] 10
print(k)
## [1] 5
## Calculating values for variables with expressions
DT[,list(mean(x),sum(z))]
DT[,table(y)]
### Adding new columns
DT[,w:=z^2]
DT2 <- DT
DT[, y:= 2]
### Careful
head(DT,n=3)
head(DT2,n=3)
### Multiple operations
DT[,m:= {tmp <- (x+z); log2(tmp+5)}]
### plyr like operations
DT[,a:=x>0]
DT[,b:= mean(x+w),by=a]
### Special variables
#### .N An integer, length 1, containing the numbe r
set.seed(123);
DT <- data.table(x=sample(letters[1:3], 1E5, TRUE))
DT[, .N, by=x]
### Keys
DT <- data.table(x=rep(c("a","b","c"),each=100), y=rnorm(300))
setkey(DT, x)
DT['a']
### Joins
DT1 <- data.table(x=c('a', 'a', 'b', 'dt1'), y=1:4)
DT2 <- data.table(x=c('a', 'b', 'dt2'), z=5:7)
setkey(DT1, x); setkey(DT2, x)
merge(DT1, DT2)
### Fast reading
big_df <- data.frame(x=rnorm(1E6), y=rnorm(1E6))
file <- tempfile()
write.table(big_df, file=file, row.names=FALSE, col.names=TRUE, sep="\t", quote=FALSE)
system.time(fread(file))
system.time(read.table(file, header=TRUE, sep="\t"))
## Quiz 1
if (!file.exists("data")){
dir.create("data")
}
destfile <- "./data/data.csv"
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06hid.csv"
download.file( fileUrl, destfile = destfile, method = "curl" )
data <- read.table(destfile,sep=",",header=TRUE)
head(data)
## 01 - How many properties are worth $1,000,000 or more?
sum(data[["VAL"]]==24,na.rm=TRUE)
## 03
destfile <- "./data/ngap.xlsx"
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FDATA.gov_NGAP.xlsx"
download.file( fileUrl, destfile = destfile, method = "curl" )
colIndex <- 7:15
rowIndex <- 18:23
dat <- read.xlsx(destfile,sheetIndex=1,colIndex=colIndex,rowIndex=rowIndex)
sum(dat$Zip*dat$Ext,na.rm=T)
## 04
destfile <- "./data/restaurants.xml"
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Frestaurants.xml"
download.file( fileUrl, destfile = destfile, method = "curl" )
doc <- xmlTreeParse(fileUrl,useInternal=TRUE)
doc <- xmlTreeParse(destfile,useInternal=TRUE)
## 05
require(data.table)
dt <- fread(".data/micro.csv")
system.time({mean(DT[DT$SEX==1,]$pwgtp15); mean(DT[DT$SEX==2,]$pwgtp15)})
system.time(tapply(DT$pwgtp15,DT$SEX,mean))
system.time(mean(DT$pwgtp15,by=DT$SEX))
system.time(DT[,mean(pwgtp15),by=SEX])
system.time({rowMeans(DT)[DT$SEX==1]; rowMeans(DT)[DT$SEX==2]})
system.time(sapply(split(DT$pwgtp15,DT$SEX),mean))
## Reading mySQL
## Step 1 - Install MySQL
## Step 2 - Install RMySQL
## On a Mac: install.packages("RMySQL")
## On Windows:
## Official instructions - http://biostat.mc.vanderbilt.edu/wiki/Main/RMySQL (may be useful for Mac/UNIX users as well)
## Potentially useful guide - http://www.ahschulz.de/2013/07/23/installing-rmysql-under-windows/
## Connecting and listing databases
ucscDb <- dbConnect(MySQL(),user="genome", host="genome-mysql.cse.ucsc.edu")
result <- dbGetQuery(ucscDb,"show databases;");
dbDisconnect(ucscDb);
result
## Connecting to hg19 and listing tables
hg19 <- dbConnect(MySQL(),user="genome", db="hg19", host="genome-mysql.cse.ucsc.edu")
allTables <- dbListTables(hg19)
length(allTables)
allTables[1:5]
## Get dimensions of a specific table
dbListFields(hg19,"affyU133Plus2")
dbGetQuery(hg19, "select count(*) from affyU133Plus2")
## Read from the table
affyData <- dbReadTable(hg19, "affyU133Plus2")
head(affyData)
## Select a specific subset
query <- dbSendQuery(hg19, "select * from affyU133Plus2 where misMatches between 1 and 3")
affyMis <- fetch(query);
quantile(affyMis$misMatches)
affyMisSmall <- fetch(query,n=10); dbClearResult(query);
dim(affyMisSmall)
## Don't forget to close the connection!
dbDisconnect(hg19)
## Further resources
## RMySQL vignette http://cran.r-project.org/web/packages/RMySQL/RMySQL.pdf
## List of commands http://www.pantz.org/software/mysql/mysqlcommands.html
## Do not, do not, delete, add or join things from ensembl. Only select.
## In general be careful with mysql commands
## A nice blog post summarizing some other commands http://www.r-bloggers.com/mysql-and-r/
## Reading HDF5
## HDF5
## Used for storing large data sets
## Supports storing a range of data types
## Heirarchical data format
## groups containing zero or more data sets and metadata
## Have a group header with group name and list of attributes
## Have a group symbol table with a list of objects in group
## datasets multidmensional array of data elements with metadata
## Have a header with name, datatype, dataspace, and storage layout
## Have a data array with the data
## http://www.hdfgroup.org/
## R HDF5 package
source("http://bioconductor.org/biocLite.R")
biocLite("rhdf5")
library(rhdf5)
created = h5createFile("example.h5")
created
## This will install packages from Bioconductor http://bioconductor.org/, primarily used for genomics but also has good "big data" packages
## Can be used to interface with hdf5 data sets.
## This lecture is modeled very closely on the rhdf5 tutorial that can be found here http://www.bioconductor.org/packages/release/bioc/vignettes/rhdf5/inst/doc/rhdf5.pdf
## Create groups
created = h5createGroup("example.h5","foo")
created = h5createGroup("example.h5","baa")
created = h5createGroup("example.h5","foo/foobaa")
h5ls("example.h5")
## Write to groups
A = matrix(1:10,nr=5,nc=2)
h5write(A, "example.h5","foo/A")
B = array(seq(0.1,2.0,by=0.1),dim=c(5,2,2))
attr(B, "scale") <- "liter"
h5write(B, "example.h5","foo/foobaa/B")
h5ls("example.h5")
## Write a data set
df = data.frame(1L:5L,seq(0,1,length.out=5), c("ab","cde","fghi","a","s"), stringsAsFactors=FALSE)
h5write(df, "example.h5","df")
h5ls("example.h5")
## Reading data
readA = h5read("example.h5","foo/A")
readB = h5read("example.h5","foo/foobaa/B")
readdf= h5read("example.h5","df")
readA
## Writing and reading chunks
h5write(c(12,13,14),"example.h5","foo/A",index=list(1:3,1))
h5read("example.h5","foo/A")
## Notes and further resources
## hdf5 can be used to optimize reading/writing from disc in R
## The rhdf5 tutorial:
## http://www.bioconductor.org/packages/release/bioc/vignettes/rhdf5/inst/doc/rhdf5.pdf
## The HDF group has informaton on HDF5 in general http://www.hdfgroup.org/HDF5/
## Reading data from the web
## Webscraping
## Webscraping: Programatically extracting data from the HTML code of websites.
## It can be a great way to get data How Netflix reverse engineered Hollywood
## Many websites have information you may want to programaticaly read
## In some cases this is against the terms of service for the website
## Attempting to read too many pages too quickly can get your IP address blocked
## http://en.wikipedia.org/wiki/Web_scraping
## Getting data off webpages - readLines()
con = url("http://scholar.google.com/citations?user=HI-I6C0AAAAJ&hl=en")
htmlCode = readLines(con)
close(con)
htmlCode
## Parsing with XML
library(XML)
url <- "http://scholar.google.com/citations?user=HI-I6C0AAAAJ&hl=en"
html <- htmlTreeParse(url, useInternalNodes=T)
xpathSApply(html, "//title", xmlValue)
xpathSApply(html, "//td[@id='col-citedby']", xmlValue)
## GET from the httr package
library(httr);
html2 = GET(url)
content2 = content(html2,as="text")
parsedHtml = htmlParse(content2,asText=TRUE)
xpathSApply(parsedHtml, "//title", xmlValue)
## Accessing websites with passwords
pg1 = GET("http://httpbin.org/basic-auth/user/passwd")
pg1
## http://cran.r-project.org/web/packages/httr/httr.pdf
## Accessing websites with passwords
pg2 = GET("http://httpbin.org/basic-auth/user/passwd", authenticate("user","passwd"))
pg2
names(pg2)
## Using handles
google = handle("http://google.com")
pg1 = GET(handle=google,path="/")
pg2 = GET(handle=google,path="search")
## http://cran.r-project.org/web/packages/httr/httr.pdf
## Notes and further resources
## R Bloggers has a number of examples of web scraping http://www.r-bloggers.com/?s=Web+Scraping
## The httr help file has useful examples http://cran.r-project.org/web/packages/httr/httr.pdf
## See later lectures on APIs
## Reading data from APIs
## Accessing Twitter from R
myapp = oauth_app("twitter", key="yourConsumerKeyHere", secret="yourConsumerSecretHere")
sig = sign_oauth1.0( myapp, token = "yourTokenHere", token_secret = "yourTokenSecretHere")
homeTL = GET("https://api.twitter.com/1.1/statuses/home_timeline.json", sig)
## Converting the json object
json1 = content(homeTL)
json2 = jsonlite::fromJSON(toJSON(json1))
json2[1,1:4]
## Reading from other sources
## Quiz 2
Question 1
Register an application with the Github API here https://github.com/settings/applications.
Access the API to get information on your instructors repositories
(hint: this is the url you want "https://api.github.com/users/jtleek/repos").
Use this data to find the time that the datasharing repo was created.
What time was it created?
This tutorial may be useful (https://github.com/hadley/httr/blob/master/demo/oauth2-github.r).
You may also need to run the code in the base R package and not R studio.
token: c7297bca4d4ad3e2a9ab173130599242db0f1701
Client ID: bec7b43ad8481033bbc0
Client Secret: 3a013b8483a52af8423e83ad3a865dbbace56aaf
con = url("https://api.github.com/users/jtleek/repos")
code = readLines(con)
close(con)
library(httr)
oauth_endpoints("github")
myapp <- oauth_app("MyFirstApp","bec7b43ad8481033bbc0", "3a013b8483a52af8423e83ad3a865dbbace56aaf")
github_token <- oauth2.0_token(oauth_endpoints("github"), myapp)
gtoken <- config(token = github_token)
req <- GET("https://api.github.com/users/jtleek/repos", gtoken)
stop_for_status(req)
content(req)
library(httr)
# 1. Find OAuth settings for github:
# http://developer.github.com/v3/oauth/
oauth_endpoints("github")
# 2. Register an application at https://github.com/settings/applications;
# Use any URL you would like for the homepage URL (http://github.com is fine)
# and http://localhost:1410 as the callback url
#
# Insert your client ID and secret below - if secret is omitted, it will
# look it up in the GITHUB_CONSUMER_SECRET environmental variable.
myapp <- oauth_app("github", "56b637a5baffac62cad9")
# 3. Get OAuth credentials
github_token <- oauth2.0_token(oauth_endpoints("github"), myapp)
# 4. Use API
gtoken <- config(token = github_token)
req <- GET("https://api.github.com/rate_limit", gtoken)
stop_for_status(req)
content(req)
# OR:
req <- with_config(gtoken, GET("https://api.github.com/rate_limit"))
stop_for_status(req)
content(req)
Question 2
The sqldf package allows for execution of SQL commands on R data frames.
We will use the sqldf package to practice the queries we might send with the dbSendQuery command in RMySQL.
Download the American Community Survey data and load it into an R object called
acs
https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06pid.csv
Which of the following commands will select only the data for the probability weights pwgtp1 with ages less than 50?
sqldf("select pwgtp1 from acs")
sqldf("select * from acs where AGEP < 50 and pwgtp1")
sqldf("select * from acs")
=> sqldf("select pwgtp1 from acs where AGEP < 50")
Question 3
Using the same data frame you created in the previous problem, what is the equivalent function to unique(acs$AGEP)
=> sqldf("select distinct AGEP from acs")
sqldf("select AGEP where unique from acs")
sqldf("select unique * from acs")
sqldf("select distinct pwgtp1 from acs")
Question 4
How many characters are in the 10th, 20th, 30th and 100th lines of HTML from this page:
http://biostat.jhsph.edu/~jleek/contact.html
(Hint: the nchar() function in R may be helpful)
=> 45 31 7 25
43 99 8 6
45 31 7 31
45 0 2 2
43 99 7 25
45 31 2 25
45 92 7 2
con = url("http://scholar.google.com/citations?user=HI-I6C0AAAAJ&hl=en")
code = readLines("http://biostat.jhsph.edu/~jleek/contact.html")
code[c(10,20,30,100)]
lapply(code[c(10,20,30,100)],nchar)
Question 5
Read this data set into R and report the sum of the numbers in the fourth of the nine columns.
https://d396qusza40orc.cloudfront.net/getdata%2Fwksst8110.for
Original source of the data: http://www.cpc.ncep.noaa.gov/data/indices/wksst8110.for
(Hint this is a fixed width file format)
36.5
35824.9
=>32426.7
28893.3
222243.1
101.83
## Q5: download the fixed-width sea-surface-temperature file once, read it,
## and sum the fourth column.
## Fixes: the file was read before it was downloaded, the download path used
## a different case ("Q05.for") than the read path ("q5.for"), and a trailing
## read.fwf() call contained bare 'row.names'/'...' placeholders and did not
## parse. Widths: a 10+9 header pair, then four (9, 4) value/anomaly pairs.
destfile <- "./data/q5.for"
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fwksst8110.for"
download.file(fileUrl, destfile = destfile, method = "curl")
data <- read.fwf(file = destfile, widths = c(10, 9, 4, 9, 4, 9, 4, 9, 4))
## Answer: 32426.7
sum(data[, 4])
setwd("C:/Ferramentas/WorkSpaces/Coursera")
|
4386f647254fda59af4a43362fbf6c0788faede7 | 655ee959878fc9fa6f0ffdd7fb956f38936c2072 | /symbolicR/man/symbolic.gsub.Rd | 95edb1bb535477acb015278fd26fafffc15d4d2c | [] | no_license | isabella232/symbolicR | 29b1b28334f8889846156d8fd1effdbec6164e6d | 001707e9a380de37a8f7fe3d2a463cf047733109 | refs/heads/master | 2023-03-16T00:08:39.874076 | 2017-12-13T10:00:47 | 2017-12-13T10:00:47 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 601 | rd | symbolic.gsub.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/00symbolic.000general.R
\name{symbolic.gsub}
\alias{symbolic.gsub}
\title{symbolic.gsub}
\usage{
symbolic.gsub(pat, rep, e0)
}
\arguments{
\item{pat}{pattern expression}
\item{rep}{expression as a replacement}
\item{e0}{expression to be operated}
}
\value{
the replaced expression
}
\description{
recursively replace a pattern \code{pat} in an expression \code{e0} by an expression \code{rep} \cr
similar to \code{\link{symbolic.sub}}, with the exception that all occurrences of \code{pat} are replaced
}
\author{
jjxie
}
|
f419c3fa2369d33186984c9772373ed98f357dc2 | 36cc048d7e233bfc864fe3c91f99819ddab90595 | /app.R | 86d1dba63a38e29a47e37de4f27cac95eef82d86 | [] | no_license | PurityNyakundi/shiny | e987fe0f7cb42481cd2d52bb138a2d43ca9b6725 | e54e93aceab69069ffac0f5f1ef07592002d11c2 | refs/heads/master | 2023-02-28T19:41:36.894902 | 2021-02-12T12:29:02 | 2021-02-12T12:29:02 | 271,760,255 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 355 | r | app.R | library(shiny)
# UI: a slider that controls the sample size, plus the histogram output slot.
ui <- fluidPage(
  wellPanel(
    sliderInput(inputId = "num",
                label = "My label",
                value = 10, min = 1, max = 100),
    plotOutput("hist")
  )
)

# Server: redraw the histogram of input$num standard-normal draws on change.
server <- function(input, output) {
  output$hist <- renderPlot({
    plot_title <- "histogram"
    hist(rnorm(input$num), main = plot_title, xlab = "histogram")
  })
}

shinyApp(ui = ui, server = server)
aee3109235544f0845de029166466c25ddac9ed8 | 417e6f1275e3ee4bfbd5bedfa94b5e5c3546a659 | /rscripts/performanceMetrics.R | 6aa8a40f718fadc017466400fd14c6f854ee6157 | [] | no_license | Delvis/GMML-Talus | c2cbdeaadf065364b10716c0071d72e7764f8816 | abf09273eb3ea67ef1b191e6864785f9a80c107e | refs/heads/master | 2021-01-18T02:58:35.036063 | 2016-01-18T13:33:54 | 2016-01-18T13:33:54 | 39,903,341 | 4 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,260 | r | performanceMetrics.R | # performanceMetrics computes simple metrics for binary classifiers evaluation.
#
# Metrics:
# Overall accuracy
# Kappa
# Sensitivity
# Predictive value
#
# Authors:
#
# João Coelho
# David Navega
#
# Laboratory of Forensic Anthropology
# Department of Life Sciences,
# University of Coimbra,
# Portugal
performanceMetrics <- function(ml.model){
  # Compute simple binary-classifier metrics (overall accuracy, per-class
  # sensitivity and predictive value) from the resampled confusion matrix of
  # each caret model in ml.model$model. Returns one row per model.
  # NOTE(review): the file header also mentions Kappa, but it is not computed
  # here -- confirm whether that is intentional.
  #
  # confusionMatrix.train() lives in caret; fail fast if it is unavailable
  # instead of attaching the whole package from inside a function.
  if (!requireNamespace("caret", quietly = TRUE)) {
    stop("performanceMetrics() requires the 'caret' package.", call. = FALSE)
  }
  n.models <- length(ml.model$model)
  # Preallocate one row per model: avoids growing the data frame inside the
  # loop, and returns an empty frame (instead of erroring on 1:0) when no
  # models are supplied.
  metrics <- data.frame(Model = rep(NA_character_, n.models),
                        OverallAccuracy = rep(NA_real_, n.models),
                        SensitivityFemale = rep(NA_real_, n.models),
                        SensitivityMale = rep(NA_real_, n.models),
                        PredValFemale = rep(NA_real_, n.models),
                        PredValMale = rep(NA_real_, n.models),
                        stringsAsFactors = FALSE)
  for (i in seq_len(n.models)) {
    cfMat <- caret::confusionMatrix.train(ml.model$model[[i]])$table
    acc     <- sum(diag(cfMat)) / sum(cfMat)  # Overall Accuracy
    sens.f  <- cfMat[1, 1] / sum(cfMat[, 1])  # Sensitivity Female
    sens.m  <- cfMat[2, 2] / sum(cfMat[, 2])  # Sensitivity Male
    predv.f <- cfMat[1, 1] / sum(cfMat[1, ])  # Predictive Value Female
    predv.m <- cfMat[2, 2] / sum(cfMat[2, ])  # Predictive Value Male
    metrics[i, 1] <- ml.model$model[[i]]$method
    metrics[i, 2:6] <- c(acc, sens.f, sens.m, predv.f, predv.m)
  }
  return(metrics)
}
|
b89cbd85d9b3d497c9b2cf094d52404c5a2c0c98 | 0ecfd68e1413eefac556f14e4dfa541e528b5184 | /code/server-config.R | c11f4d49d68cd339918a6a66b03a86263f2d91a9 | [
"MIT"
] | permissive | HGray384/lrcfs-stats-reference-book | de1bd8a47c6362c59a89caaecfa69f6a2b322e74 | 3e3436f8e9182ad95f7052131a3458e210fe642a | refs/heads/master | 2023-06-04T11:29:11.445001 | 2021-06-22T12:28:58 | 2021-06-22T12:28:58 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 568 | r | server-config.R | #This is a server specific configuration file
# Update the SHINY_HOST property to match the deployment environment:
# uncomment exactly one of the SHINY_HOST lines below and comment out the rest.
SHINY_HOST = "http://127.0.0.1:5555" #development (local Shiny server)
#SHINY_HOST = "https://lrcfs.dundee.ac.uk/apps/interactive-lr" #Production Server
#SHINY_HOST = "https://lrcfs-shiny.test.dundee.ac.uk/apps/interactive-lr" #Test Server
#SHINY_HOST = "http://127.0.0.1:5555" #development #Example development path
# Base URL used to build links to the interactive questions for the book:
QUESTIONS_HOST = "https://lrcfs.dundee.ac.uk/lr-book-questions/"
fffc140e85cc99a583d18ca03d4a7899ac41f67f | 6f6f97554599532e8345d769f96c9b6e9d2cb943 | /httk/R/calc_analytic_css_pbtk.R | 5eb62e7460dcd4dbf61d66e72915e20c9c2c921c | [] | no_license | jrsfeir/CompTox-ExpoCast-httk | 37cbfa4142c261ed79d3141142a613a614b28d38 | bf2f6c300fe2cf0c538c16355c2ec437ca781c55 | refs/heads/main | 2023-06-26T16:56:17.010294 | 2021-05-10T14:53:13 | 2021-05-10T14:53:13 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,497 | r | calc_analytic_css_pbtk.R | #'Calculate the analytic steady state concentration for model pbtk.
#'
#'This function calculates the analytic steady state plasma or venous blood
#'concentrations as a result of infusion dosing.
#'
#'@param chem.name Either the chemical name, CAS number, or the parameters must
#' be specified.
#'@param chem.cas Either the chemical name, CAS number, or the parameters must
#' be specified.
#' @param dtxsid EPA's 'DSSTox Structure ID (\url{https://comptox.epa.gov/dashboard})
#' the chemical must be identified by either CAS, name, or DTXSIDs
#'@param parameters Chemical parameters from parameterize_pbtk (for model =
#' 'pbtk'), parameterize_3comp (for model = '3compartment),
#' parmeterize_1comp(for model = '1compartment') or parameterize_steadystate
#' (for model = '3compartmentss'), overrides chem.name and chem.cas.
#'@param hourly.dose Hourly dose rate mg/kg BW/h.
#'@param concentration Desired concentration type, 'blood', 'tissue', or default 'plasma'.
#'@param suppress.messages Whether or not the output message is suppressed.
#'@param recalc.blood2plasma Recalculates the ratio of the amount of chemical
#' in the blood to plasma using the input parameters. Use this if you have
#''altered hematocrit, Funbound.plasma, or Krbc2pu.
#'@param tissue Desired tissue conentration (defaults to whole body
#' concentration.)
#'@param restrictive.clearance If TRUE (default), then only the fraction of
#' chemical not bound to protein is available for metabolism in the liver. If
#' FALSE, then all chemical in the liver is metabolized (faster metabolism due
#' to rapid off-binding).
#'@param bioactive.free.invivo If FALSE (default), then the total concentration is treated
#' as bioactive in vivo. If TRUE, the the unbound (free) plasma concentration is treated as
#' bioactive in vivo. Only works with tissue = NULL in current implementation.
#'@param ... Additional parameters passed to parameterize function if
#' parameters is NULL.
#'
#'@return Steady state concentration in uM units
#'
#'@author Robert Pearce and John Wambaugh
#'@keywords pbtk
calc_analytic_css_pbtk <- function(chem.name=NULL,
                                   chem.cas = NULL,
                                   dtxsid = NULL,
                                   parameters=NULL,
                                   hourly.dose=1/24,
                                   concentration='plasma',
                                   suppress.messages=FALSE,
                                   recalc.blood2plasma=FALSE,
                                   tissue=NULL,
                                   restrictive.clearance=TRUE,
                                   bioactive.free.invivo = FALSE,
                                   ...)
{
  # Parameter-name vectors for the pbtk and Schmitt models, used to validate
  # `parameters` and to subset before partition-coefficient prediction:
  param.names.pbtk <- model.list[["pbtk"]]$param.names
  param.names.schmitt <- model.list[["schmitt"]]$param.names

  # We need to describe the chemical to be simulated one way or another:
  if (is.null(chem.cas) &&
      is.null(chem.name) &&
      is.null(dtxsid) &&
      is.null(parameters))
    stop('parameters, chem.name, chem.cas, or dtxsid must be specified.')

  # Look up the chemical name/CAS, depending on what was provided:
  if (is.null(parameters))
  {
    out <- get_chem_id(
            chem.cas=chem.cas,
            chem.name=chem.name,
            dtxsid=dtxsid)
    chem.cas <- out$chem.cas
    chem.name <- out$chem.name
    dtxsid <- out$dtxsid

    parameters <- parameterize_pbtk(chem.cas=chem.cas,
                                    chem.name=chem.name,
                                    suppress.messages=suppress.messages,
                                    ...)

    if (recalc.blood2plasma)
    {
      warning("Argument recalc.blood2plasma=TRUE ignored because parameters is NULL.")
    }
  } else {
    # User-supplied parameters must contain everything the pbtk model needs:
    if (!all(param.names.pbtk %in% names(parameters)))
    {
      stop(paste("Missing parameters:",
                 paste(param.names.pbtk[which(!param.names.pbtk %in% names(parameters))],
                   collapse=', '),
                 ". Use parameters from parameterize_pbtk."))
    }
    if (recalc.blood2plasma) {
      # Rblood2plasma = 1 - hematocrit + hematocrit * Krbc2pu * fup
      parameters[['Rblood2plasma']] <- 1 -
        parameters[['hematocrit']] +
        parameters[['hematocrit']] * parameters[['Krbc2pu']] * parameters[['Funbound.plasma']]
    }
  }

  # Scale the allometric flow constants to this body weight:
  Qcardiac <- parameters[["Qcardiacc"]] / parameters[['BW']]^0.25
  Qgfr <- parameters[["Qgfrc"]] / parameters[['BW']]^0.25
  Clmetabolism <- parameters[["Clmetabolismc"]]

  # Tissue blood flows as fractions of cardiac output:
  Qgut <- parameters[["Qgutf"]] * Qcardiac
  Qliver <- parameters[["Qliverf"]] * Qcardiac
  Qkidney <- parameters[['Qkidneyf']] * Qcardiac
  Qrest <- Qcardiac-Qgut-Qliver-Qkidney
  Rblood2plasma <- parameters[['Rblood2plasma']]
  fup <- parameters[["Funbound.plasma"]]
  # Non-restrictive clearance: all chemical in the liver (not just the
  # unbound fraction) is available for metabolism:
  if (!restrictive.clearance) Clmetabolism <- Clmetabolism / fup

  # Only the orally absorbed fraction of the dose reaches circulation:
  hourly.dose <- hourly.dose * parameters$Fgutabs

  # Calculate steady-state plasma Css:
  Css <- (hourly.dose * (Qliver + Qgut) /
    (fup * Clmetabolism / Rblood2plasma + (Qliver + Qgut))) /
    (Qcardiac - (Qliver + Qgut)**2 /
    (fup * Clmetabolism / Rblood2plasma + (Qliver + Qgut)) -
    Qkidney**2 / (Qgfr * fup / Rblood2plasma + Qkidney) - Qrest)

  # Check to see if a specific tissue was asked for:
  if (!is.null(tissue))
  {
    # Need to convert to schmitt parameters. `parameters` may arrive as a
    # compound data.table/data.frame (e.g. from calc_mc_css) or as a plain
    # list, so subset it accordingly before prediction:
    if (inherits(parameters, "data.table")){
      pcs <- predict_partitioning_schmitt(parameters =
        parameters[, param.names.schmitt[param.names.schmitt %in%
        names(parameters)], with = FALSE])
    } else if (inherits(parameters, "list")) {
      pcs <- predict_partitioning_schmitt(parameters =
        parameters[param.names.schmitt[param.names.schmitt %in%
        names(parameters)]])
    } else stop('httk is only configured to process parameters as objects of
               class list or class compound data.table/data.frame.')

    if (!(paste0('K',tolower(tissue)) %in%
      substr(names(pcs),1,nchar(names(pcs))-3)))
    {
      stop(paste("Tissue",tissue,"is not available."))
    }

    # Tissues with sources (gut) or sinks (liver,kidney) need to be calculated
    # taking the change of mass into account:
    if (tissue == 'gut')
    {
      Qgut <- parameters$Qgutf * parameters$Qcardiacc / parameters$BW^0.25
      # BUG FIX: this branch previously read `dose`, a variable that was only
      # ever set to NULL (an R CMD check appeasement), which silently produced
      # a zero-length Css. The absorbed hourly dose enters through the gut:
      Css <- parameters[['Kgut2pu']] * parameters[['Funbound.plasma']] *
        (Css + hourly.dose / (Qgut * parameters[['Rblood2plasma']]))
    } else if (tissue == 'liver') {
      # The liver receives both hepatic-artery and portal (gut) flow:
      Qliver <- (parameters$Qgutf + parameters$Qliverf) * parameters$Qcardiacc /
        parameters$BW^0.25
      Clmetabolism <- parameters$Clmetabolismc
      if (!restrictive.clearance) Clmetabolism <- Clmetabolism / fup
      Css <- parameters[['Kliver2pu']] * fup * (hourly.dose +
        Qliver * Css * Rblood2plasma) /
        (Clmetabolism * fup + Qliver * Rblood2plasma)
    } else if(tissue == 'kidney') {
      Qkidney <- parameters$Qkidneyf * parameters$Qcardiacc / parameters$BW^0.25
      Css <- parameters[['Kkidney2pu']] * fup * Qkidney * Css * Rblood2plasma /
        (Qkidney * Rblood2plasma + parameters$Qgfrc * fup)
    # All other tissues are proportional based on the partition coefficient:
    } else {
      Css <- Css * pcs[[names(pcs)[substr(names(pcs),2,nchar(names(pcs))-3)==tissue]]] * fup
    }
  }

  # Convert the plasma value unless a raw tissue concentration was requested.
  # BUG FIX: this condition previously read tolower(concentration != "tissue"),
  # i.e. tolower() applied to a logical, which made the "tissue" comparison
  # case-sensitive (e.g. concentration = "Tissue" would error out below):
  if (tolower(concentration) != "tissue") {
    if (tolower(concentration)=='plasma')
    {
      concentration <- "Plasma"
      # Optionally treat only the unbound (free) plasma fraction as bioactive:
      if (isTRUE(bioactive.free.invivo)) {
        Css <- Css * parameters[['Funbound.plasma']]
      }
    } else if (tolower(concentration)=='blood')
    {
      Css <- Css * Rblood2plasma
      concentration <- "Blood"
    } else {
      stop("Only blood and plasma concentrations are calculated.")
    }
  }

  return(Css)
}
|
d073c777a04b29fdda5c66b843c0296eee046dda | 0f1b6a183d9df5061bca7a8ec089d0324adb8b1c | /cachematrix.R | 0c9762cbcd08269f16ef68fe53afa7b85d22a892 | [] | no_license | markcanete/ProgrammingAssignment2 | 1478f0aca804830d8392b552c5037c2bf4e7e6d8 | dcc0bc604f91587898e6070d203ee5c4ba779dee | refs/heads/master | 2021-08-23T07:30:05.188450 | 2017-12-04T04:23:24 | 2017-12-04T04:23:24 | 112,989,294 | 0 | 0 | null | 2017-12-04T03:19:17 | 2017-12-04T03:19:16 | null | UTF-8 | R | false | false | 1,150 | r | cachematrix.R | # This will Creates a matrix that can cache it's inverse, if the inverse has already been
#calculated before, the Cached inverse is return
#
#
# Returns:
# A matrix with functions to get/set value & get_matrix/set_matrix inverse
# Creates a special "matrix" object that can cache its inverse.
#
# BUG FIX: in the original, a misplaced closing brace made `set_matrix`
# swallow `getinv`, `setinv` and the returned list, and `cacheSolve` was
# accidentally nested inside `makeCacheMatrix` (which therefore returned the
# cacheSolve closure instead of the accessor list). The braces are restored
# so makeCacheMatrix returns the documented list of accessor functions and
# cacheSolve is a top-level function.
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; invalidated whenever the underlying matrix changes.
  inverse_math <- NULL
  get_matrix <- function() x
  set_matrix <- function(y) {
    x <<- y
    inverse_math <<- NULL
  }
  getinv <- function() inverse_math
  setinv <- function(inverse) inverse_math <<- inverse
  # Expose the closures so callers can get/set the matrix and its cached
  # inverse.
  list(get_matrix = get_matrix, set_matrix = set_matrix,
       getinv = getinv, setinv = setinv)
}

# Computes the inverse of the special "matrix" returned by makeCacheMatrix.
# If the inverse has already been calculated (and the matrix has not been
# changed since), the cached inverse is returned instead of recomputing it.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inverse_math <- x$getinv()
  if (!is.null(inverse_math)) {
    message("inverse is cached")
    return(inverse_math)
  }
  m <- x$get_matrix()
  inverse_math <- solve(m, ...)
  x$setinv(inverse_math)
  return(inverse_math)
}
|
7f65390daf3ecf03676337590d8c265b9c6fc82f | 6b5c972cfc1f9e488a1444071f0640dd31351c64 | /R/pcRegression.R | 25752a2bd3f37805896becad3f928213550b7e3b | [] | no_license | chichaumiau/kBET | cbf04dfe5cb378946f8a95c6689675b89d65d493 | 429a7f82e29a83c4410a3eaadfeed24fbb369626 | refs/heads/master | 2021-01-20T05:02:05.333810 | 2017-03-03T09:19:48 | 2017-03-03T09:19:48 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,029 | r | pcRegression.R | #' pcRegression
#'
#' @description pcRegression does a linear model fit of principal components and a batch (categorical) variable
#' @param pca.data a list as created by 'prcomp', pcRegression needs $x: the principal components (PCs, correctly: the rotated data) and $sdev: the standard deviations of the PCs)
#' @param batch vector with the batch covariate (for each cell)
#' @param tol truncation threshold for significance level, default: 1e-16
#' @return List summarising principal component regression
#' \itemize{
#' \item maxVar: the variance explained by principal component(s) that correlate(s) most with the batch effect
#' \item PmaxVar: p-value (returned by linear model) for the respective principal components (related to maxVar)
#' \item R2Var: sum over Var(PC_i)*r2(PC_i and batch) for all i
#' \item ExplainedVar: explained variance for each PC
#' \item r2: detailed results of correlation (R-Square) analysis
#' }
#' @export
pcRegression <- function(pca.data, batch, tol=1e-16){
  # Regress every principal component on the batch covariate and summarise
  # how much of the total variance the batch effect explains.
  batch.levels <- unique(batch)
  if (length(batch.levels) == 2) {
    # Two batches: record the linear-model R^2 and p-value per PC, plus a
    # two-sample t-test p-value comparing the two batches on each PC.
    correlate.fun <- function(rot.data, batch){
      a <- lm(rot.data ~ batch)
      # BUG FIX: was numeric(2) even though result[3] is assigned below
      # (R silently extended the vector); preallocate all three slots.
      result <- numeric(3)
      result[1] <- summary(a)$r.squared            # coefficient of determination
      result[2] <- summary(a)$coefficients[2,4]    # p-value (significance level)
      t.test.result <- t.test(rot.data[batch==batch.levels[1]],
                              rot.data[batch==batch.levels[2]], paired = FALSE)
      result[3] <- t.test.result$p.value
      return(result)
    }
    # apply() returns one column per PC; transpose so rows index PCs.
    r2.batch <- apply(pca.data$x, 2, correlate.fun, batch)
    r2.batch <- t(r2.batch)
    colnames(r2.batch) <- c('R.squared', 'p.value.lm', 'p.value.t.test')
    # Truncate significance levels at `tol` so downstream transforms
    # (e.g. -log10) stay finite.
    r2.batch[r2.batch[,2]<tol,2] <- tol
    r2.batch[r2.batch[,3]<tol,3] <- tol
  } else {
    # More than two batches (treated as a single covariate in the model):
    correlate.fun <- function(rot.data, batch){
      a <- lm(rot.data ~ batch)
      result <- numeric(2)
      result[1] <- summary(a)$r.squared                   # coefficient of determination
      result[2] <- summary(a)$coefficients['batch',4]     # p-value (significance level)
      return(result)
    }
    r2.batch <- apply(pca.data$x, 2, correlate.fun, batch)
    r2.batch <- t(r2.batch)
    colnames(r2.batch) <- c('R.squared', 'p.value.lm')
    r2.batch[r2.batch[,2]<tol,2] <- tol
  }
  # which() (not which.min) is deliberate: it keeps *all* PCs tied at the
  # minimal p-value, matching the plural "component(s)" in the documentation.
  argmin <- which(r2.batch[,2]==min(r2.batch[,2]))
  # Percentage of total variance explained by each PC:
  normal <- sum(pca.data$sdev^2)
  var <- round((pca.data$sdev)^2 / normal *100,1)
  # Variance-weighted sum of R^2 values = variance explained by batch:
  batch.var <- sum(r2.batch[,1]*var)/100
  result <- list()
  result$maxVar <- var[argmin]
  result$PmaxVar <- r2.batch[argmin,2]
  result$R2Var <- batch.var
  result$ExplainedVar <- var
  result$r2 <- r2.batch
  return(result)
}
|
27528ed1aa9b80cab5bc13daea1f9e1c5ef66077 | bd8cd68974cbe33217bf1ec07942dc38403f9aaa | /models/functions/fit_rf_plus_cart.R | 107bb3da78800074368b746ccfef7fec304fbe5e | [
"BSD-3-Clause"
] | permissive | nzanl/pilot_clientprofielen_wijkverpleging_2020 | 4c67a1d9d1c6baf61a51bafe4e77574bcedea640 | d70349fd6af7700475a1cf4a839966b08702e875 | refs/heads/master | 2023-01-02T14:38:28.088643 | 2020-10-16T09:08:41 | 2020-10-16T09:08:41 | 304,569,754 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 69 | r | fit_rf_plus_cart.R | # nog niet geimplementeerd
fit_rf_plus_cart <- function(){
  # Not yet implemented (original Dutch note: "nog niet geimplementeerd");
  # the empty body returns NULL.
}
|
8871888ff8e7a6ea21d14e89adff0aec52f46b8a | 85b01c6070393f012bdd4bf8d6bc36832375434a | /man/u133VsExon.Rd | 3d7d2bbe683f452521db4536264730c7757959f3 | [] | no_license | AEBilgrau/GMCM | 4f0aef2910389e18d461edc2a18a9f3e0e2eebc0 | dce017d7be16c9cdf26565f78162e7dbe0619c73 | refs/heads/master | 2021-11-24T08:48:22.631145 | 2021-11-04T22:28:46 | 2021-11-04T22:28:46 | 15,970,417 | 12 | 2 | null | 2020-01-27T19:25:32 | 2014-01-16T14:41:13 | R | UTF-8 | R | false | true | 1,952 | rd | u133VsExon.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GMCM-package.R
\docType{data}
\name{u133VsExon}
\alias{u133VsExon}
\title{Reproducibility between U133 plus 2 and Exon microarrays}
\format{
The format of the \code{data.frame} is:
\code{'data.frame': 19577 obs. of 2 variables:}\cr
\code{$ u133: num 0.17561 0.00178 0.005371 0.000669 0.655261 ...}\cr
\code{$ exon: num 1.07e-01 6.74e-10 1.51e-03 6.76e-05 3.36e-01 ...}\cr
}
\description{
This dataset contains a \code{data.frame} of unadjusted P-values for
differential expression between germinal center cells and other B-cells
within tonsils for two different experiments. The experiments differ
primarily in the microarray platform used. The first column corresponds the
evidence from the Affymetrix GeneChip Human Genome U133 Plus 2.0 Array.
The second column corresponds to the Affymetrix GeneChip Human Exon 1.0 ST
Array.
}
\details{
Further details can be found in Bergkvist et al. (2014) and
Rasmussen and Bilgrau et al. (2014).
}
\examples{
data(u133VsExon)
str(u133VsExon)
# Plot P-values
plot(u133VsExon, cex = 0.5)
# Plot ranked and scaled P-values
plot(Uhat(1-u133VsExon), cex = 0.5)
}
\references{
Bergkvist, Kim Steve, Mette Nyegaard, Martin Boegsted, Alexander Schmitz,
Julie Stoeve Boedker, Simon Mylius Rasmussen, Martin Perez-Andres et al.
(2014). "Validation and Implementation of a Method for Microarray Gene
Expression Profiling of Minor B-Cell Subpopulations in Man".
BMC immunology, 15(1), 3.
Rasmussen SM, Bilgrau AE, Schmitz A, Falgreen S, Bergkvist KS, Tramm AM,
Baech J, Jacobsen CL, Gaihede M, Kjeldsen MK, Boedker JS, Dybkaer K,
Boegsted M, Johnsen HE (2015). "Stable Phenotype Of B-Cell Subsets Following
Cryopreservation and Thawing of Normal Human Lymphocytes Stored in a Tissue
Biobank." Cytometry Part B: Clinical Cytometry, 88(1), 40-49.
}
\author{
Anders Ellern Bilgrau <anders.ellern.bilgrau@gmail.com>
}
\keyword{data}
|
150a1bc4ac801b1b6751a690161743c5cd8aaf88 | 6b915ba9db1de8d26bec39589c77fd5d225e1fce | /stanlba/singlelevelmodel/rl_rl_discrim_evaluate_fixed.R | 0fa3ac08b451ed9d025a6a77267cf4e947d8439c | [
"Apache-2.0"
] | permissive | bjsmith/reversallearning | c9179f8cbdfdbbd96405f603e63e4b13dfc233af | 023304731d41c3109bacbfd49d4c850a92353978 | refs/heads/master | 2021-07-09T23:42:22.550367 | 2018-12-08T01:40:54 | 2018-12-08T01:40:54 | 100,329,149 | 0 | 2 | null | 2017-09-05T04:27:41 | 2017-08-15T02:21:25 | R | UTF-8 | R | false | false | 12,761 | r | rl_rl_discrim_evaluate_fixed.R | source("stanlba/lba_rl_joint_setup.R")
# Analysis script: compare single-level RL / RL-discrimination joint models.
# Load project helpers (model summarisers, joint-model evaluation utilities,
# and get_dmn_regions / get_Sigma_m_n used below):
source("stanlba/singlelevelmodel/single_level_model_summarize_fast.R")
source("stanlba/singlelevelmodel/lba_rl_joint_v2_evaluate_functions.R")
source("stanlba/singlelevelmodel/lba_rl_joint_v1_functions.R")
source("stanlba/singlelevelmodel/lba_rl_joint_v7_functions.R")
source("stanlba/singlelevelmodel/lba_rl_joint_v10_functions.R")
regions<-get_dmn_regions()
#what we really wanna do is extract the sigmas
Sigma_dims<-25
# Labels of the Sigma entries covarying each parameter with element 1 (RPE),
# in both Stan ("Sigma[i,1]") and data.frame ("Sigma.i.1.") naming styles:
rpe_covarvec<-paste0("Sigma[",1:Sigma_dims[1],",",1,"]")
rpe_covarvec_df<-paste0("Sigma.",1:Sigma_dims[1],".",1,".")
library(rstan)
lba_rl_version<-"rl_joint_20180927_1"
DeltaThetaLabels=paste0("con_",get_dmn_regions())
# NOTE(review): rstan is attached a second time here; redundant but harmless.
library(rstan)
# Summarise the previously fitted single-experiment joint RL model (v9):
model_rl_joint_v9<-single_level_model_summarize_fast(
  single_run_dir=paste0(localsettings$data.dir,"lba_rl"),
  model_version=lba_rl_version,
  model_filename="rl_single_exp_joint_v9",
  model_subversion="")
# Summarise the RL-discrimination joint model (v4).
#seems to be very patchy, timing out occasionally and more often, failing to fully converge.
model_rl_discrim_joint_v4<-single_level_model_summarize_fast(
  single_run_dir=paste0(localsettings$data.dir,"lba_rl"),
  model_version=lba_rl_version,
  model_filename="rl_discrim_single_exp_joint_v4",
  model_subversion="")
# Posterior samples of (alpha, beta), coloured by run and pooled:
ggplot(model_rl_discrim_joint_v4$complete_posterior_list,
       aes(alpha,beta,color=UniqueRunCode))+
  geom_point(alpha=0.2)+labs(title="Posterior samples of alpha, beta")
ggplot(model_rl_discrim_joint_v4$complete_posterior_list,aes(alpha,beta))+
  geom_point(alpha=0.2)+labs(title="Posterior samples of alpha, beta")+
  coord_cartesian(ylim=c(0,200))
# Heatmap of the Sigma covariance entries linking theta (RPE, EV) with the
# four Delta (neural) dimensions:
theta_names<-c("RPE","EV")
delta_names<-paste0("Delta",1:4)
heatmap(get_Sigma_m_n(model_rl_discrim_joint_v4$results_summary_dt,theta_names,delta_names))
#log likelihood comparison; this is not the main thing we're trying to look into
model_rl_discrim_joint_v4$results_summary_dt<-data.table(model_rl_discrim_joint_v4$results_summary)
#model_rl_discrim_joint_v4$results_summary_dt[param_name=="alpha_discrim_pr",.N,by=sid]
# Mean posterior log-likelihood across runs, for each model:
mean(model_rl_discrim_joint_v4$results_summary_dt[param_name=="log_lik"]$mean)
mean(model_rl_joint_v9$results_summary_dt[param_name=="log_lik"]$mean)
#so can we look at just runs that we got across both modes to try to compare?
# Build a per-run identifier (subject/run/motivation) in both summaries:
model_rl_discrim_joint_v4$results_summary_dt[,FullRunName:=paste0("s",sid,"r",rid,"m",motivation)]
model_rl_joint_v9$results_summary_dt[,FullRunName:=paste0("s",sid,"r",rid,"m",motivation)]
runs_in_common<-intersect(
  unique(model_rl_discrim_joint_v4$results_summary_dt$FullRunName),
  unique(model_rl_joint_v9$results_summary_dt$FullRunName))
# Paired comparison of mean log-likelihoods on the shared runs only:
log_lik_comparison<-merge(
  model_rl_discrim_joint_v4$results_summary_dt[FullRunName %in% runs_in_common & param_name=="log_lik",.(FullRunName,mean)],
  model_rl_joint_v9$results_summary_dt[FullRunName %in% runs_in_common & param_name=="log_lik",.(FullRunName,mean)],
  by="FullRunName",suffixes = c("_discrim_joint_v4","_joint_v9"))
hist(log_lik_comparison$mean_discrim_joint_v4-log_lik_comparison$mean_joint_v9)
t.test(log_lik_comparison$mean_discrim_joint_v4-log_lik_comparison$mean_joint_v9)
#now do a comparison by run.
#what do the distributions of the EVs and RPEs look like?
ggplot(model_rl_joint_v9$results_summary_dt[grep("trial_expected_val",param_name),],aes(mean))+
  geom_histogram(binwidth = 0.02)+facet_grid(sid~rid+motivation,scales = "free")
ggplot(model_rl_joint_v9$results_summary_dt[grep("run_pred_err_c2",param_name),],aes(mean))+
  geom_histogram(binwidth = 0.02)+facet_grid(sid~rid+motivation,scales = "free")
ggplot(model_rl_joint_v9$results_summary_dt[param_name=="alpha",],aes(mean))+
  geom_histogram(binwidth = 0.02)+facet_grid(sid~rid+motivation,scales = "free")
model_lba_rl_joint_v13<-single_level_model_summarize_fast(
single_run_dir=paste0(localsettings$data.dir,"lba_rl"),
model_version="joint_20180723_1",
model_filename="lba_rl_single_exp_joint_v13",
model_subversion="")
ggplot(model_lba_rl_joint_v13$results_summary_dt[grep("trial_expected_val",param_name),],aes(mean))+
geom_histogram(binwidth = 0.02)+facet_grid(sid~rid+motivation,scales = "free")
ggplot(model_lba_rl_joint_v13$results_summary_dt[grep("run_pred_err_c2",param_name),],aes(mean))+
geom_histogram(binwidth = 0.02)+facet_grid(sid~rid+motivation,scales = "free")
#brandon's suggestion: try using hte absolute values:
#center distribution by run and parameter so that we get a reasonable estimate of what
#the model actually has to work with...
model_lba_rl_joint_v13$results_summary_dt[,mean_c:=mean-mean(mean),.(sid,rid,motivation,param_name)]
model_lba_rl_joint_v13$results_summary_dt$abs_mean_c<-abs(model_lba_rl_joint_v13$results_summary_dt$mean_c)
model_lba_rl_joint_v13$results_summary_dt$abs_mean<-abs(model_lba_rl_joint_v13$results_summary_dt$mean)
ggplot(model_lba_rl_joint_v13$results_summary_dt[grep("trial_expected_val",param_name),],aes(abs_mean))+
geom_histogram(binwidth = 0.02)+facet_wrap(~sid+rid+motivation,scales = "free")
ggplot(model_lba_rl_joint_v13$results_summary_dt[grep("run_pred_err_c2",param_name),],aes(abs_mean))+
geom_histogram(binwidth = 0.02)+facet_wrap(~sid+rid+motivation,scales = "free")
ggplot(model_lba_rl_joint_v13$results_summary_dt[grep("trial_expected_val",param_name),],aes(mean))+
geom_histogram(binwidth = 0.02)+facet_wrap(~sid+rid+motivation,scales = "free")+labs(title="Expected Value")
ggplot(model_lba_rl_joint_v13$results_summary_dt[grep("run_pred_err_c2",param_name),],aes(mean))+
geom_histogram(binwidth = 0.02)+facet_wrap(~sid+rid+motivation,scales = "free")+labs(title="Reward Prediction Error")
source("stanlba/singlelevelmodel/empirical_correlations_estimate.R")
neural_trial_data<-merge_with_neural_data(model_lba_rl_joint_v13$results_summary_dt)
#Alright, so now, let's try grabbing the neural data and running a correlation ourselves.
# Scatterplots of ROI activity against model quantities, subsampled to every
# fifth subject for legibility:
ggplot(neural_trial_data[(sid %% 5)==0 & run_pred_err_c2>0],aes(run_pred_err_c2,con_ROI_Left.Accumbens.area,color=as.factor(presentation_n)))+geom_point()+
  facet_wrap(~interaction(sid,rid,motivation),scales = "free")+
  labs(title="L Accumbens Activity by RPE for each run", x="RPE",color="Presentation Number")
ggplot(neural_trial_data[(sid %% 5)==0],aes(trial_expected_val,con_ROI_Left.Accumbens.area,color=as.factor(presentation_n)))+geom_point()+facet_wrap(~interaction(sid,rid,motivation),scales = "free")+
  labs(title="L Accumbens Activity by EV for each run", x="EV",color="Presentation Number")
ggplot(neural_trial_data[(sid %% 5)==0],aes(run_pred_err_c2,con_fsl_roi_accumbens_r,color=as.factor(presentation_n)))+geom_point()+facet_wrap(~interaction(sid,rid,motivation),scales = "free")+
  labs(title="R Accumbens Activity by abs RPE for each run", x="EV",color="Presentation Number")
#insula activity is what we expect to relate to absolute RPE....
ggplot(neural_trial_data[(sid %% 5)==0],aes(run_pred_err_c2_abs,ROI_ctx_lh_S_circular_insula_ant,color=as.factor(presentation_n)))+geom_point()+facet_wrap(~interaction(sid,rid,motivation),scales = "free")+
  labs(title="insula Activity by abs RPE for each run", x="absRPE",color="Presentation Number")
ggplot(neural_trial_data[(sid %% 5)==0],aes(run_pred_err_c2_abs,ROI_ctx_rh_S_circular_insula_ant,color=as.factor(presentation_n)))+geom_point()+facet_wrap(~interaction(sid,rid,motivation),scales = "free")+
  labs(title="insula Activity by abs RPE for each run", x="absRPE",color="Presentation Number")
# Activity as a function of presentation number within segment:
ggplot(neural_trial_data[(sid %% 5)==0],
       aes(presentation_n_in_segment,con_ROI_Left.Accumbens.area,color=as.factor(presentation_n)))+
  geom_point()+facet_wrap(~interaction(sid,rid,motivation),scales = "free")+
  labs(title="L Accumbens Activity by presentation_n_in_segment for each run", x="RPE",color="Presentation Number")
ggplot(neural_trial_data[(sid %% 5)==0],
       aes(presentation_n_in_segment,con_ROI_Right.Accumbens.area,color=as.factor(presentation_n)))+
  geom_point()+facet_wrap(~interaction(sid,rid,motivation),scales = "free")+
  labs(title="R Accumbens Activity by presentation_n_in_segment for each run", x="PNIS",color="Presentation Number")
# NOTE(review): the next plot shows left anterior insula, not R Accumbens;
# the title below appears to be a copy-paste leftover.
ggplot(neural_trial_data[(sid %% 5)==0],
       aes(presentation_n_in_segment,ROI_ctx_lh_S_circular_insula_ant,color=as.factor(presentation_n)))+
  geom_point()+facet_wrap(~interaction(sid,rid,motivation),scales = "free")+
  labs(title="R Accumbens Activity by presentation_n_in_segment for each run", x="PNIS",color="Presentation Number")
ggplot(neural_trial_data[(sid %% 5)==0],
       aes(presentation_n,con_fsl_roi_accumbens_l))+
  geom_point()+facet_wrap(~interaction(sid,rid,motivation),scales = "free")+
  labs(title="R Accumbens Activity by presentation_n_in_segment for each run", x="PNIS",color="Presentation Number")
ggplot(neural_trial_data[(sid %% 5)==0],
       aes(presentation_n,con_fsl_roi_accumbens_r))+
  geom_point()+facet_wrap(~interaction(sid,rid,motivation),scales = "free")+
  labs(title="R Accumbens Activity by presentation_n_in_segment for each run", x="PNIS",color="Presentation Number")
# Full-sample scatterplots (no subject subsampling):
ggplot(neural_trial_data,aes(run_pred_err_c2,con_ROI_Right.Accumbens.area))+geom_point()+facet_wrap(~sid+rid+motivation,scales = "free")
ggplot(neural_trial_data,aes(trial_expected_val,con_ROI_Right.Accumbens.area))+geom_point()+facet_wrap(~sid+rid+motivation,scales = "free")
ggplot(neural_trial_data,aes(run_pred_err_c2_abs,con_ROI_Left.Accumbens.area))+geom_point()+facet_wrap(~sid+rid+motivation,scales = "free")
ggplot(neural_trial_data,aes(run_pred_err_c2_abs,con_ROI_Left.Accumbens.area))+geom_point()+facet_wrap(~sid+rid+motivation,scales = "free")
ggplot(neural_trial_data,aes(trial_expected_val_abs,con_ROI_Left.Accumbens.area))+geom_point()+facet_wrap(~sid+rid+motivation,scales = "free")
ggplot(neural_trial_data,aes(run_pred_err_c2_abs,con_ROI_Right.Accumbens.area))+geom_point()+facet_wrap(~sid+rid+motivation,scales = "free")
ggplot(neural_trial_data,aes(trial_expected_val_abs,con_ROI_Right.Accumbens.area))+geom_point()+facet_wrap(~sid+rid+motivation,scales = "free")
# Linear models: model quantity ~ ROI activity, controlling for
# subject x run x motivation interactions:
summary(lm(trial_expected_val_abs~con_ROI_Left.Accumbens.area+as.factor(sid)*as.factor(rid)*motivation,neural_trial_data))
summary(lm(trial_expected_val_abs~con_ROI_Right.Accumbens.area+as.factor(sid)*as.factor(rid)*motivation,neural_trial_data))
summary(lm(run_pred_err_c2_abs~con_ROI_Left.Accumbens.area+as.factor(sid)*as.factor(rid)*motivation,neural_trial_data))
summary(lm(run_pred_err_c2_abs~con_ROI_Right.Accumbens.area+as.factor(sid)*as.factor(rid)*motivation,neural_trial_data))
summary(lm(presentation_n_in_segment~con_ROI_Left.Accumbens.area+as.factor(sid)*as.factor(rid)*motivation,neural_trial_data))
summary(lm(presentation_n_in_segment~con_ROI_Right.Accumbens.area+as.factor(sid)*as.factor(rid)*motivation,neural_trial_data))
summary(lm(trial_expected_val~con_ROI_Left.Accumbens.area+as.factor(sid)*as.factor(rid)*motivation,neural_trial_data))
summary(lm(trial_expected_val~con_ROI_Right.Accumbens.area+as.factor(sid)*as.factor(rid)*motivation,neural_trial_data))
summary(lm(run_pred_err_c2_abs~con_ROI_Left.Accumbens.area+as.factor(sid)*as.factor(rid)*motivation,neural_trial_data))
summary(lm(run_pred_err_c2_abs~con_ROI_Right.Accumbens.area+as.factor(sid)*as.factor(rid)*motivation,neural_trial_data))
summary(lm(trial_expected_val~con_ROI_ctx_lh_S_suborbital+as.factor(sid)*as.factor(rid)*motivation,neural_trial_data))
summary(lm(trial_expected_val~con_ROI_ctx_rh_S_suborbital+as.factor(sid)*as.factor(rid)*motivation,neural_trial_data))
summary(lm(run_pred_err_c2_abs~con_ROI_ctx_lh_S_suborbital+as.factor(sid)*as.factor(rid)*motivation,neural_trial_data))
summary(lm(run_pred_err_c2_abs~con_ROI_ctx_rh_S_suborbital+as.factor(sid)*as.factor(rid)*motivation,neural_trial_data))
# Summarise an earlier LBA-RL joint model (v12) for comparison with v13:
model_lba_rl_joint_v12<-single_level_model_summarize_fast(
  single_run_dir=paste0(localsettings$data.dir,"lba_rl"),
  model_version="joint_20180725_1",
  model_filename="lba_rl_single_exp_joint_v12",
  model_subversion="")
# model_lba_rl_joint_v12$results_summary_dt[param_name=="alpha",]
# dim(model_lba_rl_joint_v12$complete_posterior_list)
# colnames(model_lba_rl_joint_v12$complete_posterior_list)[4600:4612]
ggplot(model_lba_rl_joint_v12$complete_posterior_list,aes(alpha,p,color=UniqueRunCode))+geom_point(alpha=0.2)+labs(title="Posterior samples of p, alpha,\nacross 13 runs")
#model_lba_rl_joint_v13$results_summary_dt[param_name=="alpha",]
# Distribution of posterior mean learning rates across runs, v12 vs v13:
hist(model_lba_rl_joint_v12$results_summary_dt[param_name=="alpha",mean])
hist(model_lba_rl_joint_v13$results_summary_dt[param_name=="alpha",mean])
#learning rate has gotten higher.
# Sigma heatmap linking theta (RPE, EV) with the DMN-region deltas:
theta_names<-c("RPE","EV")
delta_names<-get_dmn_regions()
heatmap(get_Sigma_m_n(model_lba_rl_joint_v12$results_summary_dt,theta_names,delta_names))
2c620c64c264fa894c7800201dda0adf67214183 | 0a906cf8b1b7da2aea87de958e3662870df49727 | /grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610055576-test.R | 4ee81e1f4c7c1de93d46a9636fdb83f5a96d411d | [] | no_license | akhikolla/updated-only-Issues | a85c887f0e1aae8a8dc358717d55b21678d04660 | 7d74489dfc7ddfec3955ae7891f15e920cad2e0c | refs/heads/master | 2023-04-13T08:22:15.699449 | 2021-04-21T16:25:35 | 2021-04-21T16:25:35 | 360,232,775 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,084 | r | 1610055576-test.R | testlist <- list(a = -1040187601L, b = 989855743L, x = c(-1073741825L, -16056321L, -51777L, -1L, -54785L, -16035380L, 483183820L, -859035137L, -1L, -1L, 1886417008L, 1886417008L, 1886417008L, 1886417008L, 1886417008L, -1040187601L, 989855743L, -16318417L, -851982L, -222298113L, -1L, -870527796L, -858993623L, -52993L, -1L, -1L, -13563137L, -203L, -218959118L, -1073741825L, -215L, 458358783L, 792395775L, 471039L, -1L, -215L, 53553663L, -1L, -1L, -207L, 184497599L, -1L, -1L, -1L, -572662307L, -570425407L, -53446L, -65529L, 805253386L, -1L, 905114354L, -222298113L, -1L, 689656074L, -1L, 704610559L, -2130706433L, -1L, -1L, -8585216L, -35279L, 184549375L, -13241614L, -218972161L, -1L, -14083247L, -58164L, -859033334L, -1L, -1L, -13563137L, -215L, -1L, -3342L, -1073741825L, -215L, 458314239L, -1L, -8739L, -572662273L, -1L, -37889L, -1593835521L, -8323073L, -54785L, 822804479L, 689656319L, -58565L, 993722496L, 989868298L, -1L, 905117695L, 704642864L, -1L, -1L, -1L, -1L, -1L, -572662307L, -570425345L))
result <- do.call(grattan:::anyOutside,testlist)
str(result) |
e6e40ceeaa87c2037a13347810be4d2bc5449627 | 829c2a4c9dee2dc51ddba44c10266d65f8911034 | /lib/topicCountryNetwork.R | 974d1c2ff14d0d2513bf05a3aefa7ad9b4a0fd1f | [] | no_license | TwlyY29/tminspector | ccc16421d899740171178b3c81b43104dbefb591 | 59af49ab6aa3cca5fd1726478c4d0c271269f0cd | refs/heads/master | 2020-09-08T17:54:39.090085 | 2019-11-13T09:21:37 | 2019-11-13T09:21:37 | 221,202,113 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 507 | r | topicCountryNetwork.R | topicCountryNetwork <- function(t_min = min(GLB_ALL_YEARS), t_max = max(GLB_ALL_YEARS)){
w <- which(CORPUS$meta$year %in% c(t_min:t_max))
d <- THETA[w,]
colnames(d) <- 1:N_TOPICS
rownames(d) <- CORPUS$meta$doc_id[w]
d <- as.data.frame(as.table(d))
colnames(d) <- c("cntr","topc","weights")
d$cntr <- CORPUS$meta$country[match(unlist(d$cntr), CORPUS$meta$doc_id)] # replace doc-names with countries
d <- aggregate(weights ~ cntr+topc, data = d, FUN = sum) # aggregate weights for countries
}
|
bcf500b51eaab103eca5915446467538420fe474 | f36dd0a74b04145ac996f7b1ba728ad5e73206c7 | /MLR Final Script.R | c924bd0781a0be0dcb093823d376b125c67ec30c | [] | no_license | Yashk-02/Predicting-student-grades-and-Value-of-owner-occupied-home-in-Boston-using-machine-learning | 7fd302f1ea6cafdd9c87d1799df17f65b0b8191a | 0dea8ca53ee85875648087f432b37463c79f1bb5 | refs/heads/master | 2023-01-04T18:16:33.946438 | 2020-10-28T23:21:24 | 2020-10-28T23:21:24 | 295,794,678 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,448 | r | MLR Final Script.R | ##install relevant packages at start and corresponding libraries, can take up to 15 minutes to load caret pacakge
install.packages('caret', dependencies = TRUE)
install.packages('xfun', dependencies = TRUE)
install.packages('generics', dependencies = TRUE)
install.packages('gower', dependencies = TRUE)
library(plyr)
library(readr)
library(dplyr)
library(caret)
library(ggplot2)
library(repr)
library(tibble)
library(MASS)
library(ISLR)
library(glmnet)
## for the purposes of MLR we have cleaned data at source rather than applying cleaning techniques in R
## changing Yes / No values to 1 / 0 and other binary outcomes
## SVM uses cleaning in R to show flexibility
##CSV file needs to be stored in same directory as R script to run as below
Students <- read_csv('Student_Grades_Data_Cleaned.csv')
names(Students)
dmy <- dummyVars(" ~ .", data = Students)
Students_h <- data.frame(predict(dmy, newdata = Students))
Students_h
##names(Students_h) not needed, useful to see list of column names
##glimpse(Students_h) preview of the data structures
##split the dataset into training and test data, assuming 25% training data
index = sample(1:nrow(Students_h), 0.75*nrow(Students_h))
train = Students_h[index,] # Create the training data
test = Students_h[-index,] # Create the test data
dim(train)
dim(test)
##name the individual variables from 'Students_h' file here
##exclude G3 being the dependent variable
cols = c('schoolGP0MS2', 'sexF0M1', 'age', 'addressR0U1',
'famsizeLE30GT31', 'PstatusA0T1', 'Medu', 'Fedu',
'Mjobat_home', 'Mjobhealth', 'Mjobother', 'Mjobservices',
'Mjobteacher', 'Fjobat_home', 'Fjobhealth', 'Fjobother',
'Fjobservices', 'Fjobteacher', 'reasoncourse', 'reasonhome',
'reasonother', 'reasonreputation', 'guardianfather', 'guardianmother',
'guardianother', 'traveltime', 'studytime', 'failures',
'schoolsup', 'famsup', 'paid', 'activities',
'nursery', 'higher', 'internet', 'romantic',
'famrel', 'freetime', 'goout', 'Dalc',
'Walc', 'health', 'absences')
pre_proc_val <- preProcess(train[,cols], method = c('center', 'scale'))
train[,cols] = predict(pre_proc_val, train[,cols])
test[,cols] = predict(pre_proc_val, test[,cols])
## get some summary statistics on the variables from the training dataset
summary(train)
## execute a linear regression with all predictors on training dataset
lr = lm(G3~.,data = train)
summary(lr)
##lm(formula = G3 ~ ., data = train) just to see in coefficient terms
##Create output metrics
#Step 1 - create the evaluation metrics function
# Print evaluation metrics for a fitted linear model: adjusted R-squared and
# RMSE, both rounded to 2 decimals and printed as character strings.
#
# Args:
#   model:       fitted lm object (only summary(model)$adj.r.squared is used).
#   df:          data.frame holding the observed response values.
#   predictions: predicted values aligned with the rows of `df`.
#   target:      name of the response column in `df`.
#
# Side effects: prints adjusted R-squared, then RMSE. Invisibly returns the
# printed RMSE string (print() returns its argument invisibly).
eval_metrics <- function(model, df, predictions, target) {
  resids <- df[, target] - predictions
  n <- length(predictions)
  # NOTE: the original also computed the unadjusted r.squared but never used
  # it; only the adjusted R-squared is reported.
  adj_r2 <- as.character(round(summary(model)$adj.r.squared, 2))
  rmse <- as.character(round(sqrt(sum(resids^2) / n), 2))
  print(adj_r2) # Adjusted R-squared
  print(rmse)   # RMSE
}
# Step 2 - predicting and evaluating the model on train data
predictions = predict(lr, newdata = train)
eval_metrics(lr, train, predictions, target = 'G3')
# Step 3 - predicting and evaluating the model on test data
predictions = predict(lr, newdata = test)
eval_metrics(lr, test, predictions, target = 'G3')
cols_reg = c('schoolGP0MS2', 'sexF0M1', 'age', 'addressR0U1',
'famsizeLE30GT31', 'PstatusA0T1', 'Medu', 'Fedu',
'Mjobat_home', 'Mjobhealth', 'Mjobother', 'Mjobservices',
'Mjobteacher', 'Fjobat_home', 'Fjobhealth', 'Fjobother',
'Fjobservices', 'Fjobteacher', 'reasoncourse', 'reasonhome',
'reasonother', 'reasonreputation', 'guardianfather', 'guardianmother',
'guardianother', 'traveltime', 'studytime', 'failures',
'schoolsup', 'famsup', 'paid', 'activities',
'nursery', 'higher', 'internet', 'romantic',
'famrel', 'freetime', 'goout', 'Dalc',
'Walc', 'health', 'absences', 'G3')
dummies <- dummyVars(G3 ~ ., data = Students_h[,cols_reg])
train_dummies = predict(dummies, newdata = train[,cols_reg])
test_dummies = predict(dummies, newdata = test[,cols_reg])
print(dim(train_dummies)); print(dim(test_dummies))
##execute the ridge regression
## per below you'll note for Ridge that alpha is set to 0
x = as.matrix(train_dummies)
y_train = train$G3
x_test = as.matrix(test_dummies)
y_test = test$G3
lambdas <- 10^seq(2, -3, by = -.1)
ridge_reg = glmnet(x, y_train, nlambda = 25, alpha = 0, family = gaussian, lambda = lambdas)
summary(ridge_reg)
cv_ridge <- cv.glmnet(x, y_train, alpha = 0, lambda = lambdas)
optimal_lambda <- cv_ridge$lambda.min
optimal_lambda
##Getting Ridge regression outputs
# Compute R^2 from true and predicted values
# Model performance metrics: returns a one-row data.frame with RMSE and
# R-squared computed from observed (`true`) and predicted values.
# `df` is used only for its row count when scaling the RMSE.
eval_results <- function(true, predicted, df) {
  squared_errors <- (predicted - true)^2
  centered_squares <- (true - mean(true))^2
  data.frame(
    RMSE = sqrt(sum(squared_errors) / nrow(df)),
    Rsquare = 1 - sum(squared_errors) / sum(centered_squares)
  )
}
# Prediction and evaluation on train data
predictions_train <- predict(ridge_reg, s = optimal_lambda, newx = x)
eval_results(y_train, predictions_train, train)
# Prediction and evaluation on test data
predictions_test <- predict(ridge_reg, s = optimal_lambda, newx = x_test)
eval_results(y_test, predictions_test, test)
##Lasso attempt
lambdas <- 10^seq(2, -3, by = -.1)
# Setting alpha = 1 implements lasso regression
lasso_reg <- cv.glmnet(x, y_train, alpha = 1, lambda = lambdas, standardize = TRUE, nfolds = 5)
# Best
lambda_best <- lasso_reg$lambda.min
lambda_best
##note for lasso we've now set aplha to 1
lasso_model <- glmnet(x, y_train, alpha = 1, lambda = lambda_best, standardize = TRUE)
predictions_train <- predict(lasso_model, s = lambda_best, newx = x)
eval_results(y_train, predictions_train, train)
predictions_test <- predict(lasso_model, s = lambda_best, newx = x_test)
eval_results(y_test, predictions_test, test)
##plot ridge and lasso results for report on Student Grades
plot(lasso_reg)
plot(cv_ridge)
########### Section Change, below is the same code base applied to Boston
names(Boston)
dmy <- dummyVars(" ~ .", data = Boston)
Boston_h <- data.frame(predict(dmy, newdata = Boston))
set.seed(100)
index = sample(1:nrow(Boston_h), 0.75*nrow(Boston_h))
train = Boston_h[index,] # Create the training data for Boston
test = Boston_h[-index,] # Create the test data for Boston
dim(train)
dim(test)
##name the individual variables from Boston file here
##exclude medv being the dependent variable for Boston
cols = c('crim','zn','indus','chas','nox','rm','age','dis','rad','tax','ptratio','black','lstat')
pre_proc_val <- preProcess(train[,cols], method = c('center', 'scale'))
train[,cols] = predict(pre_proc_val, train[,cols])
test[,cols] = predict(pre_proc_val, test[,cols])
##summary(train) - create summary statistics on the training data, not needed
## execute a linear regression with all predictors on training dataset
lr = lm(medv~.,data = train)
summary(lr)
##execute the same model without age or indus classifiers
lr2 = lm(medv~. -age -indus, data = train)
summary(lr2)
##lr3 gets the MLR outputs on the full dataset, not split into test & train
lr3 = lm(medv~., data = Boston_h)
summary(lr3)
##lm(formula = medv ~ ., data = train) ##same output except in different format looking at the coefficients
##Create output metrics
#Step 1 - create the evaluation metrics function
# we will be assessing the models on the basis of adjusted r-squared and RMSE
# this function is used to create out puts or RMSE & R^2
# Print evaluation metrics (adjusted R-squared, then RMSE) for the Boston
# models, both rounded to 2 decimals and printed as character strings.
# NOTE(review): this redefines the identically-named helper from the student
# grades section earlier in this file; `r2` is computed but never used.
eval_metrics = function(model, df, predictions, target){
  resids = df[,target] - predictions
  resids2 = resids**2
  N = length(predictions)
  r2 = as.character(round(summary(model)$r.squared, 2))
  adj_r2 = as.character(round(summary(model)$adj.r.squared, 2))
  print(adj_r2) #Adjusted R-squared
  print(as.character(round(sqrt(sum(resids2)/N), 2))) #RMSE
}
# Step 2 - predicting and evaluating the model on train data
predictions = predict(lr, newdata = train)
eval_metrics(lr, train, predictions, target = 'medv')
# Step 3 - predicting and evaluating the model on test data
predictions = predict(lr, newdata = test)
eval_metrics(lr, test, predictions, target = 'medv')
##change column names here
cols_reg = c('crim','zn','chas','nox','rm','dis','rad','tax','ptratio','black','lstat','medv')
##removing 'indus', 'age',
dummies <- dummyVars(medv ~ ., data = Boston_h[,cols_reg])
train_dummies = predict(dummies, newdata = train[,cols_reg])
test_dummies = predict(dummies, newdata = test[,cols_reg])
print(dim(train_dummies)); print(dim(test_dummies))
##execute the ridge regression using the glmnet library functions
##library(glmnet) removing this as already imported at start
x = as.matrix(train_dummies)
y_train = train$medv
x_test = as.matrix(test_dummies)
y_test = test$medv
lambdas <- 10^seq(2, -3, by = -.1)
ridge_reg = glmnet(x, y_train, nlambda = 25, alpha = 0, family = gaussian, lambda = lambdas)
summary(ridge_reg)
cv_ridge <- cv.glmnet(x, y_train, alpha = 0, lambda = lambdas)
optimal_lambda <- cv_ridge$lambda.min
optimal_lambda
##Getting Ridge regression outputs
# Compute R^2 from true and predicted values
# Compute RMSE and R-squared from observed (`true`) and predicted values;
# `df` supplies only the row count used to scale the RMSE. Returns a one-row
# data.frame. NOTE(review): verbatim duplicate of the eval_results defined
# earlier in this file for the student-grades section.
eval_results <- function(true, predicted, df) {
  SSE <- sum((predicted - true)^2)
  SST <- sum((true - mean(true))^2)
  R_square <- 1 - SSE / SST
  RMSE = sqrt(SSE/nrow(df))
  # Model performance metrics
  data.frame(
    RMSE = RMSE,
    Rsquare = R_square
  )
}
# Prediction and evaluation on train data
predictions_train <- predict(ridge_reg, s = optimal_lambda, newx = x)
eval_results(y_train, predictions_train, train)
# Prediction and evaluation on test data
predictions_test <- predict(ridge_reg, s = optimal_lambda, newx = x_test)
eval_results(y_test, predictions_test, test)
##Lasso attempt
lambdas <- 10^seq(2, -3, by = -.1)
# Setting alpha = 1 implements lasso regression
lasso_reg <- cv.glmnet(x, y_train, alpha = 1, lambda = lambdas, standardize = TRUE, nfolds = 5)
# Best
lambda_best <- lasso_reg$lambda.min
lambda_best
lasso_model <- glmnet(x, y_train, alpha = 1, lambda = lambda_best, standardize = TRUE)
predictions_train <- predict(lasso_model, s = lambda_best, newx = x)
eval_results(y_train, predictions_train, train)
predictions_test <- predict(lasso_model, s = lambda_best, newx = x_test)
eval_results(y_test, predictions_test, test)
##plot ridge and lasso results for report on Boston
plot(lasso_reg)
plot(cv_ridge)
|
fd421fb2512a12b29c69966b893c284339887e8a | 00685ffda63b270414cc9890c72768fbd0f96d87 | /tests/testthat/test_TR001_totvarbulk.R | be61c7c130d0a1dd710385b3c01b846defb6a85a | [
"MIT"
] | permissive | SysBioChalmers/DSAVE-R | 43fb16be548a4f2013db55cd1683c826210d402a | f63f8e6f7e62c08303efba3e8f7458e69ebe5028 | refs/heads/master | 2021-10-26T11:11:14.477823 | 2021-10-19T07:03:11 | 2021-10-19T07:03:11 | 178,033,532 | 2 | 2 | NOASSERTION | 2020-06-29T12:00:02 | 2019-03-27T16:27:53 | R | UTF-8 | R | false | false | 1,392 | r | test_TR001_totvarbulk.R | test_that("TR001 - Total variation in bulk",{
d <- rbind(c(1,1,1,1,2,2,2,2),c(300,300,300,300,300,300,300,300), c(0,0,0,0,0,0,0,0));
s <- as.matrix(d);
rownames(s) = c('A','B','C');
t1vs1 = DSAVEGetTotalVariationFromBulk(s, FALSE, 250, 0.5, rescale = FALSE);
#calculate (each value will be compared with 3 samples of the same value and 4 samples with the double value)
exp1vs1 = log(2.05/1.05)*4/7;
#4 vs 4
t4vs4 = DSAVEGetTotalVariationFromBulk(s, TRUE, 250, 0.5, rescale = FALSE);
#calculate this differently than in the function, with loops :)
#calculate the distribution over number of ones (vs twos) in the first of
#the two sets for each combination
numones = rep(0,5);#represents 0 1 2 3 4 ones, i.e. the first the number of combinations with 0 ones, etc
for (i in 1:5) {
for (j in (i+1):6) {
for (k in (j+1):7) {
for (m in (k+1):8) {
index = (i<5)+(j<5)+(k<5)+(m<5)+1; # represents number of ones in the combination
numones[index] = numones[index] + 1;
}
}
}
}
a = numones/sum(numones);#find number to scale each comb type
exp4vs4 = log(2.05/1.05)*a[1]*2 + log(1.80/1.30)*a[2]*2;
expect_equal(t1vs1,exp1vs1, info = "TR001: DSAVEGetTotalVariationFromBulk: 1 vs 1", tolerance=1e-10)
expect_equal(t4vs4, exp4vs4, info = "TR001: DSAVEGetTotalVariationFromBulk: 4 vs 4", tolerance=1e-10)
})
|
e1cf0bbb6b4ec8bdb5f04e3ce822efe04a445bd2 | ba855e045647547b6f704c39fe80fece37311340 | /man/aba.flatten.Rd | c3a744f45d4ec5e79a87167dcb7877f71405657b | [] | no_license | tobyjohnson/gtx | bb9f6a6ea9ec4ec720e16c988580ffc5fbf22573 | 9afa9597a51d0ff44536bc5c8eddd901ab3e867c | refs/heads/master | 2021-01-17T00:47:26.344059 | 2019-08-29T19:34:54 | 2019-08-29T19:34:54 | 29,961,962 | 20 | 12 | null | 2019-09-11T17:33:40 | 2015-01-28T10:18:52 | R | UTF-8 | R | false | true | 595 | rd | aba.flatten.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aba.R
\name{aba.flatten}
\alias{aba.flatten}
\title{\strong{aba.flatten() - Flatten full aba results}}
\usage{
aba.flatten(.data)
}
\arguments{
\item{.data}{\code{\link{aba.query}} results object to filter}
}
\value{
data.frame with input \code{\link{aba.query}} flattened to 1 gene / row.
}
\description{
The aba results and region queries will return colocs
}
\examples{
# Basic use:
colocs <- aba.query(hgncid = "foo")
colocs_flat <- aba.flatten(colocs)
}
\author{
Karsten Sieber \email{karsten.b.sieber@gsk.com}
}
|
abbdf5feed33835a3a2bcccf674d1fe7d7591a20 | b2db48ccdffe5060c8f215d04776332ae6838113 | /code/coef_map.R | 26b79f16f8a4bf3772f7509b62b6fabde676a6cd | [] | no_license | judgelord/dissertation | e5e42759dfab7d9bdf0029cb37cae3abab1f16ad | 2488363f1d546b2d8af515d5f520ce9c520fb679 | refs/heads/master | 2023-05-01T10:53:57.426547 | 2023-04-25T21:30:06 | 2023-04-25T21:30:06 | 152,605,888 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,417 | r | coef_map.R | # coef map to rename vars
cm = c("campaign_TRUE" = "Pressure Campaign",
"campaign_TRUE:presidentObama" ="Pressure Campaign x Obama",
"campaign_TRUE:presidentTrump" = "Pressure Campaign x Trump",
"campaign_TRUE:presidentBush" = "Pressure Campaign x Bush",
"campaign_TRUE:coalition_typePublic:presidentObama" = "Pressure Campaign x presidentObama",
"campaign_TRUE:coalition_business_TRUE" = "Pressure Campaign x Business",
"campaign_TRUE:coalition_typePublic" = "Pressure Campaign x Public",
"campaign_TRUE:coalition_typePublic:presidentTrump" = "Pressure Campaign x Public x Trump",
"log(comments + 1):coalition_typePublic" = "Log(Mass Comments) x Public",
"log(comments + 1):coalition_business_TRUE" = "Log(Mass Comments) x Business",
"coalition_TRUE" = "Coalition",
"log(coalition_size)" = "Log(Coalition Size)",
"coalition_size" = "Coalition Size",
"coalition_typePublic" = "Public",
"coalition_business_TRUE" = "Business Coalition",
"Coalition_PositionSupports Rule" = "Supports Rule",
"coalition_unopposedTRUE" = "Unopposed",
"PositionSupports Rule" = "Supports Rule",
"comments100k" = "Mass Comments",
"comments100k:presidentTrump" = "Mass Comments x Trump",
"comments100k:coalition_typePublic" = "Mass Comments x Public",
"comments100k:coalition_typePublic:presidentTrump" = "Mass Comments x Public x Trump",
"comments100k:coalition_business_TRUE" = "Mass Comments x Business",
"log(comments + 1)"= "Log(Mass Comments)",
"log(comments + 1):presidentTrump"= "Log(Mass Comments) x Trump",
"log(comments + 1):presidentBush"= "Log(Mass Comments) x Bush",
"I(comments100k^2)" = "(Mass Comments)^2",
"coalition_business:I(comments100k^2)" = "Business x (Mass Comments)^2",
"agency" = "Agency",
"org_name"= "Organization",
"docket_id" = "Rule Docket",
"president" = "President",
"presidentTrump" = "Trump",
"presidentObama" = "Obama",
"presidentBush" = "Bush",
"coalition_typePublic:presidentObama" = "Public x Obama",
"coalition_typePublic:presidentTrump" = "Public x Trump",
"coalition_business_TRUE:presidentTrump" = "Business x Trump",
"coalition_business_TRUE:presidentBush" = "Business x Bush",
"coalition_typePublic:presidentBush" = "Public x Bush",
"partyRepublican" = "Republican President",
"coalition_typePublic:partyRepublican" = "Republican President x Public",
"log(comments + 1):coalition_business_TRUE:presidentTrump"= "Log(Mass Comments) x Business x Trump",
"log(comments + 1):coalition_business_TRUE:presidentBush"= "Log(Mass Comments) x Business x Bush",
"log(comments + 1):coalition_typePublic:presidentTrump"= "Log(Mass Comments) x Public x Trump",
"log(comments + 1):coalition_typePublic:presidentBush"= "Log(Mass Comments) x Public x Bush",
"acme_0" = "Average Conditional Marginal Effect",
"ade_0" = "Average Direct Effect",
"coalition_congress" = "Members of Congress",
"presidentTrump:coalition_typePublic" = "Trump x Public",
"presidentBush:coalition_typePublic"= "Bush x Public",
"campaign_TRUE:presidentBush:coalition_typePublic" = "Campaign x Public x Bush",
"campaign_TRUE:coalition_business_TRUE:presidentBush" = "Campaign x Business x Bush",
"campaign_TRUE:presidentTrump:coalition_typePublic" = "Campaign x Public x Trump")
# coef map as tibble
cm2<- tibble(term = names(cm),
Term = cm)
#FIXME rename: rowsFE below is identical to success_rows6 further down; keep one and drop the duplicate
rowsFE <- tibble(
term = c("Dependent Variable"),
`1` = c("Lobbying Success"),
`2` =c("Lobbying Success"),
`3` = c("Lobbying Success"),
`4` = c("Lobbying Success"),
`5` = c("Lobbying Success"),
`6` = c("Lobbying Success")
)
attr(rowsFE, 'position') <- c(0)
# new name
success_rows6 <- tibble(
term = c("Dependent Variable"),
`1` = c("Lobbying Success"),
`2` =c("Lobbying Success"),
`3` = c("Lobbying Success"),
`4` = c("Lobbying Success"),
`5` = c("Lobbying Success"),
`6` = c("Lobbying Success")
)
attr(success_rows6, 'position') <- c(0)
# new name
success_rows3 <- tibble(
term = c("Dependent Variable"),
`1` = c("Lobbying Success"),
`2` =c("Lobbying Success"),
`3` = c("Lobbying Success")
)
attr(success_rows3, 'position') <- c(0)
|
545cfe0249feb622358740f473329e5855df13e0 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/blandr/examples/blandr.draw.Rd.R | ce42e2cff09facfd5fc2c215178f21876b5f05ec | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 749 | r | blandr.draw.Rd.R | library(blandr)
### Name: blandr.draw
### Title: Bland-Altman drawing function for R
### Aliases: blandr.draw
### ** Examples
# Generates two random measurements
measurement1 <- rnorm(100)
measurement2 <- rnorm(100)
# Generates a plot, with no optional arguments
blandr.draw( measurement1 , measurement2 )
# Generates a plot, using the in-built R graphics
blandr.draw( measurement1 , measurement2 , plotter = 'rplot' )
# Generates a plot, with title changed
blandr.draw( measurement1 , measurement2 , plotTitle = 'Bland-Altman example plot' )
# Generates a plot, with title changed, and confidence intervals off
blandr.draw( measurement1 , measurement2 , plotTitle = 'Bland-Altman example plot' ,
ciDisplay = FALSE , ciShading = FALSE )
|
dd6f32849e802086e80811d072f50591538f120c | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/Quandl/examples/Quandl.api_key.Rd.R | b58a6cbbabd79739f7eeb35a71d95fc1e9910122 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 189 | r | Quandl.api_key.Rd.R | library(Quandl)
### Name: Quandl.api_key
### Title: Query or set Quandl API key
### Aliases: Quandl.api_key
### ** Examples
## Not run:
##D Quandl.api_key('foobar')
## End(Not run)
|
6172fc83ef86514b2f43bafd69acd6db3d9db27f | f6de2d8a88b19ec7847de8e19651bec06b3c5640 | /man/geojsonio.Rd | 8382ad3d6603966ebd3b4cd8db451c6b435097f6 | [
"MIT"
] | permissive | mashoedoe/geojsonio | 04988c62765976c85e5440ada9d3610f0e1440d1 | 062e463800a58865b012a79bc955667c5e8c6847 | refs/heads/master | 2020-12-11T03:33:08.190335 | 2015-07-11T20:02:44 | 2015-07-11T20:02:56 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 586 | rd | geojsonio.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/geojsonio-package.R
\docType{package}
\name{geojsonio}
\alias{geojsonio}
\alias{geojsonio-package}
\title{Convert various data formats to/from GeoJSON or TopoJSON}
\description{
This package focuses mostly on converting lists, data.frame's, numeric, SpatialPolygons,
SpatialPolygonsDataFrame, and more to GeoJSON with the help of \code{rgdal} and
friends. You can currently read TopoJSON - writing TopoJSON will come in a future
version of this package.
}
\author{
Scott Chamberlain
Andy Teucher
}
|
a92224baa5170b12e74f5cc440180c73777f937e | 1f89e9b31c3e19e6a3c47e341cbbbb9eb3d94dcc | /main.R | 558cef1a156fe025f375486ff6808e8c81fc85dc | [
"MIT"
] | permissive | giveMeLife/QAP-MetaHeuristics | 44f84812669506e80d45b7a96464c9eae2f9708a | e4376d7ecf4a8abf5249b9d84c89d58097081e66 | refs/heads/master | 2022-12-11T06:31:09.614424 | 2020-08-30T06:42:50 | 2020-08-30T06:42:50 | 289,588,809 | 5 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,558 | r | main.R | #!/usr/bin/env Rscript
# QAP experiment driver. Usage:
#   Rscript main.R <instance-file> <SA|other> <iteration> <workdir>
# Reads a QAP instance, launches psrecord in the background to monitor this
# process, runs either simulated annealing ("SA") or the evolutive algorithm,
# and writes the result plus wall-clock seconds to a semicolon-separated CSV.
args <- commandArgs(trailingOnly = TRUE)
source("scripts/evolutiveAlgorithm.R")
source("scripts/qap.R")
source("scripts/simulatedAnnealing.R")

file <- args[1]
metaheuristic <- args[2]
iteration <- args[3]
workdir <- args[4]
print(args)

my_file <- paste0(workdir, "instances/", file)
instancia <- readQAP(my_file)
sol <- 1:instancia$n
poblation <- generatePoblation(sample(sol), 20)
sol <- sample(sol)

# Start psrecord (non-blocking) to log CPU/memory usage of this R process.
pid <- Sys.getpid()
run_tag <- paste0(file, "_", metaheuristic, "_", iteration)
command <- paste(
  "psrecord", pid,
  "--log", paste0("logs/monitor/", run_tag, ".txt"),
  "--plot", paste0("logs/", run_tag, ".png"),
  "--interval 1"
)
system(command, intern = FALSE, ignore.stdout = FALSE, ignore.stderr = FALSE, wait = FALSE, input = NULL)

start_time <- Sys.time()
if (metaheuristic == "SA") {
  # Initial temperature: objective value of the starting permutation.
  tmax <- evaluarQAP(sol, instancia$f, instancia$d)
  tmin <- 10
  it <- 20
  beta <- 0.95
  e <- simulatedAnnealing(instancia, sol, tmax, tmin, it, beta, iteration = iteration, prefix = file)
} else {
  e <- evolutiveAlgorithm(instancia, poblation, size = 50, generations = 20, fighters = 10, parents = 50, percent = 0.2, iteration = iteration, prefix = file)
}
end_time <- Sys.time()
total_time <- as.numeric(end_time - start_time, units = "secs")

output_file <- paste0(workdir, "output/results/", file, "_", iteration, "_", metaheuristic, ".csv")
write.table(c(e, total_time), file = output_file, row.names = FALSE, sep = ";", dec = ".")
04779a00255ec86ac9e2cc0f5667151a38a2b827 | d6dccbcc663fa63794a45a27d057c7d4577e51d5 | /src/r/denoising.r | eabc2171668857d266a2814a202aae6b38e6d89d | [] | no_license | iarkhanhelsky/master-thesis | cbbcfc3c284347c9c4b1f3c2307610e7a64f19bd | 283ddb295f4c88647666772fc8c01458eb1bb29f | refs/heads/master | 2021-01-22T06:37:10.507561 | 2015-05-27T03:24:15 | 2015-05-27T03:25:31 | 30,262,485 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,891 | r | denoising.r | thresholding.hard <- function(x, lambda) {
t <- c(x)
t[abs(t) <= lambda] <- 0
t
}
# Soft (shrinkage) thresholding: values with |x| <= lambda become 0, the rest
# are moved towards zero by lambda. Equivalent closed form:
# sign(x) * max(|x| - lambda, 0). The input is flattened with c() first.
thresholding.soft <- function(x, lambda) {
  values <- c(x)
  sign(values) * pmax(abs(values) - lambda, 0)
}
# 1-D DWT/IDWT aliases taken from the `wavelets` package.
dwt.1d <- wavelets::dwt
idwt.1d <- wavelets::idwt
# Universal (VisuShrink) threshold for s coefficients: sqrt(2 * log(s)).
lambda.universal <- function(s) sqrt(2 * log(s))
# Constant used by the block-shrinkage rule in neigh.block.base() below.
lambda.c <- 4.505
# Stein's Unbiased Risk Estimate for soft thresholding at level `lambda`
# applied to the coefficient vector `X` (Donoho & Johnstone, SureShrink):
#   SURE(lambda) = n - 2 * #{i : |X_i| <= lambda} + sum_i min(|X_i|, lambda)^2
# BUG FIX: the original evaluated min(lambda, X) over the *whole* vector
# inside sapply (a constant, ignoring each element and its absolute value),
# and subtracted 2 * sum of the sub-threshold *values* instead of twice their
# *count*.
SURE <- function(lambda, X) {
  n <- length(X)
  n - 2 * sum(abs(X) <= lambda) + sum(pmin(abs(X), lambda)^2)
}
# Threshold minimising the SURE risk, found by 1-D optimisation over
# [0, universal threshold].
lambda.sure <- function(X) {
  optimise(function(l) SURE(l, X), lower=0, upper=lambda.universal(length(X)))$minimum
}
# SureShrink hybrid threshold: for sparse coefficient vectors
# (sparsity(X) <= 1) fall back to the universal threshold, otherwise use the
# SURE-optimal one.
# FIX: scalar if/else instead of ifelse() — ifelse() always evaluated BOTH
# branches, so the optimise() call inside lambda.sure() ran even when its
# result was discarded.
lambda.sureShrink <- function(X) {
  if (sparsity(X) <= 1) {
    lambda.universal(length(X))
  } else {
    lambda.sure(X)
  }
}
# Sparsity test statistic used by lambda.sureShrink():
#   sqrt(n) * sum(X^2 - 1) / log2(n)^(3/2)
sparsity <- function(X) {
  n <- length(X)
  sqrt(n) * sum(X^2 - 1) / (log2(n)^1.5)
}
# 1-D SureShrink denoising: decompose X with the DWT, soft-threshold each
# detail level in d@W with its own level-specific SureShrink threshold, and
# reconstruct. t(t(...)) coerces the thresholded values back to a one-column
# matrix for the @W slot.
sure.shrink <- function(X, filter='d4') {
  d <- dwt.1d(X, filter = filter)
  d@W <- lapply(d@W, function(w) t(t(thresholding.soft(w, lambda.sureShrink(w)))))
  return(idwt.1d(d))
}
# 2-D SureShrink variant. Only subbands whose name contains "HH" (diagonal
# detail) are thresholded; each is flattened, soft-thresholded with its own
# SureShrink lambda, and reshaped back to the original matrix dimensions.
# NOTE(review): dwt.2d/idwt.2d are not defined in this file — they are
# expected to be in scope from another package (e.g. waveslim); verify.
sure.shrink2 <- function(X, filter) {
  d <- dwt.2d(X, filter)
  for(name in grep('HH', names(d), value=TRUE)) {
    w <- c(d[[name]])
    r <- thresholding.soft(w, lambda.sureShrink(w))
    d[[name]] <- matrix(r, nrow=dim(d[[name]])[1])
  }
  idwt.2d(d)
}
# Block shrinkage of a vector of wavelet detail coefficients `w` in the
# spirit of NeighBlock: each block of coefficients is kept or shrunk based on
# the energy of a larger, overlapping neighbourhood rather than judged
# individually. l0 = block length, l1 = extension on each side, so a
# neighbourhood spans l = l0 + 2*l1 coefficients.
neigh.block.base <- function(w) {
  n <- length(w)
  l0 <- floor(log(n) / 2)
  l1 <- max(1, floor(l0 / 2))
  # Output starts as a copy of the input; qualifying blocks are overwritten.
  djk <- c(w)
  if (l0 > 0)
  {
    # NOTE(review): group.count can be fractional when l0 does not divide n;
    # 1:group.count truncates, leaving trailing coefficients untouched.
    group.count <- n / l0
    # Tripled copy so neighbourhood indices near the ends remain valid.
    wext <- c(w, w, w)
    l <- l0 + 2 * l1
    for(g in 1:group.count) {
      # Neighbourhood [gl, gh] in the extended vector: offset by n into the
      # middle copy, shifted left by the overlap l1.
      # NOTE(review): gl advances by l per group while the target block below
      # advances by l0, so neighbourhoods drift away from their blocks as g
      # grows — verify this is intended.
      gl <- n + (g-1)*l - l1
      gh <- gl + l
      if (abs(gl - gh) > 1) {
        # Neighbourhood energy.
        sc <- sum(wext[gl:gh] ^ 2)
        # Target block [kl, kh] in original coordinates (l0 + 1 coefficients,
        # so consecutive blocks overlap by one element).
        kl <- (g-1)*l0 + 1
        kh <- g*l0 + 1
        if (abs(kl- kh) > 1 && sc != 0) {
          # Shrinkage factor max(0, (sc^2 - lambda.c*l)/sc) applied to every
          # coefficient of the block. NOTE(review): this factor is unbounded
          # above (~sc for large energies); the classical NeighBlock rule
          # uses a factor in [0, 1] such as (1 - lambda*l/sc)_+ — verify.
          djk[kl:kh] <- sapply(kl:kh, function(k) max(0, (sc^2 - lambda.c*l)/sc) * w[k])
        }
      }
    }
  }
  # Replace any NAs (e.g. produced by NA inputs) with zero before returning.
  djk[is.na(djk)] <- 0
  djk
}
# 1-D block-shrinkage denoising: decompose X with the DWT, run
# neigh.block.base() on every detail level (coerced back to a column matrix
# for the @W slot), and reconstruct.
neigh.block <- function(X, filter) {
  d <- dwt.1d(X, filter = filter)
  d@W <- lapply(d@W, function(w) matrix(neigh.block.base(w)))
  return(idwt.1d(d))
}
# 2-D block-shrinkage variant: same structure as sure.shrink2() but using
# neigh.block.base() as the shrinkage rule. Only subbands whose name contains
# "HH" are processed; each is flattened, shrunk, and reshaped back.
# NOTE(review): dwt.2d/idwt.2d are not defined in this file — verify which
# package supplies them.
neigh.block2 <- function(X, filter) {
  d <- dwt.2d(X, filter)
  for(name in grep('HH', names(d), value=TRUE)) {
    w <- c(d[[name]])
    r <- neigh.block.base(as.vector(w))
    d[[name]] <- matrix(r, nrow=dim(d[[name]])[1])
  }
  idwt.2d(d)
}
# Plot the hard and soft thresholding functions side by side on
# [-bound, bound]. The plot titles are runtime strings in Russian
# ("Жесткая замена" = hard thresholding, "Мягкая замена" = soft thresholding)
# and are left untouched. Relies on visual.multiplot(), defined elsewhere.
vis.thresholding.types <- function(lambda, bound) {
  t <- seq(-bound, bound, 0.01)
  r.s <- thresholding.soft(t, lambda)
  r.h <- thresholding.hard(t, lambda)
  visual.multiplot(
    qplot(t, r.h, geom='line', main="Жесткая замена", ylab=expression(delta[h])),
    qplot(t, r.s, geom='line', main="Мягкая замена", ylab=expression(delta[s])),
    cols=2)
}
# Overlay the clean and noisy versions of a signal against time `t` as two
# coloured lines (legend suppressed). melt() is expected in scope (e.g. from
# reshape2 — not loaded in this file) to reshape wide -> long.
vis.noisy <- function(t, clean, noisy) {
  data <- data.frame(
    t = t,
    noisy = noisy,
    clean = clean
  )
  data.long <- melt(data, id='t', value.name='x')
  ggplot(data=data.long, aes(x=t, y=x, colour=variable)) +
    theme(legend.position="none") +
    geom_line()
}
# Illustrate the universal threshold: scatter the magnitudes of s standard
# normal draws and overlay a red horizontal line at lambda.universal(s) with
# a "lambda[U]" annotation. The y-axis label is a runtime string in Russian
# ("Амплитуда" = amplitude) and is left untouched.
vis.universal.bound <- function(s = 1000) {
  t <- 1:s
  v <- rnorm(t)
  d <- data.frame(amp = abs(v), t = t)
  # NOTE(review): `u` and `m` are computed but never used below.
  u <- data.frame(t = c(-Inf, Inf), y = lambda.universal(s), u = factor(lambda.universal(s)))
  m <- data.frame(t = c(-Inf, Inf), mean = mean(v))
  ggplot(d, aes( t, amp ) ) +
    ylab('Амплитуда') +
    geom_point() +
    geom_hline(color='red', yintercept=lambda.universal(s)) +
    annotate("text", 0, ymin = lambda.universal(s),y=lambda.universal(s) * 1.05, label = "lambda[U]", parse=TRUE, show_guide=TRUE)
}
# Plot the clean signal and its denoised reconstruction, shading the band
# between the two curves (grey ribbon) to visualise reconstruction error.
vis.diff <- function(t, clean, restored) {
  d <- data.frame(
    t = t,
    clean = clean,
    restored = restored
  )
  # Lower/upper envelope of the two curves for the ribbon.
  d$min <- pmin(d$clean, d$restored)
  d$max <- pmax(d$clean, d$restored)
  ggplot(d) +
    ylab("x(t)") +
    xlab("t") +
    geom_line(aes(x=t, y=clean, color='blue')) +
    geom_ribbon(aes(x=t, ymin=min, ymax=max), fill="grey", alpha=.4) +
    geom_line(aes(x=t, y=restored, color='red'))
}
|
11deacdd1155fe441d92735a8c2c33742b44996d | 906e6bbbda8b4f46a6efaf31abbb318180a9c536 | /R/KL.R | 316dc07ec373f3fe02a9847165b4b2f749f67ec1 | [] | no_license | cran/catR | 6c8dffc1a1027b3883f5ffcd4ec5b3d0f4017fed | 112a2efb6621fed446a5df4b1467e4bfe937ab81 | refs/heads/master | 2022-07-10T15:36:37.953792 | 2022-06-24T07:00:02 | 2022-06-24T07:00:02 | 17,694,983 | 3 | 3 | null | null | null | null | UTF-8 | R | false | false | 1,786 | r | KL.R | KL <- function (itemBank, item, x, it.given, model=NULL, theta=NULL,lower = -4, upper = 4, nqp = 33,
type = "KL", priorDist="norm", priorPar = c(0, 1), D=1, X=NULL, lik = NULL)
{
if (type != "KL" & type != "KLP")
stop("'type' must be either 'KL' or 'KLP'", call. = FALSE)
if (!is.null(X) & !is.null(lik)){
if (length(X) != length(lik)) stop("'X' and 'lik' must have the same length!",call.=FALSE)
}
if (is.null(theta)) theta<-thetaEst(it.given,x,D=D,model=model, method="ML")
KLF<-NULL
par <- rbind(itemBank[item,])
if (is.null(X)) X<-seq(from=lower,to=upper,length=nqp)
if (is.null(model)){
if (is.null(lik)){
L <- function(th, r, param)
prod(Pi(th, param,D=D)$Pi^r * (1 - Pi(th,param,D=D)$Pi)^(1 - r))
lik<-sapply(X,L,x,it.given)
}
KLF[1:nqp] <- Pi(theta,par,D=D)$Pi * log(Pi(theta,par,D=D)$Pi/Pi(X[1:nqp],par,D=D)$Pi) + (1 - Pi(theta,par,D=D)$Pi) * log((1 - Pi(theta,par,D=D)$Pi)/(1 - Pi(X[1:nqp],par,D=D)$Pi))
crit.value <- lik*KLF
if (type=="KLP") {
pd<-switch(priorDist,norm=dnorm(X,priorPar[1],priorPar[2]),unif=dunif(X,priorPar[1],priorPar[2]))
crit.value <- crit.value*pd
}
}
else{
if (is.null(lik)){
LL <- function(th, param, r, model,D=1) {
prob <- Pi(th, param, model = model,D=D)$Pi
res <- 1
for (i in 1:length(r)) res <- res * prob[i, r[i] + 1]
return(res)
}
lik<-sapply(X,LL,it.given,x,model=model,D=D)
}
pi<-Pi(theta,par,model=model,D=D)$Pi
for (i in 1:length(X)){
pri<-Pi(X[i],par,model=model,D=D)$Pi
KLF[i]<-sum(pi*log(pi/pri),na.rm=TRUE)
}
crit.value <- lik*KLF
if (type=="KLP") {
pd<-switch(priorDist,norm=dnorm(X,priorPar[1],priorPar[2]),unif=dunif(X,priorPar[1],priorPar[2]))
crit.value <- crit.value*pd
}
}
RES <- integrate.catR(X, crit.value)
return(RES)
}
|
65487950c8d2ba9893444b363a41fdea4aa1bc6b | 6bf17ed3ca5388fbbca9fa3a60e40e52ca11b4e4 | /server.R | f812bfdf4ec8e74c3d27f15264ee951520072010 | [] | no_license | joebookslevy/Capstone | 7f5180eb79454846a00220571888c043405e2df5 | 672001a5e2179baa4c0b40739a9f131abdf02b54 | refs/heads/master | 2021-01-10T09:54:13.274782 | 2016-01-24T16:14:21 | 2016-01-24T16:14:21 | 49,830,948 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 155 | r | server.R | shinyServer(
function(input, output) {
output$text1<- renderText({
suggest(input$text1)}) ##List of 1-5 predicted words based on data set
}
) |
c60f901471b9189726e7a40311e55ee805110a95 | ff5d10f0cd1d25a6253f18159d57b4ec3f6f5dc4 | /plot5.R | bfca2c593f3629f121482cb1169d6a0d24ca51fe | [] | no_license | sheilazpy/Exdata_Plot2 | f076cfc547a23cfdba40f010bc7243cdba967c8e | 0f8135734530674e76c9e1442b608d2680532b0f | refs/heads/master | 2020-05-19T11:07:11.336961 | 2014-12-07T11:56:54 | 2014-12-07T11:56:54 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,123 | r | plot5.R | ## plot5.R -> plot5.png
## Read files
# NEI: PM2.5 emissions records; SCC: source classification code lookup table.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
## Subsetting data
# SCC codes (column 1) whose Short.Name contains "Motor" (case-sensitive grep).
motorSCC <- SCC[grep("Motor", SCC$Short.Name), 1]
# fips "24510" is treated as Baltimore City (see variable names below).
baltimoreNEI <- NEI[NEI$fips %in% c( "24510"), ]
baltimoreMotor <- baltimoreNEI[baltimoreNEI$SCC %in% motorSCC, ]
## Aggregate
# Total emissions per year for motor-vehicle sources in Baltimore.
annualBaltimoreMotor <- aggregate(Emissions ~ year, baltimoreMotor, sum)
# Plot 5
png("plot5.png", width = 480, height = 480)
# Save graphics settings so they can be restored after plotting.
opar <- par(no.readonly = TRUE)
# Two stacked panels: line plot on top, bar plot below.
par( mfrow= c(2,1))
plot(annualBaltimoreMotor$year, annualBaltimoreMotor$Emissions,
     type="b", ylab="Total Emissions[in Tons]", xlab="Year", col = "blue",
     main="Annual PM2.5 Emissions in Baltimore From Motor Vehicle(1999~2008)")
plot5 <- barplot(annualBaltimoreMotor$Emissions,
                 main = "Annual PM2.5 Emissions in Baltimore From Motor Vehicle(1999~2008)",
                 names.arg = annualBaltimoreMotor$year,
                 xlab = "Year",
                 ylab = "Total Emissions [in Tons]",
                 col = rainbow(10, start = 0, end = 1, alpha = 0.5))
# Label each bar with its rounded total (barplot returns the bar midpoints).
text(plot5, 0, round(annualBaltimoreMotor$Emissions, 0), cex = .8, pos = 3)
par(opar)
dev.off()
614a915a9c4702ce06f201c5b820d39275f4dd20 | cbc8ac3950002bd7996c2c94bf74ea138b52cbad | /scripts/03_hoch_diff_gene.R | 8adab23bbece57d172df2df037a300f657946eed | [
"MIT"
] | permissive | harrislachlan/data | 5f09dda7a6eb0c31c50aa38b69d3b57b848cdaf7 | 9e35d3adc58b644cce0612b2ab6312ce4add0aa4 | refs/heads/main | 2023-02-18T02:29:34.345833 | 2021-01-20T11:39:38 | 2021-01-20T11:39:38 | 310,011,825 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 531 | r | 03_hoch_diff_gene.R | load("hoch_nsc_lineage.RData")
# Seurat differential-expression workflow on the hoch_nsc_lineage object
# loaded on the preceding line: switch to the RNA assay, log-normalize, then
# test "nsc" vs "granule neurons" with no pre-filtering (min.pct = 0,
# logfc.threshold = 0 keep every gene; filtering is applied afterwards).
DefaultAssay(hoch_nsc_lineage) <- "RNA"
hoch_nsc_lineage <- NormalizeData(hoch_nsc_lineage, assay = "RNA")
A <- FindMarkers(hoch_nsc_lineage, ident.1 = "nsc", ident.2 = "granule neurons",
                 min.pct = 0.0, assay = "RNA", logfc.threshold = 0.0)
# Recompute adjusted p-values with Benjamini-Hochberg FDR; this overwrites
# the p_val_adj column that FindMarkers returned — NOTE(review): confirm the
# overwrite is intended.
sig_genes_pvalue <- A$p_val
FDR <- p.adjust(sig_genes_pvalue, "fdr")
A$p_val_adj <- FDR
# Keep genes detected in at least 10% of cells in either group.
A <- A %>% filter(pct.1 >= 0.1|pct.2 >= 0.1)
#granule neurons here are a mix of neuroblasts and more mature neurons
write.csv(A, "nscs_neurob.csv")
|
ab6e565fc8a8cbf7864888cda08c50f393cef02e | e2028416f8b840411bc1b1cf420056515a59c9af | /tests/plan.R | 6f156ed506ce9052f3ef9dfdfa4968e9c2b83421 | [] | no_license | HenrikBengtsson/future.callr | 70fd9b6db231d2309c258cf6ce99233810caf26e | 3c27b32f640954ae5fe7bd258e2f9ba506b278ca | refs/heads/master | 2023-09-04T09:58:53.948772 | 2023-08-09T19:51:02 | 2023-08-09T19:51:02 | 92,703,596 | 62 | 0 | null | null | null | null | UTF-8 | R | false | false | 828 | r | plan.R | source("incl/start,load-only.R")
# Test script for future.callr: checks that plan() accepts the callr
# backend, that globals are frozen when a future is created, and that
# %<-% futures evaluate in a separate R process.
message("*** plan() ...")
message("*** future::plan(future.callr::callr)")
# plan() returns the previous plan, which we restore and print back.
oplan <- future::plan(future.callr::callr)
print(future::plan())
future::plan(oplan)
print(future::plan())
library("future.callr")
plan(callr)
for (type in c("callr")) {
  mprintf("*** plan('%s') ...", type)
  plan(type)
  stopifnot(inherits(plan("next"), "callr"))
  a <- 0
  f <- future({
    b <- 3
    c <- 2
    a * b * c
  })
  a <- 7 ## Make sure globals are frozen
  # The future must have captured a == 0, so the product is 0 even though
  # `a` was reassigned after the future was created.
  v <- value(f)
  print(v)
  stopifnot(v == 0)
  mprintf("*** plan('%s') ... DONE", type)
} # for (type ...)
message("*** Assert that default backend can be overridden ...")
mpid <- Sys.getpid()
print(mpid)
plan(callr)
# %<-% runs in a child process, so its PID must differ from ours.
pid %<-% { Sys.getpid() }
print(pid)
stopifnot(pid != mpid)
message("*** plan() ... DONE")
source("incl/end.R")
|
fbf0d3add32009f9550b1ff22a332ff363628073 | 8d7fab3c3e1139ea0ab27b6913fdbc447d0faac8 | /plot2.R | 2d17fb5d3e635a78e66d6da275f431d0af1cc070 | [] | no_license | johnffarmer/ExData_Plotting1 | 9886df1a4fe79f2fab1b64ed1607b7b42dc99c74 | 31cc27013a8e7eb991d6a6a5168e2d38a1699ea5 | refs/heads/master | 2021-01-23T21:42:41.521871 | 2015-05-10T22:28:38 | 2015-05-10T22:28:38 | 35,383,615 | 0 | 0 | null | 2015-05-10T18:53:47 | 2015-05-10T18:53:46 | null | UTF-8 | R | false | false | 528 | r | plot2.R | household_data <- read.table("household_power_consumption.txt", header=TRUE, sep=";", na.strings = "?",stringsAsFactors=FALSE)
# Build a POSIX timestamp from the separate Date and Time columns
# (household_data is read from household_power_consumption.txt above).
household_data$DateTime <- strptime(paste(household_data$Date, household_data$Time),
                                    "%d/%m/%Y %H:%M:%S")
# Keep only the two days of interest: 1-2 February 2007.
filtered_data <- subset(household_data, Date %in% c("1/2/2007","2/2/2007"))
# Line chart of global active power over time, written to plot2.png.
png("plot2.png")
plot(filtered_data$DateTime, filtered_data$Global_active_power,
     type = "l", xlab = "", ylab = "Global Active Power (kilowatts)")
dev.off()
|
ab6ace22aebe564d9c9e010ff7a70ecd87ecab17 | cfcf2a2002bf6099ed5bbfcfa215f3c83efb14a2 | /OLD/199_RLplotting1.R | cb8ac4826ff571850d3efc0d77e6788a4e8993fb | [] | no_license | griffada/AQUACAT_UKCEH | c07dcbf1ac277cd4759929e3cc2fe121cdc68fb5 | cee49f0fa5a8b3d1fc7dab7f02da4f64648ffc5a | refs/heads/master | 2023-08-16T08:02:51.831710 | 2021-10-22T13:50:48 | 2021-10-22T13:50:48 | 281,631,316 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,281 | r | 199_RLplotting1.R | #~~~~~~~~~~~~~~~~~~~~~~~
# Adam Griffin, 2020-10-16
#
# Function for plotting extracted values in terms of return period.
#
# For aquaCAT, Project 07441.
#
# OUTPUTS: .png plots
#
#~~~~~~~~~~~~~~~~~~~~~~~
library(ncdf4)
library(raster)
library(fields)
library(readr)
library(dplyr)
# Load the shared setup script from the location appropriate to the
# platform (Windows network drive vs. Linux project path).
if(substr(osVersion,1,3) == "Win"){
  source("S:/CodeABG/setup_script_00.R")
}else{
  source("/prj/aquacat/CodeABG/setup_script_00.R")
}
# GB land/sea grid used as the map background.
# NOTE(review): this path is Windows-only even on the Linux branch above --
# confirm it resolves on both platforms.
gbgrid <- readRDS("S:/data/Misc/gbshape.RDS")
genericMapPlotter <- function(rn, vals, threshold=NULL,
                              label="Value", gbgrid,
                              filename=NULL){
  # Plot `vals` at the river-network points of `rn` on top of the GB
  # land/sea grid `gbgrid`, using a 3x3 focal max to thicken the one-pixel
  # network for display.
  #
  # rn         data frame of river network: row, col, E, N
  #            (should be loaded by setup_script_00.R)
  # vals       vector of values to plot at corresponding points of rn
  # threshold  optional per-point thresholds; vals below their threshold
  #            are drawn with the "greyed out" special code
  # label      legend label describing the value plotted
  #            (currently unused; kept for interface stability)
  # gbgrid     matrix of values for GB shape (-1 for land, NA for sea)
  # filename   if supplied, the plot is also saved as .png to this path
  NH <- nrow(rn)
  if(length(vals) != NH){
    stop("Vals not correct length.")
  }
  if(is.null(threshold)){
    # Sentinel low enough that no value is ever below it.
    threshold <- rep(-1e6, NH)
  }else{
    if(length(threshold) != NH){
      stop("Threshold not correct length.")
    }
  }
  # Special plotting codes: -0.5 = below threshold, -1 = missing value.
  vals[vals < threshold] <- -0.5
  vals[is.na(vals)] <- -1
  M <- gbgrid
  # Insert the point values into the grid (seq_len is safe when NH == 0).
  for(i in seq_len(NH)){
    M[rn$row[i], rn$col[i]] <- vals[i]
  }
  # Columns reversed so north is up; focal max thickens the network.
  Mfocal <- raster::focal(raster(M[,ncol(M):1]),
                          w=matrix(1, 3, 3),
                          fun=function(x){
                            if(!all(is.na(x))){max(x, na.rm=TRUE)}else{return(NA)}
                          })
  # First two breaks isolate the special codes (-1, -0.5) into the two
  # green background colours; the rest span [0, max] over 19 colours
  # (22 breaks for 21 colours, as image.plot requires).
  brks <- c(-1.01, -.51, seq(0, max(vals, na.rm=TRUE), length.out=20))
  f <- function(){
    par(mar=c(1,1,1,1), mgp=c(1,1,0))
    # na.rm is required here: M still contains NA for sea cells, so a bare
    # max(M) would be NA and break zlim.
    image.plot(as.matrix(Mfocal),
               x=0:700, y=0:1000, ylim=c(0,1000), zlim=c(-2,max(M, na.rm=TRUE)),
               col=c("darkseagreen1", "darkseagreen2", topo.colors(19)),
               breaks=brks, asp=1,
               xlab="", ylab="", axes=FALSE)
  }
  f()
  if(!is.null(filename)){
    png(filename, res=300, width=100, height=100, units='mm', pointsize=10)
    f()
    dev.off()
  }
}
returnLevelMapPlotter <- function(RL_df, rn_matrix, rn, eventNumber,
                                  poe_days=TRUE,
                                  filename=NULL){
  # plots a map of return periods (in terms of years) and saves it as .png
  #
  # RL_df        event dataframe: should include Easting, Northing,
  #              eventNo, gpp, val, thresh.
  # rn_matrix    matrix showing location of river network in GB.
  #              (can be taken straight from ncdf4::ncvar_get)
  # rn           data frame of river network: row, col, E, N.
  #              (should be loaded by setup_script_00.R)
  # eventNumber  number from RL_df of desired event.
  # poe_days     if TRUE, converts from POE in days to POE in years.
  # filename     if provided, a .png is saved to this filepath.
  RL_day <- RL_df %>% dplyr::filter(eventNo == eventNumber)
  # Number of river-network cells, used to express inundation as a percent.
  s <- sum(rn_matrix[!is.na(rn_matrix)] > 0)
  w_above_thresh <- RL_day$val > RL_day$thresh
  Wdown <- RL_day[which(w_above_thresh), ]
  sw <- round(sum(w_above_thresh)/s, 2) * 100
  # Mark the whole network with the "below threshold" plotting code, then
  # overwrite the above-threshold cells with their return period.
  rn_matrix[rn[,1:2]] <- -0.5
  if(poe_days){
    # Convert daily POE (gpp) to a return period in years (360-day year).
    rn_matrix[as.matrix(Wdown[,c("Easting","Northing")])] <-
      1 / (1 - (1-Wdown$gpp)^360)
  }
  # BUG FIX: the original reversed seq_len(ncol(flowR)) before flowR
  # existed; it is rn_matrix's columns that are reversed (north up).
  flowR <- raster(rn_matrix[, rev(seq_len(ncol(rn_matrix)))])
  # 3x3 focal max thickens the one-pixel river network for display.
  flowN <- focal(flowR, w=matrix(1, 3, 3),
                 fun=function(x){
                   if(!all(is.na(x))){max(x, na.rm=TRUE)}else{return(NA)}
                 })
  # Two bins for the special codes, then 0-70 years over 19 colours.
  # NOTE(review): return periods above 70 years fall outside the breaks
  # and are left uncoloured -- confirm this capping is intended.
  brks <- c(-1.01, -0.51, seq(0, 70, length.out=20))
  f <- function(){
    par(mar=c(1,1,1,1), mgp=c(1,1,0))
    image.plot(as.matrix(flowN),
               x=0:700, y=0:1000, ylim=c(0,1000), zlim=c(-2,200),
               col=c("darkseagreen1", "darkseagreen2", topo.colors(19)),
               breaks=brks, asp=1,
               xlab="", ylab="", axes=FALSE)
    # Annotate the top-right corner with the inundated fraction.
    pu <- par('usr')
    text(pu[1] + (pu[2] - pu[1])*0.9, pu[3] + (pu[4] - pu[3])*0.9,
         paste0("POT2 threshold\n", sw, "% inundation"),
         adj=c(1,0.5), cex=0.8)
  }
  f()
  if(!is.null(filename)){
    png(filename, res=300, width=100, height=100, units='mm', pointsize=10)
    f()
    dev.off()
  }
}
#### DEBUGGING ####
# Manual smoke test, guarded by if(FALSE) so it never runs when sourced.
# Requires data_wd/wd_id from setup_script_00.R and the ncdf4 package.
if(FALSE){
  present <- readr::read_csv(paste0(data_wd,
                      "TestData/present_returnlevels_POT2_pc05.csv"),
                    col_types = cols( .default = col_double()))
  netcdf <- nc_open(paste0(wd_id,"dmflow_timechunks.nc"))
  # First time slice of the flow array, used as the river-network mask.
  fl <- ncvar_get(netcdf, "dmflow", start=c(1,1,1), count=c(-1, -1, 1))
  eventNo <- 17
  returnLevelMapPlotter(RL_df=present, rn_matrix=fl, rn,
                        eventNumber=eventNo, filename="test0.png")
}
3caa092b17fbdebcee89eae689cabd6b55c8a5df | d31602f21f6c627b05a6889b94aab7a07b5ceb9b | /R/cvDeps.R | d47442066d72edafbc808ff3dabe6f50c0315d46 | [
"MIT"
] | permissive | evandeilton/cvforecast | ff7162983e163a5f180deed360074145e142fd4d | d6d934d1eb1e036b472a16c6ea5dafdce0a36111 | refs/heads/master | 2020-04-25T12:26:40.028320 | 2017-05-22T02:17:45 | 2017-05-22T02:17:45 | 37,792,953 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 360 | r | cvDeps.R | #' @aliases cvforecast package-cvforecast
#' @import foreach plyr rkt forecast
#' @importFrom lubridate hour minute second day month quarter year ymd week
#' @importFrom lmtest bptest dwtest
#' @importFrom TSA LB.test
#' @importFrom tseries jarque.bera.test
#' @importFrom xts xts try.xts as.xts
#' @importFrom zoo zoo as.zoo coredata
#' @name cvforecast
NULL
|
76dd3d432b4d496652a97014476da467736125e4 | bdd17429152bfe4b0f3efc3bef1a048dcc7a700c | /Objective Results/PPMI.R | 36bd359571ffdbb993107c2ffcd44915e9815a90 | [] | no_license | sobradob/thesis | 867bb7e60ec7209743c3a44d5cf0f51b0607c831 | 3f01477846fb7b62aeb6bf2312b7585df08f5e7c | refs/heads/master | 2021-09-14T04:32:33.851873 | 2018-05-08T14:47:27 | 2018-05-08T14:47:27 | 108,986,717 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,605 | r | PPMI.R | # PPMI results
# Feature engineering + neural-network imputation of location clusters.
# Expects in scope: objLog (time points with missing clusters), fin
# (observed location log with timestampMs/clust), allPoints (cluster ->
# lon/lat lookup), nClust, getModelInput(), plus dplyr/tidyr/keras.
# create features for model input
# create test variable
test<- objLog %>% mutate(timestampMs = as.numeric(time), clust = Cluster)
seconds_in_day <- 24*60*60
# Time-of-day is encoded cyclically (sin/cos of seconds since midnight).
# The neighbour-related features are NA here and filled after the merge.
test<- test %>%
  select(time,timestampMs,clust) %>%
  mutate( day = strftime(time,"%u"),
          month = strftime(time,"%m"),
          secMidnight = lubridate::period_to_seconds(lubridate::hms(strftime(time,"%T"))),
          sinTime = sin(2*pi*secMidnight/seconds_in_day),
          cosTime = cos(2*pi*secMidnight/seconds_in_day),
          nextMeas = NA,
          prevMeas = NA,
          lagClust = NA,
          leadClust = NA,
          timestampLast = NA,
          timestampNext = NA,
          clustNext = NA,
          clustPrev = NA,
          isMissing = 1)
# create train variable
train<- fin %>%
  select(time,timestampMs,clust) %>%
  mutate( day = strftime(time,"%u"),
          month = strftime(time,"%m"),
          secMidnight = lubridate::period_to_seconds(lubridate::hms(strftime(time,"%T"))),
          sinTime = sin(2*pi*secMidnight/seconds_in_day),
          cosTime = cos(2*pi*secMidnight/seconds_in_day),
          nextMeas = as.numeric(lead(timestampMs)-timestampMs),
          prevMeas = as.numeric(timestampMs-lag(timestampMs)),
          lagClust = lag(clust),
          leadClust = lead(clust),
          timestampLast = as.numeric(time),
          timestampNext = as.numeric(time),
          clustNext = clust,
          clustPrev = clust,
          isMissing = 0)
# bind test and train together to scale appropriately & fill next values
# Interleave both sets in time so the missing rows inherit their nearest
# observed neighbour (fill down = previous, fill up = next), then rescale
# the gap lengths jointly so train and test share one scale.
all <- rbind(train,test) %>%
  arrange(timestampMs) %>%
  fill(timestampLast,clustPrev, .direction = "down") %>%
  fill(timestampNext,clustNext, .direction = "up") %>%
  mutate( prevMeas = timestampMs - timestampLast,
          nextMeas = timestampNext - timestampMs,
          lagClust = clustPrev,
          leadClust = clustNext) %>%
  mutate(nextMeas = scales::rescale(nextMeas),
         prevMeas = scales::rescale(prevMeas))
# separate into test and train
test <- all %>% filter(isMissing == 1)
train <- all %>% filter(isMissing == 0)
# remove lagged NA's
test <- test %>% na.omit()
train <- train %>% na.omit()
# calculate baseline model naive
# Baseline: predict the previous observed cluster ("last carried forward").
cat(paste0("Baseline model previous Clust is: ", test %>% summarise(mean(clust == lagClust))))
# distance measures for naive baseline
# Join true and predicted clusters to their coordinates and summarise the
# great-circle error of the naive baseline.
test %>%
  select(clust,lagClust) %>%
  left_join(allPoints,by="clust") %>%
  left_join(allPoints,by=c("lagClust" = "clust"),suffix = c(".clust",".predC")) %>%
  na.omit() %>%
  mutate( dist = raster::pointDistance(matrix(c(lon.clust,lat.clust),ncol = 2),
                                       matrix(c(lon.predC,lat.predC),ncol = 2),
                                       longlat = T)) %>%
  summary()
# generate model input
dataListObj<- getModelInput(test = test, train = train)
# separate into individual matrices
x_train <- dataListObj[[1]]
x_test <- dataListObj[[2]]
y_train <- dataListObj[[3]]
y_test <- dataListObj[[4]]
# define sequential model
# Two dense+dropout layers, softmax over the nClust cluster classes.
modelObj <- keras_model_sequential()
modelObj %>%
  layer_dense(units = 250, activation = 'relu', input_shape = ncol(x_test)) %>%
  layer_dropout(rate = 0.4) %>%
  layer_dense(units = 120, activation = 'relu') %>%
  layer_dropout(rate = 0.3) %>%
  layer_dense(units = nClust, activation = 'softmax')
# compile model
modelObj %>% compile(
  loss = 'categorical_crossentropy',
  optimizer = optimizer_rmsprop(),
  metrics = c('accuracy')
)
# train model
modelObj %>% fit(
  as.matrix(x_train), as.matrix(y_train),
  epochs = 10, batch_size = 200,
  validation_split = 0.3
)
# get model accuracy
modelObj %>% evaluate(as.matrix(x_test),as.matrix(y_test))
# get distance matrix of all clusters
distMatrix<- geosphere::distm(allPoints[,c("lon","lat")])
#Calculate distance measure for all missing timepoints
# Great-circle distance between each true cluster and the model's
# predicted cluster. (Shadowing base::t with `t` is unfortunate but kept.)
t<- objLog %>% mutate(predC = as.integer(modelObj %>% predict_classes(as.matrix(x_test)))) %>%
  select(Cluster, predC,lon,lat) %>%
  left_join(allPoints,by=c("predC" = "clust"),suffix = c(".clust",".predC")) %>%
  na.omit() %>%
  mutate( dist = raster::pointDistance(matrix(c(lon.clust,lat.clust),ncol = 2),
                                       matrix(c(lon.predC,lat.predC),ncol = 2),
                                       longlat = T))
# explore results
summary(t)
# get expected distance
# Expected error: probability-weighted distance over all candidate clusters.
# NOTE(review): probs[,-1] drops the first class column -- presumably
# cluster indexing starts at 1 while predict_proba columns start at class
# 0; verify against getModelInput's encoding.
probs <- modelObj %>% predict_proba(as.matrix(x_test))
probs<- probs[,-1]
dOrdered<- distMatrix[,t %>% select(Cluster) %>% pull()]
t$expDist<- diag(probs %*% dOrdered)
# calculate certainty
# Certainty = highest predicted class probability per time point.
t$certainty<- apply(X = probs[,-1],MARGIN = 1,max)
# explore results
summary(t)
|
a2d79cd30e30a7b7bfed2c3f08c4cd5629797d8f | 40b34e9cf5f4dea2a34fc143a67a03f6d4f01c05 | /cubic_spline_test.R | 9a5d9d6e70edf46cf20821f0fc254f224923b554 | [] | no_license | bbbales2/gp | 8f16cdb9a7563307bee781c3902247e76cb17a1a | 663c71fac94d7095078b02141e60a4642e401446 | refs/heads/master | 2021-01-12T02:11:58.617718 | 2018-07-13T20:36:07 | 2018-07-13T20:36:07 | 78,486,789 | 0 | 0 | null | 2017-06-03T18:22:52 | 2017-01-10T01:47:58 | Python | UTF-8 | R | false | false | 304 | r | cubic_spline_test.R | library(tidyverse)
library(ggplot2)
y1 = 5.0
y2 = 2.0
k1 = -5.0
k2 = 3.0
x1 = 1.0
x2 = 1.75
x = seq(x1 - 0.1, x2 + 0.1, length = 100)
t = (x - x1) / (x2 - x1)
a = k1 * (x2 - x1) - (y2 - y1)
b = -k2 * (x2 - x1) + (y2 - y1)
q = (1 - t) * y1 + t * y2 + t * (1 - t) * (a * (1 - t) + b * t)
qplot(x, q)
|
7414d27b7657794edf5aa8d130dcd056ec19d9e8 | ae14b8136a13fbd24a1df5ec0d5c81f2a6a22ffa | /R/illustration.R | 736a6cc7df9b5307af8749bfe6d9dc47b428245e | [] | no_license | cran/startupmsg | 2a755e1abda5c0e53cd184fac6ca284e9dab63a0 | 2646fc1439d17f3f3728b4a0e99df5978cb3fa16 | refs/heads/master | 2021-01-22T09:09:35.561603 | 2019-03-11T16:20:22 | 2019-03-11T16:20:22 | 17,700,108 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,125 | r | illustration.R | #####EXAMPLES:
##note: to avoid misunderstandings: 'SMHandler' stands for /S/tartup/M/essage/Handler/
#mySMHandler <- function(c) {
# pkg <- startupPackage(c)
# npkg <- nchar(pkg)
# linestarter <- paste(":",pkg,"> ", sep ="")
# linestarterN <- paste("\n",linestarter, sep ="")
# linestarterE <- paste(linestarterN,"$",sep="")
# writeLines(paste(linestarter, sub(linestarterE,"\n",
# gsub("\n", linestarterN,
# conditionMessage(c))),sep=""),stderr())
#}
mystartupMessage <- function(..., domain = NULL, pkg = "", type = "version",
                      SMHandler = mySMHandler, endline = FALSE){
     # Emit a startupMessage() and route the resulting StartupMessage
     # condition through a custom printer instead of the default output.
     #
     # ...        message parts, passed on to startupMessage()
     # domain     translation domain, passed on
     # pkg        package name attached to the condition
     # type       message type ("version", "notabene", "information", ...)
     # SMHandler  function(c) that actually renders the condition
     # endline    whether the message closes the startup block
     #
     # Restarts established here:
     #  * custom          -- prints the condition via the supplied handler
     #  * onlytypeMessage -- prints only when the condition's type is in atypes
     #  * muffleMessage   -- discards the message silently
     withRestarts(withCallingHandlers(
             startupMessage(..., domain = domain,
                            pkg = pkg, type=type, endline = endline),
             # Re-signal so outer handlers also see the condition, then
             # jump to the "custom" restart with the chosen handler.
             StartupMessage=function(m)
                {signalCondition(m)
                 invokeRestart("custom",c=m,f=SMHandler)}
             ),
             #as suggested by Seth Falcon:
             onlytypeMessage = function(c0,atypes)
                      {if(startupType(c0) %in% atypes)
                           SMHandler(c=c0)
                       },
             #as suggested by Seth Falcon:
             custom = function(c,f) f(c),
             muffleMessage = function() NULL )
     invisible(NULL)
  }
buildStartupMessage <- function(..., pkg, library=NULL, domain=NULL,
                                packageHelp=FALSE, MANUAL = NULL,
                                VIGNETTE = NULL,
                                SMHandler=mySMHandler){
  # Assemble and emit a package startup banner: a version line, optional
  # "notabene" lines from ..., and an information line pointing to help,
  # NEWS, URL, manual and vignette resources.
  # Behaviour is controlled by options("StartupBanner"):
  #   "off"      -> nothing; "complete"/unset -> full banner; otherwise
  #   only the version line.
  #
  # pkg/library  package (and library path) to describe
  # packageHelp  if TRUE, mention ?"pkg" in the info line
  # MANUAL       URL or path components (relative to the package dir)
  #              of a manual; VIGNETTE  text pointing to a vignette
  # SMHandler    printer passed through to mystartupMessage()
  #
  # Version line (shown unless the banner is switched off).
  # NOTE(review): with a NULL option, !NULL=="off" is logical(0); the ||
  # relies on lazy old-R semantics -- verify on R >= 4.3 where
  # zero-length || is an error.
  tit.vers <- readVersionInformation(pkg,library)
  if((!getOption("StartupBanner")=="off")||is.null(getOption("StartupBanner")))
     mystartupMessage(tit.vers$"title", " (version ", tit.vers$"ver", ")",
                      domain = domain, pkg = pkg, type="version",
                      SMHandler = SMHandler)
  ###
  # Full banner only for "complete" (or unset) StartupBanner.
  if((getOption("StartupBanner")=="complete")||
      is.null(getOption("StartupBanner"))){
  llist <- length(list(...))
  ### checks as to existence of URL- NEWS- and MANUAL-information
  #
  URL <- readURLInformation(pkg,library)
  NEWS <- pointertoNEWS(pkg,library)
  #
  # Build the individual text fragments ("" when the item is absent).
  if ( packageHelp) packageHelpS <- c("?\"", pkg, "\"")
  else packageHelpS <- ""
  if (!is.null(NEWS)) NEWSS <- NEWS
  else NEWSS <- ""
  if (!is.null(URL)) URLS <- c("\n ",URL)
  else URLS <- ""
  ## MANUALL : is there a MANUAL entry?
  # A MANUAL is used either as a web URL or, failing that, as a file path
  # relative to the installed package directory (only if the file exists).
  MANUALL <- FALSE
  MANUALS <- ""
  if(!is.null(MANUAL))
     {if (all(substr(as.character(MANUAL),1,7)=="https://"))
         {MANUALL <- TRUE
          MANUALS <- c("\n ",MANUAL)}
      else {MANUAL1 <- paste(MANUAL,
                      sep = .Platform$file.sep,
                      collapse = .Platform$file.sep)
            MANUALpath <- file.path(system.file(package = pkg),
                               MANUAL1, collapse = "")
            if (file.exists(MANUALpath))
               { MANUALL <- TRUE
                 MANUALS <- c("\n ",MANUALpath)}
           }
     }
  VIGNETTES = ifelse(!is.null(VIGNETTE),
                     paste("\n",VIGNETTE, sep = "", collapse = ""), "")
  ## are there any info-lines?
  L <- sum(!is.null(URL), packageHelp , !is.null(NEWS) , MANUALL,
           !is.null(VIGNETTE))
  ##determining the separators:
  # Separators join the fragments grammatically: commas between items and
  # a localized ", as well as" before the last group when several exist.
  seps <- character(3)
  seps[1] <- ifelse(packageHelp&&L>1,", ","")
  seps[2] <- ifelse(!is.null(NEWS)&&
                    sum(!is.null(NEWS) , MANUALL, !is.null(URL))>1,
                    gettext(", as well as", domain = domain),
                    "")
  seps[3] <- ifelse(MANUALL && sum(MANUALL, !is.null(URL))>1,
                    ", ", "")
  if( (MANUALL|| !is.null(URL)) && is.null(NEWS))
       seps[1] <- gettext(", as well as", domain = domain)
  #
  # Emit the optional notabene lines and, if any pointers exist, the
  # combined "For more information see ..." line.
  if (L>0){
      if (llist > 0)
          mystartupMessage(..., domain=domain, pkg=pkg, type="notabene",
                           SMHandler=SMHandler)
      mystartupMessage("For more information see ",
             packageHelpS, seps[1], NEWSS, seps[2], URLS, seps[3],
             MANUALS, VIGNETTES, "\n",
             domain = domain, pkg = pkg, type = "information",
             SMHandler = SMHandler, endline = TRUE)
   }
  else{
      if (llist > 0)
          mystartupMessage(..., domain=domain, pkg=pkg, type="notabene",
                           SMHandler=SMHandler, endline = TRUE)
   }
  }
}
########### end Examples
|
b337c333c9cbd5756edfe2dda766271115bc04af | af802742350160ecc98e26d445873043d30e0e17 | /R/multslapmeg.R | af03b8e7b889df477d2444256d817ecceb7cfb82 | [] | no_license | mitra-ep/slapmeg | 9a3de2d781341283262e01e2aabf5cb7593e48ad | 5e4936092988f53288d572491d2e01f49791a168 | refs/heads/master | 2021-07-16T14:07:54.479410 | 2020-09-15T19:36:19 | 2020-09-15T19:36:19 | 207,333,258 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,101 | r | multslapmeg.R | #' @title Testing multiple pathways using SLaPMEG (shared latent process mixed effects model and Globaltest) for
#' longitudinal Omics data
#'
#' @description Run slapmeg simultaneously for several pathways. For each pathway a p-value is calculated based
#' on SLaPMEG prodcedure as in \code{\link{multslapmeg}}.
#' Then the p-values are adjusted for multiple comparisons based on the selected procedure.
#'
#' @param pathlist A list of pathways to be tested.
#'
#' @param fixed A one-sided linear formula object for specifying the
#' fixed-effects in the linear mixed model at the latent process level that
#' starts with the \code{~} sign.
#'
#' @param random A one-sided formula for the random-effects in the
#' latent process mixed model and starts with the \code{~} sign. At least one random
#' effect should be included. Covariates with a random-effect are separated
#' by \code{+}.
#'
#' @param grouping name of the covariate representing the grouping structure.
#'
#' @param subject name of the covariate representing the repeated measures structure such as subject IDs.
#'
#' @param data data frame containing the variables named in list of \code{pathlist}, \code{fixed},
#' \code{random}, \code{grouping} and \code{subject}.
#'
#' @param method Correction method for p-values, the default is "BH". For more methods see\code{?p.adjust}.
#'
#' @return A data frame including the names of pathways and corresponding adjusted p-values.
#'
#' @author Mitra Ebrahimpoor
#'
#' \email{m.ebrahimpoor@@lumc.nl}
#'
#' @seealso
#'
#' \code{\link{slapmeg}}, \code{\link{pairslapmeg}}, \code{\link{plotslapmeg}}
#'
#' @references
#' paper DOI will be added.
#'
#' @examples
#'
#' \donttest{
#' # simulate data with 20 omics
#' testdata<-simslapmeg(nY=20, ntime=5, nsubj = 30)
#' head(testdata)
#'
#' # creat a list of 3 random pathways of different sizes
#'
#' pathlist<-list(path1=sample(colnames(testdata)[-c(1:3)],5),
#' path2=sample(colnames(testdata)[-c(1:3)],11),
#' path3=sample(colnames(testdata)[-c(1:3)],9) )
#'
#'
#' # use multslapmeg to test for the differential expression of all pathways
#' #and get adjusted p-values
#' mfit<- multslapmeg(pathlist, ~time, ~1+time, grouping="group", subject="ID", data=testdata)
#' summary(mfit)
#' }
#'
#' @export
#'
#' @importFrom stats p.adjust formula terms
multslapmeg<-function(pathlist, fixed, random, grouping, subject, method = "BH", data){
  # Run slapmeg() once per pathway in `pathlist`, then adjust the
  # resulting p-values for multiple testing with p.adjust(method).
  # Returns a data frame of class "mslapmeg" with one row per pathway:
  # pathway name, adjusted p-value, pathway size and slapmeg method.
  #check the arguments
  if(missing(fixed)) stop('The argument fixed must be specified for all models!')
  if(missing(random)) stop('The argument random must be specified for all models!')
  # inherits() instead of class(...) != "list": class() may return a
  # vector, and a length > 1 condition is an error inside if() on
  # current R versions.
  if(!inherits(pathlist, "list")) stop("Pathlist argument should be a list!")
  if(length(pathlist)<2) stop("Only one pathway is defined!")
  # `fixed` must be a one-sided formula: fixed[[1]] is the `~` operator
  # and a two-sided formula would have length 3.
  if(fixed[[1]]!="~") stop("The Fixed formula is not correctly specified! Check the vignette for help.")
  if(length(fixed)>2) stop("The Fixed formula is not correctly specified! Check the vignette for help.")
  if(! method %in% c("holm", "hochberg", "hommel", "bonferroni", "BH", "BY", "fdr", "none"))
    stop("P-value correction method is not correctly specified! Check ?p.adjust.")
  #apply slapmeg for all pathways
  # Build "omic1+omic2+...~ rhs" formula strings, one per pathway, reusing
  # the right-hand side of the user's `fixed` formula.
  fixed_forms<-sapply(pathlist,
                  function(x) paste0(paste0(x,collapse="+"),"~",Reduce(paste, deparse(fixed[[2]]))))
  fixed_forms<-lapply(fixed_forms, function(f) as.formula(f))
  slapmeg<-sapply(fixed_forms, function(forms) {
    mod<-slapmeg(forms, random, grouping, subject, data)
    return(list(mod$Globaltest[1], mod$slapmethod)) })
  psize<-sapply(pathlist, function(x) length(x))
  #correct the p-values and round the result
  # NOTE(review): slapmeg[[1]] / slapmeg[[2]] index the list-matrix that
  # sapply() returns here; this relies on the internal structure of
  # slapmeg()'s Globaltest component -- verify against slapmeg()'s
  # return value.
  adj.ps<-round(p.adjust(slapmeg[[1]], method), 4)
  #organize and return the output
  # Default pathway names Path1..PathN when the input list is unnamed.
  if(is.null(names(pathlist))) {
    path.nom<-paste0("Path", 1:length(adj.ps))} else
      path.nom<-names(pathlist)
  res<-data.frame(path.nom, adj.ps, psize, slapmeg[[2]],row.names = NULL)
  colnames(res)<-c("Path.Name",paste0("adj.P","(",paste(method),")"),"Path.size","method")
  class(res) <-c("mslapmeg")
  return(res)
}
|
4b96811981eef7b1a7eae3499dabc81ccb54b7ae | 6b423163a5210a3f9dbe23673a3af18a0dc59627 | /regression.r | 853bc09fa75af1d27b77bc8fc5cbb690eedee984 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | PsychoCod3r/R | bc27da41d53178788e03e40c5f80d653b210fb56 | 703e54e6b3449d26b3e734f6eeef3653c01d78de | refs/heads/main | 2023-03-24T13:49:26.834188 | 2021-03-13T19:45:14 | 2021-03-13T19:45:14 | 305,809,664 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,074 | r | regression.r | #!/usr/bin/env Rscript
# Command syntax:
# regression.r [--noheader] filename.csv
# CSV file must be in one of two formats:
# independent,dependent
# index,independent,dependent
# CSV header is optional
# trailingOnly = TRUE restricts the vector to the user-supplied arguments;
# the bare commandArgs() also contains the interpreter path and its flags,
# which would wrongly be treated as filenames here.
args <- commandArgs(trailingOnly = TRUE)
h <- TRUE # CSV file has header?
f <- NULL # input filename
for (arg in args) {
  if (arg == "--noheader") {
    h <- FALSE
  } else {
    f <- arg
  }
}
if (is.null(f)) {
  stop("Usage: regression.r [--noheader] filename.csv", call. = FALSE)
}
data <- read.csv(f, header = h)
fields <- colnames(data) # Used for labeling axes
if (ncol(data) == 2) { # No index field: independent,dependent
  colnames(data) <- c("x", "y")
}
if (ncol(data) == 3) { # Index field first: index,independent,dependent
  # Fix: name the *second* column "x" so the regression and plot use the
  # independent variable rather than the index (the axis labels below
  # already take the second-to-last field as the x label).
  colnames(data) <- c("q", "x", "y")
}
# Simple linear regression of dependent on independent.
lm.data <- lm(y ~ x, data)
plot(data$x, data$y, col = "#0000ff", type = "n", pch = 19,
     # I got an error message if I didn't supply xlim and ylim
     xlim = c(min(data$x), max(data$x)),
     ylim = c(min(data$y), max(data$y)),
     xlab = fields[ncol(data) - 1],
     ylab = fields[ncol(data)])
lines(data$x, data$y, col = "#0000ff", type = "o")
abline(lm.data, col = "#ff0000")
lm.data # Print slope and intercept
|
761d13e2648d9e1e951fa9d8d07df957b83487b2 | a36ee60ef3673d4af1980f9a5151a68b63ce3cea | /inst/doc/ex1_BackgroundQuestionnaireGeneration.R | 3786d48b6d9afb3a1852a58de8cd2d5f92ef065f | [] | no_license | cran/lsasim | 36c042b64d96e3ae78745874a962b3a9b6c0ab34 | 86d89294245c04db04ccf9ce15a50c70b229bade | refs/heads/master | 2023-03-31T18:30:22.208960 | 2023-03-28T10:10:02 | 2023-03-28T10:10:02 | 82,914,843 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,313 | r | ex1_BackgroundQuestionnaireGeneration.R | ## ----setup, include = FALSE, warning = FALSE--------------------------------------------
library(knitr)
options(width = 90, tidy = TRUE, warning = FALSE, message = FALSE)
opts_chunk$set(comment = "", warning = FALSE, message = FALSE,
echo = TRUE, tidy = TRUE)
## ----load-------------------------------------------------------------------------------
library(lsasim)
## ----packageVersion---------------------------------------------------------------------
packageVersion("lsasim")
## ----equation, eval=FALSE---------------------------------------------------------------
# questionnaire_gen(n_obs, cat_prop = NULL, n_vars = NULL, n_X = NULL, n_W = NULL,
# cor_matrix = NULL, cov_matrix = NULL,
# c_mean = NULL, c_sd = NULL,
# theta = FALSE, family = NULL,
# full_output = FALSE, verbose = TRUE)
## ----ex 1a------------------------------------------------------------------------------
set.seed(4388)
bg <- questionnaire_gen(n_obs = 100, family = "gaussian")
str(bg)
## ----ex 1b------------------------------------------------------------------------------
set.seed(4388)
bg <- questionnaire_gen(n_obs = 100, theta = TRUE, family = "gaussian")
str(bg)
## ----ex 2a------------------------------------------------------------------------------
set.seed(4388)
bg <- questionnaire_gen(n_obs = 100, n_vars = 4, family = "gaussian")
str(bg)
## ----ex 2b------------------------------------------------------------------------------
set.seed(4388)
bg <- questionnaire_gen(n_obs = 100, n_vars = 4, theta = TRUE, family = "gaussian")
str(bg)
## ----ex 3a------------------------------------------------------------------------------
set.seed(4388)
bg <- questionnaire_gen(n_obs = 100, n_X = 3, n_W = 0, theta = TRUE, family = "gaussian")
str(bg)
## ----ex 3b------------------------------------------------------------------------------
set.seed(4388)
bg <- questionnaire_gen(n_obs = 100, n_X = 3, theta = TRUE, family = "gaussian")
str(bg)
## ----ex 3c------------------------------------------------------------------------------
set.seed(4388)
bg <- questionnaire_gen(n_obs = 100, cat_prop = list(1, 1, 1, 1), theta = TRUE, family = "gaussian")
str(bg)
## ----ex 4a------------------------------------------------------------------------------
set.seed(4388)
bg <- questionnaire_gen(n_obs = 100, n_X = 0, n_W = 2, family = "gaussian")
str(bg)
## ----ex 4b------------------------------------------------------------------------------
set.seed(4388)
bg <- questionnaire_gen(n_obs = 100, n_X = 0, n_W = list(2, 4, 4, 4), family = "gaussian")
str(bg)
## ----ex 5a------------------------------------------------------------------------------
set.seed(4388)
bg <- questionnaire_gen(n_obs = 100, n_X = 2, n_W = list(2, 2), theta = TRUE,
c_mean = c(500, 0, 0), c_sd = c(100, 1, 1), family = "gaussian")
str(bg)
## ----ex 5b------------------------------------------------------------------------------
set.seed(4388)
props <- list(1, c(.25, 1), c(.2, .8, 1))
yw_cov <- matrix(c(1, .5, .5, .5, 1, .8, .5, .8, 1), nrow = 3)
bg <- questionnaire_gen(n_obs = 100, cat_prop = props, cov_matrix = yw_cov,
c_mean = 2,
family = "gaussian")
str(bg)
|
14c770a978fa9cf62457f5a356200534b35af92c | 7ee2341bca2bfa888d3497e6489e9f8f51460f23 | /inst/tinytest/test_cube.R | c31ccc08d55e08b5b9f4b3559e72fa749339fa26 | [] | no_license | gdemin/maditr | 4203a11804d5ebc243aeb4394b7f71eb3b34e488 | ea3ee62fdc9392a2bd335fd0695d491cf34f6449 | refs/heads/master | 2022-05-10T18:04:05.927975 | 2022-04-02T14:36:26 | 2022-04-02T14:36:26 | 129,561,860 | 60 | 4 | null | 2019-01-02T23:01:22 | 2018-04-14T23:26:58 | R | UTF-8 | R | false | false | 1,120 | r | test_cube.R | cat("\nContext:","cube, rollup and etc", "\n")
n = 24L
set.seed(25)
DT = data.table(
color = sample(c("green","yellow","red"), n, TRUE),
year = as.Date(sample(paste0(2011:2015,"-01-01"), n, TRUE)),
status = as.factor(sample(c("removed","active","inactive","archived"), n, TRUE)),
amount = sample(1:5, n, TRUE),
value = sample(c(3, 3.5, 2.5, 2), n, TRUE)
)
DF = as.data.frame(DT)
# rollup
expect_equal(
rollup(DT, j = sum(value), by = c("color","year","status")),
rollup(DF, j = sum(value), by = c("color","year","status"))
)
# cube
expect_equal(
cube(DT, j = lapply(.SD, sum), by = c("color","year","status"), id=TRUE, .SDcols="value"),
cube(DF, j = lapply(.SD, sum), by = c("color","year","status"), id=TRUE, .SDcols="value")
)
# groupingsets
expect_equal(
groupingsets(DT, j = c(list(count=.N), lapply(.SD, sum)), by = c("color","year","status"),
sets = list("color", c("year","status"), character()), id=TRUE),
groupingsets(DF, j = c(list(count=.N), lapply(.SD, sum)), by = c("color","year","status"),
sets = list("color", c("year","status"), character()), id=TRUE)
)
|
49a54f3fbe4369d7019c4f880d8017afae884d57 | cdda0c172955e5fcaf24c1fd3a531717733b0991 | /pisa.survey.R | ca2d8cb095717c15380e4f260de22a89a62bedf8 | [] | no_license | avnerkantor/openpisa2 | 0ac9d52c43f62ad9d68434418e5739f1d47bf075 | bf9c0dd720cdc9152b538666a02033a0e80175a7 | refs/heads/master | 2021-05-23T07:21:39.024297 | 2020-11-28T17:50:56 | 2020-11-28T17:50:56 | 75,209,547 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 611 | r | pisa.survey.R | updateCheckboxGroupInput(session, "show_survey_vars", "", colnames(surveyAnalyze), selected = c("Country", "subject", "variable", "r.squared", "p.value"), inline=TRUE)
output$surveyTable = DT::renderDataTable(
surveyAnalyze%>%select(input$show_survey_vars),
filter = 'top',
server = TRUE,
rownames= FALSE,
#extensions = 'Buttons',
options = list(
pageLength = 5,
scrollX = TRUE,
fixedColumns = TRUE,
# order = list(list(5, 'desc')),
searchCols = list(
list(search = 'Israel'),
list(search = 'ESCS')
)
#dom = 'Bfrtip',
#buttons = c('copy', 'excel')
)
) |
51d5508744423ad7cd61664690035b493214902b | 29585dff702209dd446c0ab52ceea046c58e384e | /NPC/R/StudentsT.R | c23ece8dec994596398740405bd7680a7d54ce6e | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 250 | r | StudentsT.R | StudentsT <- function (y, tr, tl, ...) {
## Student's T statistic
d <- mean.default(y[tr==tl]) - mean.default(y[tr!=tl])
se <- sqrt(stats::var(y[tr==tl]) / sum(tr==tl) +
stats::var(y[tr!=tl]) / sum(tr!=tl))
return (d / se)
}
|
773c8857377b652aa3f7125895e6f7ebfdf3b69f | faa4637c4d07b0b7da1dae6e2f58e01e87ed79fe | /EDA.R | fc0aea986247324574a090a34b0a975f29c223b2 | [] | no_license | gracegk/Bops_by_Country | 4d12c1cd9c0f40a0c85629cc5476ece046de7353 | e46a2b669f0cad0b19cf4f598e52f00f2dfea49b | refs/heads/master | 2021-07-13T02:54:14.615757 | 2020-11-03T20:06:08 | 2020-11-03T20:06:08 | 221,054,913 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,672 | r | EDA.R | source("Setup.R")
library(plotly)
library(ggplot2)
# Exploratory plots of Spotify chart data; expects `all` (all countries
# combined, with a `country` column), plus per-country frames `usa` and
# `taiwan`, loaded by Setup.R.
# variables to consider: danceability, speechiness, acousticness, valence, tempo, duration_ms
# irrelevant variables: URL, type, uri, track_href, analysis_url
################################
## how data from different countries look like
# One density plot per audio feature, coloured by country; ggplotly makes
# them interactive with the country name as the hover tooltip.
# distribution of speechiness
plot_density_speech <- ggplot(all, aes(x=speechiness, fill=country,
                                       text = paste(country)))+
  geom_density(alpha=0.7, color=NA)+
  labs(x="Speechiness", y="Density") +
  guides(fill=guide_legend(title="Country"))+
  theme_minimal()+
  ggtitle("Distribution of Speechiness Data")
ggplotly(plot_density_speech, tooltip=c("text"))
# distribution of acousticness
plot_density_acoustic <- ggplot(all, aes(x=acousticness, fill=country,
                                         text = paste(country)))+
  geom_density(alpha=0.7, color=NA)+
  labs(x="Acousticness", y="Density") +
  guides(fill=guide_legend(title="Country"))+
  theme_minimal()+
  ggtitle("Distribution of Acousticness Data")
ggplotly(plot_density_acoustic, tooltip=c("text"))
# distribution of danceability
plot_density_dance <- ggplot(all, aes(x=danceability, fill=country,
                                      text = paste(country)))+
  geom_density(alpha=0.7, color=NA)+
  labs(x="Danceability", y="Density") +
  guides(fill=guide_legend(title="Country"))+
  theme_minimal()+
  ggtitle("Distribution of Danceability Data")
ggplotly(plot_density_dance, tooltip=c("text"))
# distribution of energy
plot_density_energy <- ggplot(all, aes(x=energy, fill=country,
                                       text = paste(country)))+
  geom_density(alpha=0.7, color=NA)+
  labs(x="Energy", y="Density") +
  guides(fill=guide_legend(title="Country"))+
  theme_minimal()+
  ggtitle("Distribution of Energy Data")
ggplotly(plot_density_energy, tooltip=c("text"))
# distribution of valence -- not rly insightful
plot_density_val <- ggplot(all, aes(x=valence, fill=country,
                                    text = paste(country)))+
  geom_density(alpha=0.7, color=NA)+
  labs(x="Valence", y="Density") +
  guides(fill=guide_legend(title="Country"))+
  theme_minimal()+
  ggtitle("Distribution of Valence Data")
ggplotly(plot_density_val, tooltip=c("text"))
# distribution of tempo -- not that different. what's with the two peaks?
plot_density_tempo <- ggplot(all, aes(x=tempo, fill=country,
                                      text = paste(country)))+
  geom_density(alpha=0.7, color=NA)+
  labs(x="Tempo", y="Density") +
  guides(fill=guide_legend(title="Country"))+
  theme_minimal()+
  ggtitle("Distribution of Tempo Data")
ggplotly(plot_density_tempo, tooltip=c("text"))
# distribution of duration -- not rly insightful
plot_density_duration <- ggplot(all, aes(x=duration_ms, fill=country,
                                         text = paste(country)))+
  geom_density(alpha=0.7, color=NA)+
  labs(x="Duration", y="Density") +
  guides(fill=guide_legend(title="Country"))+
  theme_minimal()+
  ggtitle("Distribution of Duration Data")
ggplotly(plot_density_duration, tooltip=c("text"))
# distribution of liveness -- not rly insightful
plot_density_live <- ggplot(all, aes(x=liveness, fill=country,
                                     text = paste(country)))+
  geom_density(alpha=0.7, color=NA)+
  labs(x="Liveness", y="Density") +
  guides(fill=guide_legend(title="Country"))+
  theme_minimal()+
  ggtitle("Distribution of Liveness Data")
ggplotly(plot_density_live, tooltip=c("text"))
################################
# isolate the numeric variables
# Column positions are hard-coded; assumes the usa frame's column order --
# TODO confirm against Setup.R.
usa_test <- usa[,c(4, 7, 8, 10, 12, 13, 15, 16, 22)]
View(cor(usa_test))
## relationships between stream count or rank and song characteristics
# speechiness
ggplot(usa, aes(x=speechiness, y=log(Streams))) +
  geom_point(aes(col=date)) +
  ggtitle("Speechiness vs. Streams (USA)")
plot_ly(data=usa, x = ~speechiness, y = ~Streams,
        type = "scatter", color = ~date, showlegend = T)
plot_ly(data=taiwan, x = ~speechiness, y = ~Streams,
        type = "scatter", color = ~date, showlegend = T)
# acousticness
plot_ly(data=usa, x = ~acousticness, y = ~Streams,
        type = "scatter", color = ~date, showlegend = T)
# danceability
plot_ly(data=usa, x = ~danceability, y = ~Streams,
        type = "scatter", color = ~date, showlegend = T)
# artist as group?
################################
ggplot(data = all, mapping = aes(x = danceability, y = log(Streams), color=country)) +
  geom_point()
# compare stream percentage?
log(all$Streams)
# Percentage of a chart's total streams captured by each of its top rows.
#
# data:  a data frame with a numeric `Streams` column (assumed already sorted
#        by chart rank -- TODO confirm against the caller).
# n_top: how many leading rows to report (default 200, matching the original
#        hard-coded chart size).
#
# Returns a numeric vector of length min(n_top, nrow(data)). Using head()
# instead of `Streams[1:n_top]` avoids the NA padding the original produced
# when the chart had fewer than 200 entries.
get_stream_pct <- function(data, n_top = 200) {
  top_streams <- head(data$Streams, n_top)
  # na.rm guards the denominator against missing stream counts.
  top_streams / sum(data$Streams, na.rm = TRUE) * 100
}
92b3fe90ec33e0c8a43a35f4f55487c9b5a471a3 | 6bfb71f09b903929c286ab2741c25231c093b473 | /data/mathom_archives/pair_stan/seepairs_esttriads.R~ | 7f59a47ce51a177f3b1f3249f0d7228d038aafde | [] | no_license | stevenlangsford/trianglestairs | ad21f8f3c4c69babb2085bd2d86e04268dd620ce | 3ae1501f2a07b768ca5fe8101df323a4f9921097 | refs/heads/master | 2020-03-24T21:58:35.233140 | 2018-11-01T14:09:31 | 2018-11-01T14:09:31 | 143,060,301 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,958 | seepairs_esttriads.R~ | library(tidyverse)
## Session setup for the paired-comparison Stan analysis -----------------------
library(rstan)

# NOTE(review): rm(list = ls()) only clears the global environment and is an
# anti-pattern in scripts (prefer running in a fresh session); kept as-is
# because removing it would change the script's behavior.
rm(list=ls())

# Run MCMC chains in parallel on all cores and cache compiled Stan models.
options(mc.cores = parallel::detectCores())
rstan_options(auto_write = TRUE)

# readData.R lives in the parent directory; it is expected to define the
# areapairs.df / heightpairs.df / widthpairs.df data frames used below.
# The working directory is switched back afterwards.
setwd("..")
source("readData.R")
setwd("pair_stan")
# Fit the sensitivity Stan model to one participant's paired-comparison data.
#
# targdata: rows are trials, with columns responsekey ('a', 'l', or ' '),
#           presentationposition1 (0 when option 1 was shown first), and
#           std.diff (standardized option1 - option2 difference score).
#
# Choice coding: difference scores are option1 - option2, ignoring
# presentation position.
#   * 1-2 presentation order (presentationposition1 == 0): 'a' (left) means
#     opt1 > opt2 (choice 3); 'l' (right) means opt1 < opt2 (choice 1).
#   * 2-1 order: this flips -- 'a' means opt1 < opt2 (1), 'l' means
#     opt1 > opt2 (3).
#   * ' ' (space) means equal / no preference (choice 2).
#
# Returns the stanfit object from sensitivity.stan.
singleppnt.estparams <- function(targdata){
    key <- targdata$responsekey
    shown_first <- targdata$presentationposition1 == 0

    # Vectorized recoding (replaces the original 1:nrow() loop). Rows whose
    # key is none of ' '/'a'/'l' keep any pre-existing choice value, exactly
    # as the loop left them untouched.
    choice <- if ("choice" %in% names(targdata)) targdata$choice else rep(NA, nrow(targdata))
    choice[key == " "] <- 2
    choice[key == "a"] <- ifelse(shown_first[key == "a"], 3, 1)
    choice[key == "l"] <- ifelse(shown_first[key == "l"], 1, 3)
    targdata$choice <- choice

    ## Quick sanity check (expect negative, zero, positive std.diffs for
    ## choices 1, 2, 3 respectively):
    ## ggplot(targdata, aes(x = choice, y = std.diff, group = choice)) +
    ##   geom_violin() + geom_point(alpha = .3) + theme_bw()

    datalist <- list(N = nrow(targdata),
                     diff = targdata$std.diff,
                     choice = targdata$choice)
    stan(file = "sensitivity.stan",
         data = datalist,
         iter = 1000,
         chains = 4,
         control = list(max_treedepth = 15))
}
## Demo: fit the model separately to one participant's area, height, and
## width comparison data, caching each stanfit to disk for later inspection.
area.est <- singleppnt.estparams(filter(areapairs.df, ppntid == 4574294))
save(area.est, file = "test_area.RData")

height.est <- singleppnt.estparams(filter(heightpairs.df, ppntid == 4574294))
save(height.est, file = "test_height.RData")

width.est <- singleppnt.estparams(filter(widthpairs.df, ppntid == 4574294))
save(width.est, file = "test_width.RData")
| |
f01fac324c03d701497fee286baa6c04a27c1687 | eb4782952de8f1a5f5c716f0485411101aaeafc5 | /R/readtext.R | de5069fc2a71548b9ac9c5f0d4e8fa06da21d700 | [] | no_license | quanteda/readtext | 0518b1746b496b77017d504f491d88529e4e0aea | 647c46510fbc09b605cb46e38873f68ff157b858 | refs/heads/master | 2023-06-09T01:24:38.597751 | 2023-06-03T16:42:07 | 2023-06-03T16:42:07 | 71,951,499 | 71 | 25 | null | 2022-12-01T12:42:45 | 2016-10-26T00:47:47 | R | UTF-8 | R | false | false | 13,863 | r | readtext.R | #' read a text file(s)
#'
#' Read texts and (if any) associated document-level meta-data from one or more source files.
#' The text source files
#' come from the textual component of the files, and the document-level
#' metadata ("docvars") come from either the file contents or filenames.
#' @param file the complete filename(s) to be read. This is designed to
#' automagically handle a number of common scenarios, so the value can be a
#'   single filename, a vector of file names, a remote URL, or a file "mask" using a
#' "glob"-type wildcard value. Currently available filetypes are:
#'
#' **Single file formats:**
#'
#' \describe{
#' \item{`txt`}{plain text files:
#' So-called structured text files, which describe both texts and metadata:
#' For all structured text filetypes, the column, field, or node
#'   which contains the text must be specified with the `text_field`
#' parameter, and all other fields are treated as docvars.}
#' \item{`json`}{data in some form of JavaScript
#' Object Notation, consisting of the texts and optionally additional docvars.
#' The supported formats are:
#' \itemize{
#' \item a single JSON object per file
#' \item line-delimited JSON, with one object per line
#' \item line-delimited JSON, of the format produced from a Twitter stream.
#' This type of file has special handling which simplifies the Twitter format
#' into docvars. The correct format for each JSON file is automatically detected.}}
#' \item{`csv,tab,tsv`}{comma- or tab-separated values}
#' \item{`html`}{HTML documents, including specialized formats from known
#' sources, such as Nexis-formatted HTML. See the `source` parameter
#' below.}
#' \item{`xml`}{XML documents are supported -- those of the
#' kind that can be read by [xml2::read_xml()] and navigated through
#' [xml2::xml_find_all()]. For xml files, an additional
#' argument `collapse` may be passed through `...` that names the character(s) to use in
#' appending different text elements together.}
#' \item{`pdf`}{pdf formatted files, converted through \pkg{pdftools}.}
#' \item{`odt`}{Open Document Text formatted files.}
#' \item{`doc, docx`}{Microsoft Word formatted files.}
#' \item{`rtf`}{Rich Text Files.}
#'
#' **Reading multiple files and file types:**
#'
#'   In addition, `file` can be not only a path
#'   to a single local file, but also combinations of any of the above types, such as:
#' \item{a wildcard value}{any valid
#' pathname with a wildcard ("glob") expression that can be expanded by the
#' operating system. This may consist of multiple file types.}
#' \item{a URL to a remote}{which is downloaded then loaded}
#' \item{`zip,tar,tar.gz,tar.bz`}{archive file, which is unzipped. The
#' contained files must be either at the top level or in a single directory.
#' Archives, remote URLs and glob patterns can resolve to any of the other
#' filetypes, so you could have, for example, a remote URL to a zip file which
#' contained Twitter JSON files.}
#' }
#' @param text_field,docid_field a variable (column) name or column number
#' indicating where to find the texts that form the documents for the corpus
#' and their identifiers. This must be specified for file types `.csv`,
#' `.json`, and `.xls`/`.xlsx` files. For XML files, an XPath
#' expression can be specified.
#' @param docvarsfrom used to specify that docvars should be taken from the
#' filenames, when the `readtext` inputs are filenames and the elements
#' of the filenames are document variables, separated by a delimiter
#' (`dvsep`). This allows easy assignment of docvars from filenames such
#' as `1789-Washington.txt`, `1793-Washington`, etc. by `dvsep`
#' or from meta-data embedded in the text file header (`headers`).
#' If `docvarsfrom` is set to `"filepaths"`, consider the full path to the
#' file, not just the filename.
#' @param dvsep separator (a regular expression character string) used in
#' filenames to delimit docvar elements if `docvarsfrom="filenames"`
#' or `docvarsfrom="filepaths"` is used
#' @param docvarnames character vector of variable names for `docvars`, if
#' `docvarsfrom` is specified. If this argument is not used, default
#' docvar names will be used (`docvar1`, `docvar2`, ...).
#' @param encoding vector: either the encoding of all files, or one encoding
#' for each files
#' @param ignore_missing_files if `FALSE`, then if the file
#' argument doesn't resolve to an existing file, then an error will be thrown.
#' Note that this can happen in a number of ways, including passing a path
#' to a file that does not exist, to an empty archive file, or to a glob
#' pattern that matches no files.
#' @param source used to specify specific formats of some input file types, such
#' as JSON or HTML. Currently supported types are `"twitter"` for JSON and
#' `"nexis"` for HTML.
#' @param cache if `TRUE`, save remote file to a temporary folder. Only used
#' when `file` is a URL.
#' @param verbosity \itemize{
#' \item 0: output errors only
#' \item 1: output errors and warnings (default)
#' \item 2: output a brief summary message
#' \item 3: output detailed file-related messages
#' }
#' @param ... additional arguments passed through to low-level file reading
#' function, such as [file()], [fread()], etc. Useful
#' for specifying an input encoding option, which is specified in the same was
#' as it would be give to [iconv()]. See the Encoding section of
#' [file] for details.
#' @return a data.frame consisting of a columns `doc_id` and `text`
#' that contain a document identifier and the texts respectively, with any
#' additional columns consisting of document-level variables either found
#' in the file containing the texts, or created through the
#' `readtext` call.
#' @export
#' @importFrom utils unzip type.convert
#' @importFrom httr GET write_disk
#' @examples
#' \dontrun{
#' ## get the data directory
#' if (!interactive()) pkgload::load_all()
#' DATA_DIR <- system.file("extdata/", package = "readtext")
#'
#' ## read in some text data
#' # all UDHR files
#' (rt1 <- readtext(paste0(DATA_DIR, "/txt/UDHR/*")))
#'
#' # manifestos with docvars from filenames
#' (rt2 <- readtext(paste0(DATA_DIR, "/txt/EU_manifestos/*.txt"),
#' docvarsfrom = "filenames",
#' docvarnames = c("unit", "context", "year", "language", "party"),
#' encoding = "LATIN1"))
#'
#' # recurse through subdirectories
#' (rt3 <- readtext(paste0(DATA_DIR, "/txt/movie_reviews/*"),
#' docvarsfrom = "filepaths", docvarnames = "sentiment"))
#'
#' ## read in csv data
#' (rt4 <- readtext(paste0(DATA_DIR, "/csv/inaugCorpus.csv")))
#'
#' ## read in tab-separated data
#' (rt5 <- readtext(paste0(DATA_DIR, "/tsv/dailsample.tsv"), text_field = "speech"))
#'
#' ## read in JSON data
#' (rt6 <- readtext(paste0(DATA_DIR, "/json/inaugural_sample.json"), text_field = "texts"))
#'
#' ## read in pdf data
#' # UNHDR
#' (rt7 <- readtext(paste0(DATA_DIR, "/pdf/UDHR/*.pdf"),
#' docvarsfrom = "filenames",
#' docvarnames = c("document", "language")))
#' Encoding(rt7$text)
#'
#' ## read in Word data (.doc)
#' (rt8 <- readtext(paste0(DATA_DIR, "/word/*.doc")))
#' Encoding(rt8$text)
#'
#' ## read in Word data (.docx)
#' (rt9 <- readtext(paste0(DATA_DIR, "/word/*.docx")))
#' Encoding(rt9$text)
#'
#' ## use elements of path and filename as docvars
#' (rt10 <- readtext(paste0(DATA_DIR, "/pdf/UDHR/*.pdf"),
#' docvarsfrom = "filepaths", dvsep = "[/_.]"))
#' }
readtext <- function(file, ignore_missing_files = FALSE, text_field = NULL,
                     docid_field = NULL,
                     docvarsfrom = c("metadata", "filenames", "filepaths"), dvsep = "_",
                     docvarnames = NULL, encoding = NULL, source = NULL, cache = TRUE,
                     verbosity = readtext_options("verbosity"),
                     ...) {

    args <- list(...)

    # Back-compatibility shim: honor the deprecated 'textfield' spelling,
    # with a warning.
    if ("textfield" %in% names(args)) {
        warning("textfield is deprecated; use text_field instead.")
        text_field <- args[["textfield"]]
    }
    # # in case the function was called without attaching the package,
    # # in which case the option is never set
    # if (is.null(verbosity))
    #     verbosity <- 1

    # --- argument validation ---
    if (!verbosity %in% 0:3)
        stop("verbosity must be one of 0, 1, 2, 3.")
    if (!all(is.character(file)))
        stop("file must be a character (specifying file location(s)).")
    if (!is.null(source) && !is.character(source))
        stop("source must be a character.")
    docvarsfrom <- match.arg(docvarsfrom)
    # # just use the first, if both are specified?
    # if (is.missing(docvarsfrom))
    #
    # if (!all(docvarsfrom %in% c( c("metadata", "filenames"))))
    #     stop("illegal docvarsfrom value")

    # --- defaults ---
    # text_field of 1 means "first column" for tabular formats.
    if (is.null(text_field))
        text_field <- 1
    # is.null() already implies length 0, so the length check here is
    # redundant but harmless.
    if (length(encoding) < 2 && is.null(encoding))
        encoding <- getOption("encoding")
    if (is.null(source))
        source <- "auto"

    if (verbosity >= 2)
        message("Reading texts from ", file)

    # Resolve globs, URLs, and archives into a flat vector of local paths.
    # TODO: files need to be imported as they are discovered. Currently
    # list_files() uses a lot of storage space for temporary files when there
    # are a lot of archives.
    files <- list_files(file, ignore_missing_files, FALSE, cache, verbosity)

    # A single encoding is recycled across all files; otherwise one encoding
    # per resolved file is required.
    if (length(encoding) == 1) {
        encoding <- rep(encoding, length(files))
    } else {
        if (length(encoding) != length(files))
            stop("Encoding parameter must be length 1, or as long as the number of files")
    }

    # Read each file with its own encoding; yields one data.frame per file.
    sources <- mapply(function(x, e) {
        get_source(x, text_field = text_field, docid_field = docid_field,
                   encoding = e, source = source, verbosity = verbosity, ...)
    }, files, encoding, SIMPLIFY = FALSE)

    # combine all of the data.frames returned
    result <- data.frame(doc_id = "",
                         data.table::rbindlist(sources, use.names = TRUE, fill = TRUE),
                         stringsAsFactors = FALSE)

    # Disambiguate duplicated document ids by prefixing a unique portion of
    # each file's path.
    # this is in case some smart-alec (like AO) globs different directories
    # for identical filenames
    ids <- lapply(sources, row.names)
    id <- unlist(ids, use.names = FALSE)
    if (any(duplicated(id))) {
        prefix <- rep(basename_unique(files, path_only = TRUE), lengths(ids))
        #if (lengths(prefix) > 1)
        id <- paste(prefix, id, sep = "/")
    }

    # Optionally parse docvars out of the file names / paths and append them.
    if (docvarsfrom %in% c("filepaths", "filenames")) {
        docvar <- get_docvars_filenames(files, dvsep, docvarnames, docvarsfrom == "filepaths", verbosity)
        result <- cbind(result, impute_types(docvar))
    }

    # change rownames to doc_id
    result$doc_id <- id
    rownames(result) <- NULL

    if (verbosity >= 2)
        message(" ... read ", nrow(result), " document", if (nrow(result) == 1) "" else "s.")

    class(result) <- c("readtext", "data.frame")
    result
}
## Dispatch a single file to the reader matching its extension, returning a
## data.frame of texts (plus any docvars) with document names in row names.
get_source <- function(path, text_field, docid_field, replace_specialchar = FALSE, verbosity = 1, ...,
                       # deprecated arguments
                       textfield) {
    ext <- tolower(file_ext(path))

    if (!(ext %in% extensions())) {
        # Unknown extension: fall back to plain-text handling.
        if (verbosity >= 1)
            warning("Unsupported extension ", sQuote(ext), " of file ", path , " treating as plain text.")
        ext <- "txt"
    } else if (dir.exists(path)) {
        # A directory named like a readable file (e.g. "data.csv/"): tell the
        # caller to use a glob expression instead.
        call <- deparse(sys.call(1))
        call <- sub(path, paste0(sub("/$", "", path), "/*"), call, fixed = TRUE)
        stop("File '", path, "' does not exist, but a directory of this name does exist. ",
             "To read all files in a directory, you must pass a glob expression like ", call, ".")
    }

    if (verbosity >= 3)
        message(" ... reading (", ext, ") file: ", path)

    out <- switch(ext,
                  txt = get_txt(path, ...),
                  csv = get_csv(path, text_field, docid_field, sep = ",", ...),
                  tsv = get_csv(path, text_field, docid_field, sep = "\t", ...),
                  tab = get_csv(path, text_field, docid_field, sep = "\t", ...),
                  json = get_json(path, text_field, docid_field, verbosity = verbosity, ...),
                  xml = get_xml(path, text_field, verbosity = verbosity, ...),
                  html = get_html(path, verbosity = verbosity, ...),
                  pdf = get_pdf(path, ...),
                  odt = get_odt(path, ...),
                  docx = get_docx(path, ...),
                  doc = get_doc(path, ...),
                  rtf = get_rtf(path, ...),
                  xls = get_excel(path, text_field, docid_field, ...),
                  xlsx = get_excel(path, text_field, docid_field, ...),
                  ods = get_ods(path, text_field, docid_field, ...)
    )

    # Name documents after the file; when one file yields several documents
    # and no docid_field was given, suffix a running index.
    # TODO: stop using row.names as it errors when duplicated
    n_docs <- nrow(out)
    if (n_docs <= 1) {
        row.names(out) <- basename(path)
    } else if (is.null(docid_field)) {
        row.names(out) <- paste(basename(path), seq_len(n_docs), sep = ".")
    }

    if (replace_specialchar)
        out$text <- replace_charclass(out$text)
    out
}
# Normalize problematic Unicode character classes to plain ASCII equivalents
# (or strip them), so downstream tokenizers see consistent punctuation and
# whitespace. names(mapping) are ICU regex character classes; the values are
# the replacement strings.
replace_charclass <- function (text) {
    mapping <- c(
        "\\p{Dash_Punctuation}"    = "-",
        "\\p{Space_Separator}"     = " ",
        "\\p{Initial_Punctuation}" = "'",
        "\\p{Final_Punctuation}"   = "'",
        "\\p{Private_Use}"         = "",
        "\\p{Unassigned}"          = ""
    )
    for (i in seq_along(mapping)) {
        # Bug fix: the replacement and regex arguments were swapped, which
        # searched for the literal values ("-", "'", and the empty string --
        # an error in stringi) and inserted the class expressions as text.
        text <- stri_replace_all(text, mapping[[i]], regex = names(mapping)[i])
    }
    text
}
|
86175e7a2c1b2fe23d7c2d28404ed529c824712c | 61bdffc601f640e1e377836830c347f0f293c81a | /scripts/Figure_Euler_diagram_of_OTUs_by_site.R | c3527afa33ec6d305a368dd1a3fab6cd79c65a94 | [] | no_license | jooolia/RdRp_454_amplicons_Jericho_and_SOG | a5a8c0a18a30d2e7c89f88e2eecc6f9be4e53dae | ffe120a391c123a1b1347f22e1e9e2e4428fd3a1 | refs/heads/master | 2020-04-06T04:36:02.969783 | 2015-01-05T20:01:12 | 2015-01-05T20:01:12 | 26,024,864 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,993 | r | Figure_Euler_diagram_of_OTUs_by_site.R | ## Author: Julia Gustavsen
## Purpose: Generate Euler diagram from normalized OTU data.
## After comments by reviewers I am making suggested changes and adding in a diagram looking at the number of OTUs in each section.
library(Cairo)
AllOTUs_rarefied_by_lowest_Mid <- read.csv(paste("../results/AllOTUs_rarefied_by_lowest_Mid_", Sys.Date(), ".csv", sep=""), header = TRUE, check.names = FALSE, row.names=1)
rownames(AllOTUs_rarefied_by_lowest_Mid) <- c("Jericho_Summer", "SOG_Station_2", "SOG_Station_1", "Jericho_Fall", "SOG_Station_4")
normalized_OTU_matrix <- as.matrix(AllOTUs_rarefied_by_lowest_Mid)
normalized_OTU_matrix[normalized_OTU_matrix>0] <- 1
## maybe just need to flip the site...
adj <- normalized_OTU_matrix%*%t(normalized_OTU_matrix)
# These data are normalized!
## this is giving the wrong answer!!!
library(venneuler)
## just want the SOG and Jericho separate
Jericho_OTUs <- normalized_OTU_matrix[c("Jericho_Summer","Jericho_Fall"),]
## remove any OTUs that are O
Jericho_OTUs <- as.data.frame(Jericho_OTUs)
Jericho_OTUs <- Jericho_OTUs[apply(Jericho_OTUs[, -1], MARGIN = 1, function(x) any(x > 0)), ]
SOG_OTUs <- normalized_OTU_matrix[c("SOG_Station_2", "SOG_Station_1", "SOG_Station_4"),]
SOG_OTUs <- as.data.frame(SOG_OTUs)
SOG_OTUs <- SOG_OTUs[apply(SOG_OTUs[, -1], MARGIN = 1, function(x) any(x > 0)), ]
flipped_matrix <- t(normalized_OTU_matrix)
flipped_Jericho <- t(Jericho_OTUs)
flipped_SOG <- t(SOG_OTUs)
#melt_flipped <- melt(flipped_matrix)
#melt_original <- melt(normalized_OTU_matrix)
#melt_adj <- melt(adj)
library(limma)
b <- vennCounts(flipped_Jericho)
vennDiagram(b)
b <- vennCounts(flipped_SOG)
vennDiagram(b)
pdf("../figures/Euler_diagram_RdRp_Jericho_shared_OTUs_normalized.pdf", width = 8, height = 11,onefile = FALSE)
v <- venneuler(flipped_Jericho)
plot(v)
dev.off()
pdf("../figures/Euler_diagram_RdRp_SOG_shared_OTUs_normalized.pdf", width = 8, height = 11,onefile = FALSE)
v <- venneuler(flipped_SOG)
plot(v)
dev.off()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.